提交 218ecbca authored 作者: Brandon T. Willard's avatar Brandon T. Willard

Set black target version to Python 3.6

上级 67431872
......@@ -17,7 +17,6 @@ python:
stages:
- lint
# - doc
- test
env:
......@@ -49,11 +48,8 @@ jobs:
- stage: lint
script:
- pip install black
- black --check theano/ setup.py
- flake8 theano/ setup.py
# - &doctest
# stage: doc
# env: DOC=1
- black -t py36 --check theano/ setup.py
- flake8
- &normaltest
stage: test
env: FAST_COMPILE=1 FLOAT32=1 PART="theano --ignore=theano/tensor/nnet --ignore=theano/tensor/signal"
......
......@@ -2,18 +2,8 @@
set -e
if [[ $DOC == "1" ]]; then
# this is a hack to deal with the fact that the docs and flake8 config are all set up
# for old versions
conda create --yes -q -n pyenv python=3.6 numpy=1.13.1
conda activate pyenv
conda install --yes -q mkl numpy=1.13.1 scipy=0.19.1 pip flake8=3.5 six=1.11.0 pep8=1.7.1 pyflakes=1.6.0 mkl-service graphviz pytest
python -m pip install pydot-ng
else
conda create --yes -q -n pyenv python=$TRAVIS_PYTHON_VERSION
conda activate pyenv
conda install --yes -q mkl numpy scipy pip flake8 six pep8 pyflakes sphinx mkl-service graphviz pytest # libgfortran
python -m pip install -q pydot-ng sphinx_rtd_theme
fi
python -m pip install --no-deps --upgrade -e .
......@@ -262,7 +262,7 @@ class OpFromGraph(gof.Op):
rop_overrides="default",
connection_pattern=None,
name=None,
**kwargs
**kwargs,
):
if not isinstance(outputs, list):
raise TypeError("outputs must be list, got %s" % type(outputs))
......
......@@ -283,7 +283,7 @@ def shared(value, name=None, strict=False, allow_downcast=None, **kwargs):
name=name,
strict=strict,
allow_downcast=allow_downcast,
**kwargs
**kwargs,
)
utils.add_tag_trace(var)
return var
......
......@@ -15,7 +15,7 @@ class NNet(object):
n_hidden=1,
n_output=1,
lr=1e-3,
**kw
**kw,
):
super(NNet, self).__init__(**kw)
......
......@@ -2382,7 +2382,7 @@ def out2in(*local_opts, **kwargs):
local_opts,
order="out_to_in",
failure_callback=TopoOptimizer.warn_inplace,
**kwargs
**kwargs,
)
if name:
ret.__name__ = name
......@@ -2405,7 +2405,7 @@ def in2out(*local_opts, **kwargs):
local_opts,
order="in_to_out",
failure_callback=TopoOptimizer.warn_inplace,
**kwargs
**kwargs,
)
if name:
ret.__name__ = name
......
......@@ -112,7 +112,7 @@ def init_dev(dev, name=None, preallocate=None):
dev,
sched=config.gpuarray.sched,
single_stream=config.gpuarray.single_stream,
**args
**args,
)
os.environ["THEANO_GPU_IS_ALREADY_ACTIVE"] = "Yes"
theano_gpu_is_already_active = True
......
......@@ -257,7 +257,7 @@ def register_inplace(*tags, **kwargs):
"fast_run",
"inplace",
"gpuarray",
*tags
*tags,
)
return local_opt
......@@ -1667,7 +1667,7 @@ def local_gpu_crossentropycategorical1hotgrad(op, context_name, inputs, outputs)
idx0 = theano.tensor.arange(shape_i(coding, 0))
z = GpuAlloc(context_name, memset_0=True)(
as_gpuarray_variable(np.zeros((), dtype=coding.dtype), context_name),
*[shape_i(coding, i) for i in xrange(coding.ndim)]
*[shape_i(coding, i) for i in xrange(coding.ndim)],
)
gcoding = tensor.set_subtensor(
z[idx0, one_of_n], gpu_neg(gpu_true_div(gy, coding[idx0, one_of_n]))
......
......@@ -145,7 +145,7 @@ class IfElse(Op):
new_outs = new_ifelse(
node.inputs[0],
*(new_ts_inputs + new_fs_inputs),
**dict(return_list=True)
**dict(return_list=True),
)
else:
new_outs = []
......
......@@ -683,14 +683,10 @@ if use_ascii:
)
else:
special = dict(middle_dot=u"\u00B7", big_sigma=u"\u03A3")
special = dict(middle_dot="\u00B7", big_sigma="\u03A3")
greek = dict(
alpha=u"\u03B1",
beta=u"\u03B2",
gamma=u"\u03B3",
delta=u"\u03B4",
epsilon=u"\u03B5",
alpha="\u03B1", beta="\u03B2", gamma="\u03B3", delta="\u03B4", epsilon="\u03B5",
)
......
......@@ -933,7 +933,7 @@ class MRG_RandomStreams(object):
node_rstate,
*mrg_uniform.new(node_rstate, ndim, dtype, size),
size=size,
nstreams=orig_nstreams
nstreams=orig_nstreams,
)
# Add a reference to distinguish from other shared variables
node_rstate.tag.is_rng = True
......@@ -967,7 +967,7 @@ class MRG_RandomStreams(object):
ndim=None,
dtype="int64",
nstreams=None,
**kwargs
**kwargs,
):
# TODO : need description for parameter and return
"""
......@@ -1030,7 +1030,7 @@ class MRG_RandomStreams(object):
ndim=None,
dtype="int64",
nstreams=None,
**kwargs
**kwargs,
):
"""
Sample `size` times from a multinomial distribution defined by
......@@ -1114,7 +1114,7 @@ class MRG_RandomStreams(object):
ndim=None,
dtype="int64",
nstreams=None,
**kwargs
**kwargs,
):
warnings.warn(
"MRG_RandomStreams.multinomial_wo_replacement() is "
......@@ -1130,7 +1130,7 @@ class MRG_RandomStreams(object):
dtype=dtype,
nstreams=nstreams,
ndim=ndim,
**kwargs
**kwargs,
)
def normal(
......@@ -1142,7 +1142,7 @@ class MRG_RandomStreams(object):
dtype=None,
nstreams=None,
truncate=False,
**kwargs
**kwargs,
):
"""
Sample a tensor of values from a normal distribution.
......@@ -1197,7 +1197,7 @@ class MRG_RandomStreams(object):
ndim=1,
dtype=dtype,
nstreams=nstreams,
**kwargs
**kwargs,
)
# box-muller transform
......@@ -1228,7 +1228,7 @@ class MRG_RandomStreams(object):
ndim=1,
dtype=dtype,
nstreams=nstreams,
**kwargs
**kwargs,
)
r_fix = tensor.sqrt(-2.0 * tensor.log(u_fix))
z0_fixed = r_fix[: to_fix0.size] * cos_theta[to_fix0]
......@@ -1292,7 +1292,7 @@ class MRG_RandomStreams(object):
ndim=ndim,
dtype=dtype,
nstreams=nstreams,
**kwargs
**kwargs,
)
......
......@@ -4371,7 +4371,7 @@ class Composite(ScalarOp):
izip(("i%i" % i for i in xrange(len(inames))), inames),
izip(("o%i" % i for i in xrange(len(onames))), onames),
),
**sub
**sub,
)
d["nodename"] = nodename
if "id" not in sub:
......
......@@ -651,7 +651,7 @@ class PushOutSeqScan(gof.Optimizer):
# Do not call make_node for test_value
nw_node = nwScan(
*(node.inputs[:1] + nw_outer + node.inputs[1:]),
**dict(return_list=True)
**dict(return_list=True),
)[0].owner
fgraph.replace_all_validate_remove(
......
......@@ -1335,7 +1335,7 @@ class scan_args(object):
n_shared_outs=len(self.outer_in_shared),
n_mit_mot_outs=sum(len(s) for s in self.mit_mot_out_slices),
mit_mot_out_slices=self.mit_mot_out_slices,
**self.other_info
**self.other_info,
)
)
......
......@@ -7194,7 +7194,7 @@ class Choose(Op):
if isinstance(choice.type, TensorType):
choice = choice.dimshuffle(
0,
*(("x",) * (out_ndim - choice_ndim) + tuple(range(1, choice.ndim)))
*(("x",) * (out_ndim - choice_ndim) + tuple(range(1, choice.ndim))),
)
choice_ndim = choice.ndim - 1
choice_bcast = choice.broadcastable[1:]
......
......@@ -73,7 +73,7 @@ def conv2d(
filter_dilation=(1, 1),
num_groups=1,
unshared=False,
**kwargs
**kwargs,
):
"""
This function will build the symbolic graph for convolving a mini-batch of a
......
......@@ -48,7 +48,7 @@ def conv2d(
filter_shape=None,
border_mode="valid",
subsample=(1, 1),
**kargs
**kargs,
):
"""
Deprecated, old conv2d interface.
......@@ -174,7 +174,7 @@ def conv2d(
kshp=kshp,
nkern=nkern,
bsize=bsize,
**kargs
**kargs,
)
return op(input, filters)
......
......@@ -884,7 +884,7 @@ class BaseTestConv2d(BaseTestConv):
conv_fn=conv.conv2d,
conv_op=conv.AbstractConv2d,
ref=conv2d_corr,
**kwargs
**kwargs,
):
super().run_fwd(
inputs_shape=inputs_shape,
......@@ -892,7 +892,7 @@ class BaseTestConv2d(BaseTestConv):
conv_fn=conv_fn,
conv_op=conv_op,
ref=ref,
**kwargs
**kwargs,
)
def run_gradweight(
......@@ -902,7 +902,7 @@ class BaseTestConv2d(BaseTestConv):
output_shape,
gradWeights_fn=conv.AbstractConv2d_gradWeights,
ref=conv2d_corr_gw,
**kwargs
**kwargs,
):
super().run_gradweight(
inputs_shape=inputs_shape,
......@@ -910,7 +910,7 @@ class BaseTestConv2d(BaseTestConv):
output_shape=output_shape,
gradWeights_fn=gradWeights_fn,
ref=ref,
**kwargs
**kwargs,
)
def run_gradinput(
......@@ -920,7 +920,7 @@ class BaseTestConv2d(BaseTestConv):
output_shape,
gradInputs_fn=conv.AbstractConv2d_gradInputs,
ref=conv2d_corr_gi,
**kwargs
**kwargs,
):
super().run_gradinput(
inputs_shape=inputs_shape,
......@@ -928,7 +928,7 @@ class BaseTestConv2d(BaseTestConv):
output_shape=output_shape,
gradInputs_fn=gradInputs_fn,
ref=ref,
**kwargs
**kwargs,
)
......@@ -1244,7 +1244,7 @@ class BaseTestConv3d(BaseTestConv):
conv_fn=conv.conv3d,
conv_op=conv.AbstractConv3d,
ref=conv3d_corr,
**kwargs
**kwargs,
):
super().run_fwd(
inputs_shape=inputs_shape,
......@@ -1252,7 +1252,7 @@ class BaseTestConv3d(BaseTestConv):
conv_fn=conv_fn,
conv_op=conv_op,
ref=ref,
**kwargs
**kwargs,
)
def run_gradweight(
......@@ -1262,7 +1262,7 @@ class BaseTestConv3d(BaseTestConv):
output_shape,
gradWeights_fn=conv.AbstractConv3d_gradWeights,
ref=conv3d_corr_gw,
**kwargs
**kwargs,
):
super().run_gradweight(
inputs_shape=inputs_shape,
......@@ -1270,7 +1270,7 @@ class BaseTestConv3d(BaseTestConv):
output_shape=output_shape,
gradWeights_fn=gradWeights_fn,
ref=ref,
**kwargs
**kwargs,
)
def run_gradinput(
......@@ -1280,7 +1280,7 @@ class BaseTestConv3d(BaseTestConv):
output_shape,
gradInputs_fn=conv.AbstractConv3d_gradInputs,
ref=conv3d_corr_gi,
**kwargs
**kwargs,
):
super().run_gradinput(
inputs_shape=inputs_shape,
......@@ -1288,7 +1288,7 @@ class BaseTestConv3d(BaseTestConv):
output_shape=output_shape,
gradInputs_fn=gradInputs_fn,
ref=ref,
**kwargs
**kwargs,
)
......
......@@ -152,7 +152,7 @@ def broadcast_like(value, template, fgraph, dtype=None):
i
for i in xrange(rval.ndim)
if rval.broadcastable[i] and not template.broadcastable[i]
]
],
)
assert rval.type.dtype == dtype
......@@ -2760,7 +2760,7 @@ def local_upcast_elemwise_constant_inputs(node):
new_inputs.append(
T.alloc(
T.cast(cval_i, output_dtype),
*[shape_i(d)(i) for d in xrange(i.ndim)]
*[shape_i(d)(i) for d in xrange(i.ndim)],
)
)
# print >> sys.stderr, "AAA",
......@@ -6223,7 +6223,7 @@ def local_opt_alloc(node):
shapes[i]
for i in xrange(len(shapes))
if i not in node.op.axis
]
],
)
]
except NotScalarConstantError:
......
......@@ -25,7 +25,7 @@ def conv2d(
filter_shape=None,
border_mode="valid",
subsample=(1, 1),
**kargs
**kargs,
):
"""
signal.conv.conv2d performs a basic 2D convolution of the input with the
......@@ -105,7 +105,7 @@ def conv2d(
kshp=kshp,
nkern=nkern,
bsize=bsize,
**kargs
**kargs,
)
output = op(input4D, filters4D)
......
......@@ -1524,7 +1524,7 @@ class IncSubtensor(Op):
sub=sub,
idx_list=self.idx_list,
view_ndim=view_ndim,
**helper_args
**helper_args,
)
# Make a view on the output, as we will write into it.
......
......@@ -872,7 +872,7 @@ TestAddBroadcast = makeBroadcastTester(
randuint32(2, 3),
),
four_inputs_broadcast=(rand(2, 3), rand(1, 3), rand(2, 1), rand(1, 1)),
**_good_broadcast_binary_normal
**_good_broadcast_binary_normal,
),
bad_build=_bad_build_broadcast_binary_normal,
bad_runtime=_bad_runtime_broadcast_binary_normal,
......@@ -986,14 +986,14 @@ TestMulBroadcast = makeBroadcastTester(
good=dict(
three_inputs_same_shapes=(rand(2, 3), rand(2, 3), rand(2, 3)),
four_inputs_broadcast=(rand(2, 3), rand(1, 3), rand(2, 1), rand(1, 1)),
**_good_broadcast_binary_normal
**_good_broadcast_binary_normal,
),
bad_build=_bad_build_broadcast_binary_normal,
bad_runtime=_bad_runtime_broadcast_binary_normal,
grad=dict(
three_inputs_same_shapes=(rand(2, 3), rand(2, 3), rand(2, 3)),
four_inputs_broadcast=(rand(2, 3), rand(1, 3), rand(2, 1), rand(1, 1)),
**_grad_broadcast_binary_normal
**_grad_broadcast_binary_normal,
),
)
......
......@@ -646,7 +646,7 @@ class _tensor_py_operators(object):
self,
*theano.tensor.subtensor.Subtensor.collapse(
args, lambda entry: isinstance(entry, Variable)
)
),
)
def take(self, indices, axis=None, mode="raise"):
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论