提交 d6477d58 authored 作者: Brandon T. Willard's avatar Brandon T. Willard

Apply pyupgrade to tests.tensor

上级 db7cf36f
......@@ -97,7 +97,7 @@ class TestConv2D(utt.InferShapeTester):
return rval
output = sym_conv2d(input, filters)
output.name = "conv2d(%s,%s)" % (input.name, filters.name)
output.name = "conv2d({},{})".format(input.name, filters.name)
theano_conv = theano.function([input, filters], output, mode=self.mode)
# initialize input and compute result
......
import numpy as np
import pytest
from six import integer_types
import theano
import theano.tensor as tt
......@@ -75,7 +74,7 @@ class TestCorr2D(utt.InferShapeTester):
return rval
output = sym_CorrMM(input, filters)
output.name = "CorrMM()(%s,%s)" % (input.name, filters.name)
output.name = "CorrMM()({},{})".format(input.name, filters.name)
theano_corr = theano.function([input, filters], output, mode=self.mode)
# initialize input and compute result
......@@ -110,7 +109,7 @@ class TestCorr2D(utt.InferShapeTester):
padHW = np.floor(dil_fil_shape2d / 2).astype("int32")
elif isinstance(border_mode, tuple):
padHW = np.array(border_mode)
elif isinstance(border_mode, integer_types):
elif isinstance(border_mode, int):
padHW = np.array([border_mode, border_mode])
else:
raise NotImplementedError("Unsupported border_mode {}".format(border_mode))
......
import numpy as np
import pytest
from six import integer_types
import theano
import theano.tensor as tt
......@@ -73,7 +72,7 @@ class TestCorr3D(utt.InferShapeTester):
return rval
output = sym_Corr3dMM(input, filters)
output.name = "Corr3dMM()(%s,%s)" % (input.name, filters.name)
output.name = "Corr3dMM()({},{})".format(input.name, filters.name)
theano_corr = theano.function([input, filters], output, mode=self.mode)
# initialize input and compute result
......@@ -112,7 +111,7 @@ class TestCorr3D(utt.InferShapeTester):
padHWD = np.floor(dil_fil_shape3d / 2).astype("int32")
elif isinstance(border_mode, tuple):
padHWD = np.array(border_mode)
elif isinstance(border_mode, integer_types):
elif isinstance(border_mode, int):
padHWD = np.array([border_mode, border_mode, border_mode])
else:
raise NotImplementedError("Unsupported border_mode {}".format(border_mode))
......
......@@ -465,7 +465,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
)
assert (
numpy_output_val.shape == outputshp
), "outshape is %s, calculated shape is %s" % (
), "outshape is {}, calculated shape is {}".format(
outputshp,
numpy_output_val.shape,
)
......@@ -516,7 +516,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
)
assert (
numpy_output_val.shape == outputshp
), "outshape is %s, calculated shape is %s" % (
), "outshape is {}, calculated shape is {}".format(
outputshp,
numpy_output_val.shape,
)
......
......@@ -2546,8 +2546,8 @@ COMPLEX_DTYPES = ALL_DTYPES[-2:]
def multi_dtype_checks(shape1, shape2, dtypes=ALL_DTYPES, nameprefix=""):
for dtype1, dtype2 in itertools.combinations(dtypes, 2):
name1 = "%s_%s_%s" % (nameprefix, dtype1, dtype2)
name2 = "%s_%s_%s" % (nameprefix, dtype2, dtype1)
name1 = "{}_{}_{}".format(nameprefix, dtype1, dtype2)
name2 = "{}_{}_{}".format(nameprefix, dtype2, dtype1)
obj1 = rand_of_dtype(shape1, dtype1)
obj2 = rand_of_dtype(shape2, dtype2)
yield (name1, (obj1, obj2))
......@@ -2556,8 +2556,8 @@ def multi_dtype_checks(shape1, shape2, dtypes=ALL_DTYPES, nameprefix=""):
def multi_dtype_cast_checks(shape, dtypes=ALL_DTYPES, nameprefix=""):
for dtype1, dtype2 in itertools.combinations(dtypes, 2):
name1 = "%s_%s_%s" % (nameprefix, dtype1, dtype2)
name2 = "%s_%s_%s" % (nameprefix, dtype2, dtype1)
name1 = "{}_{}_{}".format(nameprefix, dtype1, dtype2)
name2 = "{}_{}_{}".format(nameprefix, dtype2, dtype1)
obj1 = rand_of_dtype(shape, dtype1)
obj2 = rand_of_dtype(shape, dtype2)
yield (name1, (obj1, dtype2))
......@@ -3176,7 +3176,7 @@ class TestCast:
# Casts from foo to foo
[
(
"%s_%s" % (rand_of_dtype((2,), dtype), dtype),
"{}_{}".format(rand_of_dtype((2,), dtype), dtype),
(rand_of_dtype((2,), dtype), dtype),
)
for dtype in ALL_DTYPES
......@@ -3499,7 +3499,7 @@ class TestShape:
assert (eval_outputs([s]) == [5, 3]).all()
def test_basic1(self):
s = shape(np.ones((2)))
s = shape(np.ones(2))
assert (eval_outputs([s]) == [2]).all()
def test_basic2(self):
......@@ -5393,7 +5393,7 @@ class TestMatinv:
# Here, as_tensor_variable actually uses the data allocated by np.
diff = ab - as_tensor_variable(np.ones((dim, dim), dtype=config.floatX))
# Sum of squared errors
ssdiff = sum((diff ** 2.0))
ssdiff = sum(diff ** 2.0)
g_b = grad(ssdiff, b)
......@@ -6144,7 +6144,7 @@ def test_is_flat():
# given outdim
# Constant variable
assert tt.is_flat(tt.as_tensor_variable(np.zeros((10))))
assert tt.is_flat(tt.as_tensor_variable(np.zeros(10)))
assert tt.is_flat(tt.as_tensor_variable(np.zeros((10, 10, 10))), ndim=3)
assert not tt.is_flat(tt.as_tensor_variable(np.zeros((10, 10, 10))))
......
......@@ -378,7 +378,7 @@ class TestGemm:
)
class TestGemmNoFlags(object):
class TestGemmNoFlags:
gemm = gemm_no_inplace
M = 4
N = 5
......@@ -1467,7 +1467,7 @@ def matrixmultiply(a, b):
return c
class BaseGemv(object):
class BaseGemv:
mode = mode_blas_opt # can be overridden with self.mode
shared = staticmethod(theano.shared)
......
......@@ -321,7 +321,7 @@ class TestCGemvFloat64(BaseGemv, OptimizationTestMixin):
skip_if_blas_ldflags_empty()
class TestCGemvNoFlags(object):
class TestCGemvNoFlags:
mode = mode_blas_opt
gemv = CGemv(inplace=False)
M = 4
......
......@@ -514,7 +514,7 @@ class TestCAReduce(unittest_tools.InferShapeTester):
f = theano.function([x], e.shape, mode=mode)
if not (
scalar_op in [scalar.maximum, scalar.minimum]
and ((xsh == () or np.prod(xsh) == 0))
and (xsh == () or np.prod(xsh) == 0)
):
try:
assert all(f(xv) == zv.shape)
......
......@@ -12,7 +12,7 @@ class TestFourier(utt.InferShapeTester):
rng = np.random.RandomState(43)
def setup_method(self):
super(TestFourier, self).setup_method()
super().setup_method()
self.op_class = Fourier
self.op = fft
......
import numpy as np
import pytest
from six import integer_types
import theano
from theano import function, tensor
......@@ -12,7 +11,7 @@ class TestKeepDims:
def makeKeepDims_local(self, x, y, axis):
if axis is None:
newaxis = list(range(x.ndim))
elif isinstance(axis, integer_types):
elif isinstance(axis, int):
if axis < 0:
newaxis = [axis + x.type.ndim]
else:
......
......@@ -61,10 +61,10 @@ def test_bug_2009_07_17_borrowed_output():
z = g(a, b, c)
z_backup = copy.copy(z)
id_z = id(z)
print(("Output z after first call: %s" % (z,)))
print("Output z after first call: {}".format(z))
a[0, 0] = 1
id_other = id(g(a, b, c))
print(("Output z after second call: %s" % (z,)))
print("Output z after second call: {}".format(z))
# Ensure that calling the function again returns a pointer towards a new
# array.
assert id_z != id_other
......
......@@ -63,7 +63,7 @@ def gen_data():
return rval
class LogisticRegression(object):
class LogisticRegression:
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
......@@ -131,7 +131,7 @@ class LogisticRegression(object):
return tt.log(self.p_y_given_x[tt.arange(y.shape[0]), y])
class HiddenLayer(object):
class HiddenLayer:
def __init__(self, rng, input, n_in, n_out, activation=tt.tanh, name_prefix=""):
"""
Typical hidden layer of a MLP: units are fully-connected and have
......@@ -179,7 +179,7 @@ class HiddenLayer(object):
self.params = [self.W]
class MLP(object):
class MLP:
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
......
......@@ -54,7 +54,7 @@ def test_mpi_roundtrip():
env = os.environ.copy()
flags = env.get("THEANO_FLAGS", "")
keep_flags = ",".join(
(f for f in flags.split(",") if not f.startswith("init_gpu_device"))
f for f in flags.split(",") if not f.startswith("init_gpu_device")
)
env["THEANO_FLAGS"] = keep_flags
p = subprocess.Popen(
......
......@@ -693,10 +693,10 @@ def makeSharedTester(
internal_type_=np.ndarray,
check_internal_type_=lambda a: isinstance(a, np.ndarray),
theano_fct_=lambda a: a * 2,
ref_fct_=lambda a: np.asarray((a * 2)),
ref_fct_=lambda a: np.asarray(a * 2),
cast_value_=np.asarray,
)
class TestSharedOptions(object):
class TestSharedOptions:
pass
......
......@@ -222,7 +222,7 @@ class TestSubtensor(utt.OptimizationTestMixin):
@change_flags(compute_test_value="off")
def test_err_bounds1(self):
n = self.shared((np.ones((2, 3), dtype=self.dtype) * 5))
n = self.shared(np.ones((2, 3), dtype=self.dtype) * 5)
t = n[4:5, 3]
assert isinstance(t.owner.op, Subtensor)
old_stderr = sys.stderr
......@@ -511,7 +511,7 @@ class TestSubtensor(utt.OptimizationTestMixin):
with pytest.raises(TypeError):
test_array.__getitem__((True, False))
with pytest.raises(TypeError):
test_array.__getitem__(([True, False]))
test_array.__getitem__([True, False])
with pytest.raises(TypeError):
test_array.__getitem__(([0, 1], [0, False]))
with pytest.raises(TypeError):
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论