提交 64096a4a authored 作者: Brandon T. Willard

Apply pyupgrade to tests.scan_module

上级 ba16d2d6
@@ -742,7 +742,7 @@ class TestScan:
         # Call verify_grad to ensure the correctness of the second gradients
         floatX = theano.config.floatX
-        inputs_test_values = [np.random.random((3)).astype(floatX)]
+        inputs_test_values = [np.random.random(3).astype(floatX)]
         utt.verify_grad(get_sum_of_grad, inputs_test_values)

     def test_verify_second_grad_mitsot1(self):
@@ -771,7 +771,7 @@ class TestScan:
         floatX = theano.config.floatX
         inputs_test_values = [
             np.random.random((2, 3)).astype(floatX),
-            np.random.random((3)).astype(floatX),
+            np.random.random(3).astype(floatX),
         ]
         utt.verify_grad(get_sum_of_grad, inputs_test_values)
@@ -3896,7 +3896,7 @@ for{cpu,scan_fn}.2 [id H] ''
         dtype = theano.config.floatX
         seq_value = np.random.random((10, 3)).astype(dtype)
         out_init_value = np.random.random((3, 3)).astype(dtype)
-        non_seq_value = np.random.random((3)).astype(dtype)
+        non_seq_value = np.random.random(3).astype(dtype)
         outputs = fct(seq_value, out_init_value, non_seq_value)
...
@@ -89,7 +89,7 @@ class TestGaussNewton:
         # during certain iterations of CG in the HF algorithm. There,
         # it's in fact `pi + current update proposal`. For simplicity,
         # I just multiply by 2 here.
-        cost_ = theano.clone(cost, replace=dict([(pi, 2 * pi) for pi in params]))
+        cost_ = theano.clone(cost, replace={pi: 2 * pi for pi in params})

         # Compute Gauss-Newton-Matrix times some vector `v` which is `p` in CG,
         # but for simplicity, I just take the parameters vector because it's
@@ -112,7 +112,7 @@ class TestGaussNewton:
         self._run(100, 10, batch_size=1, mode=mode)


-class GaussNewtonMatrix(object):
+class GaussNewtonMatrix:
     def __init__(self, s):
         # `s` is the linear network outputs, i.e. the network output
         # without having applied the activation function
@@ -131,7 +131,7 @@ class GaussNewtonMatrix(object):
         return JHJv


-class TestPushOutScanOutputDot(object):
+class TestPushOutScanOutputDot:
     """
     Test class for the PushOutScanOutput optimizer in the case where the inner
     function of a scan op has an output which is the result of a Dot product
@@ -166,7 +166,7 @@ class TestPushOutScanOutputDot(object):
         # Ensure that the function compiled with the optimization produces
         # the same results as the function compiled without
-        v_value = np.random.random((4)).astype(config.floatX)
+        v_value = np.random.random(4).astype(config.floatX)
         m_value = np.random.random((4, 5)).astype(config.floatX)

         output_opt = f_opt(v_value, m_value)
...
Markdown 格式
0%
您将添加 0 位用户到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论