Commit 18930408 authored by Frederic

remove not used var.

Parent ec81225c
@@ -467,7 +467,6 @@ class test_Prod(unittest.TestCase):
# (and special cases: 1 zero in the row, more than 1 zero in the row)
x_val = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                      dtype='float32')
x = theano.tensor.dmatrix()
# now with verify_grad
unittest_tools.verify_grad(Prod(axis=1), [x_val], mode=self.mode)
@@ -672,8 +671,8 @@ class T_sum_dtype(unittest.TestCase):
if "complex" in input_dtype:
    continue
# Check that we can take the gradient
grad_var = tensor.grad(sum_var.sum(), x, tensor.grad(sum_var.sum(), x,
disconnected_inputs='ignore') disconnected_inputs='ignore')
idx += 1
def test_sum_custom_acc_dtype(self):
@@ -707,8 +706,8 @@ class T_sum_dtype(unittest.TestCase):
if "complex" in input_dtype:
    continue
# Check that we can take the gradient
grad_var = tensor.grad(sum_var.sum(), x, tensor.grad(sum_var.sum(), x,
disconnected_inputs='ignore') disconnected_inputs='ignore')
else:
    self.assertRaises(TypeError,
                      x.sum, acc_dtype=acc_dtype, axis=axis)
@@ -766,8 +765,8 @@ class T_mean_dtype(unittest.TestCase):
if "complex" in mean_var.dtype:
    continue
try:
grad_var = tensor.grad(mean_var.sum(), x, tensor.grad(mean_var.sum(), x,
disconnected_inputs='ignore') disconnected_inputs='ignore')
except NotImplementedError:
    # TrueDiv does not seem to have a gradient when
    # the numerator is complex.
@@ -843,8 +842,8 @@ class T_prod_dtype(unittest.TestCase):
if "complex" in output_dtype or "complex" in input_dtype:
    continue
# Check that we can take the gradient
grad_var = tensor.grad(prod_var.sum(), x, tensor.grad(prod_var.sum(), x,
disconnected_inputs='ignore') disconnected_inputs='ignore')
idx += 1
def test_prod_custom_acc_dtype(self):
@@ -871,8 +870,8 @@ class T_prod_dtype(unittest.TestCase):
if "complex" in acc_dtype:
    continue
# Check that we can take the gradient
grad_var = tensor.grad(prod_var.sum(), x, tensor.grad(prod_var.sum(), x,
disconnected_inputs='ignore') disconnected_inputs='ignore')
else:
    self.assertRaises(TypeError,
                      x.prod, acc_dtype=acc_dtype, axis=axis)
...
Markdown format
0%
You added 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment