提交 ff13ebef authored 作者: Pascal Lamblin's avatar Pascal Lamblin

Remove unused variables from function's input

上级 471aa985
......@@ -872,15 +872,12 @@ class Test_aliasing_rules(unittest.TestCase):
orig_b = numpy.zeros((2,2))-.5
A = self.shared(orig_a)
B = self.shared(orig_b)
C = tensor.dmatrix()
z = numpy.zeros((2,2))
data_of_a = data_of(A)
data_of_b = data_of(B)
f = pfunc([C], [], updates=[(A,B),(B,A)])
f(z)
f = pfunc([], [], updates=[(A,B),(B,A)])
f()
# correctness
assert numpy.all(data_of(A) == -.5)
assert numpy.all(data_of(B) == +.5)
......@@ -902,16 +899,13 @@ class Test_aliasing_rules(unittest.TestCase):
orig_b = numpy.zeros((2,2))-.5
A = self.shared(orig_a)
B = self.shared(orig_b)
C = tensor.dmatrix()
z = numpy.zeros((2,2))
data_of_a = data_of(A)
data_of_b = data_of(B)
f = pfunc([C], [], updates=[(A,B[:,::-1]),(B,A.T)])
f = pfunc([], [], updates=[(A,B[:,::-1]),(B,A.T)])
theano.printing.debugprint(f)
f(z)
f()
# correctness (doesn't actually test the view...)
assert numpy.all(data_of(A) == -.5)
assert numpy.all(data_of(B) == +.5)
......
......@@ -57,9 +57,9 @@ def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
y_pred = T.argmax(p_y_given_x, axis=-1)
loss = -T.mean(T.log(p_y_given_x)[T.arange(y.shape[0]), y])
dW = T.grad(loss, dot_result)
classify = theano.function( inputs = [x,y,b,dot_result], outputs = [loss,y_pred,dW],
classify = theano.function( inputs = [y,b,dot_result], outputs = [loss,y_pred,dW],
mode = mode_without_gpu)
classify_gpu = theano.function( inputs = [x,y,b,dot_result], outputs = [loss,y_pred,dW],
classify_gpu = theano.function( inputs = [y,b,dot_result], outputs = [loss,y_pred,dW],
mode = mode_with_gpu)
#theano.printing.debugprint(classify)
#theano.printing.debugprint(classify_gpu)
......@@ -67,8 +67,8 @@ def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
assert any([isinstance(node.op,T.nnet.CrossentropySoftmaxArgmax1HotWithBias) for node in classify.maker.env.toposort()])
assert any([isinstance(node.op,cuda.nnet.GpuCrossentropySoftmaxArgmax1HotWithBias) for node in classify_gpu.maker.env.toposort()])
out=classify(xx,yy,b_values,dot_value)
gout=classify_gpu(xx,yy,b_values,dot_value)
out=classify(yy,b_values,dot_value)
gout=classify_gpu(yy,b_values,dot_value)
assert len(out)==len(gout)==3
assert numpy.allclose(out[0],gout[0])
......
......@@ -165,7 +165,7 @@ def test_huge_elemwise_fusion():
"""
shape = (2,3,4,5,6)
ttype = tensor.tensor(dtype='float32',broadcastable=(False,)*len(shape))
vars = [tensor.tanh(ttype) for x in range(10)]
vars = [tensor.tanh(ttype) for x in range(7)]
f = pfunc(vars, [vars[0]-vars[1]-vars[2]-vars[3]-vars[4]-vars[5]-vars[6]], mode=mode_with_gpu)
topo = f.maker.env.toposort()
#theano.printing.debugprint(f)
......@@ -177,14 +177,14 @@ def test_huge_elemwise_fusion():
assert isinstance(topo[8].op.scalar_op,theano.scalar.basic.Composite)
#let debugmode catch errors
gen = lambda : theano._asarray(numpy.random.rand(*shape), dtype='float32')
f(gen(),gen(),gen(),gen(),gen(),gen(),gen(),gen(),gen(),gen())
f(gen(),gen(),gen(),gen(),gen(),gen(),gen())
# Test the case where we can't put the computation on the gpu! their is too many
# dimensions to the input to have 2 inputs to the op!
shape = (1,2,3,4,5,6,7,2,2,3,2,1,2,2,2,)
ttype = tensor.tensor(dtype='float32',broadcastable=(False,)*len(shape))
vars = [tensor.tanh(ttype) for x in range(10)]
vars = [tensor.tanh(ttype) for x in range(7)]
f = pfunc(vars, [vars[0]-vars[1]-vars[2]-vars[3]-vars[4]-vars[5]-vars[6]], mode=mode_with_gpu)
topo = f.maker.env.toposort()
#theano.printing.debugprint(f)
......@@ -193,7 +193,7 @@ def test_huge_elemwise_fusion():
assert sum([isinstance(node.op, tensor.Elemwise) for node in topo]) == 1
#let debugmode catch errors
gen = lambda: theano._asarray(numpy.random.rand(*shape), dtype='float32')
f(gen(), gen(), gen(), gen(), gen(), gen(), gen(), gen(), gen(), gen())
f(gen(), gen(), gen(), gen(), gen(), gen(), gen())
def gen(shape):
    # Helper: build a random float32 array of the given shape.
    # numpy.random.rand draws uniformly from [0, 1); _asarray casts to float32
    # (presumably so the data matches the GPU-friendly dtype used by these
    # tests — confirm against the surrounding test module).
    return theano._asarray(numpy.random.rand(*shape), dtype='float32')
......
......@@ -50,7 +50,7 @@ def test_givens():
data = numpy.float32([1,2,3,4])
x = f32sc(data)
y = x**2
f = theano.function([x], y, givens={x:x+1})
f = theano.function([], y, givens={x:x+1})
class T_updates(unittest.TestCase):
# Test that you can use a TensorType expression to update a
......
......@@ -2222,7 +2222,7 @@ class T_Scan(unittest.TestCase):
sx, upx = theano.scan(sum, sequences=[x])
sy, upy = theano.scan(sum, sequences=[x])
f = theano.function([x, y], [sx, sy], mode=mode_with_opt)
f = theano.function([x], [sx, sy], mode=mode_with_opt)
topo = f.maker.env.toposort()
scans = filter(lambda n:
isinstance(n.op, theano.scan_module.scan_op.Scan), topo)
......@@ -2231,7 +2231,7 @@ class T_Scan(unittest.TestCase):
sx, upx = theano.scan(sum, sequences=[x])
sy, upy = theano.scan(sum, sequences=[x], mode='FAST_COMPILE')
f = theano.function([x, y], [sx, sy],
f = theano.function([x], [sx, sy],
mode=mode_with_opt)
topo = f.maker.env.toposort()
scans = filter(lambda n:
......@@ -2241,7 +2241,7 @@ class T_Scan(unittest.TestCase):
sx, upx = theano.scan(sum, sequences=[x])
sy, upy = theano.scan(sum, sequences=[x], truncate_gradient=1)
f = theano.function([x, y], [sx, sy], mode=mode_with_opt)
f = theano.function([x], [sx, sy], mode=mode_with_opt)
topo = f.maker.env.toposort()
scans = filter(lambda n:
isinstance(n.op, theano.scan_module.scan_op.Scan), topo)
......
......@@ -3553,22 +3553,16 @@ class T_divimpl(unittest.TestCase):
f = fscalar()
c = cscalar()
assert numpy.allclose(function([i, ii, d, f, c], i/d)(5, 3, 7.0, 11.0, numpy.complex(5,3)),
(5.0/7.0))
assert numpy.allclose(function([i, ii, d, f, c], d/i)(5, 3, 7.0, 11.0, numpy.complex(5,3)),
(7.0/5.0))
assert numpy.allclose(function([i, ii, d, f, c], i/f)(5, 3, 7.0, 11.0, numpy.complex(5,3)),
(5.0/11.0))
assert numpy.allclose(function([i, ii, d, f, c], f/i)(5, 3, 7.0, 11.0, numpy.complex(5,3)),
(11.0/5.0))
assert numpy.allclose(function([i, ii, d, f, c], i//ii)(5, 3, 7.0, 11.0, numpy.complex(5,3)),
(5/3))
assert numpy.allclose(function([i, ii, d, f, c], ii//i)(5, 3, 7.0, 11.0, numpy.complex(5,3)),
(3/5))
assert numpy.allclose(function([i, ii, d, f, c], true_div(i,ii))(5, 3, 7.0, 11.0, numpy.complex(5,3)),
(5./3.))
assert numpy.allclose(function([i, ii, d, f, c], true_div(ii,i))(5, 3, 7.0, 11.0, numpy.complex(5,3)),
(3./5.))
assert numpy.allclose(function([i, d], i / d)(5, 7.0), (5.0 / 7.0))
assert numpy.allclose(function([i, d], d / i)(5, 7.0), (7.0 / 5.0))
assert numpy.allclose(function([i, f], i / f)(5, 11.0), (5.0 / 11.0))
assert numpy.allclose(function([i, f], f / i)(5, 11.0), (11.0 / 5.0))
assert numpy.allclose(function([i, ii], i // ii)(5, 3), (5 / 3))
assert numpy.allclose(function([i, ii], ii // i)(5, 3), (3 / 5))
assert numpy.allclose(function([i, ii], true_div(i, ii))(5, 3),
(5. / 3.))
assert numpy.allclose(function([i, ii], true_div(ii, i))(5, 3),
(3. / 5.))
class T_mean(unittest.TestCase):
......
......@@ -660,7 +660,7 @@ def test_inplace0():
X,Y,Z,a,b = T.dmatrix('X'), T.dmatrix('Y'), T.dmatrix('Z'), T.dscalar('a'), T.dscalar('b')
R, S, c = T.dmatrix('R'), T.dmatrix('S'), T.dscalar('c')
f = inplace_func([X,Y,Z,a,b, R, S, c],
f = inplace_func([Z, b, R, S],
[Z * (Z + b * T.dot(R,S).T)], mode='FAST_RUN')
if (gemm_inplace in [n.op for n in f.maker.env.nodes]):
print pp(f.maker.env.outputs[0])
......@@ -678,7 +678,7 @@ def test_inplace0():
def test_inplace1():
X,Y,Z,a,b = XYZab()
# with > 2 terms in the overall addition
f = inplace_func([X,Y,Z,a,b],
f = inplace_func([X, Y, Z],
[Z + Z + T.dot(X,Y)], mode='FAST_RUN')
theano.printing.debugprint(f)
# it doesn't work inplace because we didn't mark Z as mutable input
......
......@@ -713,14 +713,14 @@ def test_local_merge_abs():
mode = theano.compile.mode.get_mode(mode).excluding(
"local_elemwise_fusion")
f = theano.function([x, y, z], (abs(y * z * -2)), mode=mode)
f(x_val, y_val, z_val)
f = theano.function([y, z], (abs(y * z * -2)), mode=mode)
f(y_val, z_val)
theano.printing.debugprint(f)
assert isinstance(f.maker.env.toposort()[1].op.scalar_op, scal.Abs)
assert len(f.maker.env.toposort()) == 2
f = theano.function([x, y, z],abs(x / y), mode=mode)
f(x_val, y_val, z_val)
f = theano.function([x, y],abs(x / y), mode=mode)
f(x_val, y_val)
theano.printing.debugprint(f)
assert isinstance(f.maker.env.toposort()[1].op.scalar_op, scal.Abs)
assert len(f.maker.env.toposort())==2
......@@ -2214,8 +2214,7 @@ class test_shapeoptimizer(unittest.TestCase):
mode = 'FAST_RUN'
v = T.vector()
m = T.matrix()
f = function([v,m], v.dimshuffle('x','x',0).shape[1], mode=mode)
f = function([v], v.dimshuffle('x','x',0).shape[1], mode=mode)
topo = f.maker.env.toposort()
assert len(topo) == 1
assert topo[0].op == theano.compile.function_module.deep_copy_op
......@@ -2371,34 +2370,34 @@ def test_local_mul_specialize():
v = T.vector()
m = T.vector()
f = function([v,m], v*1, mode=mode)
f = function([v], v*1, mode=mode)
nodes = [node.op for node in f.maker.env.toposort()]
print nodes
nodes == [theano.compile.function_module.deep_copy_op]
f = function([v,m], v*0, mode=mode)
f = function([v], v*0, mode=mode)
nodes = [node.op for node in f.maker.env.toposort()]
print nodes
assert nodes == [Shape_i(0), T.alloc]
f = function([v,m], v*(-1), mode=mode)
f = function([v], v*(-1), mode=mode)
nodes = [node.op for node in f.maker.env.toposort()]
print nodes
assert nodes == [T.neg]
f = function([v,m], v*1*(-m), mode=mode)
f = function([v, m], v*1*(-m), mode=mode)
nodes = [node.op for node in f.maker.env.toposort()]
print nodes
theano.printing.debugprint(f)
assert nodes == [T.mul, inplace.neg_inplace]
f = function([v,m], v*0*(-m), mode=mode)
f = function([v, m], v*0*(-m), mode=mode)
nodes = [node.op for node in f.maker.env.toposort()]
print nodes
theano.printing.debugprint(f)
assert nodes == [Shape_i(0), T.alloc]
f = function([v,m], v*(-1)*(-m), mode=mode)
f = function([v, m], v*(-1)*(-m), mode=mode)
nodes = [node.op for node in f.maker.env.toposort()]
print nodes
theano.printing.debugprint(f)
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论