Commit 180777ee authored by Eric Larsen

testing infer_shape: OP MakeVector

Parent 9cc19f69
......@@ -533,6 +533,9 @@ class MakeVector(T.Op):
# assume that out has correct dtype. there is no cheap way to check
out[0][...] = inputs
def infer_shape(self, node, ishapes):
    """Return the shape of the output vector.

    MakeVector packs its scalar inputs into a single 1-D tensor
    (see perform: ``out[0][...] = inputs``), so the sole output has
    shape ``(number of inputs,)``.  ``ishapes`` carries one shape
    entry per input, hence its length equals the element count.
    """
    n_elements = len(ishapes)
    return [(n_elements,)]
def grad(self, inputs, output_gradients):
# If the output is of an integer dtype, no gradient shall pass
if 'int' in self.dtype:
......
......@@ -29,7 +29,8 @@ from theano.tensor.opt import (
mul_canonizer,
out2in,
Shape_i,
Assert
Assert,
MakeVector
)
from theano import tensor
from theano import tensor as T
......@@ -3386,18 +3387,23 @@ class T_local_sum_dimshuffle(unittest.TestCase):
# test_local_sum_divprod_dimshuffle ((a * b) / (c * d))
def test_make_vector():
b = T.bscalar()
i = T.iscalar()
d = T.dscalar()
class TestMakeVector(utt.InferShapeTester):
#TODO: draw random values instead. Not really important.
val = {b: 2,
i: -3,
d: 0.7}
def setUp(self):
super(TestMakeVector, self).setUp()
def test_make_vector():
b = T.bscalar()
i = T.iscalar()
d = T.dscalar()
#TODO: draw random values instead. Not really important.
val = {b: 2,
i: -3,
d: 0.7}
# Should work
for (dtype, inputs) in [("int8", (b, b)),
# Should work
for (dtype, inputs) in [("int8", (b, b)),
("int32", (i, b)),
("int32", (b, i)),
("float64", (b, i)),
......@@ -3406,55 +3412,55 @@ def test_make_vector():
("float64", ()),
("int64", ()),
]:
mv = opt.MakeVector(dtype=dtype)(*inputs)
assert mv.dtype == dtype
f = theano.function([b, i, d], mv, on_unused_input='ignore')
f_val = f(val[b], val[i], val[d])
#print 'f_val =', f_val
s = mv.sum()
gb = T.grad(s, b, disconnected_inputs='ignore')
gi = T.grad(s, i, disconnected_inputs='ignore')
gd = T.grad(s, d, disconnected_inputs='ignore')
#print 'gb =', gb
#print 'gi =', gi
#print 'gd =', gd
g = theano.function([b, i, d], [gb, gi, gd])
g_val = g(val[b], val[i], val[d])
#print 'g_val =', g_val
if dtype.startswith('int'):
# The gradient should be 0
assert numpy.allclose(g_val, 0)
else:
for var, grval in zip((b, i, d), g_val):
float_inputs = []
if var.dtype.startswith('int'):
assert grval == 0
elif var not in inputs:
assert grval == 0
else:
float_inputs.append(var)
# Build a function that takes float_inputs, use fix values for the
# other inputs, and returns the MakeVector. Use it for verify_grad.
if float_inputs:
def fun(*fl_inputs):
f_inputs = []
for var in f_inputs:
if var in fl_inputs:
# use symbolic variable
f_inputs.append(var)
else:
# use constant value
f_inputs.append(val[var])
return opt.MakeVector(dtype=dtype)(*f_inputs)
mv = opt.MakeVector(dtype=dtype)(*inputs)
assert mv.dtype == dtype
f = theano.function([b, i, d], mv, on_unused_input='ignore')
f_val = f(val[b], val[i], val[d])
#print 'f_val =', f_val
s = mv.sum()
gb = T.grad(s, b, disconnected_inputs='ignore')
gi = T.grad(s, i, disconnected_inputs='ignore')
gd = T.grad(s, d, disconnected_inputs='ignore')
#print 'gb =', gb
#print 'gi =', gi
#print 'gd =', gd
g = theano.function([b, i, d], [gb, gi, gd])
g_val = g(val[b], val[i], val[d])
#print 'g_val =', g_val
if dtype.startswith('int'):
# The gradient should be 0
assert numpy.allclose(g_val, 0)
else:
for var, grval in zip((b, i, d), g_val):
float_inputs = []
if var.dtype.startswith('int'):
assert grval == 0
elif var not in inputs:
assert grval == 0
else:
float_inputs.append(var)
# Build a function that takes float_inputs, use fix values for the
# other inputs, and returns the MakeVector. Use it for verify_grad.
if float_inputs:
def fun(*fl_inputs):
f_inputs = []
for var in f_inputs:
if var in fl_inputs:
# use symbolic variable
f_inputs.append(var)
else:
# use constant value
f_inputs.append(val[var])
return opt.MakeVector(dtype=dtype)(*f_inputs)
utt.verify_grad(fun, [val[ri] for ri in float_inputs])
utt.verify_grad(fun, [val[ri] for ri in float_inputs])
#should fail
for (dtype, inputs) in [("int8", (b, i)),
#should fail
for (dtype, inputs) in [("int8", (b, i)),
("int8", (i, b)),
("int8", (b, d)),
("int8", (i, i)),
......@@ -3462,11 +3468,37 @@ def test_make_vector():
("int32", (i, d)),
("float32", (i, d)),
]:
try:
opt.MakeVector(dtype=dtype)(*inputs)
raise Exception("Theano should have raised an error")
except AssertionError:
pass
try:
opt.MakeVector(dtype=dtype)(*inputs)
raise Exception("Theano should have raised an error")
except AssertionError:
pass
def test_infer_shape(self):
    """Check MakeVector.infer_shape against the shapes actually produced.

    ``InferShapeTester._compile_and_check`` compiles each graph and
    compares the symbolically inferred output shape with the shape of
    the value computed at run time.

    (Fix: the rendered diff had stripped all indentation from this
    method, leaving it syntactically invalid; conventional indentation
    is restored without changing any statement.)
    """
    # NOTE(review): dscalar/iscalar are used unqualified here while the
    # surrounding tests use T.dscalar/T.iscalar — confirm these names
    # are in scope (e.g. via a star import) or qualify them with T.
    adscal = dscalar()
    bdscal = dscalar()
    aiscal = iscalar()
    biscal = iscalar()
    ciscal = iscalar()
    discal = iscalar()

    # Random concrete values fed to the compiled functions.
    adscal_val = numpy.random.rand()
    bdscal_val = numpy.random.rand()
    aiscal_val = numpy.random.randint(10)
    biscal_val = numpy.random.randint(10)
    ciscal_val = numpy.random.randint(10)
    discal_val = numpy.random.randint(10)

    # Mixed float/int inputs upcast into a float64 vector of length 2.
    self._compile_and_check([adscal, aiscal],
                            [MakeVector('float64')(adscal, aiscal)],
                            [adscal_val, aiscal_val], MakeVector)

    # Three inputs -> float64 vector of length 3.
    self._compile_and_check([adscal, bdscal, aiscal],
                            [MakeVector('float64')(adscal, bdscal, aiscal)],
                            [adscal_val, bdscal_val, aiscal_val], MakeVector)

    # Four int inputs -> int32 vector of length 4.
    self._compile_and_check([aiscal, biscal, ciscal, discal],
                            [MakeVector('int32')(aiscal, biscal, ciscal,
                                                 discal)],
                            [aiscal_val, biscal_val, ciscal_val, discal_val],
                            MakeVector)
def test_local_join_1():
......@@ -3684,9 +3716,9 @@ class TestShape_i(utt.InferShapeTester):
if __name__ == '__main__':
    # Manual-run entry point exercising the MakeVector infer_shape test.
    # (Fix: the rendered diff interleaved the deleted lines — the old
    # TestShape_i binding and the t.test_perform() call — with their
    # replacements; keeping both sides would rebind `t` and invoke a
    # test this commit commented out.  Only the post-commit lines are
    # kept here.)
    t = TestMakeVector('setUp')
    t.setUp()
    #t.test_perform()
    t.test_infer_shape()
"""
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论