提交 f95f64b6 authored 作者: Frederic Bastien's avatar Frederic Bastien

add test for the gradient of T.max_and_argmax and add test for T.max.

上级 e8b1794f
......@@ -38,7 +38,7 @@ def inplace_func(inputs, outputs, mode=get_default_mode()):
def eval_outputs(outputs):
    """Compile `outputs` with no inputs, run once, and return the result.

    If the compiled function returns a one-element tuple/list, unwrap it so
    callers get the single value directly; otherwise return the result as-is.
    """
    variables = inplace_func([], outputs)()
    # The diff left both the old bare `len(variables) == 1` check and the new
    # guarded one; keep only the guarded version.  Guarding with isinstance
    # matters because some execution modes return a bare value with no len(),
    # which would make the old check raise TypeError.
    if isinstance(variables, (tuple, list)) and len(variables) == 1:
        return variables[0]
    return variables
......@@ -846,6 +846,187 @@ class T_max_and_argmax(unittest.TestCase):
v = eval_outputs(max_and_argmax(n,2)[0].shape)
assert tuple(v)==(2,3)
def test_grad(self):
    # Gradient checks for T.max_and_argmax: both outputs (max value and
    # argmax), over the default axis, axis=0, axis=1, and a flattened input.
    data = numpy.random.rand(2,3)
    n = as_tensor_variable(data)
    def check_grad_max(data, max_grad_data, axis=None):
        # Verify that the symbolic gradient of max w.r.t. the input puts a 1
        # at exactly each argmax position and 0 everywhere else.
        #This work only for axis in [0,None]
        assert axis in [0,None]
        z = numpy.zeros_like(data)
        z = z.flatten()
        argmax=numpy.argmax(data,axis=axis)
        if argmax.ndim==0:
            # axis=None: a single winning flat position over the whole array.
            z[numpy.argmax(data,axis=axis)]+=1
        else:
            # axis=0: map each per-column argmax back to a flat index.
            # NOTE(review): the stride expression
            # numpy.prod(data.shape[data.ndim-1:axis:-1]) only yields the
            # correct flat stride for axis in [0, None], hence the assert
            # above -- confirm before extending to other axes.
            for id,v in enumerate(argmax):
                z[v*numpy.prod(data.shape[data.ndim-1:axis:-1])+id]+=1
        z = z.reshape(data.shape)
        assert numpy.all(max_grad_data == z)
    #test grad of max
    #axis is the last one
    utt.verify_grad(lambda v: max_and_argmax(v)[0], [data])
    utt.verify_grad(lambda v: max_and_argmax(v)[1], [data])

    utt.verify_grad(lambda v: max_and_argmax(v,axis=[0])[0], [data])
    utt.verify_grad(lambda v: max_and_argmax(v,axis=[0])[1], [data])
    check_grad_max(data,eval_outputs(grad(max_and_argmax(n,axis=0)[0],n)),axis=0)

    utt.verify_grad(lambda v: max_and_argmax(v,axis=[1])[0], [data])
    utt.verify_grad(lambda v: max_and_argmax(v,axis=[1])[1], [data])
    # Disabled: check_grad_max only supports axis in [0, None] (see above).
    #check_grad_max(data,eval_outputs(grad(max_and_argmax(n,axis=1)[0],n)),axis=1)

    utt.verify_grad(lambda v: max_and_argmax(v.flatten())[0], [data])
    utt.verify_grad(lambda v: max_and_argmax(v.flatten())[1], [data])
    check_grad_max(data,eval_outputs(grad(max_and_argmax(n.flatten())[0],n)))
class T_max(unittest.TestCase):
    """Tests for the T.max convenience wrapper: values, output shapes,
    invalid axes, negative axes, and (disabled) gradients."""

    def setUp(self):
        utt.seed_rng()
        MaxAndArgmax.debug = 0

    def _test0(self):
        # Disabled (leading underscore): max of a 0-d constant.
        # NOTE(review): `max(n)[0].shape` indexes into a scalar result, which
        # looks wrong and is presumably why this test is disabled -- confirm
        # before re-enabling.
        n = as_tensor_variable(5.0)
        v = eval_outputs(max(n))
        self.failUnless(v == 5.0)

        v = eval_outputs(max(n)[0].shape)
        assert len(v)==0

    def test1(self):
        # max over a 1-d vector reduces to a 0-d scalar.
        n = as_tensor_variable([1,2,3,2,-6])
        v = eval_outputs([max(n)])
        self.failUnless(v == 3)

        v = eval_outputs(max(n).shape)
        assert len(v)==0

    def test2(self):
        # max over the last axis of a 2x3 matrix.
        data = numpy.random.rand(2,3)
        n = as_tensor_variable(data)
        v = eval_outputs(max(n,-1))
        self.failUnless(numpy.all(v == numpy.max(data,-1)))

        # NOTE(review): the value check above uses axis=-1 but this shape
        # check uses the default axis; it looks like it was meant to be
        # max(n,-1).shape -- confirm against the Op's default-axis semantics.
        v = eval_outputs(max(n).shape)
        assert v==(2)

    def test2b(self):
        # Output shapes of max over axis 0, axis 1, and both axes.
        data = numpy.random.rand(2,3)
        n = as_tensor_variable(data)
        v = eval_outputs(max(n,0))
        self.failUnless(numpy.all(v == numpy.max(data,0)))

        v = eval_outputs(max(n,0).shape)
        assert v==(3)
        v = eval_outputs(max(n,1).shape)
        assert v==(2)
        # Reducing over all axes yields a 0-d result (empty shape vector).
        v = eval_outputs(max(n,[0,1]).shape)
        assert v.size==0

    def test2_invalid(self):
        # An out-of-range axis must raise ValueError.
        n = as_tensor_variable(numpy.random.rand(2,3))
        # Silence expected error messages
        _logger = logging.getLogger('theano.gof.opt')
        oldlevel = _logger.getEffectiveLevel()
        _logger.setLevel(logging.CRITICAL)
        try:
            try:
                eval_outputs(max(n,3))
                assert False
            except ValueError, e:
                pass
        finally:
            _logger.setLevel(oldlevel)

    def test2_invalid_neg(self):
        # An out-of-range negative axis must raise ValueError.
        # stderr is redirected to silence the expected error output.
        n = as_tensor_variable(numpy.random.rand(2,3))
        old_stderr = sys.stderr
        sys.stderr = StringIO.StringIO()
        try:
            try:
                eval_outputs(max(n,-3))
                assert False
            except ValueError, e:
                pass
        finally:
            sys.stderr = old_stderr

    def test2_valid_neg(self):
        # In-range negative axes behave like their positive counterparts.
        n = as_tensor_variable(numpy.random.rand(2,3))
        v = eval_outputs(max(n,-1))
        self.failUnless(v.shape == (2,))
        v = eval_outputs(max(n,-2))
        self.failUnless(v.shape == (3,))

        v = eval_outputs(max(n,-1).shape)
        assert v==(2)
        v = eval_outputs(max(n,-2).shape)
        assert v==(3)

    def test3(self):
        # 3-d input: single axes, pairs of axes, and all axes, checking both
        # values (against numpy) and symbolic output shapes.
        n = as_tensor_variable(numpy.random.rand(2,3,4))
        v = eval_outputs(max(n,0))
        self.failUnless(v.shape == (3,4))
        self.failUnless(numpy.all(v == numpy.max(n.value,0)))
        v = eval_outputs(max(n,1))
        self.failUnless(v.shape == (2,4))
        self.failUnless(numpy.all(v == numpy.max(n.value,1)))
        v = eval_outputs(max(n,2))
        self.failUnless(v.shape == (2,3))
        self.failUnless(numpy.all(v == numpy.max(n.value,2)))
        v = eval_outputs(max(n,[0,1]))
        self.failUnless(v.shape == (4,))
        self.failUnless(numpy.all(v == numpy.max(n.value,1).max(0)))
        v = eval_outputs(max(n,[0,2]))
        self.failUnless(v.shape == (3,))
        self.failUnless(numpy.all(v == numpy.max(n.value,2).max(0)))
        v = eval_outputs(max(n,[1,2]))
        self.failUnless(v.shape == (2,))
        self.failUnless(numpy.all(v == numpy.max(n.value,2).max(1)))
        v = eval_outputs(max(n,[0,1,2]))
        self.failUnless(v.shape == ())

        v = eval_outputs(max(n,0).shape)
        assert tuple(v)==(3,4)
        v = eval_outputs(max(n,1).shape)
        assert tuple(v)==(2,4)
        v = eval_outputs(max(n,2).shape)
        assert tuple(v)==(2,3)
        v = eval_outputs(max(n,[0,1]).shape)
        self.failUnless(v == (4,))
        v = eval_outputs(max(n,[0,2]).shape)
        self.failUnless(v == (3,))
        v = eval_outputs(max(n,[1,2]).shape)
        self.failUnless(v == (2,))
        v = eval_outputs(max(n,[0,1,2]).shape)
        self.failUnless(v.size == 0)

    def _test_grad(self):
        # Disabled (leading underscore): gradient checks for max, mirroring
        # T_max_and_argmax.test_grad.
        data = numpy.random.rand(2,3)
        n = as_tensor_variable(data)
        def check_grad_max(data, max_grad_data, axis=None):
            # Verify the gradient of max is 1 at each argmax position
            # and 0 elsewhere.
            #This work only for axis in [0,None]
            assert axis in [0,None]
            z = numpy.zeros_like(data)
            z = z.flatten()
            argmax=numpy.argmax(data,axis=axis)
            if argmax.ndim==0:
                # axis=None: a single winning flat position.
                z[numpy.argmax(data,axis=axis)]+=1
            else:
                # axis=0: map each per-column argmax to a flat index; the
                # stride expression is only valid for axis in [0, None].
                for id,v in enumerate(argmax):
                    z[v*numpy.prod(data.shape[data.ndim-1:axis:-1])+id]+=1
            z = z.reshape(data.shape)
            assert numpy.all(max_grad_data == z)

        #test grad of max
        #axis is the last one
        utt.verify_grad(lambda v: max(v), [data])

        utt.verify_grad(lambda v: max(v,axis=[0]), [data])
        check_grad_max(data,eval_outputs(grad(max_and_argmax(n,axis=0)[0],n)),axis=0)

        utt.verify_grad(lambda v: max(v,axis=[1]), [data])
        # Disabled: check_grad_max only supports axis in [0, None].
        #check_grad_max(data,eval_outputs(grad(max_and_argmax(n,axis=1)[0],n)),axis=1)

        utt.verify_grad(lambda v: max(v.flatten()), [data])
        check_grad_max(data,eval_outputs(grad(max_and_argmax(n.flatten())[0],n)))
class T_subtensor(unittest.TestCase):
def setUp(self):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论