提交 f49eadd2 authored 作者: Frédéric Bastien's avatar Frédéric Bastien

Merge pull request #2798 from nouiz/tests

Tests
......@@ -344,7 +344,9 @@ def test_reallocation():
x = tensor.scalar('x')
y = tensor.scalar('y')
z = tensor.tanh(3 * x + y) + tensor.cosh(x + 5 * y)
for l in ['vm_nogc', 'vm', 'vm_nogc', 'vm']:
# This functionality is currently implemented only for the non-lazy, non-C VMs.
for l in [vm.VM_Linker(allow_gc=False, lazy=False, use_cloop=False),
vm.VM_Linker(allow_gc=True, lazy=False, use_cloop=False)]:
m = theano.compile.get_mode(theano.Mode(linker=l))
m = m.excluding('fusion', 'inplace')
......
import numpy
import theano
import theano.tensor as T
from theano.tensor.opt import Assert
def test_assert_op_gradient():
    """Gradient of ``sum(assert_op(x, cond))`` w.r.t. ``x`` must flow
    through the Assert op unchanged, i.e. be a vector of ones.
    """
    x = T.vector('x')
    assert_op = Assert()
    cost = T.sum(assert_op(x, x.size < 2))
    grad = T.grad(cost, x)
    func = theano.function([x], grad)
    # Build the test value with dtype=config.floatX: a bare
    # numpy.ones(...) defaults to float64, which fails the input
    # type check when Theano is configured with floatX=float32.
    x_val = numpy.ones(shape=(1,), dtype=theano.config.floatX)
    assert func(x_val) == 1
......@@ -235,7 +235,8 @@ def upcast_int8_nfunc(fn):
def makeTester(name, op, expected, checks=None, good=None, bad_build=None,
bad_runtime=None, grad=None, mode=None, grad_rtol=None,
eps=1e-10, skip=False, test_memmap=True, check_name=True):
eps=1e-10, skip=False, test_memmap=True, check_name=True,
grad_eps=None):
"""
:param check_name:
Use only for testers that aren't in Theano.
......@@ -258,6 +259,7 @@ def makeTester(name, op, expected, checks=None, good=None, bad_build=None,
_mode, _grad_rtol, _eps, skip_ = mode, grad_rtol, eps, skip
_test_memmap = test_memmap
_check_name = check_name
_grad_eps = grad_eps
class Checker(unittest.TestCase):
......@@ -463,8 +465,9 @@ def makeTester(name, op, expected, checks=None, good=None, bad_build=None,
inputs = [copy(input) for input in inputs]
try:
utt.verify_grad(self.op, inputs,
mode=self.mode,
rel_tol=_grad_rtol)
mode=self.mode,
rel_tol=_grad_rtol,
eps=_grad_eps)
except Exception, exc:
err_msg = ("Test %s::%s: Error occurred while"
" computing the gradient on the following"
......@@ -976,6 +979,7 @@ ModTester = makeBroadcastTester(
good=copymod(_good_broadcast_div_mod_normal_float,
['complex1', 'complex2']),
grad=_grad_broadcast_div_mod_normal,
grad_eps=1e-5,
)
......@@ -986,6 +990,7 @@ ModInplaceTester = makeBroadcastTester(
good=copymod(_good_broadcast_div_mod_normal_float_inplace,
["complex1", "complex2"]),
grad=_grad_broadcast_div_mod_normal,
grad_eps=1e-5,
inplace=True)
_good_broadcast_pow_normal_float = dict(same_shapes=(rand_ranged(1, 5, (2, 3)), rand_ranged(-3, 3, (2, 3))),
......@@ -2504,12 +2509,12 @@ class T_Clip(unittest.TestCase):
rng = numpy.random.RandomState(utt.fetch_seed())
nvals = 50
xval = rng.rand(nvals)
xval = rng.rand(nvals).astype(config.floatX)
# To ensure that the min < x
yval_mn = rng.rand(nvals) - 1.0
yval_mn = rng.rand(nvals).astype(config.floatX) - 1.0
# To ensure that the max > x
yval_mx = rng.rand(nvals) + 1.0
yval_mx = rng.rand(nvals).astype(config.floatX) + 1.0
aval, = fn(xval, yval_mn)
aval2, = fn2(xval, yval_mx)
......
......@@ -369,12 +369,14 @@ class CompressTester(utt.InferShapeTester):
self.op = compress
def test_op(self):
for axis, cond, shape in zip(self.axis_list, self.cond_list, self.shape_list):
for axis, cond, shape in zip(self.axis_list, self.cond_list,
self.shape_list):
cond_var = theano.tensor.ivector()
data = numpy.random.random(size=shape).astype(theano.config.floatX)
data = numpy.random.random(size=shape).astype(theano.config.floatX)
data_var = theano.tensor.matrix()
f = theano.function([cond_var, data_var], self.op(cond_var, data_var, axis=axis))
f = theano.function([cond_var, data_var],
self.op(cond_var, data_var, axis=axis))
expected = numpy.compress(cond, data, axis=axis)
tested = f(cond, data)
......
......@@ -5053,7 +5053,7 @@ class Test_lift_transpose_through_dot(unittest.TestCase):
a, b = matrices('ab')
g = self.simple_optimize(FunctionGraph([a, b], [tensor.dot(a, b).T]))
sg = '[dot(DimShuffle{1,0}(b), DimShuffle{1,0}(a))]'
assert str(g) == sg
assert str(g) == sg, (str(g), sg)
def test_row_matrix(self):
a = vector('a')
......@@ -5063,7 +5063,7 @@ class Test_lift_transpose_through_dot(unittest.TestCase):
[tensor.dot(a.dimshuffle('x', 0), b).T]),
level='stabilize')
sg = '[dot(DimShuffle{1,0}(b), DimShuffle{0,x}(a))]'
assert str(g) == sg
assert str(g) == sg, (str(g), sg)
def test_matrix_col(self):
a = vector('a')
......@@ -5073,7 +5073,7 @@ class Test_lift_transpose_through_dot(unittest.TestCase):
[tensor.dot(b, a.dimshuffle(0, 'x')).T]),
level='stabilize')
sg = '[dot(DimShuffle{x,0}(a), DimShuffle{1,0}(b))]'
assert str(g) == sg
assert str(g) == sg, (str(g), sg)
def test_local_upcast_elemwise_constant_inputs():
......@@ -5173,6 +5173,18 @@ class TestShapeFeature(unittest.TestCase):
self.assertRaises(IndexError, shape_feature.same_shape, x, o, 1, 0)
self.assertRaises(IndexError, shape_feature.same_shape, x, o, 0, 1)
def test_assert_op_gradient():
    """The gradient of ``sum(assert_op(x, x.size < 2))`` w.r.t. ``x``
    is a vector of ones: Assert must be transparent to T.grad.
    """
    vec = T.vector('x')
    checked = Assert()(vec, vec.size < 2)
    gradient = T.grad(T.sum(checked), vec)
    fn = theano.function([vec], gradient)
    # One-element input in floatX so dtypes match the symbolic vector.
    ones_val = numpy.ones(shape=(1,), dtype=theano.config.floatX)
    assert fn(ones_val) == 1
if __name__ == '__main__':
t = TestMakeVector('setUp')
t.setUp()
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论