Commit 2ed94ab4, authored by nouiz

Merge pull request #100 from delallea/prep_tests_new_grad_type

A few fixes to prepare for change in grad output
......@@ -632,7 +632,8 @@ def verify_grad(fun, pt, n_tests=2, rng=None, eps=None, abs_tol=None, rel_tol=No
g_cost = cast(g_cost, o_output.dtype)
symbolic_grad = grad(cost, tensor_pt, g_cost,
disconnected_inputs='ignore')
disconnected_inputs='ignore',
keep_wrt_type=True)
#if o_output.dtype in ['float32','float64']:
# assert all([x.dtype == o_output.dtype for x in symbolic_grad]),("Expected grad of type %s, got %s "%( symbolic_grad.dtype, o_output.dtyp))
......@@ -644,8 +645,8 @@ def verify_grad(fun, pt, n_tests=2, rng=None, eps=None, abs_tol=None, rel_tol=No
analytic_grad = grad_fn(*[p.copy() for p in pt])
if not isinstance(analytic_grad, (list, tuple)):
analytic_grad = [analytic_grad]
# Since `tensor_pt` is a list, `analytic_grad` should be one too.
assert isinstance(analytic_grad, list)
max_arg, max_err_pos, max_abs_err, max_rel_err =\
num_grad.max_err(analytic_grad, abs_tol, rel_tol)
......
......@@ -808,13 +808,13 @@ def test_dot_w_self():
# This can trigger problems in the optimization because what would normally be a gemm must
# not be because the output is aliased to one of the inputs.
A = shared(value = numpy.ones((2,2)))
A = shared(value=numpy.ones((2,2)))
B = T.matrix()
p = T.dot(A,A)*B
grad = T.grad(T.mean(p),[A])
f = theano.function([B], p, updates = { A : A - grad[0]} )
grad = T.grad(T.mean(p), A)
f = theano.function([B], p, updates={A : A - grad})
# tests correctness in debugmode
f(numpy.asarray([[0,1], [2,3]], dtype=config.floatX))
......
Markdown is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment