提交 53bd748f — 作者: notoraptor

Re-put test_dnn_reduction_sum_squares into test_dnn.

上级 6b0fe3fb
......@@ -1569,6 +1569,20 @@ def test_dnn_reduction_opt():
yield dnn_reduction, 2, idtype, adtype, odtype
def test_dnn_reduction_sum_squares():
    """Check that sum(x**2) is optimized into a single cuDNN 'norm2' reduction.

    Builds ``(M**2).sum(axis=axis)`` for each axis in (None, 0, 1), compiles
    with the GPU mode, asserts a ``GpuDnnReduction`` node with
    ``red_op == 'norm2'`` appears in the graph, and checks numerical
    agreement with the NumPy reference.
    """
    # Split the two skip conditions so each reports an accurate reason:
    # dnn_available.msg explains *unavailability*, which is misleading when
    # the real problem is an old cuDNN version.
    if not dnn.dnn_available(test_ctx_name):
        raise SkipTest(dnn.dnn_available.msg)
    if dnn.version(raises=False) < 6000:
        raise SkipTest("cuDNN reduction support requires cuDNN v6 (>= 6000)")
    M = T.matrix()
    for axis in (None, 0, 1):
        out = (M**2).sum(axis=axis)
        f = theano.function([M], out, mode=mode_with_gpu)
        # The optimizer should have fused square+sum into one norm2 reduction.
        assert any(isinstance(node.op, dnn.GpuDnnReduction) and
                   node.op.red_op == 'norm2'
                   for node in f.maker.fgraph.apply_nodes)
        M_val = np.random.random((4, 5)).astype(theano.config.floatX)
        utt.assert_allclose((M_val**2).sum(axis=axis), f(M_val))
def dnn_reduction_strides(shp, shuffle, slice):
utt.fetch_seed()
inp = GpuArrayType('float32', (False,) * len(shp),
......
......@@ -382,21 +382,6 @@ def test_local_gpu_elemwise_careduce():
utt.assert_allclose(f(data), (data * data).sum(axis=1))
def test_dnn_reduction_sum_squares():
    """Verify that sum-of-squares compiles to a cuDNN 'norm2' reduction.

    For every reduction axis (full, rows, columns), the compiled GPU graph
    must contain a ``GpuDnnReduction`` op with ``red_op == 'norm2'``, keep a
    valid stack trace, and match the NumPy reference numerically.
    """
    # cuDNN reductions only exist from v6 (version code 6000) onward.
    if not dnn.dnn_available(test_ctx_name) or dnn.version(raises=False) < 6000:
        raise SkipTest(dnn.dnn_available.msg)
    mat = tensor.matrix()
    for reduce_axis in (None, 0, 1):
        squared_sum = (mat ** 2).sum(axis=reduce_axis)
        fn = theano.function([mat], squared_sum, mode=mode_with_gpu)
        graph_nodes = fn.maker.fgraph.apply_nodes
        found_norm2 = False
        for node in graph_nodes:
            if isinstance(node.op, dnn.GpuDnnReduction):
                if node.op.red_op == 'norm2':
                    found_norm2 = True
                    break
        # The square-then-sum pattern must have been fused into norm2.
        assert found_norm2
        assert _check_stack_trace(fn)
        data = np.random.random((4, 5)).astype(theano.config.floatX)
        expected = (data ** 2).sum(axis=reduce_axis)
        utt.assert_allclose(expected, fn(data))
def test_local_lift_dot22scalar():
x = tensor.matrix()
y = tensor.matrix()
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论