Commit 02453383 authored by Harm de Vries, committed by Frederic Bastien

Update gpu pool tensor variable test

Parent afad1e87
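The updated test compares cuDNN pooling on the GPU against a CPU reference built with pool_2d_i2n (an images2neibs-based helper defined in the test file), while the pooling window size is changed between loop iterations through the shared variable ws. As a rough illustration of what that CPU reference computes, here is a minimal NumPy-only sketch of max pooling with an i x i window, stride 1 and no padding; the function name, shapes and values are illustrative and not taken from the commit.

import numpy

def max_pool_ref(data, i):
    # Naive max pooling over an (n, c, h, w) batch with an i x i window,
    # stride (1, 1) and no padding -- the configuration the updated test
    # uses for its CPU reference.
    n, c, h, w = data.shape
    out = numpy.empty((n, c, h - i + 1, w - i + 1), dtype=data.dtype)
    for r in range(h - i + 1):
        for s in range(w - i + 1):
            out[:, :, r, s] = data[:, :, r:r + i, s:s + i].max(axis=(2, 3))
    return out

data = numpy.random.normal(0, 1, (1, 3, 9, 9)).astype("float32")
print(max_pool_ref(data, 2).shape)  # (1, 3, 8, 8)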
...
@@ -363,28 +363,32 @@ def test_pooling_with_tensor_vars():
                                       cast_to_output_type=False,
                                       mode=mode_with_gpu)
-    out2 = pool_2d_i2n(x, ds=(2, 2), strides=(1, 1),
-                       pad=(0, 0),
-                       pool_function=T.max)
     mode_without_gpu2 = mode_without_gpu.including()
     mode_without_gpu2.check_isfinite = False
 
-    f1 = theano.function([x], fn(x), mode=mode_with_gpu)
+    f_gpu = theano.function([x], fn(x), mode=mode_with_gpu)
     assert any([isinstance(node.op, cuda.dnn.GpuDnnPool)
-                for node in f1.maker.fgraph.apply_nodes])
-    f2 = theano.function([x], out2, mode=mode_without_gpu2)
-    assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool)
-                    for node in f2.maker.fgraph.apply_nodes])
+                for node in f_gpu.maker.fgraph.apply_nodes])
 
+    i = 1
     for shp in [(1, 10, 100, 100),
                 (1, 3, 99, 99),
-                (32, 1, 147, 197),
-                ]:
+                (32, 1, 147, 197)]:
         data = numpy.random.normal(0, 1, shp).astype("float32")
-        a = f1(data).__array__()
-
-        b = f2(data).__array__()
+        out = pool_2d_i2n(x, ds=(i, i), strides=(1, 1),
+                          pad=(0, 0),
+                          pool_function=T.max)
+        f_cpu = theano.function([x], out, mode=mode_without_gpu2)
+        assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool)
+                        for node in f_cpu.maker.fgraph.apply_nodes])
+
+        # Change the window size dynamically for gpu op
+        ws.set_value(numpy.array([i, i]).astype('int32'))
+        a = f_gpu(data).__array__()
+        b = f_cpu(data).__array__()
         utt.assert_allclose(a, b)
+        i += 1
 
 
 def test_old_pool_interface():
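The mechanism this hunk relies on is that ws, st and pad are shared int32 variables, so the pooling window can be changed with set_value() between calls to the already-compiled GPU function, without rebuilding the graph. Below is a minimal CPU-only sketch of that pattern; the variable name mirrors the test, but the computation itself is made up purely to show how set_value() changes the result of later calls.

import numpy
import theano
import theano.tensor as T

# A shared int32 vector playing the role of `ws` in the test.
ws = theano.shared(numpy.array([2, 2], dtype='int32'))
x = T.fvector('x')
# The compiled function keeps a reference to the shared variable, so the
# value it reads can be swapped between calls without recompilation.
f = theano.function([x], x * ws.sum())

v = numpy.ones(3, dtype='float32')
print(f(v))                                        # computed with ws = [2, 2]
ws.set_value(numpy.array([3, 3], dtype='int32'))
print(f(v))                                        # computed with ws = [3, 3]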
...
@@ -744,6 +748,7 @@ def test_dnn_tag():
 class TestDnnInferShapes(utt.InferShapeTester):
     def setUp(self):
         super(TestDnnInferShapes, self).setUp()
         self.mode = mode_with_gpu
...
...
@@ -1091,8 +1091,6 @@ class GpuDnnPool(DnnBase):
         if mode == 'average':
             mode = 'average_inc_pad'
         assert mode in ('max', 'average_inc_pad', 'average_exc_pad')
-        if version() == -1:
-            raise Exception("cudnn v1 do not support average_exc_pad")
         self.mode = mode
 
     def get_op_params(self):
...
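With the cuDNN v1 check removed, GpuDnnPool.__init__ keeps only the mode normalization shown in the context lines above. A hypothetical standalone version of that check, for reference (the helper name is mine, not part of the module):

def normalize_pool_mode(mode):
    # 'average' is treated as average pooling that includes the padding
    # ('average_inc_pad'); anything outside the three cuDNN modes is rejected.
    if mode == 'average':
        mode = 'average_inc_pad'
    assert mode in ('max', 'average_inc_pad', 'average_exc_pad')
    return mode

print(normalize_pool_mode('average'))  # 'average_inc_pad'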
...
@@ -284,11 +284,11 @@ def test_pooling_with_tensor_vars():
     mode = 'max'
 
     def fn(x):
-        dnn_op = dnn.dnn_pool(x,
-                              ws=ws,
+        dnn_op = dnn.dnn_pool(
+            x, ws=ws,
             stride=st,
             pad=pad,
             mode=mode)
         return dnn_op
 
     for shp in [(1, 1, 2, 2),
...
@@ -299,28 +299,32 @@ def test_pooling_with_tensor_vars():
                                       cast_to_output_type=False,
                                       mode=mode_with_gpu)
-    out2 = pool_2d_i2n(x, ds=(2, 2), strides=(1, 1),
-                       pad=(0, 0),
-                       pool_function=T.max)
     mode_without_gpu2 = mode_without_gpu.including()
     mode_without_gpu2.check_isfinite = False
 
-    f1 = theano.function([x], fn(x), mode=mode_with_gpu)
+    f_gpu = theano.function([x], fn(x), mode=mode_with_gpu)
     assert any([isinstance(node.op, dnn.GpuDnnPool)
-                for node in f1.maker.fgraph.apply_nodes])
-    f2 = theano.function([x], out2, mode=mode_without_gpu2)
-    assert not any([isinstance(node.op, dnn.GpuDnnPool)
-                    for node in f2.maker.fgraph.apply_nodes])
+                for node in f_gpu.maker.fgraph.apply_nodes])
 
+    i = 1
     for shp in [(1, 10, 100, 100),
                 (1, 3, 99, 99),
-                (32, 1, 147, 197),
-                ]:
+                (32, 1, 147, 197)]:
         data = numpy.random.normal(0, 1, shp).astype("float32")
-        a = f1(data).__array__()
-
-        b = f2(data).__array__()
+        out = pool_2d_i2n(x, ds=(i, i), strides=(1, 1),
+                          pad=(0, 0),
+                          pool_function=T.max)
+        f_cpu = theano.function([x], out, mode=mode_without_gpu2)
+        assert not any([isinstance(node.op, dnn.GpuDnnPool)
+                        for node in f_cpu.maker.fgraph.apply_nodes])
+
+        # Change the window size dynamically for gpu op
+        ws.set_value(numpy.array([i, i]).astype('int32'))
+        a = f_gpu(data).__array__()
+        b = f_cpu(data).__array__()
         utt.assert_allclose(a, b)
+        i += 1
 
 
 def test_pooling_opt():
...