提交 baa3dd12，作者：Pascal Lamblin，提交者：GitHub

Merge pull request #5142 from ChihebTrabelsi/pool_2d_rename

Pool 2d rename
...@@ -158,9 +158,9 @@ def test_pooling(): ...@@ -158,9 +158,9 @@ def test_pooling():
continue continue
# We will check that the opt introduced it. # We will check that the opt introduced it.
out = pool_2d(x, (ws, ws), out = pool_2d(x, (ws, ws),
st=(stride, stride), stride=(stride, stride),
ignore_border=True, ignore_border=True,
padding=pad, mode=mode) pad=pad, mode=mode)
mode_without_gpu2 = mode_without_gpu.including() mode_without_gpu2 = mode_without_gpu.including()
mode_without_gpu2.check_isfinite = False mode_without_gpu2.check_isfinite = False
...@@ -199,7 +199,7 @@ def test_pooling(): ...@@ -199,7 +199,7 @@ def test_pooling():
# This tests the CPU grad + opt + GPU implementation # This tests the CPU grad + opt + GPU implementation
def fn(x): def fn(x):
return pool_2d(x, (ws, ws), ignore_border=True, return pool_2d(x, (ws, ws), ignore_border=True,
padding=pad, mode=mode) pad=pad, mode=mode)
utt.verify_grad(fn, [data], mode=mode_with_gpu) utt.verify_grad(fn, [data], mode=mode_with_gpu)
# Confirm that the opt would have inserted it. # Confirm that the opt would have inserted it.
fg = theano.function([x], theano.grad(fn(x).sum(), x), fg = theano.function([x], theano.grad(fn(x).sum(), x),
...@@ -228,14 +228,14 @@ def test_pooling_with_tensor_vars(): ...@@ -228,14 +228,14 @@ def test_pooling_with_tensor_vars():
raise SkipTest(dnn.dnn_available.msg) raise SkipTest(dnn.dnn_available.msg)
x = T.ftensor4() x = T.ftensor4()
ws = theano.shared(numpy.array([2, 2], dtype='int32')) ws = theano.shared(numpy.array([2, 2], dtype='int32'))
st = theano.shared(numpy.array([1, 1], dtype='int32')) stride = theano.shared(numpy.array([1, 1], dtype='int32'))
pad = theano.shared(numpy.array([0, 0], dtype='int32')) pad = theano.shared(numpy.array([0, 0], dtype='int32'))
mode = 'max' mode = 'max'
def fn(x): def fn(x):
dnn_op = dnn.dnn_pool( dnn_op = dnn.dnn_pool(
x, ws=ws, x, ws=ws,
stride=st, stride=stride,
pad=pad, pad=pad,
mode=mode) mode=mode)
return dnn_op return dnn_op
...@@ -255,7 +255,7 @@ def test_pooling_with_tensor_vars(): ...@@ -255,7 +255,7 @@ def test_pooling_with_tensor_vars():
for node in f_gpu.maker.fgraph.apply_nodes]) for node in f_gpu.maker.fgraph.apply_nodes])
# CPU implementation # CPU implementation
out_cpu = pool_2d(x, ws, ignore_border=True, st=st, padding=pad, mode=mode) out_cpu = pool_2d(x, ws, ignore_border=True, stride=stride, pad=pad, mode=mode)
f_cpu = theano.function([x], out_cpu, mode=mode_without_gpu2) f_cpu = theano.function([x], out_cpu, mode=mode_without_gpu2)
assert not any([isinstance(node.op, dnn.GpuDnnPool) assert not any([isinstance(node.op, dnn.GpuDnnPool)
for node in f_cpu.maker.fgraph.apply_nodes]) for node in f_cpu.maker.fgraph.apply_nodes])
...@@ -307,9 +307,9 @@ def test_pooling3d(): ...@@ -307,9 +307,9 @@ def test_pooling3d():
# Not implemented # Not implemented
continue continue
out = pool_3d(x, (ws, ws, ws), out = pool_3d(x, (ws, ws, ws),
st=(stride, stride, stride), stride=(stride, stride, stride),
ignore_border=True, ignore_border=True,
padding=pad, mode=mode) pad=pad, mode=mode)
# GPU implementation # GPU implementation
f_gpu = theano.function([x], out, mode=mode_with_gpu) f_gpu = theano.function([x], out, mode=mode_with_gpu)
...@@ -374,7 +374,7 @@ def test_pooling_opt(): ...@@ -374,7 +374,7 @@ def test_pooling_opt():
f = theano.function( f = theano.function(
[x], [x],
pool_2d(x, ds=(2, 2), mode='average_inc_pad', pool_2d(x, ws=(2, 2), mode='average_inc_pad',
ignore_border=True), ignore_border=True),
mode=mode_with_gpu) mode=mode_with_gpu)
...@@ -386,7 +386,7 @@ def test_pooling_opt(): ...@@ -386,7 +386,7 @@ def test_pooling_opt():
# gradient of 2D pooling # gradient of 2D pooling
f = theano.function( f = theano.function(
[x], [x],
T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad', T.grad(pool_2d(x, ws=(2, 2), mode='average_inc_pad',
ignore_border=True).sum(), ignore_border=True).sum(),
x), x),
mode=mode_with_gpu.including("cudnn")) mode=mode_with_gpu.including("cudnn"))
...@@ -399,7 +399,7 @@ def test_pooling_opt(): ...@@ -399,7 +399,7 @@ def test_pooling_opt():
# Test sum pooling # Test sum pooling
f = theano.function( f = theano.function(
[x], [x],
pool_2d(x, ds=(2, 3), mode='sum', pool_2d(x, ws=(2, 3), mode='sum',
ignore_border=True), ignore_border=True),
mode=mode_with_gpu) mode=mode_with_gpu)
...@@ -413,7 +413,7 @@ def test_pooling_opt(): ...@@ -413,7 +413,7 @@ def test_pooling_opt():
f = theano.function( f = theano.function(
[x], [x],
pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad', pool_3d(x, ws=(2, 2, 2), mode='average_inc_pad',
ignore_border=True), ignore_border=True),
mode=mode_with_gpu) mode=mode_with_gpu)
...@@ -425,7 +425,7 @@ def test_pooling_opt(): ...@@ -425,7 +425,7 @@ def test_pooling_opt():
# gradient of 3D pooling # gradient of 3D pooling
f = theano.function( f = theano.function(
[x], [x],
T.grad(pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad', T.grad(pool_3d(x, ws=(2, 2, 2), mode='average_inc_pad',
ignore_border=True).sum(), ignore_border=True).sum(),
x), x),
mode=mode_with_gpu.including("cudnn")) mode=mode_with_gpu.including("cudnn"))
...@@ -504,7 +504,7 @@ def test_dnn_tag(): ...@@ -504,7 +504,7 @@ def test_dnn_tag():
try: try:
f = theano.function( f = theano.function(
[x], [x],
pool_2d(x, ds=(2, 2), ignore_border=True), pool_2d(x, ws=(2, 2), ignore_border=True),
mode=mode_with_gpu.including("cudnn")) mode=mode_with_gpu.including("cudnn"))
except (AssertionError, RuntimeError): except (AssertionError, RuntimeError):
assert not dnn.dnn_available(test_ctx_name) assert not dnn.dnn_available(test_ctx_name)
......
...@@ -194,9 +194,9 @@ def test_pooling(): ...@@ -194,9 +194,9 @@ def test_pooling():
continue continue
# We will check that the opt introduced it. # We will check that the opt introduced it.
out = pool_2d(x, (ws, ws), out = pool_2d(x, (ws, ws),
st=(stride, stride), stride=(stride, stride),
ignore_border=True, ignore_border=True,
padding=pad, mode=mode) pad=pad, mode=mode)
mode_without_gpu2 = mode_without_gpu.including() mode_without_gpu2 = mode_without_gpu.including()
mode_without_gpu2.check_isfinite = False mode_without_gpu2.check_isfinite = False
...@@ -235,7 +235,7 @@ def test_pooling(): ...@@ -235,7 +235,7 @@ def test_pooling():
# This tests the CPU grad + opt + GPU implementation # This tests the CPU grad + opt + GPU implementation
def fn(x): def fn(x):
return pool_2d(x, (ws, ws), ignore_border=True, return pool_2d(x, (ws, ws), ignore_border=True,
padding=pad, mode=mode) pad=pad, mode=mode)
utt.verify_grad(fn, [data], mode=mode_with_gpu) utt.verify_grad(fn, [data], mode=mode_with_gpu)
# Confirm that the opt would have inserted it. # Confirm that the opt would have inserted it.
fg = theano.function([x], theano.grad(fn(x).sum(), x), fg = theano.function([x], theano.grad(fn(x).sum(), x),
...@@ -264,14 +264,14 @@ def test_pooling_with_tensor_vars(): ...@@ -264,14 +264,14 @@ def test_pooling_with_tensor_vars():
raise SkipTest(cuda.dnn.dnn_available.msg) raise SkipTest(cuda.dnn.dnn_available.msg)
x = T.ftensor4() x = T.ftensor4()
ws = theano.shared(numpy.array([2, 2], dtype='int32')) ws = theano.shared(numpy.array([2, 2], dtype='int32'))
st = theano.shared(numpy.array([1, 1], dtype='int32')) stride = theano.shared(numpy.array([1, 1], dtype='int32'))
pad = theano.shared(numpy.array([0, 0], dtype='int32')) pad = theano.shared(numpy.array([0, 0], dtype='int32'))
mode = 'max' mode = 'max'
def fn(x): def fn(x):
dnn_op = cuda.dnn.dnn_pool( dnn_op = cuda.dnn.dnn_pool(
x, ws=ws, x, ws=ws,
stride=st, stride=stride,
pad=pad, pad=pad,
mode=mode) mode=mode)
return dnn_op return dnn_op
...@@ -291,7 +291,7 @@ def test_pooling_with_tensor_vars(): ...@@ -291,7 +291,7 @@ def test_pooling_with_tensor_vars():
for node in f_gpu.maker.fgraph.apply_nodes]) for node in f_gpu.maker.fgraph.apply_nodes])
# CPU implementation # CPU implementation
out_cpu = pool_2d(x, ws, ignore_border=True, st=st, padding=pad, mode=mode) out_cpu = pool_2d(x, ws, ignore_border=True, stride=stride, pad=pad, mode=mode)
f_cpu = theano.function([x], out_cpu, mode=mode_without_gpu2) f_cpu = theano.function([x], out_cpu, mode=mode_without_gpu2)
assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool) assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool)
for node in f_cpu.maker.fgraph.apply_nodes]) for node in f_cpu.maker.fgraph.apply_nodes])
...@@ -364,9 +364,9 @@ def test_pooling3d(): ...@@ -364,9 +364,9 @@ def test_pooling3d():
# Not implemented # Not implemented
continue continue
out = pool_3d(x, (ws, ws, ws), out = pool_3d(x, (ws, ws, ws),
st=(stride, stride, stride), stride=(stride, stride, stride),
ignore_border=True, ignore_border=True,
padding=pad, mode=mode) pad=pad, mode=mode)
# GPU implementation # GPU implementation
f_gpu = theano.function([x], out, mode=mode_with_gpu) f_gpu = theano.function([x], out, mode=mode_with_gpu)
...@@ -431,7 +431,7 @@ def test_pooling_opt(): ...@@ -431,7 +431,7 @@ def test_pooling_opt():
f = theano.function( f = theano.function(
[x], [x],
pool_2d(x, ds=(2, 2), mode='average_inc_pad', ignore_border=True), pool_2d(x, ws=(2, 2), mode='average_inc_pad', ignore_border=True),
mode=mode_with_gpu) mode=mode_with_gpu)
assert any([isinstance(n.op, cuda.dnn.GpuDnnPool) assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
...@@ -442,7 +442,7 @@ def test_pooling_opt(): ...@@ -442,7 +442,7 @@ def test_pooling_opt():
# gradient of 2D pooling # gradient of 2D pooling
f = theano.function( f = theano.function(
[x], [x],
T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad', T.grad(pool_2d(x, ws=(2, 2), mode='average_inc_pad',
ignore_border=True).sum(), x), ignore_border=True).sum(), x),
mode=mode_with_gpu.including("cudnn")) mode=mode_with_gpu.including("cudnn"))
...@@ -454,7 +454,7 @@ def test_pooling_opt(): ...@@ -454,7 +454,7 @@ def test_pooling_opt():
# Test sum pooling # Test sum pooling
f = theano.function( f = theano.function(
[x], [x],
pool_2d(x, ds=(2, 3), mode='sum', pool_2d(x, ws=(2, 3), mode='sum',
ignore_border=True), ignore_border=True),
mode=mode_with_gpu) mode=mode_with_gpu)
...@@ -468,7 +468,7 @@ def test_pooling_opt(): ...@@ -468,7 +468,7 @@ def test_pooling_opt():
f = theano.function( f = theano.function(
[x], [x],
pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad', ignore_border=True), pool_3d(x, ws=(2, 2, 2), mode='average_inc_pad', ignore_border=True),
mode=mode_with_gpu) mode=mode_with_gpu)
assert any([isinstance(n.op, cuda.dnn.GpuDnnPool) assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
...@@ -479,7 +479,7 @@ def test_pooling_opt(): ...@@ -479,7 +479,7 @@ def test_pooling_opt():
# gradient of 3D pooling # gradient of 3D pooling
f = theano.function( f = theano.function(
[x], [x],
T.grad(pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad', T.grad(pool_3d(x, ws=(2, 2, 2), mode='average_inc_pad',
ignore_border=True).sum(), x), ignore_border=True).sum(), x),
mode=mode_with_gpu.including("cudnn")) mode=mode_with_gpu.including("cudnn"))
...@@ -849,7 +849,7 @@ def test_dnn_tag(): ...@@ -849,7 +849,7 @@ def test_dnn_tag():
try: try:
f = theano.function( f = theano.function(
[x], [x],
pool_2d(x, ds=(2, 2), ignore_border=True), pool_2d(x, ws=(2, 2), ignore_border=True),
mode=mode_with_gpu.including("cudnn")) mode=mode_with_gpu.including("cudnn"))
except (AssertionError, RuntimeError): except (AssertionError, RuntimeError):
assert not cuda.dnn.dnn_available() assert not cuda.dnn.dnn_available()
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论