提交 bd7ef367 authored 作者: James Bergstra's avatar James Bergstra

Commented out test_maxpool; bring it back later after review.

上级 6a2639ad
......@@ -47,23 +47,28 @@ def test_gemm():
assert numpy.allclose(numpy.dot(a0, bval)+numpy.exp(cval), a.value)
def test_maxpool():
"""TODO: test the gpu version!!! """
for d0, d1, r_true, r_false in [(4,4,[[[[5,7],[13,15]]]],[[[[5,7],[13,15]]]]),
(5,5,[[[[6, 8],[ 16, 18], [ 21, 23]]]],
[[[[6, 8, 9],[ 16, 18, 19], [ 21, 23, 24]]]])]:
for border,ret in [(True,r_true),(False, r_false)]:
ret=numpy.array(ret)
a = tcn.blas.DownsampleFactorMax((2,2),border)
dmatrix4 = tensor.TensorType("float32", (False, False, False, False))
b = dmatrix4()
f = pfunc([b], [a(b)])
bval = numpy.arange(0,d0*d1).reshape(1,1,d0,d1)
r = f(bval)[0]
# print bval, bval.shape, border
print r, r.shape
assert (ret==r).all()
if 0:
# This is commented out because it doesn't make sense...
# tcn.blas has no op called DownsampleFactorMax
# tcn.blas has an op called GpuDownsampleFactorMax, but that op requires arguments that are
# CudaNdarrayType variables... so rethink this test?
def test_maxpool():
"""TODO: test the gpu version!!! """
for d0, d1, r_true, r_false in [(4,4,[[[[5,7],[13,15]]]],[[[[5,7],[13,15]]]]),
(5,5,[[[[6, 8],[ 16, 18], [ 21, 23]]]],
[[[[6, 8, 9],[ 16, 18, 19], [ 21, 23, 24]]]])]:
for border,ret in [(True,r_true),(False, r_false)]:
ret=numpy.array(ret)
a = tcn.blas.DownsampleFactorMax((2,2),border)
dmatrix4 = tensor.TensorType("float32", (False, False, False, False))
b = dmatrix4()
f = pfunc([b], [a(b)])
bval = numpy.arange(0,d0*d1).reshape(1,1,d0,d1)
r = f(bval)[0]
# print bval, bval.shape, border
print r, r.shape
assert (ret==r).all()
def test_downsample():
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论