提交 1a3a477e authored 作者: Frederic's avatar Frederic

refactored gpu test for conv3d2d to be skipped when cuda isn't there.

上级 26cc1f73
......@@ -12,6 +12,7 @@ import theano.tensor as T
# Skip test if cuda_ndarray is not available.
import theano.sandbox.cuda as cuda
from theano.tensor.nnet.tests import test_conv3d2d
# Skip this whole test module when CUDA support is not available.
# Use `not x` rather than the non-idiomatic `x == False` (PEP 8).
if not cuda.cuda_available:
    raise SkipTest('Optional package cuda disabled')
......@@ -133,3 +134,12 @@ def test_deepcopy():
out = f(a_v)
assert out is not a_v
assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))
def test_get_diagonal_subtensor_view():
    """GPU variant: re-run the generic diagonal-subtensor-view test with the
    input wrapped in a ``cuda.CudaNdarray`` instead of a plain numpy array."""
    test_conv3d2d.test_get_diagonal_subtensor_view(wrap=cuda.CudaNdarray)
def test_conv3d():
    """GPU variant: re-run the generic conv3d test compiled in the
    GPU-enabled mode, with shared variables built by the CUDA constructor."""
    test_conv3d2d.test_conv3d(mode=mode_with_gpu,
                              shared=cuda.shared_constructor)
......@@ -5,57 +5,21 @@ from scipy import ndimage
import theano
from theano.tensor.nnet.conv3d2d import *
import theano.tests.unittest_tools as utt
from theano.sandbox import cuda
# Build the compilation modes used by the tests below: one with the 'gpu'
# optimizations enabled, one with them excluded.  When the configured mode
# is FAST_COMPILE, FAST_RUN is substituted as the base mode instead
# (presumably FAST_COMPILE lacks optimizations these tests rely on —
# NOTE(review): confirm against the other Theano test modules).
if theano.config.mode == 'FAST_COMPILE':
    mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
    mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
    mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
    mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
def test_get_diagonal_subtensor_view():
    """Check get_diagonal_subtensor_view against hand-computed expectations
    on small 2d and 3d arrays."""
    # 2d case: diagonal view of a 5x4 matrix.
    matrix = numpy.arange(20).reshape(5, 4)
    view01 = get_diagonal_subtensor_view(matrix, 0, 1)
    assert numpy.all(view01 == [[12, 9, 6, 3], [16, 13, 10, 7]])

    # 3d case: diagonal views taken along each pair of axes.
    tensor = numpy.arange(24).reshape(4, 3, 2)
    view01 = get_diagonal_subtensor_view(tensor, 0, 1)
    view02 = get_diagonal_subtensor_view(tensor, 0, 2)
    view12 = get_diagonal_subtensor_view(tensor, 1, 2)
    assert numpy.all(view01 == [
        [[12, 13], [8, 9], [4, 5]],
        [[18, 19], [14, 15], [10, 11]]])
    assert numpy.all(view02 == [
        [[6, 1], [8, 3], [10, 5]],
        [[12, 7], [14, 9], [16, 11]],
        [[18, 13], [20, 15], [22, 17]],
    ])
    # The diagonal view of each leading 2d slice must equal the matching
    # slice of the diagonal view of the whole 3d tensor.
    for leading_slice, view_slice in zip(tensor, view12):
        assert numpy.all(
            view_slice == get_diagonal_subtensor_view(leading_slice, 0, 1))
def test_get_diagonal_subtensor_view_gpu():
x = numpy.arange(20, dtype='float32').reshape(5, 4)
x = cuda.CudaNdarray(x)
def test_get_diagonal_subtensor_view(wrap=lambda a: a):
x = numpy.arange(20).reshape(5, 4).astype('float32')
x = wrap(x)
xv01 = get_diagonal_subtensor_view(x, 0, 1)
# test that it works in 2d
assert numpy.all(numpy.asarray(xv01) ==
[[12, 9, 6, 3], [16, 13, 10, 7]])
assert numpy.all(numpy.asarray(xv01) == [[12, 9, 6, 3], [16, 13, 10, 7]])
x = numpy.arange(24).reshape(4, 3, 2)
xv01 = get_diagonal_subtensor_view(x, 0, 1)
......@@ -77,9 +41,8 @@ def test_get_diagonal_subtensor_view_gpu():
# diagonal views of each leading matrix is the same
# as the slices out of the diagonal view of the entire 3d tensor
for xi, xvi in zip(x, numpy.asarray(xv12)):
assert numpy.all(numpy.asarray(xvi) ==
numpy.asarray(get_diagonal_subtensor_view(xi, 0, 1)))
for xi, xvi in zip(x, xv12):
assert numpy.all(xvi == get_diagonal_subtensor_view(xi, 0, 1))
def pyconv3d(signals, filters):
......@@ -103,7 +66,7 @@ def pyconv3d(signals, filters):
return rval
def test_conv3d():
def test_conv3d(mode=mode_without_gpu, shared=theano.tensor._shared):
Ns, Ts, C, Hs, Ws = 3, 10, 3, 32, 32
Nf, Tf, C, Hf, Wf = 32, 5 , 3, 5 , 5
......@@ -115,37 +78,32 @@ def test_conv3d():
pyres = pyconv3d(signals, filters)
print time.time() - t0
modes = [(mode_without_gpu, theano.tensor._shared)]
if cuda.cuda_available:
modes.append((mode_with_gpu, cuda.shared_constructor))
for mode, shared in modes:
s_signals = shared(signals)
s_filters = shared(filters)
s_output = shared(signals*0)
out = conv3d(s_signals, s_filters,
signals_shape=signals.shape,
filters_shape=filters.shape)
newconv3d = theano.function([], [],
updates={s_output: out},
mode=mode)
t0 = time.time()
newconv3d()
print time.time() - t0
utt.assert_allclose(pyres, s_output.get_value(borrow=True))
gsignals, gfilters = theano.grad(out.sum(), [s_signals, s_filters])
gnewconv3d = theano.function([], [],
updates=[(s_filters, gfilters),
(s_signals, gsignals)],
mode=mode,
name='grad')
t0 = time.time()
gnewconv3d()
print 'grad', time.time() - t0
s_signals = shared(signals)
s_filters = shared(filters)
s_output = shared(signals*0)
out = conv3d(s_signals, s_filters,
signals_shape=signals.shape,
filters_shape=filters.shape)
newconv3d = theano.function([], [],
updates={s_output: out},
mode=mode)
t0 = time.time()
newconv3d()
print time.time() - t0
utt.assert_allclose(pyres, s_output.get_value(borrow=True))
gsignals, gfilters = theano.grad(out.sum(), [s_signals, s_filters])
gnewconv3d = theano.function([], [],
updates=[(s_filters, gfilters),
(s_signals, gsignals)],
mode=mode,
name='grad')
t0 = time.time()
gnewconv3d()
print 'grad', time.time() - t0
Ns, Ts, C, Hs, Ws = 3, 3, 3, 5, 5
Nf, Tf, C, Hf, Wf = 4, 2, 3, 2, 2
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论