提交 d1f66a2f authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron

Fix tests so that they pass and test what we want.

上级 7a10c464
...@@ -4,7 +4,8 @@ import numpy as np ...@@ -4,7 +4,8 @@ import numpy as np
import theano import theano
import theano.tensor as T import theano.tensor as T
from theano.sandbox.cuda import GpuOp, basic_ops, CudaNdarrayType from theano.sandbox.cuda import (GpuOp, basic_ops, CudaNdarrayType,
CudaNdarray)
import scikits.cuda import scikits.cuda
from scikits.cuda import fft, cublas, misc from scikits.cuda import fft, cublas, misc
......
...@@ -2,7 +2,7 @@ import unittest ...@@ -2,7 +2,7 @@ import unittest
import numpy import numpy
import theano import theano
from theano import unittest_tools as utt from theano.tests import unittest_tools as utt
# Skip tests if cuda_ndarray is not available. # Skip tests if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest from nose.plugins.skip import SkipTest
...@@ -10,7 +10,7 @@ import theano.sandbox.cuda as cuda_ndarray ...@@ -10,7 +10,7 @@ import theano.sandbox.cuda as cuda_ndarray
if cuda_ndarray.cuda_available == False: if cuda_ndarray.cuda_available == False:
raise SkipTest('Optional package cuda disabled') raise SkipTest('Optional package cuda disabled')
import theano.sandbox.cuda.float32_shared_constructor as shared from theano.sandbox.cuda import float32_shared_constructor as shared
if theano.config.mode == 'FAST_COMPILE': if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu') mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
...@@ -68,15 +68,16 @@ class TestConv2dFFT(unittest.TestCase): ...@@ -68,15 +68,16 @@ class TestConv2dFFT(unittest.TestCase):
conv = theano.tensor.nnet.conv.conv2d(inputs, filters) conv = theano.tensor.nnet.conv.conv2d(inputs, filters)
mode = mode_with_gpu.optimizer_including('conv2d_fft_valid') mode = mode_with_gpu.including('conv_fft_valid')
f_ref = theano.function([], conv) f_ref = theano.function([], conv)
f_fft = theano.function([], conv, mode=mode_with_gpu) f_fft = theano.function([], conv, mode=mode)
# make sure we inserted the fft trickery # make sure we inserted the fft trickery
topo = f_fft.maker.fgraph.toposort() topo = f_fft.maker.fgraph.toposort()
assert len(op for op in topo assert sum(isinstance(n.op, theano.sandbox.cuda.fftconv.CuFFTOp)
if isinstance(op, theano.sandbox.cuda.fftconv.CuFFTOp)) == 1 for n in topo) == 2
res_ref = f_ref() res_ref = f_ref()
res_fft = f_fft() res_fft = f_fft()
...@@ -96,15 +97,15 @@ class TestConv2dFFT(unittest.TestCase): ...@@ -96,15 +97,15 @@ class TestConv2dFFT(unittest.TestCase):
conv = theano.tensor.nnet.conv.conv2d(inputs, filters, conv = theano.tensor.nnet.conv.conv2d(inputs, filters,
border_mode='full') border_mode='full')
mode = mode_with_gpu.optimizer_including('conv2d_fft_full') mode = mode_with_gpu.including('conv_fft_full')
f_ref = theano.function([], conv) f_ref = theano.function([], conv)
f_fft = theano.function([], conv, mode=mode_with_gpu) f_fft = theano.function([], conv, mode=mode)
# make sure we inserted the fft trickery # make sure we inserted the fft trickery
topo = f_fft.maker.fgraph.toposort() topo = f_fft.maker.fgraph.toposort()
assert len(op for op in topo assert sum(isinstance(n.op, theano.sandbox.cuda.fftconv.CuFFTOp)
if isinstance(op, theano.sandbox.cuda.fftconv.CuFFTOp)) == 1 for n in topo) == 2
res_ref = f_ref() res_ref = f_ref()
res_fft = f_fft() res_fft = f_fft()
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论