提交 3b806177 authored 作者: Olivier Delalleau's avatar Olivier Delalleau

Fixed test when cast_policy == numpy+floatX and floatX == float64

上级 8a58174f
...@@ -5,7 +5,7 @@ import numpy ...@@ -5,7 +5,7 @@ import numpy
from nose.plugins.skip import SkipTest from nose.plugins.skip import SkipTest
from theano.compile.pfunc import pfunc from theano.compile.pfunc import pfunc
from theano import tensor from theano import config, tensor
import theano import theano
import theano.sandbox.cuda as cuda import theano.sandbox.cuda as cuda
...@@ -35,6 +35,12 @@ def test_no_shared_var_graph(): ...@@ -35,6 +35,12 @@ def test_no_shared_var_graph():
assert numpy.any(isinstance(x.op,cuda.HostFromGpu) for x in l) assert numpy.any(isinstance(x.op,cuda.HostFromGpu) for x in l)
def test_int_pow(): def test_int_pow():
# This is to ensure that '4' does not upcast to float64.
if config.cast_policy == 'numpy+floatX':
floatX_backup = config.floatX
config.floatX = 'float32'
try:
a = CudaNdarrayType([False])() a = CudaNdarrayType([False])()
f = theano.function([a], (a*4).sum(), mode=mode_with_gpu) f = theano.function([a], (a*4).sum(), mode=mode_with_gpu)
...@@ -48,6 +54,10 @@ def test_int_pow(): ...@@ -48,6 +54,10 @@ def test_int_pow():
#theano.printing.debugprint(f) #theano.printing.debugprint(f)
finally:
if config.cast_policy == 'numpy+floatX':
config.floatX = floatX_backup
def test_gpualloc(): def test_gpualloc():
''' '''
This tests tries to catch the scenario when, due to infer_shape, This tests tries to catch the scenario when, due to infer_shape,
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论