Commit 3b806177 authored by Olivier Delalleau

Fixed test when cast_policy == numpy+floatX and floatX == float64

Parent 8a58174f
......@@ -5,7 +5,7 @@ import numpy
from nose.plugins.skip import SkipTest
from theano.compile.pfunc import pfunc
from theano import tensor
from theano import config, tensor
import theano
import theano.sandbox.cuda as cuda
......@@ -35,18 +35,28 @@ def test_no_shared_var_graph():
assert numpy.any(isinstance(x.op,cuda.HostFromGpu) for x in l)
def test_int_pow():
    """Check that int powers of a CUDA vector stay entirely on the GPU.

    Builds ``(a * 4).sum()`` and ``pow(a, 4).sum()`` for a float32 CUDA
    vector and asserts that the compiled graphs contain only GPU ops plus
    a single final ``HostFromGpu`` transfer (i.e. the literal ``4`` did
    not trigger an upcast to float64, which would force computation back
    to the host).
    """
    # Under the 'numpy+floatX' cast policy the constant '4' is cast to
    # floatX; force floatX to float32 so it does not upcast the graph to
    # float64 (restored in the finally block below).
    if config.cast_policy == 'numpy+floatX':
        floatX_backup = config.floatX
        config.floatX = 'float32'
    try:
        a = CudaNdarrayType([False])()

        f = theano.function([a], (a*4).sum(), mode=mode_with_gpu)
        op_names = [n.op.__class__.__name__ for n in f.maker.env.toposort()]
        assert op_names == ['GpuSum', 'GpuElemwise', 'HostFromGpu']

        f = theano.function([a], tensor.pow(a,4).sum(), mode=mode_with_gpu)
        op_names = [n.op.__class__.__name__ for n in f.maker.env.toposort()]
        assert op_names == ['GpuElemwise', 'GpuSum', 'HostFromGpu']
        #theano.printing.debugprint(f)
    finally:
        # Restore the user's floatX setting even if an assertion failed.
        if config.cast_policy == 'numpy+floatX':
            config.floatX = floatX_backup
def test_gpualloc():
'''
......
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment