提交 9fd13114 authored 作者: Pascal Lamblin's avatar Pascal Lamblin

Revert 9024e481d2b5 now that numpy+floatX is not the default.

上级 1b1d95fa
......@@ -35,28 +35,18 @@ def test_no_shared_var_graph():
assert numpy.any(isinstance(x.op,cuda.HostFromGpu) for x in l)
def test_int_pow():
    """Check that an integer exponent constant does not upcast to float64.

    Builds two graphs on a float32 CUDA vector -- ``(a*4).sum()`` and
    ``tensor.pow(a, 4).sum()`` -- and asserts that the optimized graph
    stays entirely on the GPU (a single final HostFromGpu transfer).  If
    the literal ``4`` upcast the computation to float64, the elemwise op
    would be pushed back to the host and these op lists would differ.

    NOTE(review): reconstructed from a mangled diff view; the pre-revert
    ``config.cast_policy == 'numpy+floatX'`` guard and its try/finally
    were removed by this revert commit (numpy+floatX is no longer the
    default cast policy).
    """
    # This is to ensure that '4' does not upcast to float64.
    a = CudaNdarrayType([False])()

    f = theano.function([a], (a*4).sum(), mode=mode_with_gpu)
    op_names = [n.op.__class__.__name__ for n in f.maker.env.toposort()]
    assert op_names == ['GpuSum', 'GpuElemwise', 'HostFromGpu']

    f = theano.function([a], tensor.pow(a,4).sum(), mode=mode_with_gpu)
    op_names = [n.op.__class__.__name__ for n in f.maker.env.toposort()]
    assert op_names == ['GpuElemwise', 'GpuSum', 'HostFromGpu']
    #theano.printing.debugprint(f)
def test_gpualloc():
'''
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论