提交 f83e95cd authored 作者: Pascal Lamblin's avatar Pascal Lamblin

Revert 4c71de43057f now that numpy+floatX is not the default.

上级 4799de77
......@@ -318,11 +318,11 @@ def test_elemwise3():
a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape), dtype='float32'), 'a')
b = tensor.fvector()
print b.type
fone = tensor.constant(1, dtype='float32')
print (fone + b).type
print (fone + b**a).type
print tensor.exp((fone + b**a)).type
f = pfunc([b], [], updates=[(a, (a+b).dimshuffle([2,0,3,1]) * tensor.exp(fone +
print tensor.constant(1).type
print (1 + b).type
print (1 + b**a).type
print tensor.exp((1 + b**a)).type
f = pfunc([b], [], updates=[(a, (a+b).dimshuffle([2,0,3,1]) * tensor.exp(1 +
b**a).dimshuffle([2,0,3,1]))], mode=mode_with_gpu)
has_elemwise = False
for i, node in enumerate(f.maker.env.toposort()):
......
......@@ -144,8 +144,7 @@ def test_opt_gpujoin_joinvectors_elemwise_then_minusone():
def test_print_op():
""" Test that print ops don't block gpu optimization"""
b = tensor.fmatrix()
ftwo = tensor.constant(2, dtype='float32')
f = theano.function([b],theano.printing.Print()(b) * ftwo, mode=mode_with_gpu)
f = theano.function([b],theano.printing.Print()(b)*2, mode=mode_with_gpu)
#theano.printing.debugprint(f)
#print f.maker.env.toposort()
#[GpuFromHost(<TensorType(float32, matrix)>), <theano.printing.Print object at 0x3581210>(GpuFromHost.0), GpuElemwise{mul}(CudaNdarray{[[ 2.]]}, <theano.printing.Print object at 0x3581210>.0), HostFromGpu(GpuElemwise{mul}.0)]
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论