提交 47ccbaca · 作者: Olivier Delalleau

Merged

......@@ -318,11 +318,11 @@ def test_elemwise3():
a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape), dtype='float32'), 'a')
b = tensor.fvector()
print b.type
print tensor.constant(1).type
print (1 + b).type
print (1 + b**a).type
print tensor.exp((1 + b**a)).type
f = pfunc([b], [], updates=[(a, (a+b).dimshuffle([2,0,3,1]) * tensor.exp(1 +
fone = tensor.constant(1, dtype='float32')
print (fone + b).type
print (fone + b**a).type
print tensor.exp((fone + b**a)).type
f = pfunc([b], [], updates=[(a, (a+b).dimshuffle([2,0,3,1]) * tensor.exp(fone +
b**a).dimshuffle([2,0,3,1]))], mode=mode_with_gpu)
has_elemwise = False
for i, node in enumerate(f.maker.env.toposort()):
......
......@@ -144,7 +144,8 @@ def test_opt_gpujoin_joinvectors_elemwise_then_minusone():
def test_print_op():
""" Test that print ops don't block gpu optimization"""
b = tensor.fmatrix()
f = theano.function([b],theano.printing.Print()(b)*2, mode=mode_with_gpu)
ftwo = tensor.constant(2, dtype='float32')
f = theano.function([b],theano.printing.Print()(b) * ftwo, mode=mode_with_gpu)
#theano.printing.debugprint(f)
#print f.maker.env.toposort()
#[GpuFromHost(<TensorType(float32, matrix)>), <theano.printing.Print object at 0x3581210>(GpuFromHost.0), GpuElemwise{mul}(CudaNdarray{[[ 2.]]}, <theano.printing.Print object at 0x3581210>.0), HostFromGpu(GpuElemwise{mul}.0)]
......
......@@ -3606,11 +3606,13 @@ class TestARange(unittest.TestCase):
if config.cast_policy == 'custom':
assert out.dtype == start.type.dtype
elif config.cast_policy in ('numpy', 'numpy+floatX'):
elif config.cast_policy == 'numpy':
numpy_dtype = numpy.arange(numpy.array(0, dtype=start.dtype),
numpy.array(1, dtype=stop.dtype),
numpy.array(1, dtype=step.dtype)).dtype
assert out.dtype == numpy_dtype
elif config.cast_policy == 'numpy+floatX':
assert out.dtype == config.floatX
else:
raise NotImplementedError(config.cast_policy)
arg_vals = [ (0,5,1), (2,11,4), (-5,1.1,1.2), (1.3,2,-2.1), (10,2,2) ]
......@@ -3622,7 +3624,8 @@ class TestARange(unittest.TestCase):
expected_val = numpy.arange(start_v, stop_v, step_v,
dtype=start.type.dtype)
elif config.cast_policy in ('numpy', 'numpy+floatX'):
expected_val = numpy.arange(start_v_, stop_v_, step_v_)
expected_val = numpy.arange(start_v_, stop_v_, step_v_,
dtype=out.dtype)
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f_val == expected_val)
......
......@@ -785,7 +785,7 @@ class test_fusion(unittest.TestCase):
(fx-(fy/fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-(fyv/fzv),'float32'),
(fx-theano.tensor.true_div(fy,ftwo),(fx,fy),(fxv,fyv),1,fxv-(fyv/2),'float32'),
(fx-theano.tensor.true_div(fy,fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-(fyv/fzv),'float32'),
(fx-theano.tensor.int_div(ix*100,iy*1000),(fx,ix,iy),(fxv,ixv,iyv),4,fxv-((ixv*100)//(iyv*1000)),'float64'),#int32 - float32 = float64 #No c_code for int_div#40
(fx-theano.tensor.int_div(ix*100,iy*1000),(fx,ix,iy),(fxv,ixv,iyv),4,fxv-((ixv*100)//(iyv*1000)), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}), #No c_code for int_div#40
(fx-(fy/ftwo),(fx,fy),(fxv,fyv),1,fxv-(fyv/2),'float32'),
(fx-(fy%fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-(fyv%fzv),'float32'),
(fx-(fy>fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-(fyv>fzv),'float32'),
......@@ -805,10 +805,10 @@ class test_fusion(unittest.TestCase):
(fx-fy+theano.tensor.round(fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-fyv+numpy.round(fzv),'float32'),
(ix-iy+theano.tensor.iround(fz),(ix,iy,fz),(ixv,iyv,fzv),1,ixv-iyv+numpy.round(fzv),'int64'),
# Bit op
(fx-theano.tensor.or_(iy,iz),(fx,iy,iz),(fxv,iyv,izv),1,fxv-(iyv|izv),'float64'),
(fx-theano.tensor.xor(iy,iz),(fx,iy,iz),(fxv,iyv,izv),1,fxv-(iyv^izv),'float64'),#60
(fx-theano.tensor.and_(iy,iz),(fx,iy,iz),(fxv,iyv,izv),1,fxv-(iyv&izv),'float64'),
(fx-theano.tensor.invert(iy),(fx,iy),(fxv,iyv),1,fxv-(~iyv),'float64'),
(fx-theano.tensor.or_(iy,iz),(fx,iy,iz),(fxv,iyv,izv),1,fxv-(iyv|izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx-theano.tensor.xor(iy,iz),(fx,iy,iz),(fxv,iyv,izv),1,fxv-(iyv^izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),#60
(fx-theano.tensor.and_(iy,iz),(fx,iy,iz),(fxv,iyv,izv),1,fxv-(iyv&izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx-theano.tensor.invert(iy),(fx,iy),(fxv,iyv),1,fxv-(~iyv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx-theano.tensor.cast(fy,dtype='float64'),(fx,fy),(fxv,fyv),1,
fxv-numpy.asarray(fyv,'float64'),'float64'),
......@@ -834,6 +834,8 @@ class test_fusion(unittest.TestCase):
fail3=[]
fail4=[]
for id, [g, sym_inputs, val_inputs, nb_elemwise, answer, out_dtype] in enumerate(cases):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
if gpu and (out_dtype!='float32' or any(i.dtype != 'float32' for i in g.owner.inputs)):
print "Skip test %d as the gpu code currently supports only float32" % id
continue
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论