提交 b91eae2e 作者: Olivier Delalleau

Fixed some tests when config.cast_policy == numpy+floatX and floatX == float32

上级 006f4761
...@@ -3606,11 +3606,13 @@ class TestARange(unittest.TestCase): ...@@ -3606,11 +3606,13 @@ class TestARange(unittest.TestCase):
if config.cast_policy == 'custom': if config.cast_policy == 'custom':
assert out.dtype == start.type.dtype assert out.dtype == start.type.dtype
elif config.cast_policy in ('numpy', 'numpy+floatX'): elif config.cast_policy == 'numpy':
numpy_dtype = numpy.arange(numpy.array(0, dtype=start.dtype), numpy_dtype = numpy.arange(numpy.array(0, dtype=start.dtype),
numpy.array(1, dtype=stop.dtype), numpy.array(1, dtype=stop.dtype),
numpy.array(1, dtype=step.dtype)).dtype numpy.array(1, dtype=step.dtype)).dtype
assert out.dtype == numpy_dtype assert out.dtype == numpy_dtype
elif config.cast_policy == 'numpy+floatX':
assert out.dtype == config.floatX
else: else:
raise NotImplementedError(config.cast_policy) raise NotImplementedError(config.cast_policy)
arg_vals = [ (0,5,1), (2,11,4), (-5,1.1,1.2), (1.3,2,-2.1), (10,2,2) ] arg_vals = [ (0,5,1), (2,11,4), (-5,1.1,1.2), (1.3,2,-2.1), (10,2,2) ]
...@@ -3622,7 +3624,8 @@ class TestARange(unittest.TestCase): ...@@ -3622,7 +3624,8 @@ class TestARange(unittest.TestCase):
expected_val = numpy.arange(start_v, stop_v, step_v, expected_val = numpy.arange(start_v, stop_v, step_v,
dtype=start.type.dtype) dtype=start.type.dtype)
elif config.cast_policy in ('numpy', 'numpy+floatX'): elif config.cast_policy in ('numpy', 'numpy+floatX'):
expected_val = numpy.arange(start_v_, stop_v_, step_v_) expected_val = numpy.arange(start_v_, stop_v_, step_v_,
dtype=out.dtype)
else: else:
raise NotImplementedError(config.cast_policy) raise NotImplementedError(config.cast_policy)
assert numpy.all(f_val == expected_val) assert numpy.all(f_val == expected_val)
......
...@@ -785,7 +785,7 @@ class test_fusion(unittest.TestCase): ...@@ -785,7 +785,7 @@ class test_fusion(unittest.TestCase):
(fx-(fy/fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-(fyv/fzv),'float32'), (fx-(fy/fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-(fyv/fzv),'float32'),
(fx-theano.tensor.true_div(fy,ftwo),(fx,fy),(fxv,fyv),1,fxv-(fyv/2),'float32'), (fx-theano.tensor.true_div(fy,ftwo),(fx,fy),(fxv,fyv),1,fxv-(fyv/2),'float32'),
(fx-theano.tensor.true_div(fy,fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-(fyv/fzv),'float32'), (fx-theano.tensor.true_div(fy,fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-(fyv/fzv),'float32'),
(fx-theano.tensor.int_div(ix*100,iy*1000),(fx,ix,iy),(fxv,ixv,iyv),4,fxv-((ixv*100)//(iyv*1000)),'float64'),#int32 - float32 = float64 #No c_code for int_div#40 (fx-theano.tensor.int_div(ix*100,iy*1000),(fx,ix,iy),(fxv,ixv,iyv),4,fxv-((ixv*100)//(iyv*1000)), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}), #No c_code for int_div#40
(fx-(fy/ftwo),(fx,fy),(fxv,fyv),1,fxv-(fyv/2),'float32'), (fx-(fy/ftwo),(fx,fy),(fxv,fyv),1,fxv-(fyv/2),'float32'),
(fx-(fy%fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-(fyv%fzv),'float32'), (fx-(fy%fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-(fyv%fzv),'float32'),
(fx-(fy>fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-(fyv>fzv),'float32'), (fx-(fy>fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-(fyv>fzv),'float32'),
...@@ -805,10 +805,10 @@ class test_fusion(unittest.TestCase): ...@@ -805,10 +805,10 @@ class test_fusion(unittest.TestCase):
(fx-fy+theano.tensor.round(fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-fyv+numpy.round(fzv),'float32'), (fx-fy+theano.tensor.round(fz),(fx,fy,fz),(fxv,fyv,fzv),1,fxv-fyv+numpy.round(fzv),'float32'),
(ix-iy+theano.tensor.iround(fz),(ix,iy,fz),(ixv,iyv,fzv),1,ixv-iyv+numpy.round(fzv),'int64'), (ix-iy+theano.tensor.iround(fz),(ix,iy,fz),(ixv,iyv,fzv),1,ixv-iyv+numpy.round(fzv),'int64'),
# Bit op # Bit op
(fx-theano.tensor.or_(iy,iz),(fx,iy,iz),(fxv,iyv,izv),1,fxv-(iyv|izv),'float64'), (fx-theano.tensor.or_(iy,iz),(fx,iy,iz),(fxv,iyv,izv),1,fxv-(iyv|izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx-theano.tensor.xor(iy,iz),(fx,iy,iz),(fxv,iyv,izv),1,fxv-(iyv^izv),'float64'),#60 (fx-theano.tensor.xor(iy,iz),(fx,iy,iz),(fxv,iyv,izv),1,fxv-(iyv^izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),#60
(fx-theano.tensor.and_(iy,iz),(fx,iy,iz),(fxv,iyv,izv),1,fxv-(iyv&izv),'float64'), (fx-theano.tensor.and_(iy,iz),(fx,iy,iz),(fxv,iyv,izv),1,fxv-(iyv&izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx-theano.tensor.invert(iy),(fx,iy),(fxv,iyv),1,fxv-(~iyv),'float64'), (fx-theano.tensor.invert(iy),(fx,iy),(fxv,iyv),1,fxv-(~iyv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx-theano.tensor.cast(fy,dtype='float64'),(fx,fy),(fxv,fyv),1, (fx-theano.tensor.cast(fy,dtype='float64'),(fx,fy),(fxv,fyv),1,
fxv-numpy.asarray(fyv,'float64'),'float64'), fxv-numpy.asarray(fyv,'float64'),'float64'),
...@@ -834,6 +834,8 @@ class test_fusion(unittest.TestCase): ...@@ -834,6 +834,8 @@ class test_fusion(unittest.TestCase):
fail3=[] fail3=[]
fail4=[] fail4=[]
for id, [g, sym_inputs, val_inputs, nb_elemwise, answer, out_dtype] in enumerate(cases): for id, [g, sym_inputs, val_inputs, nb_elemwise, answer, out_dtype] in enumerate(cases):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
if gpu and (out_dtype!='float32' or any(i.dtype != 'float32' for i in g.owner.inputs)): if gpu and (out_dtype!='float32' or any(i.dtype != 'float32' for i in g.owner.inputs)):
print "Skip test %d as the gpu code currently supports only float32" % id print "Skip test %d as the gpu code currently supports only float32" % id
continue continue
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论