提交 18f73bc6 authored 作者: Olivier Delalleau's avatar Olivier Delalleau

Fixed some broken tests when config.cast_policy == 'numpy+floatX'

上级 cfa5ada0
......@@ -49,6 +49,17 @@ def multMatVect(v, A, m1, B, m2):
r[3:] = matVecModM(B, v[3:], m2)
return r
def cast_if_untyped(x, dtype):
    """Cast `x` into a 0-d numpy array of type `dtype` if `x` is untyped.

    :param x: Either an object carrying a `dtype` attribute (numpy array,
        numpy scalar, Theano variable, ...) or a plain Python `int` / `float`.

    :param dtype: The numpy dtype (or dtype string) to cast untyped values
        into.

    :return: `x` unchanged when it already has a `dtype` attribute, otherwise
        a 0-d numpy array holding `x` with the requested `dtype`.  The point
        is to pin down the dtype of plain Python numbers so they cannot
        trigger an upcast (e.g. float32 -> float64) downstream.
    """
    if hasattr(x, 'dtype'):
        # `x` is already typed: leave it alone so we do not silently change
        # the dtype the caller chose.
        return x
    # We intend to do this only on regular Python int / float objects.
    # NOTE: `assert` is stripped under `python -O`; kept as a cheap sanity
    # check consistent with the surrounding code style.
    assert isinstance(x, (int, float))
    return numpy.array(x, dtype=dtype)
#MRG31k3p
#generator constants :
M1 = numpy.int32(2147483647) #2^31 - 1
......@@ -692,7 +703,7 @@ class MRG_RandomStreams(object):
node_rstate.default_update = new_rstate
return sample
def uniform(self, size, low=0.0, high=1.0, ndim=None, dtype='floatX',
def uniform(self, size, low=0, high=1, ndim=None, dtype='floatX',
nstreams=None):
"""
Sample a tensor of given size whose element from a uniform
......@@ -702,6 +713,12 @@ class MRG_RandomStreams(object):
ndim may be a plain integer to supplement the missing
information.
:param low: Lower bound of the interval on which values are sampled.
If not already typed, it is cast into dtype.
:param high: Higher bound of the interval on which values are sampled.
If not already typed, it is cast into dtype.
:param size: Can be a list of integer or Theano variable
(ex: the shape of other Theano Variable)
......@@ -710,6 +727,11 @@ class MRG_RandomStreams(object):
if dtype == 'floatX':
dtype = config.floatX
# We cast `low` and `high` into `dtype` to make sure we do not upcast
# e.g. float32 into float64.
low = cast_if_untyped(low, dtype)
high = cast_if_untyped(high, dtype)
if isinstance(size, tuple):
msg = "size must be a tuple of int or a Theano variable"
assert all([isinstance(i,int) or isinstance(i,Variable)
......@@ -749,6 +771,8 @@ class MRG_RandomStreams(object):
if u.type.broadcastable != r.type.broadcastable:
raise NotImplementedError( 'Increase the size to match the broadcasting pattern of `low` and `high` arguments')
assert r.dtype == dtype
return r
def binomial(self, size=None, n=1, p=0.5, ndim=None, dtype='int64',
......@@ -791,7 +815,7 @@ class MRG_RandomStreams(object):
raise NotImplementedError(("MRG_RandomStreams.multinomial only"
" implemented with n == 1 and pvals.ndim = 2"))
def normal(self, size=None, avg=0.0, std=1.0, ndim=None,
def normal(self, size=None, avg=0, std=1, ndim=None,
dtype='floatX', nstreams=None):
"""
:param size: Can be a list of integers or Theano variables (ex: the
......@@ -809,6 +833,11 @@ class MRG_RandomStreams(object):
if dtype == 'floatX':
dtype = config.floatX
# We cast `avg` and `std` into `dtype` to make sure we do not upcast
# e.g. float32 into float64.
avg = cast_if_untyped(avg, dtype)
std = cast_if_untyped(std, dtype)
evened = False
constant = False
if isinstance(size, tuple) and all([isinstance(i,int) for i in size]):
......@@ -832,15 +861,15 @@ class MRG_RandomStreams(object):
U2 = flattened[prod(flattened.shape) // 2:]
#normal_samples = zeros_like(flattened)
sqrt_ln_U1 = sqrt(-2.0*log(U1))
sqrt_ln_U1 = sqrt(numpy.array(-2.0, dtype=dtype) * log(U1))
# TypeError: 'TensorVariable' object does not support item assignment
# so this doesn't work...
#normal_samples[:n_samples/2] = sqrt_ln_U1 * cos(2.0*numpy.pi*U2)
#normal_samples[n_samples/2:] = sqrt_ln_U1 * sin(2.0*numpy.pi*U2)
# so trying this instead
first_half = sqrt_ln_U1 * cos(2.0*cast(numpy.pi,dtype)*U2)
second_half = sqrt_ln_U1 * sin(2.0*cast(numpy.pi,dtype)*U2)
first_half = sqrt_ln_U1 * cos(numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
second_half = sqrt_ln_U1 * sin(numpy.array(2.0 * numpy.pi, dtype=dtype)*U2)
normal_samples = join(0, first_half, second_half)
final_samples = None
......@@ -856,6 +885,7 @@ class MRG_RandomStreams(object):
final_samples = avg + std * final_samples
assert final_samples.dtype == dtype
return final_samples
@local_optimizer([None])
......
......@@ -3,7 +3,7 @@ import copy
import numpy
import theano
from theano import tensor, function
from theano import config, function, tensor
import multinomial
from theano.compile.mode import get_default_mode, predefined_linkers
import theano.sandbox.cuda as cuda
......@@ -77,7 +77,14 @@ def test_multinomial_large():
mval = f(pval,uval)
assert mval.shape == pval.shape
assert mval.dtype == pval.dtype
if config.cast_policy == 'custom':
assert mval.dtype == pval.dtype
elif config.cast_policy == 'numpy+floatX':
assert mval.dtype == config.floatX
elif config.cast_policy == 'numpy':
assert mval.dtype == 'float64'
else:
raise NotImplementedError(config.cast_policy)
assert numpy.allclose(mval.sum(axis=1), 2)
asdf = numpy.asarray([0, 0, 2, 0])+0*pval
assert numpy.allclose(mval, asdf) #broadcast over all rows
......
......@@ -3584,7 +3584,13 @@ class TestARange(unittest.TestCase):
out = arange(start, stop, step)
f = function([start, stop, step], out)
assert out.dtype == start.type.dtype
if config.cast_policy == 'custom':
assert out.dtype == start.type.dtype
elif config.cast_policy in ('numpy', 'numpy+floatX'):
numpy_dtype = numpy.arange(numpy.array(1, dtype='int32')).dtype
assert out.dtype == numpy_dtype
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f(0,5,1) == numpy.arange(0,5,1))
assert numpy.all(f(2,11,4) == numpy.arange(2,11,4))
assert numpy.all(f(-5,1,1) == numpy.arange(-5,1,1))
......@@ -3598,13 +3604,28 @@ class TestARange(unittest.TestCase):
out = arange(start, stop, step)
f = function([start, stop, step], out)
assert out.dtype == start.type.dtype
if config.cast_policy == 'custom':
assert out.dtype == start.type.dtype
elif config.cast_policy in ('numpy', 'numpy+floatX'):
numpy_dtype = numpy.arange(numpy.array(0, dtype=start.dtype),
numpy.array(1, dtype=stop.dtype),
numpy.array(1, dtype=step.dtype)).dtype
assert out.dtype == numpy_dtype
else:
raise NotImplementedError(config.cast_policy)
arg_vals = [ (0,5,1), (2,11,4), (-5,1.1,1.2), (1.3,2,-2.1), (10,2,2) ]
for arg_v in arg_vals:
start_v, stop_v, step_v = arg_v
start_v_, stop_v_, step_v_ = numpy.asarray(arg_v, dtype=start.type.dtype)
assert numpy.all(f(start_v_, stop_v_, step_v_) == \
numpy.arange(start_v, stop_v, step_v, dtype=start.type.dtype))
f_val = f(start_v_, stop_v_, step_v_)
if config.cast_policy == 'custom':
expected_val = numpy.arange(start_v, stop_v, step_v,
dtype=start.type.dtype)
elif config.cast_policy in ('numpy', 'numpy+floatX'):
expected_val = numpy.arange(start_v_, stop_v_, step_v_)
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f_val == expected_val)
def test_float64(self):
"""Test arange constructor, on float64 outputs"""
......@@ -3617,8 +3638,15 @@ class TestARange(unittest.TestCase):
for arg_v in arg_vals:
start_v, stop_v, step_v = arg_v
start_v_, stop_v_, step_v_ = numpy.asarray(arg_v, dtype=start.type.dtype)
assert numpy.all(f(start_v_, stop_v_, step_v_) == \
numpy.arange(start_v, stop_v, step_v, dtype=start.type.dtype))
f_val = f(start_v_, stop_v_, step_v_)
if config.cast_policy == 'custom':
expected_val = numpy.arange(start_v, stop_v, step_v,
dtype=start.type.dtype)
elif config.cast_policy in ('numpy', 'numpy+floatX'):
expected_val = numpy.arange(start_v_, stop_v_, step_v_)
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f_val == expected_val)
def test_default_step(self):
"""Test that arange constructor uses the correct default step"""
......@@ -3761,8 +3789,8 @@ class TestARange(unittest.TestCase):
start, stop, step = iscalars('start', 'stop', 'step')
out1 = arange(start, stop, step)
out2 = arange(start, stop, step, dtype=start.type.dtype)
out3 = arange(start, stop, 2., dtype=start.type.dtype)
out2 = arange(start, stop, step, dtype=out1.dtype)
out3 = arange(start, stop, 2., dtype=out1.dtype)
out4 = arange(start, stop, 2.)
assert out1.owner.op is out2.owner.op
......@@ -3780,7 +3808,16 @@ class TestARange(unittest.TestCase):
assert len(f.maker.env.toposort())==7
#7 [Elemwise{sub,no_inplace}(stop, start), Elemwise{Cast{float64}}(Elemwise{sub,no_inplace}.0), Elemwise{TrueDiv{output_types_preference=transfer_type{0}}}[(0, 0)](Elemwise{Cast{float64}}.0, step), Elemwise{Ceil{output_types_preference=transfer_type{0}}}[(0, 0)](Elemwise{TrueDiv{output_types_preference=transfer_type{0}}}[(0, 0)].0), Elemwise{Cast{int64}}(Elemwise{Ceil{output_types_preference=transfer_type{0}}}[(0, 0)].0), Elemwise{Maximum{output_types_preference=transfer_type{0}}}[(0, 0)](Elemwise{Cast{int64}}.0, 0), MakeVector(Elemwise{Maximum{output_types_preference=transfer_type{0}}}[(0, 0)].0)]
assert out.dtype == start.type.dtype
if config.cast_policy == 'custom':
assert out.dtype == start.type.dtype
elif config.cast_policy in ('numpy', 'numpy+floatX'):
numpy_dtype = numpy.arange(numpy.array(0, dtype=start.dtype),
numpy.array(1, dtype=stop.dtype),
numpy.array(1, dtype=step.dtype)).dtype
assert out.dtype == numpy_dtype
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f(0,5,1) == len(numpy.arange(0,5,1)))
assert numpy.all(f(2,11,4) == len(numpy.arange(2,11,4)))
assert numpy.all(f(-5,1,1) == len(numpy.arange(-5,1,1)))
......@@ -3811,7 +3848,16 @@ class TestARange(unittest.TestCase):
assert len(f.maker.env.toposort())==2
#[Elemwise{Cast{int64}}(stop), MakeVector(Elemwise{Cast{int64}}.0)]
assert out.dtype == start.type.dtype
if config.cast_policy == 'custom':
assert out.dtype == start.type.dtype
elif config.cast_policy in ('numpy', 'numpy+floatX'):
numpy_dtype = numpy.arange(0,
numpy.array(1, dtype=stop.dtype),
1).dtype
assert out.dtype == numpy_dtype
else:
raise NotImplementedError(config.cast_policy)
assert numpy.all(f(5) == len(numpy.arange(0,5)))
assert numpy.all(f(11) == len(numpy.arange(0,11)))
assert numpy.all(f(1) == len(numpy.arange(0,1)))
......
......@@ -183,15 +183,15 @@ class test_canonize(unittest.TestCase):
# (fx*fy*(fx+fy+dz),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type add
# (dz*fy*(fx+fy),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type mul
#check with dimshuffle of constant
(fx+fy+fz+2,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),
(fx*fy*fz*2,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),
(fx+fy+fz+2,(fx,fy,fz),(fxv,fyv,fzv),1, {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx*fy*fz*2,(fx,fy,fz),(fxv,fyv,fzv),1, {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
# (2+fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),
# (2*fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),
(2+fx+fy+fz+2,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),
(2*fx*fy*fz*2,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),
(2+fx+fy+fz+2,(fx,fy,fz),(fxv,fyv,fzv),1, {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(2*fx*fy*fz*2,(fx,fy,fz),(fxv,fyv,fzv),1, {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
# (fx*fy*2*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),
# (fx*fy*(2+fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),
(fx*fy*2*(fx+fy+fz+2),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),
(fx*fy*2*(fx+fy+fz+2),(fx,fy,fz),(fxv,fyv,fzv),2, {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
#check with broadcast of row
# (fx+fy+fz+fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),
......@@ -220,6 +220,8 @@ class test_canonize(unittest.TestCase):
mode._optimizer=gof.Query(["canonicalize"])
mode._optimizer=mode._optimizer.excluding('local_elemwise_fusion')
for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
#we need the optimisation enabled, debug do this.
mode=mode)
......@@ -445,11 +447,11 @@ class test_canonize(unittest.TestCase):
#test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y
for id,(g, sym_inputs, val_inputs, out_dtype) in enumerate([
(((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),
(((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv],'float32'),
(((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),
(((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv],'float32'),
(((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),
(((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv],'float32'),
(((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
......@@ -468,9 +470,9 @@ class test_canonize(unittest.TestCase):
#test 2 * x / 2 -> x
for id,(g, sym_inputs, val_inputs, out_dtype) in enumerate([
((2*dx)/2,[dx],[dxv],'float64'),
((2*fx)/2,[fx],[fxv],'float32'),
((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2*dv)/2,[dv],[dvv],'float64'),
((2*fv)/2,[fv],[fvv],'float32'),
((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
......@@ -484,11 +486,11 @@ class test_canonize(unittest.TestCase):
#test x / abs(x) -> sign(x)
for id,(g, sym_inputs, val_inputs, out_dtype) in enumerate([
(dx/abs(dx),[dx],[0.5-dxv],'float64'),
(fx/abs(fx),[fx],[0.5-fxv],'float32'),
(fx/abs(fx),[fx],[0.5-fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(dx/abs(dx),[dx],[0.1*dxv],'float64'),
(fx/abs(fx),[fx],[0.1*fxv],'float32'),
(fx/abs(fx),[fx],[0.1*fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(dv/abs(dv),[dv],[0.5-dvv],'float64'),
(fv/abs(fv),[fv],[0.5-fvv],'float32'),
(fv/abs(fv),[fv],[0.5-fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
f = compile.function(list(sym_inputs), g,
mode=mode)
......@@ -501,12 +503,15 @@ class test_canonize(unittest.TestCase):
#test (2*x) / (3*abs(x)) -> sign(x)
for id,(g, sym_inputs, val_inputs, out_dtype) in enumerate([
((2*dx)/(3*abs(dx)),[dx],[0.5-dxv],'float64'),
((2*fx)/(3*abs(fx)),[fx],[0.5-fxv],'float32'),
((2*fx)/(3*abs(fx)),[fx],[0.5-fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2*dx)/(3*abs(dx)),[dx],[0.1*dxv],'float64'),
((2*fx)/(3*abs(fx)),[fx],[0.1*fxv],'float32'),
((2*fx)/(3*abs(fx)),[fx],[0.1*fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
((2*dv)/(3*abs(dv)),[dv],[0.5-dvv],'float64'),
((2*fv)/(3*abs(fv)),[fv],[0.5-fvv],'float32'),
((2*fv)/(3*abs(fv)),[fv],[0.5-fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
]):
if isinstance(out_dtype, dict):
out_dtype = out_dtype[config.cast_policy]
f = compile.function(list(sym_inputs), g,
mode=mode)
topo = f.maker.env.toposort()
......@@ -2752,8 +2757,17 @@ def test_local_mul_to_neg():
f1 = theano.function([a], -1*a)
f2 = theano.function([a], -1.0*a)
aval = numpy.random.randint(0,10,(2,2)).astype('int32')
assert f1(aval).dtype == a.dtype
assert f2(aval).dtype == 'float64'
if config.cast_policy == 'custom':
assert f1(aval).dtype == a.dtype
assert f2(aval).dtype == 'float64'
elif config.cast_policy == 'numpy':
assert f1(aval).dtype == str(numpy.array(0).dtype)
assert f2(aval).dtype == 'float64'
elif config.cast_policy == 'numpy+floatX':
assert f1(aval).dtype == str(numpy.array(0).dtype)
assert f2(aval).dtype == config.floatX
else:
raise NotImplementedError(config.cast_policy)
def test_local_add_specialize():
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论