提交 28e2a65a authored 作者: Olivier Delalleau's avatar Olivier Delalleau

Replacing numpy.asarray by theano._asarray when a dtype conversion may be…

Replacing numpy.asarray by theano._asarray when a dtype conversion may be performed, to work around Numpy ticket 870 defect
上级 b7e570c6
......@@ -389,7 +389,7 @@ Here is an example showing how to use verify_grad:
>>> def test_flatten_outdimNone():
>>> a = dmatrix()
>>> # ...
>>> a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64')
>>> a_val = theano._asarray([[0,1,2],[3,4,5]], dtype='float64')
>>> # ...
>>> tensor.verify_grad(Flatten(), [a_val])
......
......@@ -67,7 +67,7 @@ BUT, YOU GOTTA RUN THIS CODE AND MAKE SURE IT STILL WORKS NICELY, HEY?
x_data = numpy.random.randn(100, 10)
y_data = numpy.random.randn(100, 3)
y_data = numpy.asarray(y_data == numpy.max(y_data, axis=1), dtype='int64')
y_data = theano._asarray(y_data == numpy.max(y_data, axis=1), dtype='int64')
print "Model Training ..."
for iteration in xrange(1000):
......
......@@ -173,7 +173,10 @@ More simple numpy stuff
``x.reshape(x.size)``
 You can also use ``resize`` but there is no reason to ''resize''
* How do you convert the type of a numpy array?
``numpy.asarray(x, dtype = 'int32')``
``theano._asarray(x, dtype = 'int32')``
Note that using ``numpy.asarray`` is potentially dangerous, due to
a problem in numpy where the type may not be properly set (see
 numpy's Trac ticket #870).
=========================================
......
......@@ -53,6 +53,7 @@ from compile import \
ProfileMode, \
Param, shared
from misc.safe_asarray import _asarray
FancyModule = Module
......
......@@ -211,7 +211,7 @@ class Test_pfunc(unittest.TestCase):
z = tensor.ivector()
c = z*y
f = pfunc([y], c+7, givens = {z : numpy.asarray([4,4,4], dtype='int32')})
f = pfunc([y], c+7, givens = {z : theano._asarray([4,4,4], dtype='int32')})
assert numpy.all(f([1,1,1]) == [11,11,11])
assert x.value == 0
......
......@@ -106,7 +106,7 @@ class Test_SharedVariable(unittest.TestCase):
pass
# check that an assignment of a perfect value results in no copying
uval = numpy.asarray([5,6,7,8], dtype='float64')
uval = theano._asarray([5,6,7,8], dtype='float64')
u.value = uval
assert u.value is uval
......
"""
Helper function to safely convert an array to a new data type.
"""
__docformat__ = "restructuredtext en"
import numpy
def _asarray(a, dtype=None, order=None):
"""Convert the input to a Numpy array.
This function is almost identical to ``numpy.asarray``, but it should be
used instead of its numpy counterpart when a data type is provided in
order to perform type conversion if required.
The reason is that ``numpy.asarray`` may not actually update the array's
data type to the user-provided type. For more information see ticket
http://projects.scipy.org/numpy/ticket/870.
Currently, this issue has only been causing trouble when the target
data type is 'int32', on some computers. As a result, this is the only
situation where we do more than a simple call to ``numpy.asarray``. If it
turns out that a similar problem can occur for more data type, this
function should be updated accordingly.
This function's name starts with a '_' to indicate that it is meant to be
used internally. It is imported so as to be available directly through
theano._asarray
"""
rval = numpy.asarray(a, dtype=dtype, order=order)
if dtype is numpy.int32 or dtype == 'int32':
# Make sure the type is properly set to the correct type.
return rval.view(dtype=numpy.int32)
else:
# Using ``numpy.asarray`` should work just fine.
return rval
......@@ -57,7 +57,7 @@ class GpuFromHost(Op):
raise TypeError(x)
return Apply(self, [x], [CudaNdarrayType(broadcastable=x.broadcastable)()])
def perform(self, node, (x,), (z,)):
z[0] = type_support_filter(numpy.asarray(x, dtype='float32'), tuple([0]*x.ndim), 0)
z[0] = type_support_filter(theano._asarray(x, dtype='float32'), tuple([0]*x.ndim), 0)
def grad(self, inputs, (gz,)):
return gz,
#return [HostFromGpu()(gz)]
......
......@@ -23,7 +23,7 @@ _logger = logging.getLogger('driver_kouh')
def _shared_uniform(rng, low, high, size, dtype, name=None):
return shared(
numpy.asarray(
theano._asarray(
rng.uniform(low=low, high=high, size=size),
dtype=dtype), name)
......@@ -46,7 +46,7 @@ class Kouh2008(object):
if len(w_list) != len(x_list):
raise ValueError('w_list must have same len as x_list')
output = (sum(w * tensor.pow(x, p) for (w,x) in zip(w_list, x_list)))\
/ (numpy.asarray(eps, dtype=k.type.dtype) + k + tensor.pow(sum(tensor.pow(x, q) for x in x_list), r))
/ (theano._asarray(eps, dtype=k.type.dtype) + k + tensor.pow(sum(tensor.pow(x, q) for x in x_list), r))
assert output.type.ndim == 2
self.__dict__.update(locals())
......@@ -79,8 +79,8 @@ class Kouh2008(object):
w_l2_sqr = sum((wi**2).sum() for wi in w_list)
e_range_low, e_range_high = exponent_range
e_range_low = numpy.asarray(e_range_low, dtype=dtype)
e_range_high = numpy.asarray(e_range_high, dtype=dtype)
e_range_low = theano._asarray(e_range_low, dtype=dtype)
e_range_high = theano._asarray(e_range_high, dtype=dtype)
e_range_mag = e_range_high - e_range_low
if e_range_mag < 0:
raise ValueError('exponent range must have low <= high')
......@@ -93,8 +93,8 @@ class Kouh2008(object):
p = tensor.nnet.sigmoid(p_unbounded) * e_range_mag + e_range_low
q = tensor.nnet.sigmoid(q_unbounded) * e_range_mag + e_range_low
r = tensor.nnet.sigmoid(r_unbounded) * \
numpy.asarray(1.0/e_range_low - 1.0/e_range_high, dtype=dtype) \
+ numpy.asarray(1.0/e_range_high, dtype=dtype)
theano._asarray(1.0/e_range_low - 1.0/e_range_high, dtype=dtype) \
+ theano._asarray(1.0/e_range_high, dtype=dtype)
k = softsign(k_unbounded)
......@@ -157,10 +157,10 @@ class Kouh2008(object):
b_list = [shared_uniform(low=0, high=.01, size=(n_out,), name='b_%i'%i)
for i in xrange(n_terms)]
#x_list = [numpy.asarray(eps, dtype=dtype)+softplus(tensor.dot(input, f_list[i])) for i in xrange(n_terms)]
filter_range = numpy.asarray(filter_range, dtype=dtype)
half_filter_range = numpy.asarray(filter_range/2, dtype=dtype)
x_list = [numpy.asarray(filter_range + eps, dtype=dtype)+half_filter_range *softsign(tensor.dot(input, f_list[i]) +
#x_list = [theano._asarray(eps, dtype=dtype)+softplus(tensor.dot(input, f_list[i])) for i in xrange(n_terms)]
filter_range = theano._asarray(filter_range, dtype=dtype)
half_filter_range = theano._asarray(filter_range/2, dtype=dtype)
x_list = [theano._asarray(filter_range + eps, dtype=dtype)+half_filter_range *softsign(tensor.dot(input, f_list[i]) +
b_list[i]) for i in xrange(n_terms)]
rval = cls.new_expbounds(rng, x_list, n_out, dtype=dtype, params=f_list + b_list,
......@@ -304,7 +304,7 @@ def test_bench_elemwise(n_iter=1000, **kwargs):
train_nll = pfunc([x, y, s_lr], [], updates=updates)
xval = numpy.asarray(
xval = theano._asarray(
rng.uniform(size=(conf.ft_batchsize, x.type.shape[1])),
dtype=conf.dtype2,
)
......
......@@ -59,14 +59,14 @@ def _params_allgood_header():
def _params_allgood(ishape, kshape, mode, subsample=(1,1), img_stride=(1,1), kern_stride=(1,1), version=-1, verbose=0, random=True, print_=None, id=None, rtol=1e-5, atol = 1e-8, nb_iter=0, ones=False):
if ones:
assert not random
npy_img = numpy.asarray(numpy.ones(ishape), dtype='float32')
npy_kern = -numpy.asarray(numpy.ones(kshape), dtype='float32')
npy_img = theano._asarray(numpy.ones(ishape), dtype='float32')
npy_kern = -theano._asarray(numpy.ones(kshape), dtype='float32')
elif random:
npy_img = numpy.asarray(numpy.random.rand(*ishape), dtype='float32')
npy_kern = numpy.asarray(numpy.random.rand(*kshape), dtype='float32')
npy_img = theano._asarray(numpy.random.rand(*ishape), dtype='float32')
npy_kern = theano._asarray(numpy.random.rand(*kshape), dtype='float32')
else:
npy_img = numpy.asarray(numpy.arange(numpy.prod(ishape)).reshape(ishape), dtype='float32')+1
npy_kern = -(numpy.asarray(numpy.arange(numpy.prod(kshape)).reshape(kshape), dtype='float32')+1)
npy_img = theano._asarray(numpy.arange(numpy.prod(ishape)).reshape(ishape), dtype='float32')+1
npy_kern = -(theano._asarray(numpy.arange(numpy.prod(kshape)).reshape(kshape), dtype='float32')+1)
img = cuda_ndarray.CudaNdarray(npy_img)
kern = cuda_ndarray.CudaNdarray(npy_kern)
......@@ -369,8 +369,8 @@ def _test_dummy():
mode = 'valid'
subsample = (1,1)
npy_img = numpy.asarray(numpy.random.rand(*ishape), dtype='float32')
npy_kern = numpy.asarray(numpy.random.rand(*kshape), dtype='float32')
npy_img = theano._asarray(numpy.random.rand(*ishape), dtype='float32')
npy_kern = theano._asarray(numpy.random.rand(*kshape), dtype='float32')
img = cuda_ndarray.CudaNdarray(npy_img)
kern = cuda_ndarray.CudaNdarray(npy_kern)
......
......@@ -9,14 +9,14 @@ import numpy
def test_host_to_device():
print >>sys.stderr, 'starting test_host_to_dev'
for shape in ((), (3,), (2,3), (3,4,5,6)):
a = numpy.asarray(numpy.random.rand(*shape), dtype='float32')
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
c = numpy.asarray(b)
assert numpy.all(a == c)
def test_add():
for shape in ((), (3,), (2,3), (1,10000000),(10,1000000), (100,100000),(1000,10000),(10000,1000)):
a0 = numpy.asarray(numpy.random.rand(*shape), dtype='float32')
a0 = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a1 = a0.copy()
b0 = cuda_ndarray.CudaNdarray(a0)
b1 = cuda_ndarray.CudaNdarray(a1)
......@@ -54,7 +54,7 @@ def test_add():
def test_exp():
print >>sys.stderr, 'starting test_exp'
for shape in ((), (3,), (2,3), (1,10000000),(10,1000000), (100,100000),(1000,10000),(10000,1000)):
a0 = numpy.asarray(numpy.random.rand(*shape), dtype='float32')
a0 = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a1 = a0.copy()
b0 = cuda_ndarray.CudaNdarray(a0)
b1 = cuda_ndarray.CudaNdarray(a1)
......@@ -75,7 +75,7 @@ def test_exp():
def test_copy():
print >>sys.stderr, 'starting test_copy'
shape = (5,)
a = numpy.asarray(numpy.random.rand(*shape), dtype='float32')
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
print >>sys.stderr, '.. creating device object'
b = cuda_ndarray.CudaNdarray(a)
......@@ -92,8 +92,8 @@ def test_copy():
def test_dot():
print >>sys.stderr, 'starting test_dot'
a0 = numpy.asarray(numpy.random.rand(4, 7), dtype='float32')
a1 = numpy.asarray(numpy.random.rand(7, 6), dtype='float32')
a0 = theano._asarray(numpy.random.rand(4, 7), dtype='float32')
a1 = theano._asarray(numpy.random.rand(7, 6), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0)
b1 = cuda_ndarray.CudaNdarray(a1)
......@@ -104,7 +104,7 @@ def test_dot():
def test_sum():
shape = (2,3)
a0 = numpy.asarray(numpy.arange(shape[0]*shape[1]).reshape(shape), dtype='float32')
a0 = theano._asarray(numpy.arange(shape[0]*shape[1]).reshape(shape), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0)
......@@ -121,17 +121,17 @@ def test_sum():
assert numpy.allclose(a0, numpy.asarray(b0.reduce_sum([0,0])))
shape = (3,4,5,6,7,8)
a0 = numpy.asarray(numpy.arange(3*4*5*6*7*8).reshape(shape), dtype='float32')
a0 = theano._asarray(numpy.arange(3*4*5*6*7*8).reshape(shape), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0)
assert numpy.allclose(a0.sum(axis=5).sum(axis=3).sum(axis=0), numpy.asarray(b0.reduce_sum([1,0,0,1,0,1])))
shape = (16,2048)
a0 = numpy.asarray(numpy.arange(16*2048).reshape(shape), dtype='float32')
a0 = theano._asarray(numpy.arange(16*2048).reshape(shape), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0)
assert numpy.allclose(a0.sum(axis=0), numpy.asarray(b0.reduce_sum([1,0])))
shape = (16,10)
a0 = numpy.asarray(numpy.arange(160).reshape(shape), dtype='float32')
a0 = theano._asarray(numpy.arange(160).reshape(shape), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0)
assert numpy.allclose(a0.sum(), numpy.asarray(b0.reduce_sum([1,1])))
......@@ -147,7 +147,7 @@ def test_reshape():
def subtest(shape_1, shape_2):
#print >> sys.stderr, "INFO: shapes", shape_1, shape_2
a = numpy.asarray(numpy.random.rand(*shape_1), dtype='float32')
a = theano._asarray(numpy.random.rand(*shape_1), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
aa = a.reshape(shape_2)
......@@ -178,7 +178,7 @@ def test_getshape():
]
def subtest(shape):
a = numpy.asarray(numpy.random.rand(*shape_1), dtype='float32')
a = theano._asarray(numpy.random.rand(*shape_1), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
assert b.shape == a.shape
......@@ -188,7 +188,7 @@ def test_getshape():
def test_stride_manipulation():
a = numpy.asarray([[0,1,2], [3,4,5]], dtype='float32')
a = theano._asarray([[0,1,2], [3,4,5]], dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
v = b.view()
v._dev_data += 0
......@@ -212,7 +212,7 @@ def test_stride_manipulation():
def test_copy_subtensor0():
sizeof_float=4
a = numpy.asarray(numpy.random.rand(30,20,5,5), dtype='float32')
a = theano._asarray(numpy.random.rand(30,20,5,5), dtype='float32')
cuda_a = cuda_ndarray.CudaNdarray(a)
a_view = cuda_a.view()
a_view_strides = a_view._strides
......@@ -225,7 +225,7 @@ def test_copy_subtensor0():
assert numpy.all(a[:,:,::-1,::-1] == numpy.asarray(a_view_copy))
def test_mapping_getitem_ellipsis():
a = numpy.asarray(numpy.random.rand(5,4,3,2), dtype='float32')
a = theano._asarray(numpy.random.rand(5,4,3,2), dtype='float32')
a = cuda_ndarray.CudaNdarray(a)
b = a[...]
......@@ -235,7 +235,7 @@ def test_mapping_getitem_ellipsis():
def test_mapping_getitem_reverse_some_dims():
dim=(5,4,3,2)
a = numpy.asarray(numpy.random.rand(*dim), dtype='float32')
a = theano._asarray(numpy.random.rand(*dim), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
_b = _a[:,:,::-1, ::-1]
......@@ -252,7 +252,7 @@ def test_mapping_getitem_w_int():
assert numpy.all(x == y)
dim =(2,)
a = numpy.asarray(numpy.random.rand(*dim), dtype='float32')
a = theano._asarray(numpy.random.rand(*dim), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
_cmp(numpy.asarray(_a[1]), a[1])
_cmp(numpy.asarray(_a[::1]), a[::1])
......@@ -260,14 +260,14 @@ def test_mapping_getitem_w_int():
_cmp(numpy.asarray(_a[...]), a[...])
dim =()
a = numpy.asarray(numpy.random.rand(*dim), dtype='float32')
a = theano._asarray(numpy.random.rand(*dim), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
_cmp(numpy.asarray(_a[...]), a[...])
dim =(5,4,3,2)
a = numpy.asarray(numpy.random.rand(*dim), dtype='float32')
a = theano._asarray(numpy.random.rand(*dim), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
_cmp(numpy.asarray(_a[:,:,::-1, ::-1]), a[:,:,::-1,::-1])
......@@ -280,9 +280,9 @@ def test_mapping_getitem_w_int():
_cmp(numpy.asarray(_a[...]), a[...])
def test_gemm_vector_vector():
a = numpy.asarray(numpy.random.rand(5,1), dtype='float32')
a = theano._asarray(numpy.random.rand(5,1), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = numpy.asarray(numpy.random.rand(1,5), dtype='float32')
b = theano._asarray(numpy.random.rand(1,5), dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
_c = cuda_ndarray.dot(_a,_b)
......
......@@ -49,10 +49,10 @@ def run_nnet(use_gpu, n_batch=60, n_in=1024, n_hid=2048, n_out=10, n_iter=100):
v = tcn.shared_constructor(numpy.zeros((n_hid, n_out)), 'c')
c = tcn.shared_constructor(numpy.zeros(n_out), 'c')
else:
w = shared(numpy.asarray(0.01*(numpy.random.rand(n_in,n_hid)-0.5), dtype='float32'), 'w')
b = shared(numpy.asarray(numpy.zeros(n_hid), dtype='float32'), 'b')
v = shared(numpy.asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c')
c = shared(numpy.asarray(numpy.zeros(n_out), dtype='float32'), 'c')
w = shared(theano._asarray(0.01*(numpy.random.rand(n_in,n_hid)-0.5), dtype='float32'), 'w')
b = shared(theano._asarray(numpy.zeros(n_hid), dtype='float32'), 'b')
v = shared(theano._asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c')
c = shared(theano._asarray(numpy.zeros(n_out), dtype='float32'), 'c')
x = tensor.fmatrix('x')
y = tensor.fmatrix('y')
......@@ -75,9 +75,9 @@ def run_nnet(use_gpu, n_batch=60, n_in=1024, n_hid=2048, n_out=10, n_iter=100):
for i, n in enumerate(train.maker.env.toposort()):
print i, n
xval = numpy.asarray(numpy.random.rand(n_batch, n_in), dtype='float32')
yval = numpy.asarray(numpy.random.rand(n_batch, n_out), dtype='float32')
lr = numpy.asarray(0.01, dtype='float32')
xval = theano._asarray(numpy.random.rand(n_batch, n_in), dtype='float32')
yval = theano._asarray(numpy.random.rand(n_batch, n_out), dtype='float32')
lr = theano._asarray(0.01, dtype='float32')
t0 = time.time()
rval = []
......@@ -123,10 +123,10 @@ def run_conv_nnet1(use_gpu):
n_hid = n_kern * logical_hid_shape[0] * logical_hid_shape[1]
n_out = 10
w = shared_fn(numpy.asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w')
b = shared_fn(numpy.asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b')
v = shared_fn(numpy.asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c')
c = shared_fn(numpy.asarray(numpy.zeros(n_out), dtype='float32'), 'c')
w = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w')
b = shared_fn(theano._asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b')
v = shared_fn(theano._asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c')
c = shared_fn(theano._asarray(numpy.zeros(n_out), dtype='float32'), 'c')
x = tensor.Tensor(dtype='float32', broadcastable=(0,1,0,0))('x')
y = tensor.fmatrix('y')
......@@ -152,9 +152,9 @@ def run_conv_nnet1(use_gpu):
# for i, n in enumerate(train.maker.env.toposort()):
# print i, n
xval = numpy.asarray(numpy.random.rand(*shape_img), dtype='float32')
yval = numpy.asarray(numpy.random.rand(n_batch, n_out), dtype='float32')
lr = numpy.asarray(0.01, dtype='float32')
xval = theano._asarray(numpy.random.rand(*shape_img), dtype='float32')
yval = theano._asarray(numpy.random.rand(n_batch, n_out), dtype='float32')
lr = theano._asarray(0.01, dtype='float32')
for i in xrange(10):
rval = train(xval, yval, lr)
......@@ -204,12 +204,12 @@ def run_conv_nnet2(use_gpu): # pretend we are training LeNet for MNIST
n_hid = n_kern1 * logical_hid_shape1[0] * logical_hid_shape1[1]
n_out = 10
w0 = shared_fn(numpy.asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w0')
b0 = shared_fn(numpy.asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b0')
w1 = shared_fn(numpy.asarray(0.01*(numpy.random.rand(*shape_kern1)-0.5), dtype='float32'), 'w1')
b1 = shared_fn(numpy.asarray(numpy.zeros((n_kern1,)), dtype='float32'), 'b1')
v = shared_fn(numpy.asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c')
c = shared_fn(numpy.asarray(numpy.zeros(n_out), dtype='float32'), 'c')
w0 = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w0')
b0 = shared_fn(theano._asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b0')
w1 = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern1)-0.5), dtype='float32'), 'w1')
b1 = shared_fn(theano._asarray(numpy.zeros((n_kern1,)), dtype='float32'), 'b1')
v = shared_fn(theano._asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c')
c = shared_fn(theano._asarray(numpy.zeros(n_out), dtype='float32'), 'c')
x = tensor.Tensor(dtype='float32', broadcastable=(0,1,0,0))('x')
y = tensor.fmatrix('y')
......@@ -238,9 +238,9 @@ def run_conv_nnet2(use_gpu): # pretend we are training LeNet for MNIST
# for i, n in enumerate(train.maker.env.toposort()):
# print i, n
xval = numpy.asarray(numpy.random.rand(*shape_img), dtype='float32')
yval = numpy.asarray(numpy.random.rand(n_batch,n_out), dtype='float32')#int32 make all 0...
lr = numpy.asarray(0.01, dtype='float32')
xval = theano._asarray(numpy.random.rand(*shape_img), dtype='float32')
yval = theano._asarray(numpy.random.rand(n_batch,n_out), dtype='float32')#int32 make all 0...
lr = theano._asarray(0.01, dtype='float32')
for i in xrange(n_train):
rval = train(xval, yval, lr)
......@@ -284,12 +284,12 @@ def run_conv_nnet2_classif(use_gpu, isize, ksize, n_batch, n_iter,
n_out = 10
w0 = shared_fn(numpy.asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w0')
b0 = shared_fn(numpy.asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b0')
w1 = shared_fn(numpy.asarray(0.01*(numpy.random.rand(*shape_kern1)-0.5), dtype='float32'), 'w1')
b1 = shared_fn(numpy.asarray(numpy.zeros((n_kern1,)), dtype='float32'), 'b1')
v = shared_fn(numpy.asarray(0.01*numpy.random.randn(n_hid, n_out), dtype='float32'), 'v')
c = shared_fn(numpy.asarray(numpy.zeros(n_out), dtype='float32'), 'c')
w0 = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w0')
b0 = shared_fn(theano._asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b0')
w1 = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern1)-0.5), dtype='float32'), 'w1')
b1 = shared_fn(theano._asarray(numpy.zeros((n_kern1,)), dtype='float32'), 'b1')
v = shared_fn(theano._asarray(0.01*numpy.random.randn(n_hid, n_out), dtype='float32'), 'v')
c = shared_fn(theano._asarray(numpy.zeros(n_out), dtype='float32'), 'c')
print 'ALLOCATING ARCH: w0 shape', w0.value.shape
print 'ALLOCATING ARCH: w1 shape', w1.value.shape
......@@ -330,9 +330,9 @@ def run_conv_nnet2_classif(use_gpu, isize, ksize, n_batch, n_iter,
for i, n in enumerate(train.maker.env.toposort()):
print i, n
xval = numpy.asarray(numpy.random.rand(*shape_img), dtype='float32')
yval = numpy.asarray(numpy.random.rand(n_batch,n_out), dtype='float32')
lr = numpy.asarray(0.01, dtype='float32')
xval = theano._asarray(numpy.random.rand(*shape_img), dtype='float32')
yval = theano._asarray(numpy.random.rand(n_batch,n_out), dtype='float32')
lr = theano._asarray(0.01, dtype='float32')
rvals=numpy.zeros(n_iter)
t0 = time.time()
......
......@@ -71,7 +71,7 @@ def shared_constructor(value, name, strict=False, broadcastable=None):
if strict:
_value = value
else:
_value = numpy.asarray(value, dtype='float32')
_value = theano._asarray(value, dtype='float32')
if not isinstance(_value, numpy.ndarray):
raise TypeError('ndarray required')
......
......@@ -8,7 +8,7 @@ DownsampleFactorMax, DownsampleAvg, DownsampleSoftmax.
from theano import gof, Op, tensor, Variable, Apply
from theano.printing import Print
import numpy
import numpy, theano
import __builtin__
class DownsampleFactorMaxGrad(Op):
......@@ -259,7 +259,7 @@ class DownsampleFactorMax(Op):
raise NotImplementedError('DownsampleFactorMax requires 4D input for now')
if z[0] is None:
z[0] = numpy.zeros(self.out_shape(x.shape, self.ds, self.ignore_border)) -float('inf')
z[0] = numpy.asarray(z[0], dtype=x.dtype)
z[0] = theano._asarray(z[0], dtype=x.dtype)
zz=z[0]
ds0, ds1 = self.ds
if self.ignore_border:
......
......@@ -197,7 +197,7 @@ class Scan(theano.Op):
def zero(p):
return theano.tensor.TensorConstant(theano.tensor.TensorType(\
dtype=p.type.dtype, broadcastable=[]),
numpy.asarray(0,dtype = p.type.dtype))
theano._asarray(0,dtype = p.type.dtype))
return [gmap.get(p, zero(p)) for p in inputs]
......
......@@ -211,7 +211,7 @@ class TheanoObject(object):
v = tensor.lscalar(name)
v._theanoclass_container = \
theano.gof.Container(v,
storage = [numpy.asarray(ival, dtype='int64')],
storage = [theano._asarray(ival, dtype='int64')],
readonly=False)
assert not hasattr(v, 'set')
assert not hasattr(v, 'get')
......
......@@ -2,7 +2,7 @@ import operator
import math
from copy import copy
import numpy
import numpy, theano
from theano import gof
from theano.gof import Op, utils, Variable, Constant, Type, Apply, Env
......@@ -33,7 +33,7 @@ def as_scalar(x, name = None):
def constant(x):
if isinstance(x, float):
for dtype in ['float32', 'float64']:
x_ = numpy.asarray(x, dtype=dtype)
x_ = theano._asarray(x, dtype=dtype)
if numpy.all(x == x_):
break
x_ = None
......@@ -41,7 +41,7 @@ def constant(x):
return ScalarConstant(Scalar(str(x_.dtype)), x)
if isinstance(x, int):
for dtype in ['int8', 'int16', 'int32', 'int64']:
x_ = numpy.asarray(x, dtype=dtype)
x_ = theano._asarray(x, dtype=dtype)
if numpy.all(x == x_):
break
x_ = None
......@@ -1090,7 +1090,7 @@ floor = Floor(same_out_nocomplex, name = 'ceil')
class IRound(UnaryScalarOp):
def impl(self, x):
return numpy.asarray(numpy.round(x), dtype = 'int64')
return theano._asarray(numpy.round(x), dtype = 'int64')
def c_code(self, node, name, (x, ), (z, ), sub):
return "%(z)s = round(%(x)s);" % locals()
iround = IRound(int_out_nocomplex)
......
......@@ -7,7 +7,7 @@ To read about different sparse formats, see U{http://www-users.cs.umn.edu/~saad/
"""
import sys, operator
import numpy
import numpy, theano
from scipy import sparse
import scipy.sparse
from theano.printing import Print
......@@ -279,9 +279,9 @@ class CSMProperties(gof.Op):
out[0][0] = csm.data[self.kmap]
#backport
#out[0][0] = csm.data if self.kmap is None else csm.data[self.kmap]
out[1][0] = numpy.asarray(csm.indices, dtype='int32')
out[2][0] = numpy.asarray(csm.indptr, dtype='int32')
out[3][0] = numpy.asarray(csm.shape, dtype='int32')
out[1][0] = theano._asarray(csm.indices, dtype='int32')
out[2][0] = theano._asarray(csm.indptr, dtype='int32')
out[3][0] = theano._asarray(csm.shape, dtype='int32')
# TODO FIX THIS
def grad(self, (csm,), g):
......@@ -344,28 +344,12 @@ class CSM(gof.Op):
"""
data = tensor.as_tensor_variable(data)
# Note that we use `view(numpy.int32)` in addition to providing the
# 'int32' dtype to `numpy.asarray`. This is because on some computers
# (e.g. a Windows 32 bits machine), we can have the following assert
# fail:
# x = numpy.array([0], dtype=numpy.intc)
# y = numpy.asarray(x, dtype=numpy.int32)
# assert y.dtype.num == numpy.dtype(numpy.int32).num
# while the assert does *not* fail when replacing the second line by:
# y = numpy.asarray(x, dtype='int32').view(numpy.int32)
# This is a known defect in Numpy. For more information see ticket
# http://projects.scipy.org/numpy/ticket/870
# Note also that it is important to keep "dtype='int32'" when calling
# `numpy.asarray`. This is because `view` is only some kind of cast to
# the exact data type we want to use. If a conversion is required (e.g.
# from int64 to int32), it must be done in the call to `numpy.asarray`.
if not isinstance(indices, tensor.TensorVariable):
indices = numpy.asarray(indices, dtype='int32').view(numpy.int32)
indices = theano._asarray(indices, dtype='int32')
if not isinstance(indptr, tensor.TensorVariable):
indptr = numpy.asarray(indptr, dtype='int32').view(numpy.int32)
indptr = theano._asarray(indptr, dtype='int32')
if not isinstance(shape, tensor.TensorVariable):
shape = numpy.asarray(shape, dtype='int32').view(numpy.int32)
shape = theano._asarray(shape, dtype='int32')
indices = tensor.as_tensor_variable(indices)
indptr = tensor.as_tensor_variable(indptr)
shape = tensor.as_tensor_variable(shape)
......
......@@ -9,7 +9,7 @@ import traceback #for overriding Op.__call__
if sys.version_info >= (2,5):
import functools
import numpy
import numpy, theano
from copy import copy
from theano import gof
......@@ -151,7 +151,7 @@ class NumpyAutocaster(object):
self.dtypes = tuple(dtypes)
def __call__(self, x):
for dtype in self.dtypes:
x_ = numpy.asarray(x, dtype=dtype)
x_ = theano._asarray(x, dtype=dtype)
if numpy.all(x == x_):
break
# returns either an exact x_==x, or the last casted x_
......@@ -163,7 +163,7 @@ autocast_float = NumpyAutocaster(('float32', 'float64'))
# Note: it's a bit weird for a compiler to automatically downcast literals like this, and it might
# have implications for efficiency when mixing types. For example when you add 1.0 +
# dmatrix(), the 1.0 could be converted to float32, and require upcasting for the + operation
# at every position in the dmatrix. using numpy.asarray(1.0, dtype='float64') will circumvent
# at every position in the dmatrix. using theano._asarray(1.0, dtype='float64') will circumvent
# this autocasting, and in future, our ops might be smarter about factoring out upcasts. The
# advantage of this mechanism is to combine it with floatX so that 1.0 + xmatrix() will always
# have the same type as the xmatrix().
......@@ -197,7 +197,7 @@ def constant_or_value(x, rtype, name=None, ndim=None, dtype=None):
"""
if dtype is not None:
# in this case, the semantics are that the caller is forcing the dtype
x_ = numpy.asarray(x, dtype=dtype)
x_ = theano._asarray(x, dtype=dtype)
else:
# in this case, this function should infer the dtype according to the autocasting
# rules. See autocasting above.
......@@ -378,7 +378,7 @@ class TensorType(Type):
self, self.shape, data.shape))
return data
else:
data = numpy.asarray(data, dtype = self.dtype)
data = theano._asarray(data, dtype = self.dtype)
if not self.ndim == data.ndim:
raise TypeError("Wrong number of dimensions: expected %s, got %s with shape %s." % (self.ndim, data.ndim, data.shape), data)
if any(b and d != 1 for d, b in zip(data.shape, self.broadcastable)):
......@@ -1258,7 +1258,7 @@ class Shape(Op):
x = as_tensor_variable(x)
return Apply(self, [x], [lvector()])
def perform(self, node, (x, ), (out, )):
out[0] = numpy.asarray(x.shape, dtype = 'int64')
out[0] = theano._asarray(x.shape, dtype = 'int64')
def grad(self, (x,), (gz,)):
return [None]
_shape = Shape()
......@@ -1300,9 +1300,7 @@ class MaxAndArgmax(Op):
return Apply(self, inputs, outputs)
def perform(self, node, (x, axis), (max, max_idx)):
max[0] = numpy.asarray(numpy.max(x, axis))
# Note: using 'view' is important until Numpy's ticket 870 is resolved.
max_idx[0] = numpy.asarray(numpy.argmax(x, axis), dtype='int32').view(
numpy.int32)
max_idx[0] = theano._asarray(numpy.argmax(x, axis), dtype='int32')
def grad(self, (x, axis), (g_max, g_max_idx)):
# @warning: This only works if axis is 0, else the max is
# broadcasted wrong in the call to eq.
......@@ -2498,7 +2496,7 @@ class Join(Op):
def perform(self, node, axis_and_tensors, (out, )):
axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
out[0] = numpy.asarray(numpy.concatenate(tensors, axis = axis),
out[0] = theano._asarray(numpy.concatenate(tensors, axis = axis),
dtype=node.outputs[0].type.dtype)
def grad(self, axis_and_tensors, (gz,)):
......@@ -3422,7 +3420,7 @@ def grad(cost, wrt, g_cost=None, consider_constant=[], warn_type=False):
def zero(p):
return TensorConstant(
TensorType(dtype = p.type.dtype, broadcastable = []),
numpy.asarray(0, dtype=p.type.dtype))
theano._asarray(0, dtype=p.type.dtype))
#try:
#it = iter(wrt)
......
import sys
import elemwise_cgen as cgen
import numpy
import numpy, theano
from theano import gof
from theano.gof import Op, Apply
from theano import scalar
......@@ -823,7 +823,7 @@ class CAReduce(Op):
if to_reduce:
for dimension in to_reduce:
variable = self.ufunc.reduce(variable, dimension)
output[0] = numpy.asarray(variable, dtype = node.outputs[0].type.dtype)
output[0] = theano._asarray(variable, dtype = node.outputs[0].type.dtype)
else:
output[0] = numpy.copy(variable)
......
......@@ -35,7 +35,7 @@ class ScalarSigmoid(scalar.UnaryScalarOp):
if node.inputs[0].type == scalar.float32:
# These constants were obtained by looking at the output of python commands like:
# for i in xrange(750):
# print i, repr( numpy.asarray(1.0, dtype=dt) / (numpy.asarray(1.0, dtype=dt) + numpy.exp(-numpy.asarray([i,-i], dtype=dt))))
# print i, repr( theano._asarray(1.0, dtype=dt) / (theano._asarray(1.0, dtype=dt) + numpy.exp(-theano._asarray([i,-i], dtype=dt))))
# the boundary checks prevent us from generating inf
return """%(z)s = %(x)s < -88.0f ? 0.0 : %(x)s > 15.0f ? 1.0f : 1.0f /(1.0f + exp(-%(x)s));""" % locals()
elif node.inputs[0].type == scalar.float64:
......@@ -70,7 +70,7 @@ class ScalarSoftplus(scalar.UnaryScalarOp):
if node.inputs[0].type == scalar.float32:
# These constants were obtained by looking at the output of python commands like:
# for i in xrange(750):
# print i, repr( numpy.log1p(numpy.exp(numpy.asarray([i,-i], dtype=dt))))
# print i, repr( numpy.log1p(numpy.exp(theano._asarray([i,-i], dtype=dt))))
# the boundary checks prevent us from generating inf
return """%(z)s = %(x)s < -103.0f ? 0.0 : %(x)s > 14.0f ? %(x)s : log1p(exp(%(x)s));""" % locals()
elif node.inputs[0].type == scalar.float64:
......
......@@ -14,7 +14,7 @@ from elemwise import Elemwise, DimShuffle
from theano import scalar
import basic as T
import inplace as I
import numpy
import numpy, theano
import numpy as N #guys... please don't do this in the library :(
import operator
import itertools
......@@ -874,7 +874,7 @@ def mul_calculate(num, denum, aslist=False, out_type=None):
#first = num[0] if num else denum[0]
one = N.asarray(first).dtype.type(1)
else:
one = N.asarray(1, dtype=out_type.dtype)
one = theano._asarray(1, dtype=out_type.dtype)
v = reduce(N.multiply, num, one) / reduce(N.multiply, denum, one)
if aslist:
if N.all(v == 1):
......@@ -977,7 +977,7 @@ def local_mul_zero(node):
#print 'MUL by value', value, node.inputs
if N.all(value == 0):
#print '... returning zeros'
return _fill_chain(N.asarray(0, dtype=otype.dtype), node.inputs)
return _fill_chain(theano._asarray(0, dtype=otype.dtype), node.inputs)
register_canonicalize(local_mul_zero)
@gof.local_optimizer([T.true_div])
......@@ -1162,8 +1162,8 @@ def add_calculate(num, denum, aslist = False, out_type=None):
if out_type is None:
zero = 0.0
else:
zero = N.asarray(0, dtype=out_type.dtype)
#zero = 0.0 if out_type is None else N.asarray(0, dtype=out_type.dtype)
zero = theano._asarray(0, dtype=out_type.dtype)
#zero = 0.0 if out_type is None else theano._asarray(0, dtype=out_type.dtype)
v = reduce(N.add, num, zero) - reduce(N.add, denum, zero)
if aslist:
if N.all(v == 0):
......
......@@ -6,7 +6,7 @@ import numpy
#local imports
import basic as tensor
import opt
import opt, theano
from theano import gof
from theano.compile import optdb
......@@ -166,7 +166,7 @@ class RandomFunction(gof.Op):
rval = self.fn(r, *(args + [tuple(shape)]))
if not isinstance(rval, numpy.ndarray) \
or str(rval.dtype) != node.outputs[1].type.dtype:
out[0] = numpy.asarray(rval, dtype = node.outputs[1].type.dtype)
out[0] = theano._asarray(rval, dtype = node.outputs[1].type.dtype)
else:
out[0] = rval
if len(rval.shape) != self.outtype.ndim:
......@@ -182,7 +182,7 @@ def _infer_ndim(ndim, shape):
"""
if isinstance(shape, (tuple, list)):
v_shape = tensor.TensorConstant(type=tensor.lvector, data=numpy.asarray(shape, dtype='int64'))
v_shape = tensor.TensorConstant(type=tensor.lvector, data=theano._asarray(shape, dtype='int64'))
else:
v_shape = tensor.as_tensor_variable(shape)
......
......@@ -1063,8 +1063,8 @@ class test_bitwise(unittest.TestCase):
def test_or(self):
x, y = bvector(), bvector()
fn = inplace_func([x,y], x|y)
l = numpy.asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8')
l = theano._asarray([0,0,1,1], dtype = 'int8')
r = theano._asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r)
self.failUnless(numpy.all(v == (operator.or_(l, r))), (l, r, v))
......@@ -1074,8 +1074,8 @@ class test_bitwise(unittest.TestCase):
ix = x
ix = inplace.xor_inplace(ix, y)
gn = inplace_func([x,y], ix)
l = numpy.asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8')
l = theano._asarray([0,0,1,1], dtype = 'int8')
r = theano._asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r)
self.failUnless(numpy.all(v == (operator.xor(l, r))), (l, r, v))
v = gn(l, r)
......@@ -1085,16 +1085,16 @@ class test_bitwise(unittest.TestCase):
def test_and(self):
x, y = bvector(), bvector()
fn = inplace_func([x,y], x&y)
l = numpy.asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8')
l = theano._asarray([0,0,1,1], dtype = 'int8')
r = theano._asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r)
self.failUnless(numpy.all(v == (operator.and_(l, r))), (l, r, v))
def test_inv(self):
x, y = bvector(), bvector()
fn = inplace_func([x,y], ~x)
l = numpy.asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8')
l = theano._asarray([0,0,1,1], dtype = 'int8')
r = theano._asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r)
self.failUnless(numpy.all(v == (~l)), (l, r, v))
......@@ -1723,9 +1723,9 @@ def test_reshape():
assert numpy.all(a_val == a_val_copy)
#test that it works with inplace operations
a_val = numpy.asarray([0,1,2,3,4,5], dtype='float64')
a_val_copy = numpy.asarray([0,1,2,3,4,5], dtype='float64')
b_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64')
a_val = theano._asarray([0,1,2,3,4,5], dtype='float64')
a_val_copy = theano._asarray([0,1,2,3,4,5], dtype='float64')
b_val = theano._asarray([[0,1,2],[3,4,5]], dtype='float64')
f_sub = inplace_func([a,b], c-b)
assert numpy.all(f_sub(a_val, b_val) == 0.0)
......@@ -1733,7 +1733,7 @@ def test_reshape():
# verify gradient
def just_vals(v):
return Reshape(2)(v, numpy.asarray([2,3], dtype='int32'))
return Reshape(2)(v, theano._asarray([2,3], dtype='int32'))
utt.verify_grad(just_vals, [a_val])
......@@ -1744,8 +1744,8 @@ def test_flatten_outdimNone():
a = dmatrix()
c = flatten(a)
f = inplace_func([a], c)
a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64')
c_val = numpy.asarray([0,1,2,3,4,5], dtype='float64')
a_val = theano._asarray([[0,1,2],[3,4,5]], dtype='float64')
c_val = theano._asarray([0,1,2,3,4,5], dtype='float64')
assert numpy.all(f(a_val)==c_val)
f = inplace_func([a], c)
assert numpy.all(f(a_val)==c_val)
......@@ -1756,8 +1756,8 @@ def test_flatten_scalar():
a = dscalar()
c = flatten(a)
f = inplace_func([a], c)
a_val = numpy.asarray(3.0, dtype='float64')
c_val = numpy.asarray([3.0], dtype='float64')
a_val = theano._asarray(3.0, dtype='float64')
c_val = theano._asarray([3.0], dtype='float64')
assert numpy.all(f(a_val)==c_val)
f = inplace_func([a], c)
assert numpy.all(f(a_val)==c_val)
......@@ -1768,8 +1768,8 @@ def test_flatten_outdim1():
a = dmatrix()
c = flatten(a, 1)
f = inplace_func([a], c)
a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64')
c_val = numpy.asarray([0,1,2,3,4,5], dtype='float64')
a_val = theano._asarray([[0,1,2],[3,4,5]], dtype='float64')
c_val = theano._asarray([0,1,2,3,4,5], dtype='float64')
assert numpy.all(f(a_val)==c_val)
f = inplace_func([a], c)
assert numpy.all(f(a_val)==c_val)
......@@ -1780,7 +1780,7 @@ def test_flatten_outdim2():
a = dmatrix()
c = flatten(a, 2)
f = inplace_func([a], c)
a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64')
a_val = theano._asarray([[0,1,2],[3,4,5]], dtype='float64')
assert numpy.all(f(a_val)==a_val)
f = inplace_func([a], c)
assert numpy.all(f(a_val)==a_val)
......@@ -1791,8 +1791,8 @@ def test_flatten_outdim2_of_3():
a = TensorType('float64', (False, False, False))()
c = flatten(a, 2)
f = inplace_func([a], c)
a_val = numpy.asarray([[[0,1],[2,3]], [[4,5],[6,7]]], dtype='float64')
c_val = numpy.asarray([[0,1,2,3], [4,5,6,7]], dtype='float64')
a_val = theano._asarray([[[0,1],[2,3]], [[4,5],[6,7]]], dtype='float64')
c_val = theano._asarray([[0,1,2,3], [4,5,6,7]], dtype='float64')
assert numpy.all(f(a_val)==c_val)
f = inplace_func([a], c)
assert numpy.all(f(a_val)==c_val)
......@@ -2288,8 +2288,8 @@ def test_autocast():
ac.__enter__()
assert (dvector()+ 1.1).dtype == 'float64'
assert (fvector()+ 1.1).dtype == 'float32'
assert (fvector()+ numpy.asarray(1.1,dtype='float64')).dtype == 'float64'
assert (fvector()+ numpy.asarray(1.1,dtype='float32')).dtype == 'float32'
assert (fvector()+ theano._asarray(1.1,dtype='float64')).dtype == 'float64'
assert (fvector()+ theano._asarray(1.1,dtype='float32')).dtype == 'float32'
assert (dvector()+ 1).dtype == 'float64'
assert (fvector()+ 1).dtype == 'float32'
......@@ -2303,8 +2303,8 @@ def test_autocast():
assert (dvector()+ 1.1).dtype == 'float64'
assert (fvector()+ 1.1).dtype == 'float64'
assert (fvector()+ 1.0).dtype == 'float64'
assert (fvector()+ numpy.asarray(1.1,dtype='float64')).dtype == 'float64'
assert (fvector()+ numpy.asarray(1.1,dtype='float32')).dtype == 'float32'
assert (fvector()+ theano._asarray(1.1,dtype='float64')).dtype == 'float64'
assert (fvector()+ theano._asarray(1.1,dtype='float32')).dtype == 'float32'
assert (dvector()+ 1).dtype == 'float64'
assert (fvector()+ 1).dtype == 'float32'
......
......@@ -2,7 +2,7 @@ import traceback
import theano.tensor as T
from theano.gof import Env
from theano.printing import pp
import numpy
import numpy, theano
from theano.tensor.blas import *
from theano.tensor.blas import _dot22, res_is_a, _as_scalar, _is_real_matrix
from unittest import TestCase
......@@ -152,7 +152,7 @@ class t_gemm(TestCase):
C = self.rand(4,5)[:,:4]
def t(z,x,y,a=1.0, b=0.0,l='c|py',dt='float64'):
z,a,x,y,b = [numpy.asarray(p,dtype=dt) for p in z,a,x,y,b]
z,a,x,y,b = [theano._asarray(p,dtype=dt) for p in z,a,x,y,b]
z_orig = z.copy()
z_after = self._gemm(z, a, x, y, b)
......
......@@ -13,7 +13,7 @@ class test_casting(unittest.TestCase):
x = type_fn()
f = function([x], op_fn(x))
xval = numpy.asarray(numpy.random.rand(10)*10, dtype=type_fn.dtype)
xval = theano._asarray(numpy.random.rand(10)*10, dtype=type_fn.dtype)
yval = f(xval)
assert str(yval.dtype) == op_fn.scalar_op.output_types_preference.spec[0].dtype
......
......@@ -73,7 +73,7 @@ def test_add_canonizer_problem0():
n_segments = 10
label = lscalar('label')
segment_labels = label + numpy.asarray([0] * n_segments, dtype='int64')
segment_labels = label + theano._asarray([0] * n_segments, dtype='int64')
r = segment_labels * 5
f = function([label], r)
......@@ -149,14 +149,14 @@ class test_canonize(unittest.TestCase):
dx, dy, dz = dmatrices('xyz')
fv = fvector('r').dimshuffle('x',0)
dv = dvector('s').dimshuffle('x',0)
fxv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fyv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fzv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
dxv = numpy.asarray(numpy.random.rand(*shp),dtype='float64')
dyv = numpy.asarray(numpy.random.rand(*shp),dtype='float64')
dzv = numpy.asarray(numpy.random.rand(*shp),dtype='float64')
dvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0])
fxv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fyv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fzv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
dxv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dyv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dzv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0])
cases = [
(fx+fy,(fx,fy),(fxv,fyv),1,'float32'),
(fx*fy,(fx,fy),(fxv,fyv),1,'float32'),
......@@ -229,14 +229,14 @@ class test_canonize(unittest.TestCase):
dx, dy, dz = dmatrices('xyz')
fv = fvector('r').dimshuffle('x',0)
dv = dvector('s').dimshuffle('x',0)
fxv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fyv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fzv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
dxv = numpy.asarray(numpy.random.rand(*shp),dtype='float64')
dyv = numpy.asarray(numpy.random.rand(*shp),dtype='float64')
dzv = numpy.asarray(numpy.random.rand(*shp),dtype='float64')
dvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0])
fxv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fyv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fzv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
dxv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dyv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dzv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0])
cases = [
(fx+fy,(fx,fy),(fxv,fyv),1,'float32'),
(fx*fy,(fx,fy),(fxv,fyv),1,'float32'),
......@@ -312,16 +312,16 @@ class test_canonize(unittest.TestCase):
dx, dy, dz, dw = dmatrices('xyzw')
fv = fvector('r').dimshuffle('x',0)
dv = dvector('s').dimshuffle('x',0)
fxv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fyv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fzv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fwv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
dxv = numpy.asarray(numpy.random.rand(*shp),dtype='float64')
dyv = numpy.asarray(numpy.random.rand(*shp),dtype='float64')
dzv = numpy.asarray(numpy.random.rand(*shp),dtype='float64')
dwv = numpy.asarray(numpy.random.rand(*shp),dtype='float64')
dvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0])
fxv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fyv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fzv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fwv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
dxv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dyv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dzv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dwv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0])
#We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
......@@ -463,13 +463,13 @@ class test_canonize(unittest.TestCase):
shp=(4,4)
fx, fy, fz = fmatrices('xyz')
dx, dy, dz = dmatrices('xyz')
fxv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fyv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fzv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
dxv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
dyv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
dzv = numpy.asarray(numpy.random.rand(*shp),dtype='float32')
fvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
fxv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fyv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fzv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
dxv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
dyv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
dzv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
#We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
mode=compile.mode.predefined_modes[compile.mode.default_mode]
......@@ -589,7 +589,7 @@ class test_fusion(unittest.TestCase):
"""
#TODO: disable the canonizer?
def my_init(shp, dtype='float64', num=0):
#ret = numpy.asarray(numpy.random.rand(*shp),dtype=dtype)
#ret = theano._asarray(numpy.random.rand(*shp),dtype=dtype)
ret = numpy.zeros(shp, dtype=dtype)+num
return ret
fw, fx, fy, fz = fmatrices('wxyz')
......@@ -601,15 +601,15 @@ class test_fusion(unittest.TestCase):
fxv = my_init(shp,'float32',2)
fyv = my_init(shp,'float32',3)
fzv = my_init(shp,'float32',4)
fvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
fvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
dwv = my_init(shp,'float64',5)
ixv = numpy.asarray(my_init(shp,num=60),dtype='int32')
iyv = numpy.asarray(my_init(shp,num=70),dtype='int32')
izv = numpy.asarray(my_init(shp,num=70),dtype='int32')
ixv = theano._asarray(my_init(shp,num=60),dtype='int32')
iyv = theano._asarray(my_init(shp,num=70),dtype='int32')
izv = theano._asarray(my_init(shp,num=70),dtype='int32')
# dxv = my_init(shp,'float64',6)
# dyv = my_init(shp,'float64',7)
# dzv = my_init(shp,'float64',8)
# dvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0])
# dvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0])
fwx=fw+fx
cases = [
(fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,fxv+fyv+fzv,'float32'),#1
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论