提交 28e2a65a authored 作者: Olivier Delalleau's avatar Olivier Delalleau

Replacing numpy.asarray by theano._asarray when a dtype conversion may be…

Replacing numpy.asarray by theano._asarray when a dtype conversion may be performed, to work around Numpy ticket 870 defect
上级 b7e570c6
...@@ -389,7 +389,7 @@ Here is an example showing how to use verify_grad: ...@@ -389,7 +389,7 @@ Here is an example showing how to use verify_grad:
>>> def test_flatten_outdimNone(): >>> def test_flatten_outdimNone():
>>> a = dmatrix() >>> a = dmatrix()
>>> # ... >>> # ...
>>> a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64') >>> a_val = theano._asarray([[0,1,2],[3,4,5]], dtype='float64')
>>> # ... >>> # ...
>>> tensor.verify_grad(Flatten(), [a_val]) >>> tensor.verify_grad(Flatten(), [a_val])
......
...@@ -67,7 +67,7 @@ BUT, YOU GOTTA RUN THIS CODE AND MAKE SURE IT STILL WORKS NICELY, HEY? ...@@ -67,7 +67,7 @@ BUT, YOU GOTTA RUN THIS CODE AND MAKE SURE IT STILL WORKS NICELY, HEY?
x_data = numpy.random.randn(100, 10) x_data = numpy.random.randn(100, 10)
y_data = numpy.random.randn(100, 3) y_data = numpy.random.randn(100, 3)
y_data = numpy.asarray(y_data == numpy.max(y_data, axis=1), dtype='int64') y_data = theano._asarray(y_data == numpy.max(y_data, axis=1), dtype='int64')
print "Model Training ..." print "Model Training ..."
for iteration in xrange(1000): for iteration in xrange(1000):
......
...@@ -173,7 +173,10 @@ More simple numpy stuff ...@@ -173,7 +173,10 @@ More simple numpy stuff
``x.reshape(x.size)`` ``x.reshape(x.size)``
You can also use ``resize`` but there is not reason to ''resize'' You can also use ``resize`` but there is not reason to ''resize''
* How do you convert the type of a numpy array? * How do you convert the type of a numpy array?
``numpy.asarray(x, dtype = 'int32')`` ``theano._asarray(x, dtype = 'int32')``
Note that using ``numpy.asarray`` is potentially dangerous, due to
a problem in numpy where the type may not be properly set (see
numpy's Trac ticket #870).
========================================= =========================================
......
...@@ -53,6 +53,7 @@ from compile import \ ...@@ -53,6 +53,7 @@ from compile import \
ProfileMode, \ ProfileMode, \
Param, shared Param, shared
from misc.safe_asarray import _asarray
FancyModule = Module FancyModule = Module
......
...@@ -211,7 +211,7 @@ class Test_pfunc(unittest.TestCase): ...@@ -211,7 +211,7 @@ class Test_pfunc(unittest.TestCase):
z = tensor.ivector() z = tensor.ivector()
c = z*y c = z*y
f = pfunc([y], c+7, givens = {z : numpy.asarray([4,4,4], dtype='int32')}) f = pfunc([y], c+7, givens = {z : theano._asarray([4,4,4], dtype='int32')})
assert numpy.all(f([1,1,1]) == [11,11,11]) assert numpy.all(f([1,1,1]) == [11,11,11])
assert x.value == 0 assert x.value == 0
......
...@@ -106,7 +106,7 @@ class Test_SharedVariable(unittest.TestCase): ...@@ -106,7 +106,7 @@ class Test_SharedVariable(unittest.TestCase):
pass pass
# check that an assignment of a perfect value results in no copying # check that an assignment of a perfect value results in no copying
uval = numpy.asarray([5,6,7,8], dtype='float64') uval = theano._asarray([5,6,7,8], dtype='float64')
u.value = uval u.value = uval
assert u.value is uval assert u.value is uval
......
"""
Helper function to safely convert an array to a new data type.
"""
__docformat__ = "restructuredtext en"
import numpy
def _asarray(a, dtype=None, order=None):
"""Convert the input to a Numpy array.
This function is almost identical to ``numpy.asarray``, but it should be
used instead of its numpy counterpart when a data type is provided in
order to perform type conversion if required.
The reason is that ``numpy.asarray`` may not actually update the array's
data type to the user-provided type. For more information see ticket
http://projects.scipy.org/numpy/ticket/870.
Currently, this issue has only been causing trouble when the target
data type is 'int32', on some computers. As a result, this is the only
situation where we do more than a simple call to ``numpy.asarray``. If it
turns out that a similar problem can occur for more data type, this
function should be updated accordingly.
This function's name starts with a '_' to indicate that it is meant to be
used internally. It is imported so as to be available directly through
theano._asarray
"""
rval = numpy.asarray(a, dtype=dtype, order=order)
if dtype is numpy.int32 or dtype == 'int32':
# Make sure the type is properly set to the correct type.
return rval.view(dtype=numpy.int32)
else:
# Using ``numpy.asarray`` should work just fine.
return rval
...@@ -57,7 +57,7 @@ class GpuFromHost(Op): ...@@ -57,7 +57,7 @@ class GpuFromHost(Op):
raise TypeError(x) raise TypeError(x)
return Apply(self, [x], [CudaNdarrayType(broadcastable=x.broadcastable)()]) return Apply(self, [x], [CudaNdarrayType(broadcastable=x.broadcastable)()])
def perform(self, node, (x,), (z,)): def perform(self, node, (x,), (z,)):
z[0] = type_support_filter(numpy.asarray(x, dtype='float32'), tuple([0]*x.ndim), 0) z[0] = type_support_filter(theano._asarray(x, dtype='float32'), tuple([0]*x.ndim), 0)
def grad(self, inputs, (gz,)): def grad(self, inputs, (gz,)):
return gz, return gz,
#return [HostFromGpu()(gz)] #return [HostFromGpu()(gz)]
......
...@@ -39,7 +39,7 @@ def test_sum(): ...@@ -39,7 +39,7 @@ def test_sum():
val = numpy.random.rand(numpy.prod(shape)).reshape(shape) val = numpy.random.rand(numpy.prod(shape)).reshape(shape)
# val = numpy.ones(shape) # val = numpy.ones(shape)
# val = numpy.arange(numpy.prod(shape)).reshape(shape) # val = numpy.arange(numpy.prod(shape)).reshape(shape)
val = numpy.asarray(val,dtype='float32') val = theano._asarray(val,dtype='float32')
f = theano.function([a],b, mode=mode_with_gpu) f = theano.function([a],b, mode=mode_with_gpu)
f2 = theano.function([a],b) f2 = theano.function([a],b)
assert tcn.GpuSum in [x.op.__class__ for x in f.maker.env.toposort()] assert tcn.GpuSum in [x.op.__class__ for x in f.maker.env.toposort()]
...@@ -60,7 +60,7 @@ def test_sum(): ...@@ -60,7 +60,7 @@ def test_sum():
val = numpy.random.rand(numpy.prod(shape)).reshape(shape) val = numpy.random.rand(numpy.prod(shape)).reshape(shape)
# val = numpy.ones(shape) # val = numpy.ones(shape)
# val = numpy.arange(numpy.prod(shape)).reshape(shape) # val = numpy.arange(numpy.prod(shape)).reshape(shape)
val = numpy.asarray(val,dtype='float32') val = theano._asarray(val,dtype='float32')
val2 = cuda.CudaNdarray(val) val2 = cuda.CudaNdarray(val)
if len(shape)==1: if len(shape)==1:
val = val[::2] val = val[::2]
...@@ -89,22 +89,22 @@ def test_reshape(): ...@@ -89,22 +89,22 @@ def test_reshape():
#basic #basic
f = theano.function([a], c) f = theano.function([a], c)
fv = f(cuda_ndarray.CudaNdarray(numpy.asarray([0,1,2,3,4,5],dtype='float32'))) fv = f(cuda_ndarray.CudaNdarray(theano._asarray([0,1,2,3,4,5],dtype='float32')))
assert numpy.all(fv == numpy.asarray([[0,1,2], [3,4,5]])) assert numpy.all(fv == numpy.asarray([[0,1,2], [3,4,5]]))
#test that it works without inplace operations #test that it works without inplace operations
a_val = cuda_ndarray.CudaNdarray(numpy.asarray([0,1,2,3,4,5],dtype='float32')) a_val = cuda_ndarray.CudaNdarray(theano._asarray([0,1,2,3,4,5],dtype='float32'))
a_val_copy = cuda_ndarray.CudaNdarray(numpy.asarray([0,1,2,3,4,5],dtype='float32')) a_val_copy = cuda_ndarray.CudaNdarray(theano._asarray([0,1,2,3,4,5],dtype='float32'))
b_val = cuda_ndarray.CudaNdarray(numpy.asarray([[0,1,2],[3,4,5]],dtype='float32')) b_val = cuda_ndarray.CudaNdarray(theano._asarray([[0,1,2],[3,4,5]],dtype='float32'))
f_sub = theano.function([a,b], c-b) f_sub = theano.function([a,b], c-b)
assert numpy.all(f_sub(a_val, b_val) == 0.0) assert numpy.all(f_sub(a_val, b_val) == 0.0)
assert numpy.all(numpy.asarray(a_val) == numpy.asarray(a_val_copy)) assert numpy.all(numpy.asarray(a_val) == numpy.asarray(a_val_copy))
#test that it works with inplace operations #test that it works with inplace operations
a_val = numpy.asarray([0,1,2,3,4,5], dtype='float32') a_val = theano._asarray([0,1,2,3,4,5], dtype='float32')
a_val_copy = numpy.asarray([0,1,2,3,4,5], dtype='float32') a_val_copy = theano._asarray([0,1,2,3,4,5], dtype='float32')
b_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float32') b_val = theano._asarray([[0,1,2],[3,4,5]], dtype='float32')
f_sub = theano.function([a,b], c-b) f_sub = theano.function([a,b], c-b)
assert numpy.all(f_sub(a_val, b_val) == 0.0) assert numpy.all(f_sub(a_val, b_val) == 0.0)
...@@ -112,7 +112,7 @@ def test_reshape(): ...@@ -112,7 +112,7 @@ def test_reshape():
# verify gradient # verify gradient
def just_vals(v): def just_vals(v):
return T.Reshape(2)(v, numpy.asarray([2,3], dtype='int32')) return T.Reshape(2)(v, theano._asarray([2,3], dtype='int32'))
utt.verify_grad(just_vals, [a_val]) utt.verify_grad(just_vals, [a_val])
def test_elemwise0(): def test_elemwise0():
...@@ -231,8 +231,8 @@ def speed_elemwise_collapse(): ...@@ -231,8 +231,8 @@ def speed_elemwise_collapse():
""" used to time if the collapse of ccontiguous dims are usefull """ """ used to time if the collapse of ccontiguous dims are usefull """
shape = (30,40,50,600) shape = (30,40,50,600)
a = cuda_ndarray.CudaNdarray(numpy.asarray(numpy.random.rand(*shape),dtype='float32')) a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),dtype='float32'))
a = numpy.asarray(numpy.random.rand(*shape),dtype='float32') a = theano._asarray(numpy.random.rand(*shape),dtype='float32')
a2 = tcn.shared_constructor(a, 'a') a2 = tcn.shared_constructor(a, 'a')
a3 = a2[:,::2,:,:] a3 = a2[:,::2,:,:]
b = tcn.CudaNdarrayType((False, False, False, False))() b = tcn.CudaNdarrayType((False, False, False, False))()
...@@ -240,7 +240,7 @@ def speed_elemwise_collapse(): ...@@ -240,7 +240,7 @@ def speed_elemwise_collapse():
f = pfunc([b], [c]) f = pfunc([b], [c])
v = numpy.asarray(numpy.random.rand(*shape),dtype='float32') v = theano._asarray(numpy.random.rand(*shape),dtype='float32')
v = v[:,::2,:,:] v = v[:,::2,:,:]
v=cuda_ndarray.CudaNdarray(v) v=cuda_ndarray.CudaNdarray(v)
for id,n in enumerate(f.maker.env.toposort()): for id,n in enumerate(f.maker.env.toposort()):
...@@ -255,8 +255,8 @@ def speed_elemwise_collapse2(): ...@@ -255,8 +255,8 @@ def speed_elemwise_collapse2():
""" used to test the speed up of the generalised collapse of ccontiguous dims""" """ used to test the speed up of the generalised collapse of ccontiguous dims"""
shape = (30,40,50,600) shape = (30,40,50,600)
a = cuda_ndarray.CudaNdarray(numpy.asarray(numpy.random.rand(*shape),dtype='float32')) a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),dtype='float32'))
a = numpy.asarray(numpy.random.rand(*shape),dtype='float32') a = theano._asarray(numpy.random.rand(*shape),dtype='float32')
a2 = tcn.shared_constructor(a, 'a') a2 = tcn.shared_constructor(a, 'a')
a3 = a2[:,:,:,::2] a3 = a2[:,:,:,::2]
b = tcn.CudaNdarrayType((False, False, False, False))() b = tcn.CudaNdarrayType((False, False, False, False))()
...@@ -264,7 +264,7 @@ def speed_elemwise_collapse2(): ...@@ -264,7 +264,7 @@ def speed_elemwise_collapse2():
f = pfunc([b], [c]) f = pfunc([b], [c])
v = numpy.asarray(numpy.random.rand(*shape),dtype='float32') v = theano._asarray(numpy.random.rand(*shape),dtype='float32')
v = v[:,:,:,::2] v = v[:,:,:,::2]
v=cuda_ndarray.CudaNdarray(v) v=cuda_ndarray.CudaNdarray(v)
for id,n in enumerate(f.maker.env.toposort()): for id,n in enumerate(f.maker.env.toposort()):
...@@ -279,8 +279,8 @@ def test_elemwise_collapse(): ...@@ -279,8 +279,8 @@ def test_elemwise_collapse():
""" Test when all inputs have one(and the same) broadcastable dimension """ """ Test when all inputs have one(and the same) broadcastable dimension """
shape = (4,5,60) shape = (4,5,60)
a = cuda_ndarray.CudaNdarray(numpy.asarray(numpy.random.rand(*shape),dtype='float32')) a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),dtype='float32'))
a = numpy.asarray(numpy.random.rand(*shape),dtype='float32') a = theano._asarray(numpy.random.rand(*shape),dtype='float32')
a2 = tcn.shared_constructor(a, 'a') a2 = tcn.shared_constructor(a, 'a')
a3 = a2.dimshuffle(0,'x',1,2) a3 = a2.dimshuffle(0,'x',1,2)
b = tcn.CudaNdarrayType((False, True, False, False))() b = tcn.CudaNdarrayType((False, True, False, False))()
...@@ -288,7 +288,7 @@ def test_elemwise_collapse(): ...@@ -288,7 +288,7 @@ def test_elemwise_collapse():
f = pfunc([b], [c]) f = pfunc([b], [c])
v = numpy.asarray(numpy.random.rand(shape[0],1,*shape[1:]),dtype='float32') v = theano._asarray(numpy.random.rand(shape[0],1,*shape[1:]),dtype='float32')
v=cuda_ndarray.CudaNdarray(v) v=cuda_ndarray.CudaNdarray(v)
if False: if False:
for id,n in enumerate(f.maker.env.toposort()): for id,n in enumerate(f.maker.env.toposort()):
...@@ -302,8 +302,8 @@ def test_elemwise_collapse2(): ...@@ -302,8 +302,8 @@ def test_elemwise_collapse2():
""" Test when only one inputs have one broadcastable dimension """ """ Test when only one inputs have one broadcastable dimension """
shape = (4,5,60) shape = (4,5,60)
a = cuda_ndarray.CudaNdarray(numpy.asarray(numpy.random.rand(*shape),dtype='float32')) a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),dtype='float32'))
a = numpy.asarray(numpy.random.rand(*shape),dtype='float32') a = theano._asarray(numpy.random.rand(*shape),dtype='float32')
a2 = tcn.shared_constructor(a, 'a') a2 = tcn.shared_constructor(a, 'a')
a3 = a2.dimshuffle(0,'x',1,2) a3 = a2.dimshuffle(0,'x',1,2)
b = tcn.CudaNdarrayType((False, False, False, False))() b = tcn.CudaNdarrayType((False, False, False, False))()
...@@ -311,7 +311,7 @@ def test_elemwise_collapse2(): ...@@ -311,7 +311,7 @@ def test_elemwise_collapse2():
f = pfunc([b], [c]) f = pfunc([b], [c])
v = numpy.asarray(numpy.random.rand(shape[0],5,*shape[1:]),dtype='float32') v = theano._asarray(numpy.random.rand(shape[0],5,*shape[1:]),dtype='float32')
v=cuda_ndarray.CudaNdarray(v) v=cuda_ndarray.CudaNdarray(v)
if False: if False:
for id,n in enumerate(f.maker.env.toposort()): for id,n in enumerate(f.maker.env.toposort()):
...@@ -325,8 +325,8 @@ def test_elemwise_collapse3(): ...@@ -325,8 +325,8 @@ def test_elemwise_collapse3():
""" Test when only one inputs have two broadcastable dimension at each ends """ """ Test when only one inputs have two broadcastable dimension at each ends """
shape = (4,5) shape = (4,5)
a = cuda_ndarray.CudaNdarray(numpy.asarray(numpy.random.rand(*shape),dtype='float32')) a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),dtype='float32'))
a = numpy.asarray(numpy.random.rand(*shape),dtype='float32') a = theano._asarray(numpy.random.rand(*shape),dtype='float32')
a2 = tcn.shared_constructor(a, 'a') a2 = tcn.shared_constructor(a, 'a')
a3 = a2.dimshuffle('x',0,1,'x') a3 = a2.dimshuffle('x',0,1,'x')
b = tcn.CudaNdarrayType((False, False, False, False))() b = tcn.CudaNdarrayType((False, False, False, False))()
...@@ -334,7 +334,7 @@ def test_elemwise_collapse3(): ...@@ -334,7 +334,7 @@ def test_elemwise_collapse3():
f = pfunc([b], [c]) f = pfunc([b], [c])
v = numpy.asarray(numpy.random.rand(5,shape[0],shape[1],4),dtype='float32') v = theano._asarray(numpy.random.rand(5,shape[0],shape[1],4),dtype='float32')
v=cuda_ndarray.CudaNdarray(v) v=cuda_ndarray.CudaNdarray(v)
if False: if False:
for id,n in enumerate(f.maker.env.toposort()): for id,n in enumerate(f.maker.env.toposort()):
...@@ -348,8 +348,8 @@ def test_elemwise_collapse4(): ...@@ -348,8 +348,8 @@ def test_elemwise_collapse4():
""" Test when only one inputs have two broadcastable dimension at each ends and we add a scalar""" """ Test when only one inputs have two broadcastable dimension at each ends and we add a scalar"""
shape = (4,5) shape = (4,5)
a = cuda_ndarray.CudaNdarray(numpy.asarray(numpy.random.rand(*shape),dtype='float32')) a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),dtype='float32'))
a = numpy.asarray(numpy.random.rand(*shape),dtype='float32') a = theano._asarray(numpy.random.rand(*shape),dtype='float32')
a2 = tcn.shared_constructor(a, 'a') a2 = tcn.shared_constructor(a, 'a')
a3 = a2.dimshuffle('x',0,1,'x') a3 = a2.dimshuffle('x',0,1,'x')
b = tcn.CudaNdarrayType((False, False, False, False))() b = tcn.CudaNdarrayType((False, False, False, False))()
...@@ -357,7 +357,7 @@ def test_elemwise_collapse4(): ...@@ -357,7 +357,7 @@ def test_elemwise_collapse4():
f = pfunc([b], [c]) f = pfunc([b], [c])
v = numpy.asarray(numpy.random.rand(5,shape[0],shape[1],4),dtype='float32') v = theano._asarray(numpy.random.rand(5,shape[0],shape[1],4),dtype='float32')
v=cuda_ndarray.CudaNdarray(v) v=cuda_ndarray.CudaNdarray(v)
if False: if False:
for id,n in enumerate(f.maker.env.toposort()): for id,n in enumerate(f.maker.env.toposort()):
...@@ -371,8 +371,8 @@ def test_elemwise_collapse5(): ...@@ -371,8 +371,8 @@ def test_elemwise_collapse5():
""" Test when only one inputs have two broadcastable dimension at the beginning and we add a scalar""" """ Test when only one inputs have two broadcastable dimension at the beginning and we add a scalar"""
shape = (4,5) shape = (4,5)
a = cuda_ndarray.CudaNdarray(numpy.asarray(numpy.random.rand(*shape),dtype='float32')) a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),dtype='float32'))
a = numpy.asarray(numpy.random.rand(*shape),dtype='float32') a = theano._asarray(numpy.random.rand(*shape),dtype='float32')
a2 = tcn.shared_constructor(a, 'a') a2 = tcn.shared_constructor(a, 'a')
a3 = a2.dimshuffle('x','x',0,1) a3 = a2.dimshuffle('x','x',0,1)
b = tcn.CudaNdarrayType((False, False, False, False))() b = tcn.CudaNdarrayType((False, False, False, False))()
...@@ -380,7 +380,7 @@ def test_elemwise_collapse5(): ...@@ -380,7 +380,7 @@ def test_elemwise_collapse5():
f = pfunc([b], [c]) f = pfunc([b], [c])
v = numpy.asarray(numpy.random.rand(5,4,shape[0],shape[1]),dtype='float32') v = theano._asarray(numpy.random.rand(5,4,shape[0],shape[1]),dtype='float32')
v=cuda_ndarray.CudaNdarray(v) v=cuda_ndarray.CudaNdarray(v)
if False: if False:
for id,n in enumerate(f.maker.env.toposort()): for id,n in enumerate(f.maker.env.toposort()):
...@@ -394,14 +394,14 @@ def test_elemwise_collapse6(): ...@@ -394,14 +394,14 @@ def test_elemwise_collapse6():
""" Test when all inputs have two broadcastable dimension at the beginning""" """ Test when all inputs have two broadcastable dimension at the beginning"""
shape = (4,5) shape = (4,5)
a = cuda_ndarray.CudaNdarray(numpy.asarray(numpy.random.rand(*shape),dtype='float32')) a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),dtype='float32'))
a = numpy.asarray(numpy.random.rand(*shape),dtype='float32') a = theano._asarray(numpy.random.rand(*shape),dtype='float32')
a2 = tcn.shared_constructor(a, 'a') a2 = tcn.shared_constructor(a, 'a')
a3 = a2.dimshuffle('x','x',0,1) a3 = a2.dimshuffle('x','x',0,1)
b = tcn.CudaNdarrayType((True, True, False, False))() b = tcn.CudaNdarrayType((True, True, False, False))()
f = pfunc([b], [a3+b]) f = pfunc([b], [a3+b])
v = numpy.asarray(numpy.random.rand(1,1,shape[0],shape[1]),dtype='float32') v = theano._asarray(numpy.random.rand(1,1,shape[0],shape[1]),dtype='float32')
v=cuda_ndarray.CudaNdarray(v) v=cuda_ndarray.CudaNdarray(v)
if False: if False:
for id,n in enumerate(f.maker.env.toposort()): for id,n in enumerate(f.maker.env.toposort()):
...@@ -416,8 +416,8 @@ def test_elemwise_collapse7(atol=1e-6): ...@@ -416,8 +416,8 @@ def test_elemwise_collapse7(atol=1e-6):
""" Test when one input have one broadcastable dimension and the other is a scalar""" """ Test when one input have one broadcastable dimension and the other is a scalar"""
shape = (5,4,1) shape = (5,4,1)
a = cuda_ndarray.CudaNdarray(numpy.asarray(numpy.random.rand(*shape),dtype='float32')) a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),dtype='float32'))
a = numpy.asarray(numpy.random.rand(*shape),dtype='float32') a = theano._asarray(numpy.random.rand(*shape),dtype='float32')
a2 = tcn.shared_constructor(a.copy(), 'a') a2 = tcn.shared_constructor(a.copy(), 'a')
a3 = a2.dimshuffle(0, 'x', 1, 2) a3 = a2.dimshuffle(0, 'x', 1, 2)
f = pfunc([], [a3+2]) f = pfunc([], [a3+2])
......
...@@ -23,7 +23,7 @@ _logger = logging.getLogger('driver_kouh') ...@@ -23,7 +23,7 @@ _logger = logging.getLogger('driver_kouh')
def _shared_uniform(rng, low, high, size, dtype, name=None): def _shared_uniform(rng, low, high, size, dtype, name=None):
return shared( return shared(
numpy.asarray( theano._asarray(
rng.uniform(low=low, high=high, size=size), rng.uniform(low=low, high=high, size=size),
dtype=dtype), name) dtype=dtype), name)
...@@ -46,7 +46,7 @@ class Kouh2008(object): ...@@ -46,7 +46,7 @@ class Kouh2008(object):
if len(w_list) != len(x_list): if len(w_list) != len(x_list):
raise ValueError('w_list must have same len as x_list') raise ValueError('w_list must have same len as x_list')
output = (sum(w * tensor.pow(x, p) for (w,x) in zip(w_list, x_list)))\ output = (sum(w * tensor.pow(x, p) for (w,x) in zip(w_list, x_list)))\
/ (numpy.asarray(eps, dtype=k.type.dtype) + k + tensor.pow(sum(tensor.pow(x, q) for x in x_list), r)) / (theano._asarray(eps, dtype=k.type.dtype) + k + tensor.pow(sum(tensor.pow(x, q) for x in x_list), r))
assert output.type.ndim == 2 assert output.type.ndim == 2
self.__dict__.update(locals()) self.__dict__.update(locals())
...@@ -79,8 +79,8 @@ class Kouh2008(object): ...@@ -79,8 +79,8 @@ class Kouh2008(object):
w_l2_sqr = sum((wi**2).sum() for wi in w_list) w_l2_sqr = sum((wi**2).sum() for wi in w_list)
e_range_low, e_range_high = exponent_range e_range_low, e_range_high = exponent_range
e_range_low = numpy.asarray(e_range_low, dtype=dtype) e_range_low = theano._asarray(e_range_low, dtype=dtype)
e_range_high = numpy.asarray(e_range_high, dtype=dtype) e_range_high = theano._asarray(e_range_high, dtype=dtype)
e_range_mag = e_range_high - e_range_low e_range_mag = e_range_high - e_range_low
if e_range_mag < 0: if e_range_mag < 0:
raise ValueError('exponent range must have low <= high') raise ValueError('exponent range must have low <= high')
...@@ -93,8 +93,8 @@ class Kouh2008(object): ...@@ -93,8 +93,8 @@ class Kouh2008(object):
p = tensor.nnet.sigmoid(p_unbounded) * e_range_mag + e_range_low p = tensor.nnet.sigmoid(p_unbounded) * e_range_mag + e_range_low
q = tensor.nnet.sigmoid(q_unbounded) * e_range_mag + e_range_low q = tensor.nnet.sigmoid(q_unbounded) * e_range_mag + e_range_low
r = tensor.nnet.sigmoid(r_unbounded) * \ r = tensor.nnet.sigmoid(r_unbounded) * \
numpy.asarray(1.0/e_range_low - 1.0/e_range_high, dtype=dtype) \ theano._asarray(1.0/e_range_low - 1.0/e_range_high, dtype=dtype) \
+ numpy.asarray(1.0/e_range_high, dtype=dtype) + theano._asarray(1.0/e_range_high, dtype=dtype)
k = softsign(k_unbounded) k = softsign(k_unbounded)
...@@ -157,10 +157,10 @@ class Kouh2008(object): ...@@ -157,10 +157,10 @@ class Kouh2008(object):
b_list = [shared_uniform(low=0, high=.01, size=(n_out,), name='b_%i'%i) b_list = [shared_uniform(low=0, high=.01, size=(n_out,), name='b_%i'%i)
for i in xrange(n_terms)] for i in xrange(n_terms)]
#x_list = [numpy.asarray(eps, dtype=dtype)+softplus(tensor.dot(input, f_list[i])) for i in xrange(n_terms)] #x_list = [theano._asarray(eps, dtype=dtype)+softplus(tensor.dot(input, f_list[i])) for i in xrange(n_terms)]
filter_range = numpy.asarray(filter_range, dtype=dtype) filter_range = theano._asarray(filter_range, dtype=dtype)
half_filter_range = numpy.asarray(filter_range/2, dtype=dtype) half_filter_range = theano._asarray(filter_range/2, dtype=dtype)
x_list = [numpy.asarray(filter_range + eps, dtype=dtype)+half_filter_range *softsign(tensor.dot(input, f_list[i]) + x_list = [theano._asarray(filter_range + eps, dtype=dtype)+half_filter_range *softsign(tensor.dot(input, f_list[i]) +
b_list[i]) for i in xrange(n_terms)] b_list[i]) for i in xrange(n_terms)]
rval = cls.new_expbounds(rng, x_list, n_out, dtype=dtype, params=f_list + b_list, rval = cls.new_expbounds(rng, x_list, n_out, dtype=dtype, params=f_list + b_list,
...@@ -304,7 +304,7 @@ def test_bench_elemwise(n_iter=1000, **kwargs): ...@@ -304,7 +304,7 @@ def test_bench_elemwise(n_iter=1000, **kwargs):
train_nll = pfunc([x, y, s_lr], [], updates=updates) train_nll = pfunc([x, y, s_lr], [], updates=updates)
xval = numpy.asarray( xval = theano._asarray(
rng.uniform(size=(conf.ft_batchsize, x.type.shape[1])), rng.uniform(size=(conf.ft_batchsize, x.type.shape[1])),
dtype=conf.dtype2, dtype=conf.dtype2,
) )
......
...@@ -59,14 +59,14 @@ def _params_allgood_header(): ...@@ -59,14 +59,14 @@ def _params_allgood_header():
def _params_allgood(ishape, kshape, mode, subsample=(1,1), img_stride=(1,1), kern_stride=(1,1), version=-1, verbose=0, random=True, print_=None, id=None, rtol=1e-5, atol = 1e-8, nb_iter=0, ones=False): def _params_allgood(ishape, kshape, mode, subsample=(1,1), img_stride=(1,1), kern_stride=(1,1), version=-1, verbose=0, random=True, print_=None, id=None, rtol=1e-5, atol = 1e-8, nb_iter=0, ones=False):
if ones: if ones:
assert not random assert not random
npy_img = numpy.asarray(numpy.ones(ishape), dtype='float32') npy_img = theano._asarray(numpy.ones(ishape), dtype='float32')
npy_kern = -numpy.asarray(numpy.ones(kshape), dtype='float32') npy_kern = -theano._asarray(numpy.ones(kshape), dtype='float32')
elif random: elif random:
npy_img = numpy.asarray(numpy.random.rand(*ishape), dtype='float32') npy_img = theano._asarray(numpy.random.rand(*ishape), dtype='float32')
npy_kern = numpy.asarray(numpy.random.rand(*kshape), dtype='float32') npy_kern = theano._asarray(numpy.random.rand(*kshape), dtype='float32')
else: else:
npy_img = numpy.asarray(numpy.arange(numpy.prod(ishape)).reshape(ishape), dtype='float32')+1 npy_img = theano._asarray(numpy.arange(numpy.prod(ishape)).reshape(ishape), dtype='float32')+1
npy_kern = -(numpy.asarray(numpy.arange(numpy.prod(kshape)).reshape(kshape), dtype='float32')+1) npy_kern = -(theano._asarray(numpy.arange(numpy.prod(kshape)).reshape(kshape), dtype='float32')+1)
img = cuda_ndarray.CudaNdarray(npy_img) img = cuda_ndarray.CudaNdarray(npy_img)
kern = cuda_ndarray.CudaNdarray(npy_kern) kern = cuda_ndarray.CudaNdarray(npy_kern)
...@@ -369,8 +369,8 @@ def _test_dummy(): ...@@ -369,8 +369,8 @@ def _test_dummy():
mode = 'valid' mode = 'valid'
subsample = (1,1) subsample = (1,1)
npy_img = numpy.asarray(numpy.random.rand(*ishape), dtype='float32') npy_img = theano._asarray(numpy.random.rand(*ishape), dtype='float32')
npy_kern = numpy.asarray(numpy.random.rand(*kshape), dtype='float32') npy_kern = theano._asarray(numpy.random.rand(*kshape), dtype='float32')
img = cuda_ndarray.CudaNdarray(npy_img) img = cuda_ndarray.CudaNdarray(npy_img)
kern = cuda_ndarray.CudaNdarray(npy_kern) kern = cuda_ndarray.CudaNdarray(npy_kern)
......
...@@ -9,14 +9,14 @@ import numpy ...@@ -9,14 +9,14 @@ import numpy
def test_host_to_device(): def test_host_to_device():
print >>sys.stderr, 'starting test_host_to_dev' print >>sys.stderr, 'starting test_host_to_dev'
for shape in ((), (3,), (2,3), (3,4,5,6)): for shape in ((), (3,), (2,3), (3,4,5,6)):
a = numpy.asarray(numpy.random.rand(*shape), dtype='float32') a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
b = cuda_ndarray.CudaNdarray(a) b = cuda_ndarray.CudaNdarray(a)
c = numpy.asarray(b) c = numpy.asarray(b)
assert numpy.all(a == c) assert numpy.all(a == c)
def test_add(): def test_add():
for shape in ((), (3,), (2,3), (1,10000000),(10,1000000), (100,100000),(1000,10000),(10000,1000)): for shape in ((), (3,), (2,3), (1,10000000),(10,1000000), (100,100000),(1000,10000),(10000,1000)):
a0 = numpy.asarray(numpy.random.rand(*shape), dtype='float32') a0 = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a1 = a0.copy() a1 = a0.copy()
b0 = cuda_ndarray.CudaNdarray(a0) b0 = cuda_ndarray.CudaNdarray(a0)
b1 = cuda_ndarray.CudaNdarray(a1) b1 = cuda_ndarray.CudaNdarray(a1)
...@@ -54,7 +54,7 @@ def test_add(): ...@@ -54,7 +54,7 @@ def test_add():
def test_exp(): def test_exp():
print >>sys.stderr, 'starting test_exp' print >>sys.stderr, 'starting test_exp'
for shape in ((), (3,), (2,3), (1,10000000),(10,1000000), (100,100000),(1000,10000),(10000,1000)): for shape in ((), (3,), (2,3), (1,10000000),(10,1000000), (100,100000),(1000,10000),(10000,1000)):
a0 = numpy.asarray(numpy.random.rand(*shape), dtype='float32') a0 = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a1 = a0.copy() a1 = a0.copy()
b0 = cuda_ndarray.CudaNdarray(a0) b0 = cuda_ndarray.CudaNdarray(a0)
b1 = cuda_ndarray.CudaNdarray(a1) b1 = cuda_ndarray.CudaNdarray(a1)
...@@ -75,7 +75,7 @@ def test_exp(): ...@@ -75,7 +75,7 @@ def test_exp():
def test_copy(): def test_copy():
print >>sys.stderr, 'starting test_copy' print >>sys.stderr, 'starting test_copy'
shape = (5,) shape = (5,)
a = numpy.asarray(numpy.random.rand(*shape), dtype='float32') a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
print >>sys.stderr, '.. creating device object' print >>sys.stderr, '.. creating device object'
b = cuda_ndarray.CudaNdarray(a) b = cuda_ndarray.CudaNdarray(a)
...@@ -92,8 +92,8 @@ def test_copy(): ...@@ -92,8 +92,8 @@ def test_copy():
def test_dot(): def test_dot():
print >>sys.stderr, 'starting test_dot' print >>sys.stderr, 'starting test_dot'
a0 = numpy.asarray(numpy.random.rand(4, 7), dtype='float32') a0 = theano._asarray(numpy.random.rand(4, 7), dtype='float32')
a1 = numpy.asarray(numpy.random.rand(7, 6), dtype='float32') a1 = theano._asarray(numpy.random.rand(7, 6), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0) b0 = cuda_ndarray.CudaNdarray(a0)
b1 = cuda_ndarray.CudaNdarray(a1) b1 = cuda_ndarray.CudaNdarray(a1)
...@@ -104,7 +104,7 @@ def test_dot(): ...@@ -104,7 +104,7 @@ def test_dot():
def test_sum(): def test_sum():
shape = (2,3) shape = (2,3)
a0 = numpy.asarray(numpy.arange(shape[0]*shape[1]).reshape(shape), dtype='float32') a0 = theano._asarray(numpy.arange(shape[0]*shape[1]).reshape(shape), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0) b0 = cuda_ndarray.CudaNdarray(a0)
...@@ -121,17 +121,17 @@ def test_sum(): ...@@ -121,17 +121,17 @@ def test_sum():
assert numpy.allclose(a0, numpy.asarray(b0.reduce_sum([0,0]))) assert numpy.allclose(a0, numpy.asarray(b0.reduce_sum([0,0])))
shape = (3,4,5,6,7,8) shape = (3,4,5,6,7,8)
a0 = numpy.asarray(numpy.arange(3*4*5*6*7*8).reshape(shape), dtype='float32') a0 = theano._asarray(numpy.arange(3*4*5*6*7*8).reshape(shape), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0) b0 = cuda_ndarray.CudaNdarray(a0)
assert numpy.allclose(a0.sum(axis=5).sum(axis=3).sum(axis=0), numpy.asarray(b0.reduce_sum([1,0,0,1,0,1]))) assert numpy.allclose(a0.sum(axis=5).sum(axis=3).sum(axis=0), numpy.asarray(b0.reduce_sum([1,0,0,1,0,1])))
shape = (16,2048) shape = (16,2048)
a0 = numpy.asarray(numpy.arange(16*2048).reshape(shape), dtype='float32') a0 = theano._asarray(numpy.arange(16*2048).reshape(shape), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0) b0 = cuda_ndarray.CudaNdarray(a0)
assert numpy.allclose(a0.sum(axis=0), numpy.asarray(b0.reduce_sum([1,0]))) assert numpy.allclose(a0.sum(axis=0), numpy.asarray(b0.reduce_sum([1,0])))
shape = (16,10) shape = (16,10)
a0 = numpy.asarray(numpy.arange(160).reshape(shape), dtype='float32') a0 = theano._asarray(numpy.arange(160).reshape(shape), dtype='float32')
b0 = cuda_ndarray.CudaNdarray(a0) b0 = cuda_ndarray.CudaNdarray(a0)
assert numpy.allclose(a0.sum(), numpy.asarray(b0.reduce_sum([1,1]))) assert numpy.allclose(a0.sum(), numpy.asarray(b0.reduce_sum([1,1])))
...@@ -147,7 +147,7 @@ def test_reshape(): ...@@ -147,7 +147,7 @@ def test_reshape():
def subtest(shape_1, shape_2): def subtest(shape_1, shape_2):
#print >> sys.stderr, "INFO: shapes", shape_1, shape_2 #print >> sys.stderr, "INFO: shapes", shape_1, shape_2
a = numpy.asarray(numpy.random.rand(*shape_1), dtype='float32') a = theano._asarray(numpy.random.rand(*shape_1), dtype='float32')
b = cuda_ndarray.CudaNdarray(a) b = cuda_ndarray.CudaNdarray(a)
aa = a.reshape(shape_2) aa = a.reshape(shape_2)
...@@ -178,7 +178,7 @@ def test_getshape(): ...@@ -178,7 +178,7 @@ def test_getshape():
] ]
def subtest(shape): def subtest(shape):
a = numpy.asarray(numpy.random.rand(*shape_1), dtype='float32') a = theano._asarray(numpy.random.rand(*shape_1), dtype='float32')
b = cuda_ndarray.CudaNdarray(a) b = cuda_ndarray.CudaNdarray(a)
assert b.shape == a.shape assert b.shape == a.shape
...@@ -188,7 +188,7 @@ def test_getshape(): ...@@ -188,7 +188,7 @@ def test_getshape():
def test_stride_manipulation(): def test_stride_manipulation():
a = numpy.asarray([[0,1,2], [3,4,5]], dtype='float32') a = theano._asarray([[0,1,2], [3,4,5]], dtype='float32')
b = cuda_ndarray.CudaNdarray(a) b = cuda_ndarray.CudaNdarray(a)
v = b.view() v = b.view()
v._dev_data += 0 v._dev_data += 0
...@@ -212,7 +212,7 @@ def test_stride_manipulation(): ...@@ -212,7 +212,7 @@ def test_stride_manipulation():
def test_copy_subtensor0(): def test_copy_subtensor0():
sizeof_float=4 sizeof_float=4
a = numpy.asarray(numpy.random.rand(30,20,5,5), dtype='float32') a = theano._asarray(numpy.random.rand(30,20,5,5), dtype='float32')
cuda_a = cuda_ndarray.CudaNdarray(a) cuda_a = cuda_ndarray.CudaNdarray(a)
a_view = cuda_a.view() a_view = cuda_a.view()
a_view_strides = a_view._strides a_view_strides = a_view._strides
...@@ -225,7 +225,7 @@ def test_copy_subtensor0(): ...@@ -225,7 +225,7 @@ def test_copy_subtensor0():
assert numpy.all(a[:,:,::-1,::-1] == numpy.asarray(a_view_copy)) assert numpy.all(a[:,:,::-1,::-1] == numpy.asarray(a_view_copy))
def test_mapping_getitem_ellipsis(): def test_mapping_getitem_ellipsis():
a = numpy.asarray(numpy.random.rand(5,4,3,2), dtype='float32') a = theano._asarray(numpy.random.rand(5,4,3,2), dtype='float32')
a = cuda_ndarray.CudaNdarray(a) a = cuda_ndarray.CudaNdarray(a)
b = a[...] b = a[...]
...@@ -235,7 +235,7 @@ def test_mapping_getitem_ellipsis(): ...@@ -235,7 +235,7 @@ def test_mapping_getitem_ellipsis():
def test_mapping_getitem_reverse_some_dims(): def test_mapping_getitem_reverse_some_dims():
dim=(5,4,3,2) dim=(5,4,3,2)
a = numpy.asarray(numpy.random.rand(*dim), dtype='float32') a = theano._asarray(numpy.random.rand(*dim), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a) _a = cuda_ndarray.CudaNdarray(a)
_b = _a[:,:,::-1, ::-1] _b = _a[:,:,::-1, ::-1]
...@@ -252,7 +252,7 @@ def test_mapping_getitem_w_int(): ...@@ -252,7 +252,7 @@ def test_mapping_getitem_w_int():
assert numpy.all(x == y) assert numpy.all(x == y)
dim =(2,) dim =(2,)
a = numpy.asarray(numpy.random.rand(*dim), dtype='float32') a = theano._asarray(numpy.random.rand(*dim), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a) _a = cuda_ndarray.CudaNdarray(a)
_cmp(numpy.asarray(_a[1]), a[1]) _cmp(numpy.asarray(_a[1]), a[1])
_cmp(numpy.asarray(_a[::1]), a[::1]) _cmp(numpy.asarray(_a[::1]), a[::1])
...@@ -260,14 +260,14 @@ def test_mapping_getitem_w_int(): ...@@ -260,14 +260,14 @@ def test_mapping_getitem_w_int():
_cmp(numpy.asarray(_a[...]), a[...]) _cmp(numpy.asarray(_a[...]), a[...])
dim =() dim =()
a = numpy.asarray(numpy.random.rand(*dim), dtype='float32') a = theano._asarray(numpy.random.rand(*dim), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a) _a = cuda_ndarray.CudaNdarray(a)
_cmp(numpy.asarray(_a[...]), a[...]) _cmp(numpy.asarray(_a[...]), a[...])
dim =(5,4,3,2) dim =(5,4,3,2)
a = numpy.asarray(numpy.random.rand(*dim), dtype='float32') a = theano._asarray(numpy.random.rand(*dim), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a) _a = cuda_ndarray.CudaNdarray(a)
_cmp(numpy.asarray(_a[:,:,::-1, ::-1]), a[:,:,::-1,::-1]) _cmp(numpy.asarray(_a[:,:,::-1, ::-1]), a[:,:,::-1,::-1])
...@@ -280,9 +280,9 @@ def test_mapping_getitem_w_int(): ...@@ -280,9 +280,9 @@ def test_mapping_getitem_w_int():
_cmp(numpy.asarray(_a[...]), a[...]) _cmp(numpy.asarray(_a[...]), a[...])
def test_gemm_vector_vector(): def test_gemm_vector_vector():
a = numpy.asarray(numpy.random.rand(5,1), dtype='float32') a = theano._asarray(numpy.random.rand(5,1), dtype='float32')
_a = cuda_ndarray.CudaNdarray(a) _a = cuda_ndarray.CudaNdarray(a)
b = numpy.asarray(numpy.random.rand(1,5), dtype='float32') b = theano._asarray(numpy.random.rand(1,5), dtype='float32')
_b = cuda_ndarray.CudaNdarray(b) _b = cuda_ndarray.CudaNdarray(b)
_c = cuda_ndarray.dot(_a,_b) _c = cuda_ndarray.dot(_a,_b)
......
...@@ -49,10 +49,10 @@ def run_nnet(use_gpu, n_batch=60, n_in=1024, n_hid=2048, n_out=10, n_iter=100): ...@@ -49,10 +49,10 @@ def run_nnet(use_gpu, n_batch=60, n_in=1024, n_hid=2048, n_out=10, n_iter=100):
v = tcn.shared_constructor(numpy.zeros((n_hid, n_out)), 'c') v = tcn.shared_constructor(numpy.zeros((n_hid, n_out)), 'c')
c = tcn.shared_constructor(numpy.zeros(n_out), 'c') c = tcn.shared_constructor(numpy.zeros(n_out), 'c')
else: else:
w = shared(numpy.asarray(0.01*(numpy.random.rand(n_in,n_hid)-0.5), dtype='float32'), 'w') w = shared(theano._asarray(0.01*(numpy.random.rand(n_in,n_hid)-0.5), dtype='float32'), 'w')
b = shared(numpy.asarray(numpy.zeros(n_hid), dtype='float32'), 'b') b = shared(theano._asarray(numpy.zeros(n_hid), dtype='float32'), 'b')
v = shared(numpy.asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c') v = shared(theano._asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c')
c = shared(numpy.asarray(numpy.zeros(n_out), dtype='float32'), 'c') c = shared(theano._asarray(numpy.zeros(n_out), dtype='float32'), 'c')
x = tensor.fmatrix('x') x = tensor.fmatrix('x')
y = tensor.fmatrix('y') y = tensor.fmatrix('y')
...@@ -75,9 +75,9 @@ def run_nnet(use_gpu, n_batch=60, n_in=1024, n_hid=2048, n_out=10, n_iter=100): ...@@ -75,9 +75,9 @@ def run_nnet(use_gpu, n_batch=60, n_in=1024, n_hid=2048, n_out=10, n_iter=100):
for i, n in enumerate(train.maker.env.toposort()): for i, n in enumerate(train.maker.env.toposort()):
print i, n print i, n
xval = numpy.asarray(numpy.random.rand(n_batch, n_in), dtype='float32') xval = theano._asarray(numpy.random.rand(n_batch, n_in), dtype='float32')
yval = numpy.asarray(numpy.random.rand(n_batch, n_out), dtype='float32') yval = theano._asarray(numpy.random.rand(n_batch, n_out), dtype='float32')
lr = numpy.asarray(0.01, dtype='float32') lr = theano._asarray(0.01, dtype='float32')
t0 = time.time() t0 = time.time()
rval = [] rval = []
...@@ -123,10 +123,10 @@ def run_conv_nnet1(use_gpu): ...@@ -123,10 +123,10 @@ def run_conv_nnet1(use_gpu):
n_hid = n_kern * logical_hid_shape[0] * logical_hid_shape[1] n_hid = n_kern * logical_hid_shape[0] * logical_hid_shape[1]
n_out = 10 n_out = 10
w = shared_fn(numpy.asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w') w = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w')
b = shared_fn(numpy.asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b') b = shared_fn(theano._asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b')
v = shared_fn(numpy.asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c') v = shared_fn(theano._asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c')
c = shared_fn(numpy.asarray(numpy.zeros(n_out), dtype='float32'), 'c') c = shared_fn(theano._asarray(numpy.zeros(n_out), dtype='float32'), 'c')
x = tensor.Tensor(dtype='float32', broadcastable=(0,1,0,0))('x') x = tensor.Tensor(dtype='float32', broadcastable=(0,1,0,0))('x')
y = tensor.fmatrix('y') y = tensor.fmatrix('y')
...@@ -152,9 +152,9 @@ def run_conv_nnet1(use_gpu): ...@@ -152,9 +152,9 @@ def run_conv_nnet1(use_gpu):
# for i, n in enumerate(train.maker.env.toposort()): # for i, n in enumerate(train.maker.env.toposort()):
# print i, n # print i, n
xval = numpy.asarray(numpy.random.rand(*shape_img), dtype='float32') xval = theano._asarray(numpy.random.rand(*shape_img), dtype='float32')
yval = numpy.asarray(numpy.random.rand(n_batch, n_out), dtype='float32') yval = theano._asarray(numpy.random.rand(n_batch, n_out), dtype='float32')
lr = numpy.asarray(0.01, dtype='float32') lr = theano._asarray(0.01, dtype='float32')
for i in xrange(10): for i in xrange(10):
rval = train(xval, yval, lr) rval = train(xval, yval, lr)
...@@ -204,12 +204,12 @@ def run_conv_nnet2(use_gpu): # pretend we are training LeNet for MNIST ...@@ -204,12 +204,12 @@ def run_conv_nnet2(use_gpu): # pretend we are training LeNet for MNIST
n_hid = n_kern1 * logical_hid_shape1[0] * logical_hid_shape1[1] n_hid = n_kern1 * logical_hid_shape1[0] * logical_hid_shape1[1]
n_out = 10 n_out = 10
w0 = shared_fn(numpy.asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w0') w0 = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w0')
b0 = shared_fn(numpy.asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b0') b0 = shared_fn(theano._asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b0')
w1 = shared_fn(numpy.asarray(0.01*(numpy.random.rand(*shape_kern1)-0.5), dtype='float32'), 'w1') w1 = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern1)-0.5), dtype='float32'), 'w1')
b1 = shared_fn(numpy.asarray(numpy.zeros((n_kern1,)), dtype='float32'), 'b1') b1 = shared_fn(theano._asarray(numpy.zeros((n_kern1,)), dtype='float32'), 'b1')
v = shared_fn(numpy.asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c') v = shared_fn(theano._asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c')
c = shared_fn(numpy.asarray(numpy.zeros(n_out), dtype='float32'), 'c') c = shared_fn(theano._asarray(numpy.zeros(n_out), dtype='float32'), 'c')
x = tensor.Tensor(dtype='float32', broadcastable=(0,1,0,0))('x') x = tensor.Tensor(dtype='float32', broadcastable=(0,1,0,0))('x')
y = tensor.fmatrix('y') y = tensor.fmatrix('y')
...@@ -238,9 +238,9 @@ def run_conv_nnet2(use_gpu): # pretend we are training LeNet for MNIST ...@@ -238,9 +238,9 @@ def run_conv_nnet2(use_gpu): # pretend we are training LeNet for MNIST
# for i, n in enumerate(train.maker.env.toposort()): # for i, n in enumerate(train.maker.env.toposort()):
# print i, n # print i, n
xval = numpy.asarray(numpy.random.rand(*shape_img), dtype='float32') xval = theano._asarray(numpy.random.rand(*shape_img), dtype='float32')
yval = numpy.asarray(numpy.random.rand(n_batch,n_out), dtype='float32')#int32 make all 0... yval = theano._asarray(numpy.random.rand(n_batch,n_out), dtype='float32')#int32 make all 0...
lr = numpy.asarray(0.01, dtype='float32') lr = theano._asarray(0.01, dtype='float32')
for i in xrange(n_train): for i in xrange(n_train):
rval = train(xval, yval, lr) rval = train(xval, yval, lr)
...@@ -284,12 +284,12 @@ def run_conv_nnet2_classif(use_gpu, isize, ksize, n_batch, n_iter, ...@@ -284,12 +284,12 @@ def run_conv_nnet2_classif(use_gpu, isize, ksize, n_batch, n_iter,
n_out = 10 n_out = 10
w0 = shared_fn(numpy.asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w0') w0 = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w0')
b0 = shared_fn(numpy.asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b0') b0 = shared_fn(theano._asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b0')
w1 = shared_fn(numpy.asarray(0.01*(numpy.random.rand(*shape_kern1)-0.5), dtype='float32'), 'w1') w1 = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern1)-0.5), dtype='float32'), 'w1')
b1 = shared_fn(numpy.asarray(numpy.zeros((n_kern1,)), dtype='float32'), 'b1') b1 = shared_fn(theano._asarray(numpy.zeros((n_kern1,)), dtype='float32'), 'b1')
v = shared_fn(numpy.asarray(0.01*numpy.random.randn(n_hid, n_out), dtype='float32'), 'v') v = shared_fn(theano._asarray(0.01*numpy.random.randn(n_hid, n_out), dtype='float32'), 'v')
c = shared_fn(numpy.asarray(numpy.zeros(n_out), dtype='float32'), 'c') c = shared_fn(theano._asarray(numpy.zeros(n_out), dtype='float32'), 'c')
print 'ALLOCATING ARCH: w0 shape', w0.value.shape print 'ALLOCATING ARCH: w0 shape', w0.value.shape
print 'ALLOCATING ARCH: w1 shape', w1.value.shape print 'ALLOCATING ARCH: w1 shape', w1.value.shape
...@@ -330,9 +330,9 @@ def run_conv_nnet2_classif(use_gpu, isize, ksize, n_batch, n_iter, ...@@ -330,9 +330,9 @@ def run_conv_nnet2_classif(use_gpu, isize, ksize, n_batch, n_iter,
for i, n in enumerate(train.maker.env.toposort()): for i, n in enumerate(train.maker.env.toposort()):
print i, n print i, n
xval = numpy.asarray(numpy.random.rand(*shape_img), dtype='float32') xval = theano._asarray(numpy.random.rand(*shape_img), dtype='float32')
yval = numpy.asarray(numpy.random.rand(n_batch,n_out), dtype='float32') yval = theano._asarray(numpy.random.rand(n_batch,n_out), dtype='float32')
lr = numpy.asarray(0.01, dtype='float32') lr = theano._asarray(0.01, dtype='float32')
rvals=numpy.zeros(n_iter) rvals=numpy.zeros(n_iter)
t0 = time.time() t0 = time.time()
......
...@@ -71,7 +71,7 @@ def shared_constructor(value, name, strict=False, broadcastable=None): ...@@ -71,7 +71,7 @@ def shared_constructor(value, name, strict=False, broadcastable=None):
if strict: if strict:
_value = value _value = value
else: else:
_value = numpy.asarray(value, dtype='float32') _value = theano._asarray(value, dtype='float32')
if not isinstance(_value, numpy.ndarray): if not isinstance(_value, numpy.ndarray):
raise TypeError('ndarray required') raise TypeError('ndarray required')
......
...@@ -8,7 +8,7 @@ DownsampleFactorMax, DownsampleAvg, DownsampleSoftmax. ...@@ -8,7 +8,7 @@ DownsampleFactorMax, DownsampleAvg, DownsampleSoftmax.
from theano import gof, Op, tensor, Variable, Apply from theano import gof, Op, tensor, Variable, Apply
from theano.printing import Print from theano.printing import Print
import numpy import numpy, theano
import __builtin__ import __builtin__
class DownsampleFactorMaxGrad(Op): class DownsampleFactorMaxGrad(Op):
...@@ -259,7 +259,7 @@ class DownsampleFactorMax(Op): ...@@ -259,7 +259,7 @@ class DownsampleFactorMax(Op):
raise NotImplementedError('DownsampleFactorMax requires 4D input for now') raise NotImplementedError('DownsampleFactorMax requires 4D input for now')
if z[0] is None: if z[0] is None:
z[0] = numpy.zeros(self.out_shape(x.shape, self.ds, self.ignore_border)) -float('inf') z[0] = numpy.zeros(self.out_shape(x.shape, self.ds, self.ignore_border)) -float('inf')
z[0] = numpy.asarray(z[0], dtype=x.dtype) z[0] = theano._asarray(z[0], dtype=x.dtype)
zz=z[0] zz=z[0]
ds0, ds1 = self.ds ds0, ds1 = self.ds
if self.ignore_border: if self.ignore_border:
......
...@@ -197,7 +197,7 @@ class Scan(theano.Op): ...@@ -197,7 +197,7 @@ class Scan(theano.Op):
def zero(p): def zero(p):
return theano.tensor.TensorConstant(theano.tensor.TensorType(\ return theano.tensor.TensorConstant(theano.tensor.TensorType(\
dtype=p.type.dtype, broadcastable=[]), dtype=p.type.dtype, broadcastable=[]),
numpy.asarray(0,dtype = p.type.dtype)) theano._asarray(0,dtype = p.type.dtype))
return [gmap.get(p, zero(p)) for p in inputs] return [gmap.get(p, zero(p)) for p in inputs]
......
...@@ -211,7 +211,7 @@ class TheanoObject(object): ...@@ -211,7 +211,7 @@ class TheanoObject(object):
v = tensor.lscalar(name) v = tensor.lscalar(name)
v._theanoclass_container = \ v._theanoclass_container = \
theano.gof.Container(v, theano.gof.Container(v,
storage = [numpy.asarray(ival, dtype='int64')], storage = [theano._asarray(ival, dtype='int64')],
readonly=False) readonly=False)
assert not hasattr(v, 'set') assert not hasattr(v, 'set')
assert not hasattr(v, 'get') assert not hasattr(v, 'get')
......
...@@ -2,7 +2,7 @@ import operator ...@@ -2,7 +2,7 @@ import operator
import math import math
from copy import copy from copy import copy
import numpy import numpy, theano
from theano import gof from theano import gof
from theano.gof import Op, utils, Variable, Constant, Type, Apply, Env from theano.gof import Op, utils, Variable, Constant, Type, Apply, Env
...@@ -33,7 +33,7 @@ def as_scalar(x, name = None): ...@@ -33,7 +33,7 @@ def as_scalar(x, name = None):
def constant(x): def constant(x):
if isinstance(x, float): if isinstance(x, float):
for dtype in ['float32', 'float64']: for dtype in ['float32', 'float64']:
x_ = numpy.asarray(x, dtype=dtype) x_ = theano._asarray(x, dtype=dtype)
if numpy.all(x == x_): if numpy.all(x == x_):
break break
x_ = None x_ = None
...@@ -41,7 +41,7 @@ def constant(x): ...@@ -41,7 +41,7 @@ def constant(x):
return ScalarConstant(Scalar(str(x_.dtype)), x) return ScalarConstant(Scalar(str(x_.dtype)), x)
if isinstance(x, int): if isinstance(x, int):
for dtype in ['int8', 'int16', 'int32', 'int64']: for dtype in ['int8', 'int16', 'int32', 'int64']:
x_ = numpy.asarray(x, dtype=dtype) x_ = theano._asarray(x, dtype=dtype)
if numpy.all(x == x_): if numpy.all(x == x_):
break break
x_ = None x_ = None
...@@ -1090,7 +1090,7 @@ floor = Floor(same_out_nocomplex, name = 'ceil') ...@@ -1090,7 +1090,7 @@ floor = Floor(same_out_nocomplex, name = 'ceil')
class IRound(UnaryScalarOp): class IRound(UnaryScalarOp):
def impl(self, x): def impl(self, x):
return numpy.asarray(numpy.round(x), dtype = 'int64') return theano._asarray(numpy.round(x), dtype = 'int64')
def c_code(self, node, name, (x, ), (z, ), sub): def c_code(self, node, name, (x, ), (z, ), sub):
return "%(z)s = round(%(x)s);" % locals() return "%(z)s = round(%(x)s);" % locals()
iround = IRound(int_out_nocomplex) iround = IRound(int_out_nocomplex)
......
...@@ -7,7 +7,7 @@ To read about different sparse formats, see U{http://www-users.cs.umn.edu/~saad/ ...@@ -7,7 +7,7 @@ To read about different sparse formats, see U{http://www-users.cs.umn.edu/~saad/
""" """
import sys, operator import sys, operator
import numpy import numpy, theano
from scipy import sparse from scipy import sparse
import scipy.sparse import scipy.sparse
from theano.printing import Print from theano.printing import Print
...@@ -279,9 +279,9 @@ class CSMProperties(gof.Op): ...@@ -279,9 +279,9 @@ class CSMProperties(gof.Op):
out[0][0] = csm.data[self.kmap] out[0][0] = csm.data[self.kmap]
#backport #backport
#out[0][0] = csm.data if self.kmap is None else csm.data[self.kmap] #out[0][0] = csm.data if self.kmap is None else csm.data[self.kmap]
out[1][0] = numpy.asarray(csm.indices, dtype='int32') out[1][0] = theano._asarray(csm.indices, dtype='int32')
out[2][0] = numpy.asarray(csm.indptr, dtype='int32') out[2][0] = theano._asarray(csm.indptr, dtype='int32')
out[3][0] = numpy.asarray(csm.shape, dtype='int32') out[3][0] = theano._asarray(csm.shape, dtype='int32')
# TODO FIX THIS # TODO FIX THIS
def grad(self, (csm,), g): def grad(self, (csm,), g):
...@@ -344,28 +344,12 @@ class CSM(gof.Op): ...@@ -344,28 +344,12 @@ class CSM(gof.Op):
""" """
data = tensor.as_tensor_variable(data) data = tensor.as_tensor_variable(data)
# Note that we use `view(numpy.int32)` in addition to providing the
# 'int32' dtype to `numpy.asarray`. This is because on some computers
# (e.g. a Windows 32 bits machine), we can have the following assert
# fail:
# x = numpy.array([0], dtype=numpy.intc)
# y = numpy.asarray(x, dtype=numpy.int32)
# assert y.dtype.num == numpy.dtype(numpy.int32).num
# while the assert does *not* fail when replacing the second line by:
# y = numpy.asarray(x, dtype='int32').view(numpy.int32)
# This is a known defect in Numpy. For more information see ticket
# http://projects.scipy.org/numpy/ticket/870
# Note also that it is important to keep "dtype='int32'" when calling
# `numpy.asarray`. This is because `view` is only some kind of cast to
# the exact data type we want to use. If a conversion is required (e.g.
# from int64 to int32), it must be done in the call to `numpy.asarray`.
if not isinstance(indices, tensor.TensorVariable): if not isinstance(indices, tensor.TensorVariable):
indices = numpy.asarray(indices, dtype='int32').view(numpy.int32) indices = theano._asarray(indices, dtype='int32')
if not isinstance(indptr, tensor.TensorVariable): if not isinstance(indptr, tensor.TensorVariable):
indptr = numpy.asarray(indptr, dtype='int32').view(numpy.int32) indptr = theano._asarray(indptr, dtype='int32')
if not isinstance(shape, tensor.TensorVariable): if not isinstance(shape, tensor.TensorVariable):
shape = numpy.asarray(shape, dtype='int32').view(numpy.int32) shape = theano._asarray(shape, dtype='int32')
indices = tensor.as_tensor_variable(indices) indices = tensor.as_tensor_variable(indices)
indptr = tensor.as_tensor_variable(indptr) indptr = tensor.as_tensor_variable(indptr)
shape = tensor.as_tensor_variable(shape) shape = tensor.as_tensor_variable(shape)
......
...@@ -9,7 +9,7 @@ import traceback #for overriding Op.__call__ ...@@ -9,7 +9,7 @@ import traceback #for overriding Op.__call__
if sys.version_info >= (2,5): if sys.version_info >= (2,5):
import functools import functools
import numpy import numpy, theano
from copy import copy from copy import copy
from theano import gof from theano import gof
...@@ -151,7 +151,7 @@ class NumpyAutocaster(object): ...@@ -151,7 +151,7 @@ class NumpyAutocaster(object):
self.dtypes = tuple(dtypes) self.dtypes = tuple(dtypes)
def __call__(self, x): def __call__(self, x):
for dtype in self.dtypes: for dtype in self.dtypes:
x_ = numpy.asarray(x, dtype=dtype) x_ = theano._asarray(x, dtype=dtype)
if numpy.all(x == x_): if numpy.all(x == x_):
break break
# returns either an exact x_==x, or the last casted x_ # returns either an exact x_==x, or the last casted x_
...@@ -163,7 +163,7 @@ autocast_float = NumpyAutocaster(('float32', 'float64')) ...@@ -163,7 +163,7 @@ autocast_float = NumpyAutocaster(('float32', 'float64'))
# Note: it's a bit weird for a compiler to automatically downcast literals like this, and it might # Note: it's a bit weird for a compiler to automatically downcast literals like this, and it might
# have implications for efficiency when mixing types. For example when you add 1.0 + # have implications for efficiency when mixing types. For example when you add 1.0 +
# dmatrix(), the 1.0 could be converted to float32, and require upcasting for the + operation # dmatrix(), the 1.0 could be converted to float32, and require upcasting for the + operation
# at every position in the dmatrix. using numpy.asarray(1.0, dtype='float64') will circumvent # at every position in the dmatrix. using theano._asarray(1.0, dtype='float64') will circumvent
# this autocasting, and in future, our ops might be smarter about factoring out upcasts. The # this autocasting, and in future, our ops might be smarter about factoring out upcasts. The
# advantage of this mechanism is to combine it with floatX so that 1.0 + xmatrix() will always # advantage of this mechanism is to combine it with floatX so that 1.0 + xmatrix() will always
# have the same type as the xmatrix(). # have the same type as the xmatrix().
...@@ -197,7 +197,7 @@ def constant_or_value(x, rtype, name=None, ndim=None, dtype=None): ...@@ -197,7 +197,7 @@ def constant_or_value(x, rtype, name=None, ndim=None, dtype=None):
""" """
if dtype is not None: if dtype is not None:
# in this case, the semantics are that the caller is forcing the dtype # in this case, the semantics are that the caller is forcing the dtype
x_ = numpy.asarray(x, dtype=dtype) x_ = theano._asarray(x, dtype=dtype)
else: else:
# in this case, this function should infer the dtype according to the autocasting # in this case, this function should infer the dtype according to the autocasting
# rules. See autocasting above. # rules. See autocasting above.
...@@ -378,7 +378,7 @@ class TensorType(Type): ...@@ -378,7 +378,7 @@ class TensorType(Type):
self, self.shape, data.shape)) self, self.shape, data.shape))
return data return data
else: else:
data = numpy.asarray(data, dtype = self.dtype) data = theano._asarray(data, dtype = self.dtype)
if not self.ndim == data.ndim: if not self.ndim == data.ndim:
raise TypeError("Wrong number of dimensions: expected %s, got %s with shape %s." % (self.ndim, data.ndim, data.shape), data) raise TypeError("Wrong number of dimensions: expected %s, got %s with shape %s." % (self.ndim, data.ndim, data.shape), data)
if any(b and d != 1 for d, b in zip(data.shape, self.broadcastable)): if any(b and d != 1 for d, b in zip(data.shape, self.broadcastable)):
...@@ -1258,7 +1258,7 @@ class Shape(Op): ...@@ -1258,7 +1258,7 @@ class Shape(Op):
x = as_tensor_variable(x) x = as_tensor_variable(x)
return Apply(self, [x], [lvector()]) return Apply(self, [x], [lvector()])
def perform(self, node, (x, ), (out, )): def perform(self, node, (x, ), (out, )):
out[0] = numpy.asarray(x.shape, dtype = 'int64') out[0] = theano._asarray(x.shape, dtype = 'int64')
def grad(self, (x,), (gz,)): def grad(self, (x,), (gz,)):
return [None] return [None]
_shape = Shape() _shape = Shape()
...@@ -1300,9 +1300,7 @@ class MaxAndArgmax(Op): ...@@ -1300,9 +1300,7 @@ class MaxAndArgmax(Op):
return Apply(self, inputs, outputs) return Apply(self, inputs, outputs)
def perform(self, node, (x, axis), (max, max_idx)): def perform(self, node, (x, axis), (max, max_idx)):
max[0] = numpy.asarray(numpy.max(x, axis)) max[0] = numpy.asarray(numpy.max(x, axis))
# Note: using 'view' is important until Numpy's ticket 870 is resolved. max_idx[0] = theano._asarray(numpy.argmax(x, axis), dtype='int32')
max_idx[0] = numpy.asarray(numpy.argmax(x, axis), dtype='int32').view(
numpy.int32)
def grad(self, (x, axis), (g_max, g_max_idx)): def grad(self, (x, axis), (g_max, g_max_idx)):
# @warning: This only works if axis is 0, else the max is # @warning: This only works if axis is 0, else the max is
# broadcasted wrong in the call to eq. # broadcasted wrong in the call to eq.
...@@ -2498,7 +2496,7 @@ class Join(Op): ...@@ -2498,7 +2496,7 @@ class Join(Op):
def perform(self, node, axis_and_tensors, (out, )): def perform(self, node, axis_and_tensors, (out, )):
axis, tensors = axis_and_tensors[0], axis_and_tensors[1:] axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
out[0] = numpy.asarray(numpy.concatenate(tensors, axis = axis), out[0] = theano._asarray(numpy.concatenate(tensors, axis = axis),
dtype=node.outputs[0].type.dtype) dtype=node.outputs[0].type.dtype)
def grad(self, axis_and_tensors, (gz,)): def grad(self, axis_and_tensors, (gz,)):
...@@ -3422,7 +3420,7 @@ def grad(cost, wrt, g_cost=None, consider_constant=[], warn_type=False): ...@@ -3422,7 +3420,7 @@ def grad(cost, wrt, g_cost=None, consider_constant=[], warn_type=False):
def zero(p): def zero(p):
return TensorConstant( return TensorConstant(
TensorType(dtype = p.type.dtype, broadcastable = []), TensorType(dtype = p.type.dtype, broadcastable = []),
numpy.asarray(0, dtype=p.type.dtype)) theano._asarray(0, dtype=p.type.dtype))
#try: #try:
#it = iter(wrt) #it = iter(wrt)
......
import sys import sys
import elemwise_cgen as cgen import elemwise_cgen as cgen
import numpy import numpy, theano
from theano import gof from theano import gof
from theano.gof import Op, Apply from theano.gof import Op, Apply
from theano import scalar from theano import scalar
...@@ -823,7 +823,7 @@ class CAReduce(Op): ...@@ -823,7 +823,7 @@ class CAReduce(Op):
if to_reduce: if to_reduce:
for dimension in to_reduce: for dimension in to_reduce:
variable = self.ufunc.reduce(variable, dimension) variable = self.ufunc.reduce(variable, dimension)
output[0] = numpy.asarray(variable, dtype = node.outputs[0].type.dtype) output[0] = theano._asarray(variable, dtype = node.outputs[0].type.dtype)
else: else:
output[0] = numpy.copy(variable) output[0] = numpy.copy(variable)
......
...@@ -35,7 +35,7 @@ class ScalarSigmoid(scalar.UnaryScalarOp): ...@@ -35,7 +35,7 @@ class ScalarSigmoid(scalar.UnaryScalarOp):
if node.inputs[0].type == scalar.float32: if node.inputs[0].type == scalar.float32:
# These constants were obtained by looking at the output of python commands like: # These constants were obtained by looking at the output of python commands like:
# for i in xrange(750): # for i in xrange(750):
# print i, repr( numpy.asarray(1.0, dtype=dt) / (numpy.asarray(1.0, dtype=dt) + numpy.exp(-numpy.asarray([i,-i], dtype=dt)))) # print i, repr( theano._asarray(1.0, dtype=dt) / (theano._asarray(1.0, dtype=dt) + numpy.exp(-theano._asarray([i,-i], dtype=dt))))
# the boundary checks prevent us from generating inf # the boundary checks prevent us from generating inf
return """%(z)s = %(x)s < -88.0f ? 0.0 : %(x)s > 15.0f ? 1.0f : 1.0f /(1.0f + exp(-%(x)s));""" % locals() return """%(z)s = %(x)s < -88.0f ? 0.0 : %(x)s > 15.0f ? 1.0f : 1.0f /(1.0f + exp(-%(x)s));""" % locals()
elif node.inputs[0].type == scalar.float64: elif node.inputs[0].type == scalar.float64:
...@@ -70,7 +70,7 @@ class ScalarSoftplus(scalar.UnaryScalarOp): ...@@ -70,7 +70,7 @@ class ScalarSoftplus(scalar.UnaryScalarOp):
if node.inputs[0].type == scalar.float32: if node.inputs[0].type == scalar.float32:
# These constants were obtained by looking at the output of python commands like: # These constants were obtained by looking at the output of python commands like:
# for i in xrange(750): # for i in xrange(750):
# print i, repr( numpy.log1p(numpy.exp(numpy.asarray([i,-i], dtype=dt)))) # print i, repr( numpy.log1p(numpy.exp(theano._asarray([i,-i], dtype=dt))))
# the boundary checks prevent us from generating inf # the boundary checks prevent us from generating inf
return """%(z)s = %(x)s < -103.0f ? 0.0 : %(x)s > 14.0f ? %(x)s : log1p(exp(%(x)s));""" % locals() return """%(z)s = %(x)s < -103.0f ? 0.0 : %(x)s > 14.0f ? %(x)s : log1p(exp(%(x)s));""" % locals()
elif node.inputs[0].type == scalar.float64: elif node.inputs[0].type == scalar.float64:
......
...@@ -14,7 +14,7 @@ from elemwise import Elemwise, DimShuffle ...@@ -14,7 +14,7 @@ from elemwise import Elemwise, DimShuffle
from theano import scalar from theano import scalar
import basic as T import basic as T
import inplace as I import inplace as I
import numpy import numpy, theano
import numpy as N #guys... please don't do this in the library :( import numpy as N #guys... please don't do this in the library :(
import operator import operator
import itertools import itertools
...@@ -874,7 +874,7 @@ def mul_calculate(num, denum, aslist=False, out_type=None): ...@@ -874,7 +874,7 @@ def mul_calculate(num, denum, aslist=False, out_type=None):
#first = num[0] if num else denum[0] #first = num[0] if num else denum[0]
one = N.asarray(first).dtype.type(1) one = N.asarray(first).dtype.type(1)
else: else:
one = N.asarray(1, dtype=out_type.dtype) one = theano._asarray(1, dtype=out_type.dtype)
v = reduce(N.multiply, num, one) / reduce(N.multiply, denum, one) v = reduce(N.multiply, num, one) / reduce(N.multiply, denum, one)
if aslist: if aslist:
if N.all(v == 1): if N.all(v == 1):
...@@ -977,7 +977,7 @@ def local_mul_zero(node): ...@@ -977,7 +977,7 @@ def local_mul_zero(node):
#print 'MUL by value', value, node.inputs #print 'MUL by value', value, node.inputs
if N.all(value == 0): if N.all(value == 0):
#print '... returning zeros' #print '... returning zeros'
return _fill_chain(N.asarray(0, dtype=otype.dtype), node.inputs) return _fill_chain(theano._asarray(0, dtype=otype.dtype), node.inputs)
register_canonicalize(local_mul_zero) register_canonicalize(local_mul_zero)
@gof.local_optimizer([T.true_div]) @gof.local_optimizer([T.true_div])
...@@ -1162,8 +1162,8 @@ def add_calculate(num, denum, aslist = False, out_type=None): ...@@ -1162,8 +1162,8 @@ def add_calculate(num, denum, aslist = False, out_type=None):
if out_type is None: if out_type is None:
zero = 0.0 zero = 0.0
else: else:
zero = N.asarray(0, dtype=out_type.dtype) zero = theano._asarray(0, dtype=out_type.dtype)
#zero = 0.0 if out_type is None else N.asarray(0, dtype=out_type.dtype) #zero = 0.0 if out_type is None else theano._asarray(0, dtype=out_type.dtype)
v = reduce(N.add, num, zero) - reduce(N.add, denum, zero) v = reduce(N.add, num, zero) - reduce(N.add, denum, zero)
if aslist: if aslist:
if N.all(v == 0): if N.all(v == 0):
......
...@@ -6,7 +6,7 @@ import numpy ...@@ -6,7 +6,7 @@ import numpy
#local imports #local imports
import basic as tensor import basic as tensor
import opt import opt, theano
from theano import gof from theano import gof
from theano.compile import optdb from theano.compile import optdb
...@@ -166,7 +166,7 @@ class RandomFunction(gof.Op): ...@@ -166,7 +166,7 @@ class RandomFunction(gof.Op):
rval = self.fn(r, *(args + [tuple(shape)])) rval = self.fn(r, *(args + [tuple(shape)]))
if not isinstance(rval, numpy.ndarray) \ if not isinstance(rval, numpy.ndarray) \
or str(rval.dtype) != node.outputs[1].type.dtype: or str(rval.dtype) != node.outputs[1].type.dtype:
out[0] = numpy.asarray(rval, dtype = node.outputs[1].type.dtype) out[0] = theano._asarray(rval, dtype = node.outputs[1].type.dtype)
else: else:
out[0] = rval out[0] = rval
if len(rval.shape) != self.outtype.ndim: if len(rval.shape) != self.outtype.ndim:
...@@ -182,7 +182,7 @@ def _infer_ndim(ndim, shape): ...@@ -182,7 +182,7 @@ def _infer_ndim(ndim, shape):
""" """
if isinstance(shape, (tuple, list)): if isinstance(shape, (tuple, list)):
v_shape = tensor.TensorConstant(type=tensor.lvector, data=numpy.asarray(shape, dtype='int64')) v_shape = tensor.TensorConstant(type=tensor.lvector, data=theano._asarray(shape, dtype='int64'))
else: else:
v_shape = tensor.as_tensor_variable(shape) v_shape = tensor.as_tensor_variable(shape)
......
...@@ -1063,8 +1063,8 @@ class test_bitwise(unittest.TestCase): ...@@ -1063,8 +1063,8 @@ class test_bitwise(unittest.TestCase):
def test_or(self): def test_or(self):
x, y = bvector(), bvector() x, y = bvector(), bvector()
fn = inplace_func([x,y], x|y) fn = inplace_func([x,y], x|y)
l = numpy.asarray([0,0,1,1], dtype = 'int8') l = theano._asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8') r = theano._asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r) v = fn(l, r)
self.failUnless(numpy.all(v == (operator.or_(l, r))), (l, r, v)) self.failUnless(numpy.all(v == (operator.or_(l, r))), (l, r, v))
...@@ -1074,8 +1074,8 @@ class test_bitwise(unittest.TestCase): ...@@ -1074,8 +1074,8 @@ class test_bitwise(unittest.TestCase):
ix = x ix = x
ix = inplace.xor_inplace(ix, y) ix = inplace.xor_inplace(ix, y)
gn = inplace_func([x,y], ix) gn = inplace_func([x,y], ix)
l = numpy.asarray([0,0,1,1], dtype = 'int8') l = theano._asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8') r = theano._asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r) v = fn(l, r)
self.failUnless(numpy.all(v == (operator.xor(l, r))), (l, r, v)) self.failUnless(numpy.all(v == (operator.xor(l, r))), (l, r, v))
v = gn(l, r) v = gn(l, r)
...@@ -1085,16 +1085,16 @@ class test_bitwise(unittest.TestCase): ...@@ -1085,16 +1085,16 @@ class test_bitwise(unittest.TestCase):
def test_and(self): def test_and(self):
x, y = bvector(), bvector() x, y = bvector(), bvector()
fn = inplace_func([x,y], x&y) fn = inplace_func([x,y], x&y)
l = numpy.asarray([0,0,1,1], dtype = 'int8') l = theano._asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8') r = theano._asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r) v = fn(l, r)
self.failUnless(numpy.all(v == (operator.and_(l, r))), (l, r, v)) self.failUnless(numpy.all(v == (operator.and_(l, r))), (l, r, v))
def test_inv(self): def test_inv(self):
x, y = bvector(), bvector() x, y = bvector(), bvector()
fn = inplace_func([x,y], ~x) fn = inplace_func([x,y], ~x)
l = numpy.asarray([0,0,1,1], dtype = 'int8') l = theano._asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8') r = theano._asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r) v = fn(l, r)
self.failUnless(numpy.all(v == (~l)), (l, r, v)) self.failUnless(numpy.all(v == (~l)), (l, r, v))
...@@ -1723,9 +1723,9 @@ def test_reshape(): ...@@ -1723,9 +1723,9 @@ def test_reshape():
assert numpy.all(a_val == a_val_copy) assert numpy.all(a_val == a_val_copy)
#test that it works with inplace operations #test that it works with inplace operations
a_val = numpy.asarray([0,1,2,3,4,5], dtype='float64') a_val = theano._asarray([0,1,2,3,4,5], dtype='float64')
a_val_copy = numpy.asarray([0,1,2,3,4,5], dtype='float64') a_val_copy = theano._asarray([0,1,2,3,4,5], dtype='float64')
b_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64') b_val = theano._asarray([[0,1,2],[3,4,5]], dtype='float64')
f_sub = inplace_func([a,b], c-b) f_sub = inplace_func([a,b], c-b)
assert numpy.all(f_sub(a_val, b_val) == 0.0) assert numpy.all(f_sub(a_val, b_val) == 0.0)
...@@ -1733,7 +1733,7 @@ def test_reshape(): ...@@ -1733,7 +1733,7 @@ def test_reshape():
# verify gradient # verify gradient
def just_vals(v): def just_vals(v):
return Reshape(2)(v, numpy.asarray([2,3], dtype='int32')) return Reshape(2)(v, theano._asarray([2,3], dtype='int32'))
utt.verify_grad(just_vals, [a_val]) utt.verify_grad(just_vals, [a_val])
...@@ -1744,8 +1744,8 @@ def test_flatten_outdimNone(): ...@@ -1744,8 +1744,8 @@ def test_flatten_outdimNone():
a = dmatrix() a = dmatrix()
c = flatten(a) c = flatten(a)
f = inplace_func([a], c) f = inplace_func([a], c)
a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64') a_val = theano._asarray([[0,1,2],[3,4,5]], dtype='float64')
c_val = numpy.asarray([0,1,2,3,4,5], dtype='float64') c_val = theano._asarray([0,1,2,3,4,5], dtype='float64')
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
f = inplace_func([a], c) f = inplace_func([a], c)
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
...@@ -1756,8 +1756,8 @@ def test_flatten_scalar(): ...@@ -1756,8 +1756,8 @@ def test_flatten_scalar():
a = dscalar() a = dscalar()
c = flatten(a) c = flatten(a)
f = inplace_func([a], c) f = inplace_func([a], c)
a_val = numpy.asarray(3.0, dtype='float64') a_val = theano._asarray(3.0, dtype='float64')
c_val = numpy.asarray([3.0], dtype='float64') c_val = theano._asarray([3.0], dtype='float64')
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
f = inplace_func([a], c) f = inplace_func([a], c)
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
...@@ -1768,8 +1768,8 @@ def test_flatten_outdim1(): ...@@ -1768,8 +1768,8 @@ def test_flatten_outdim1():
a = dmatrix() a = dmatrix()
c = flatten(a, 1) c = flatten(a, 1)
f = inplace_func([a], c) f = inplace_func([a], c)
a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64') a_val = theano._asarray([[0,1,2],[3,4,5]], dtype='float64')
c_val = numpy.asarray([0,1,2,3,4,5], dtype='float64') c_val = theano._asarray([0,1,2,3,4,5], dtype='float64')
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
f = inplace_func([a], c) f = inplace_func([a], c)
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
...@@ -1780,7 +1780,7 @@ def test_flatten_outdim2(): ...@@ -1780,7 +1780,7 @@ def test_flatten_outdim2():
a = dmatrix() a = dmatrix()
c = flatten(a, 2) c = flatten(a, 2)
f = inplace_func([a], c) f = inplace_func([a], c)
a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64') a_val = theano._asarray([[0,1,2],[3,4,5]], dtype='float64')
assert numpy.all(f(a_val)==a_val) assert numpy.all(f(a_val)==a_val)
f = inplace_func([a], c) f = inplace_func([a], c)
assert numpy.all(f(a_val)==a_val) assert numpy.all(f(a_val)==a_val)
...@@ -1791,8 +1791,8 @@ def test_flatten_outdim2_of_3(): ...@@ -1791,8 +1791,8 @@ def test_flatten_outdim2_of_3():
a = TensorType('float64', (False, False, False))() a = TensorType('float64', (False, False, False))()
c = flatten(a, 2) c = flatten(a, 2)
f = inplace_func([a], c) f = inplace_func([a], c)
a_val = numpy.asarray([[[0,1],[2,3]], [[4,5],[6,7]]], dtype='float64') a_val = theano._asarray([[[0,1],[2,3]], [[4,5],[6,7]]], dtype='float64')
c_val = numpy.asarray([[0,1,2,3], [4,5,6,7]], dtype='float64') c_val = theano._asarray([[0,1,2,3], [4,5,6,7]], dtype='float64')
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
f = inplace_func([a], c) f = inplace_func([a], c)
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
...@@ -2288,8 +2288,8 @@ def test_autocast(): ...@@ -2288,8 +2288,8 @@ def test_autocast():
ac.__enter__() ac.__enter__()
assert (dvector()+ 1.1).dtype == 'float64' assert (dvector()+ 1.1).dtype == 'float64'
assert (fvector()+ 1.1).dtype == 'float32' assert (fvector()+ 1.1).dtype == 'float32'
assert (fvector()+ numpy.asarray(1.1,dtype='float64')).dtype == 'float64' assert (fvector()+ theano._asarray(1.1,dtype='float64')).dtype == 'float64'
assert (fvector()+ numpy.asarray(1.1,dtype='float32')).dtype == 'float32' assert (fvector()+ theano._asarray(1.1,dtype='float32')).dtype == 'float32'
assert (dvector()+ 1).dtype == 'float64' assert (dvector()+ 1).dtype == 'float64'
assert (fvector()+ 1).dtype == 'float32' assert (fvector()+ 1).dtype == 'float32'
...@@ -2303,8 +2303,8 @@ def test_autocast(): ...@@ -2303,8 +2303,8 @@ def test_autocast():
assert (dvector()+ 1.1).dtype == 'float64' assert (dvector()+ 1.1).dtype == 'float64'
assert (fvector()+ 1.1).dtype == 'float64' assert (fvector()+ 1.1).dtype == 'float64'
assert (fvector()+ 1.0).dtype == 'float64' assert (fvector()+ 1.0).dtype == 'float64'
assert (fvector()+ numpy.asarray(1.1,dtype='float64')).dtype == 'float64' assert (fvector()+ theano._asarray(1.1,dtype='float64')).dtype == 'float64'
assert (fvector()+ numpy.asarray(1.1,dtype='float32')).dtype == 'float32' assert (fvector()+ theano._asarray(1.1,dtype='float32')).dtype == 'float32'
assert (dvector()+ 1).dtype == 'float64' assert (dvector()+ 1).dtype == 'float64'
assert (fvector()+ 1).dtype == 'float32' assert (fvector()+ 1).dtype == 'float32'
......
...@@ -2,7 +2,7 @@ import traceback ...@@ -2,7 +2,7 @@ import traceback
import theano.tensor as T import theano.tensor as T
from theano.gof import Env from theano.gof import Env
from theano.printing import pp from theano.printing import pp
import numpy import numpy, theano
from theano.tensor.blas import * from theano.tensor.blas import *
from theano.tensor.blas import _dot22, res_is_a, _as_scalar, _is_real_matrix from theano.tensor.blas import _dot22, res_is_a, _as_scalar, _is_real_matrix
from unittest import TestCase from unittest import TestCase
...@@ -152,7 +152,7 @@ class t_gemm(TestCase): ...@@ -152,7 +152,7 @@ class t_gemm(TestCase):
C = self.rand(4,5)[:,:4] C = self.rand(4,5)[:,:4]
def t(z,x,y,a=1.0, b=0.0,l='c|py',dt='float64'): def t(z,x,y,a=1.0, b=0.0,l='c|py',dt='float64'):
z,a,x,y,b = [numpy.asarray(p,dtype=dt) for p in z,a,x,y,b] z,a,x,y,b = [theano._asarray(p,dtype=dt) for p in z,a,x,y,b]
z_orig = z.copy() z_orig = z.copy()
z_after = self._gemm(z, a, x, y, b) z_after = self._gemm(z, a, x, y, b)
......
...@@ -13,7 +13,7 @@ class test_casting(unittest.TestCase): ...@@ -13,7 +13,7 @@ class test_casting(unittest.TestCase):
x = type_fn() x = type_fn()
f = function([x], op_fn(x)) f = function([x], op_fn(x))
xval = numpy.asarray(numpy.random.rand(10)*10, dtype=type_fn.dtype) xval = theano._asarray(numpy.random.rand(10)*10, dtype=type_fn.dtype)
yval = f(xval) yval = f(xval)
assert str(yval.dtype) == op_fn.scalar_op.output_types_preference.spec[0].dtype assert str(yval.dtype) == op_fn.scalar_op.output_types_preference.spec[0].dtype
......
...@@ -73,7 +73,7 @@ def test_add_canonizer_problem0(): ...@@ -73,7 +73,7 @@ def test_add_canonizer_problem0():
n_segments = 10 n_segments = 10
label = lscalar('label') label = lscalar('label')
segment_labels = label + numpy.asarray([0] * n_segments, dtype='int64') segment_labels = label + theano._asarray([0] * n_segments, dtype='int64')
r = segment_labels * 5 r = segment_labels * 5
f = function([label], r) f = function([label], r)
...@@ -149,14 +149,14 @@ class test_canonize(unittest.TestCase): ...@@ -149,14 +149,14 @@ class test_canonize(unittest.TestCase):
dx, dy, dz = dmatrices('xyz') dx, dy, dz = dmatrices('xyz')
fv = fvector('r').dimshuffle('x',0) fv = fvector('r').dimshuffle('x',0)
dv = dvector('s').dimshuffle('x',0) dv = dvector('s').dimshuffle('x',0)
fxv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fxv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fyv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fyv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fzv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fzv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0]) fvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
dxv = numpy.asarray(numpy.random.rand(*shp),dtype='float64') dxv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dyv = numpy.asarray(numpy.random.rand(*shp),dtype='float64') dyv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dzv = numpy.asarray(numpy.random.rand(*shp),dtype='float64') dzv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0]) dvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0])
cases = [ cases = [
(fx+fy,(fx,fy),(fxv,fyv),1,'float32'), (fx+fy,(fx,fy),(fxv,fyv),1,'float32'),
(fx*fy,(fx,fy),(fxv,fyv),1,'float32'), (fx*fy,(fx,fy),(fxv,fyv),1,'float32'),
...@@ -229,14 +229,14 @@ class test_canonize(unittest.TestCase): ...@@ -229,14 +229,14 @@ class test_canonize(unittest.TestCase):
dx, dy, dz = dmatrices('xyz') dx, dy, dz = dmatrices('xyz')
fv = fvector('r').dimshuffle('x',0) fv = fvector('r').dimshuffle('x',0)
dv = dvector('s').dimshuffle('x',0) dv = dvector('s').dimshuffle('x',0)
fxv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fxv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fyv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fyv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fzv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fzv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0]) fvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
dxv = numpy.asarray(numpy.random.rand(*shp),dtype='float64') dxv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dyv = numpy.asarray(numpy.random.rand(*shp),dtype='float64') dyv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dzv = numpy.asarray(numpy.random.rand(*shp),dtype='float64') dzv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0]) dvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0])
cases = [ cases = [
(fx+fy,(fx,fy),(fxv,fyv),1,'float32'), (fx+fy,(fx,fy),(fxv,fyv),1,'float32'),
(fx*fy,(fx,fy),(fxv,fyv),1,'float32'), (fx*fy,(fx,fy),(fxv,fyv),1,'float32'),
...@@ -312,16 +312,16 @@ class test_canonize(unittest.TestCase): ...@@ -312,16 +312,16 @@ class test_canonize(unittest.TestCase):
dx, dy, dz, dw = dmatrices('xyzw') dx, dy, dz, dw = dmatrices('xyzw')
fv = fvector('r').dimshuffle('x',0) fv = fvector('r').dimshuffle('x',0)
dv = dvector('s').dimshuffle('x',0) dv = dvector('s').dimshuffle('x',0)
fxv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fxv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fyv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fyv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fzv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fzv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fwv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fwv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0]) fvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
dxv = numpy.asarray(numpy.random.rand(*shp),dtype='float64') dxv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dyv = numpy.asarray(numpy.random.rand(*shp),dtype='float64') dyv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dzv = numpy.asarray(numpy.random.rand(*shp),dtype='float64') dzv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dwv = numpy.asarray(numpy.random.rand(*shp),dtype='float64') dwv = theano._asarray(numpy.random.rand(*shp),dtype='float64')
dvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0]) dvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0])
#We must be sure that the Canonizer is working, but that we don't have other #We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion # optimisation that could hide bug in the Canonizer as local_elemwise_fusion
...@@ -463,13 +463,13 @@ class test_canonize(unittest.TestCase): ...@@ -463,13 +463,13 @@ class test_canonize(unittest.TestCase):
shp=(4,4) shp=(4,4)
fx, fy, fz = fmatrices('xyz') fx, fy, fz = fmatrices('xyz')
dx, dy, dz = dmatrices('xyz') dx, dy, dz = dmatrices('xyz')
fxv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fxv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fyv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fyv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fzv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') fzv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
dxv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') dxv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
dyv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') dyv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
dzv = numpy.asarray(numpy.random.rand(*shp),dtype='float32') dzv = theano._asarray(numpy.random.rand(*shp),dtype='float32')
fvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0]) fvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
#We must be sure that the Canonizer is working, but that we don't have other #We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion # optimisation that could hide bug in the Canonizer as local_elemwise_fusion
mode=compile.mode.predefined_modes[compile.mode.default_mode] mode=compile.mode.predefined_modes[compile.mode.default_mode]
...@@ -589,7 +589,7 @@ class test_fusion(unittest.TestCase): ...@@ -589,7 +589,7 @@ class test_fusion(unittest.TestCase):
""" """
#TODO: disable the canonizer? #TODO: disable the canonizer?
def my_init(shp, dtype='float64', num=0): def my_init(shp, dtype='float64', num=0):
#ret = numpy.asarray(numpy.random.rand(*shp),dtype=dtype) #ret = theano._asarray(numpy.random.rand(*shp),dtype=dtype)
ret = numpy.zeros(shp, dtype=dtype)+num ret = numpy.zeros(shp, dtype=dtype)+num
return ret return ret
fw, fx, fy, fz = fmatrices('wxyz') fw, fx, fy, fz = fmatrices('wxyz')
...@@ -601,15 +601,15 @@ class test_fusion(unittest.TestCase): ...@@ -601,15 +601,15 @@ class test_fusion(unittest.TestCase):
fxv = my_init(shp,'float32',2) fxv = my_init(shp,'float32',2)
fyv = my_init(shp,'float32',3) fyv = my_init(shp,'float32',3)
fzv = my_init(shp,'float32',4) fzv = my_init(shp,'float32',4)
fvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0]) fvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float32').reshape(1,shp[0])
dwv = my_init(shp,'float64',5) dwv = my_init(shp,'float64',5)
ixv = numpy.asarray(my_init(shp,num=60),dtype='int32') ixv = theano._asarray(my_init(shp,num=60),dtype='int32')
iyv = numpy.asarray(my_init(shp,num=70),dtype='int32') iyv = theano._asarray(my_init(shp,num=70),dtype='int32')
izv = numpy.asarray(my_init(shp,num=70),dtype='int32') izv = theano._asarray(my_init(shp,num=70),dtype='int32')
# dxv = my_init(shp,'float64',6) # dxv = my_init(shp,'float64',6)
# dyv = my_init(shp,'float64',7) # dyv = my_init(shp,'float64',7)
# dzv = my_init(shp,'float64',8) # dzv = my_init(shp,'float64',8)
# dvv = numpy.asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0]) # dvv = theano._asarray(numpy.random.rand(shp[0]),dtype='float64').reshape(1,shp[0])
fwx=fw+fx fwx=fw+fx
cases = [ cases = [
(fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,fxv+fyv+fzv,'float32'),#1 (fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,fxv+fyv+fzv,'float32'),#1
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论