提交 f5ed1817 authored 作者: Pascal Lamblin

Update tests to use .get_value() and .set_value() on shared variables.

上级 27084539
......@@ -44,8 +44,8 @@ class Test_SharedVariable(unittest.TestCase):
u = shared('asdf', strict=False)
v = shared('asdf', strict=True)
u.value = 88
v.value = 88
u.set_value(88)
v.set_value(88)
def test_create_numpy_strict_false(self):
......@@ -96,14 +96,14 @@ class Test_SharedVariable(unittest.TestCase):
strict=False)
# check that assignments to value are casted properly
u.value = [3,4]
assert type(u.value) is numpy.ndarray
assert str(u.value.dtype) == 'float64'
assert numpy.all(u.value == [3,4])
u.set_value([3,4])
assert type(u.get_value()) is numpy.ndarray
assert str(u.get_value(borrow=True).dtype) == 'float64'
assert numpy.all(u.get_value() == [3,4])
# check that assignments of nonsense fail
try:
u.value = 'adsf'
u.set_value('adsf')
assert 0
except ValueError:
pass
......@@ -114,7 +114,8 @@ class Test_SharedVariable(unittest.TestCase):
assert u.get_value(borrow=True) is uval
def test_scalar_strict(self):
def f(var, val): var.value = val
def f(var, val):
var.set_value(val)
b = shared(numpy.int64(7), strict=True)
assert b.type == theano.tensor.lscalar
......@@ -154,7 +155,8 @@ class Test_SharedVariable(unittest.TestCase):
def test_tensor_strict(self):
def f(var, val): var.value = val
def f(var, val):
var.set_value(val)
b = shared(numpy.int64([7]), strict=True)
assert b.type == theano.tensor.lvector
......@@ -206,47 +208,48 @@ class Test_SharedVariable(unittest.TestCase):
# Since downcasting of a value now raises an Exception,
def f(var, val): var.value = val
def f(var, val):
var.set_value(val)
b = shared(numpy.int64(7), allow_downcast=True)
assert b.type == theano.tensor.lscalar
f(b,8.23)
assert b.value==8
assert b.get_value()==8
b = shared(numpy.int32(7), allow_downcast=True)
assert b.type == theano.tensor.iscalar
f(b,8.23)
assert b.value==8
assert b.get_value()==8
b = shared(numpy.int16(7), allow_downcast=True)
assert b.type == theano.tensor.wscalar
f(b,8.23)
assert b.value==8
assert b.get_value()==8
b = shared(numpy.int8(7), allow_downcast=True)
assert b.type == theano.tensor.bscalar
f(b,8.23)
assert b.value==8
assert b.get_value()==8
b = shared(numpy.float64(7.234), allow_downcast=True)
assert b.type == theano.tensor.dscalar
f(b,8)
assert b.value==8
assert b.get_value()==8
b = shared(numpy.float32(7.234), allow_downcast=True)
assert b.type == theano.tensor.fscalar
f(b,8)
assert b.value==8
assert b.get_value()==8
b = shared(numpy.float(7.234), allow_downcast=True)
assert b.type == theano.tensor.dscalar
f(b,8)
assert b.value==8
assert b.get_value()==8
b = shared(7.234, allow_downcast=True)
assert b.type == theano.tensor.dscalar
f(b,8)
assert b.value==8
assert b.get_value()==8
c = shared(numpy.zeros((5,5), dtype='float32'), allow_downcast=True)
self.failUnlessRaises(TypeError, f, b, numpy.random.rand(5,5))
......@@ -254,37 +257,38 @@ class Test_SharedVariable(unittest.TestCase):
def test_tensor_floatX(self):
def f(var, val): var.value = val
def f(var, val):
var.set_value(val)
b = shared(numpy.int64([7]), allow_downcast=True)
assert b.type == theano.tensor.lvector
f(b,[8.23])
assert b.value == 8
assert b.get_value() == 8
b = shared(numpy.int32([7]), allow_downcast=True)
assert b.type == theano.tensor.ivector
f(b,[8.23])
assert b.value == 8
assert b.get_value() == 8
b = shared(numpy.int16([7]), allow_downcast=True)
assert b.type == theano.tensor.wvector
f(b,[8.23])
assert b.value == 8
assert b.get_value() == 8
b = shared(numpy.int8([7]), allow_downcast=True)
assert b.type == theano.tensor.bvector
f(b,[8.23])
assert b.value == 8
assert b.get_value() == 8
b = shared(numpy.float64([7.234]), allow_downcast=True)
assert b.type == theano.tensor.dvector
f(b,[8])
assert b.value == 8
assert b.get_value() == 8
b = shared(numpy.float32([7.234]), allow_downcast=True)
assert b.type == theano.tensor.fvector
f(b,[8])
assert b.value == 8
assert b.get_value() == 8
#numpy.float([7.234]) don't work
# b = shared(numpy.float([7.234]))
......@@ -299,7 +303,7 @@ class Test_SharedVariable(unittest.TestCase):
b = shared(numpy.asarray([7.234],dtype=theano.config.floatX), allow_downcast=True)
assert b.dtype == theano.config.floatX
f(b,[8])
assert b.value == 8
assert b.get_value() == 8
c = shared(numpy.zeros((5,5), dtype='float32'), allow_downcast=True)
self.failUnlessRaises(TypeError, f, b, numpy.random.rand(5,5))
......
......@@ -212,10 +212,10 @@ def test_elemwise_empty():
f = pfunc([b], [], updates=[(a, a+b)], mode=mode_with_gpu)
f2 = pfunc([b], [], updates=[(a, a+b)], mode=mode_without_gpu)
a0 = a.value * 1.0
a0 = a.get_value() * 1.0
f(numpy.ones((0,0), dtype='float32'))
assert numpy.all(a0 + 1.0 == a.value)
assert numpy.all(a0 + 1.0 == a.get_value())
def test_elemwise0():
......@@ -228,14 +228,14 @@ def test_elemwise0():
#check that we work inplace.
assert f.maker.env.toposort()[1].op.destroy_map.items()==[(0,[0])]
a0 = a.value * 1.0
print 'BEFORE ADD', a.value
a0 = a.get_value() * 1.0
print 'BEFORE ADD', a.get_value()
for i, node in enumerate(f.maker.env.toposort()):
print i, node
f(numpy.ones((4,4), dtype='float32'))
print 'AFTER ADD', a.value
print 'AFTER ADD', a.get_value()
assert numpy.all(a0 + 1.0 == a.value)
assert numpy.all(a0 + 1.0 == a.get_value())
def test_elemwise_bad_broadcast():
x = cuda.fmatrix('x')
......@@ -751,7 +751,7 @@ def test_gpualloc_input_on_gpu():
assert sum([node.op == T.alloc for node in f.maker.env.toposort()])==1
assert sum([node.op == B.gpu_alloc for node in f_gpu.maker.env.toposort()])==1
assert numpy.allclose(numpy.ones(a.value.shape)+9,f_gpu(9))
assert numpy.allclose(numpy.ones(a.get_value(borrow=True).shape)+9,f_gpu(9))
assert numpy.allclose(f(5),f_gpu(5))
def test_gpujoin_gpualloc():
......@@ -788,7 +788,7 @@ def test_gpualloc_output_to_gpu():
assert sum([node.op == T.alloc for node in f.maker.env.toposort()])==1
assert sum([node.op == B.gpu_alloc for node in f_gpu.maker.env.toposort()])==1
assert numpy.allclose(numpy.ones(a.value.shape)+9,f_gpu(9))
assert numpy.allclose(numpy.ones(a.get_value(borrow=True).shape)+9,f_gpu(9))
assert numpy.allclose(f(5),f_gpu(5))
import theano.tensor.tests.test_basic
......
......@@ -35,15 +35,15 @@ def test_dot22():
f = pfunc([b], [], updates=[(a, tensor.dot(a,b))], mode=mode_with_gpu)
a0 = a.value * 1.0
a0 = a.get_value() * 1.0
print a0
for i, node in enumerate(f.maker.env.toposort()):
print i, node
bval = my_rand(4,4)
f(bval)
print a.value
print a.get_value()
assert numpy.allclose(numpy.dot(a0, bval), a.value)
assert numpy.allclose(numpy.dot(a0, bval), a.get_value())
def test_dot22scalar():
a = tensor.fmatrix()
......@@ -82,16 +82,16 @@ def test_gemm():
f = pfunc([b,c], [], updates=[(a, tensor.dot(a,b) + tensor.exp(c))], mode=mode_with_gpu)
assert any([node.op == tcn.blas.gpu_gemm_inplace for node in f.maker.env.toposort()])
a0 = a.value * 1.0
a0 = a.get_value() * 1.0
print a0
for i, node in enumerate(f.maker.env.toposort()):
print i, node
bval = my_rand(4,4)
cval = my_rand(4,4)
f(bval,cval)
print a.value
print a.get_value()
assert numpy.allclose(numpy.dot(a0, bval)+numpy.exp(cval), a.value)
assert numpy.allclose(numpy.dot(a0, bval)+numpy.exp(cval), a.get_value())
def test_gemm_no_inplace():
......@@ -104,7 +104,7 @@ def test_gemm_no_inplace():
f = pfunc([b,b2], [tensor.dot(a,b2) + c], updates=[(a, tensor.dot(a,b) + c)], mode=mode_with_gpu)
a0 = a.value * 1.0
a0 = a.get_value() * 1.0
#print a0
for i, node in enumerate(f.maker.env.toposort()):
print i, node
......@@ -112,9 +112,9 @@ def test_gemm_no_inplace():
bval = my_rand(4,4)
bval2 = my_rand(4,4)
rval = f(bval,bval2)
#print a.value
#print a.get_value()
assert numpy.allclose(numpy.dot(a0, bval)+cval, a.value)
assert numpy.allclose(numpy.dot(a0, bval)+cval, a.get_value())
assert numpy.allclose(numpy.dot(a0, bval2)+cval, rval)
if 0:
......
......@@ -322,9 +322,9 @@ def build_conv_nnet2_classif(use_gpu, isize, ksize, n_batch,
v = shared_fn(0.01*my_randn(n_hid, n_out), 'v')
c = shared_fn(my_zeros(n_out), 'c')
print 'ALLOCATING ARCH: w0 shape', w0.value.shape
print 'ALLOCATING ARCH: w1 shape', w1.value.shape
print 'ALLOCATING ARCH: v shape', v.value.shape
print 'ALLOCATING ARCH: w0 shape', w0.get_value(borrow=True).shape
print 'ALLOCATING ARCH: w1 shape', w1.get_value(borrow=True).shape
print 'ALLOCATING ARCH: v shape', v.get_value(borrow=True).shape
x = tensor.Tensor(dtype='float32', broadcastable=(0,1,0,0))('x')
y = tensor.fmatrix('y')
......
......@@ -24,13 +24,13 @@ def test_neibs():
f = function([], images2neibs(images, neib_shape), mode=mode_without_gpu)
#print images.value
#print images.get_value(borrow=True)
neibs = f()
#print neibs
g = function([], neibs2images(neibs, neib_shape, images.shape), mode=mode_without_gpu)
#print g()
assert numpy.allclose(images.value,g())
assert numpy.allclose(images.get_value(borrow=True),g())
def test_neibs_bad_shape():
shape = (2,3,10,10)
......@@ -121,7 +121,7 @@ def test_neibs_manual():
f = function([], images2neibs(images, neib_shape), mode=mode_without_gpu)
#print images.value
#print images.get_value(borrow=True)
neibs = f()
print neibs
assert numpy.allclose(neibs,[[ 0, 1, 4, 5],
......@@ -151,7 +151,7 @@ def test_neibs_manual():
g = function([], neibs2images(neibs, neib_shape, images.shape), mode=mode_without_gpu)
#print g()
assert numpy.allclose(images.value,g())
assert numpy.allclose(images.get_value(borrow=True), g())
def test_neibs_step_manual():
......@@ -165,7 +165,7 @@ def test_neibs_step_manual():
for mode_idx,mode in enumerate(modes):
f = function([], images2neibs(images, neib_shape, neib_step), mode=mode)
#print images.value
#print images.get_value(borrow=True)
neibs = f()
if mode_idx==0:
assert Images2Neibs in [type(node.op) for node in f.maker.env.toposort()]
......@@ -200,7 +200,7 @@ def test_neibs_step_manual():
#g = function([], neibs2images(neibs, neib_shape, images.shape), mode=mode_without_gpu)
#print g()
#assert numpy.allclose(images.value,g())
#assert numpy.allclose(images.get_value(borrow=True),g())
def test_neibs_wrap_centered_step_manual():
......@@ -275,7 +275,7 @@ def test_neibs_wrap_centered_step_manual():
#g = function([], neibs2images(neibs, neib_shape, images.shape), mode=mode_without_gpu)
#assert numpy.allclose(images.value,g())
#assert numpy.allclose(images.get_value(borrow=True), g())
def test_neibs_gpu():
......@@ -297,14 +297,14 @@ def test_neibs_gpu():
f_gpu = function([], images2neibs(images,neib_shape),
mode=mode_with_gpu)
assert any([isinstance(node.op,GpuImages2Neibs) for node in f_gpu.maker.env.toposort()])
#print images.value
#print images.get_value(borrow=True)
neibs = numpy.asarray(f_gpu())
assert numpy.allclose(neibs,f())
#print neibs
g = function([], neibs2images(neibs, neib_shape, images.shape), mode=mode_with_gpu)
assert any([isinstance(node.op,GpuImages2Neibs) for node in f.maker.env.toposort()])
#print numpy.asarray(g())
assert numpy.allclose(images.value,g())
assert numpy.allclose(images.get_value(borrow=True), g())
def speed_neibs():
......
......@@ -257,9 +257,9 @@ def test_mlp():
batch_size = 100 # size of the minibatch
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.value.shape[0] / batch_size
n_valid_batches = valid_set_x.value.shape[0] / batch_size
n_test_batches = test_set_x.value.shape[0] / batch_size
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
......
......@@ -206,20 +206,20 @@ class t_gemm(TestCase):
#f(z, a, x, y, b)
f = inplace_func([], gemm_inplace(tz,ta,tx,ty,tb), mode = compile.Mode(optimizer = None, linker=l))
f()
self.failUnless(_approx_eq(z_after, tz.value), (z_orig, z_after, z, z_after - z))
self.failUnless(_approx_eq(z_after, tz.get_value(borrow=True)), (z_orig, z_after, z, z_after - z))
f()
self.failUnless(_approx_eq(z_after, tz.value), (z_orig, z_after, z, z_after - z))
self.failUnless(_approx_eq(z_after, tz.get_value(borrow=True)), (z_orig, z_after, z, z_after - z))
f()
self.failUnless(_approx_eq(z_after, tz.value), (z_orig, z_after, z, z_after - z))
self.failUnless(_approx_eq(z_after, tz.get_value(borrow=True)), (z_orig, z_after, z, z_after - z))
#tz.value *= 0 # clear z's value
y_T = ty.value.T
ty.value = tx.value.T
tx.value = y_T
y_T = ty.get_value(borrow=True).T
ty.set_value(tx.get_value(borrow=True).T, borrow=True)
tx.set_value(y_T, borrow=True)
f()
# test that the transposed version of multiplication gives same answer
self.failUnless(_approx_eq(z_after, tz.value.T))
self.failUnless(_approx_eq(z_after, tz.get_value(borrow=True).T))
t(C,A,B)
t(C.T, A, B)
......@@ -670,7 +670,7 @@ def test_dot_vm():
f = theano.function([], theano.dot(v,m), mode = mode_blas_opt)
# Assert they produce the same output
assert numpy.allclose(f(), numpy.dot(v.value,m.value))
assert numpy.allclose(f(), numpy.dot(v.get_value(), m.get_value()))
assert sum([isinstance(node.op, T.Dot) for node in
f.maker.env.toposort() ]) == 1
......@@ -684,7 +684,7 @@ def test_dot_mv():
f = theano.function([], theano.dot(m,v), mode = mode_blas_opt)
# Assert they produce the same output
assert numpy.allclose(f(), numpy.dot(m.value,v.value))
assert numpy.allclose(f(), numpy.dot(m.get_value(), v.get_value()))
assert sum([isinstance(node.op, T.Dot) for node in
f.maker.env.toposort() ]) == 1
......@@ -700,7 +700,8 @@ def test_gemv1():
f = theano.function([], v2+theano.dot(m,v1), mode = mode_blas_opt)
# Assert they produce the same output
assert numpy.allclose(f(), numpy.dot(m.value,v1.value)+v2_orig)
assert numpy.allclose(f(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
topo = f.maker.env.toposort()
assert len(topo)==1
assert isinstance(topo[0].op, Gemv)
......@@ -712,7 +713,8 @@ def test_gemv1():
# Assert they produce the same output
f()
assert numpy.allclose(v2.value, numpy.dot(m.value,v1.value)+v2_orig)
assert numpy.allclose(v2.get_value(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
topo = f.maker.env.toposort()
assert len(topo)==1
assert isinstance(topo[0].op, Gemv)
......@@ -730,7 +732,8 @@ def test_gemv2():
f = theano.function([], v2+theano.dot(v1,m), mode = mode_blas_opt)
# Assert they produce the same output
assert numpy.allclose(f(), numpy.dot(v1.value,m.value)+v2.value)
assert numpy.allclose(f(),
numpy.dot(v1.get_value(), m.get_value()) + v2.get_value())
topo = f.maker.env.toposort()
assert sum(isinstance(node.op, Gemv) for node in topo)==1
assert topo[-1].op.inplace==False
......@@ -741,7 +744,8 @@ def test_gemv2():
# Assert they produce the same output
f()
assert numpy.allclose(v2.value, numpy.dot(v1.value, m.value)+v2_orig)
assert numpy.allclose(v2.get_value(),
numpy.dot(v1.get_value(), m.get_value()) + v2_orig)
topo = f.maker.env.toposort()
assert sum(isinstance(node.op, Gemv) for node in topo)==1
if config.mode != 'FAST_COMPILE':
......
......@@ -832,7 +832,7 @@ class test_fusion(unittest.TestCase):
for x in range(nb_repeat):
f(*val_inputs)
t1=time.time()
out=out.value
out=out.get_value()
times[id]=t1-t0
atol=1e-8
......
......@@ -29,7 +29,8 @@ class T_SharedRandomStreams(unittest.TestCase):
assert numpy.all(g() == g())
assert numpy.all(abs(nearly_zeros()) < 1e-5)
assert isinstance(rv_u.rng.value, numpy.random.RandomState)
assert isinstance(rv_u.rng.get_value(borrow=True),
numpy.random.RandomState)
def test_basics(self):
random = RandomStreams(utt.fetch_seed())
......
import numpy
import unittest
import warnings
import theano
from theano.gof.python25 import all
......@@ -319,15 +320,15 @@ def makeSharedTester(shared_constructor_,
if x.__class__.__name__ != 'csr_matrix':
#sparse matrix don't support inplace affectation
x_shared.container.value[:] = nd
assert (numpy.asarray(x_shared.value)==nd).all()
assert (numpy.asarray(x_shared.get_value(borrow=True))==nd).all()
#This should always share value!
assert may_share_memory(old_data, x_shared.container.storage[0])
assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))
nd[0]+=1
x_shared.container.value[0] = nd[0]
assert (numpy.asarray(x_shared.value[0])==nd[0]).all()
assert (numpy.asarray(x_shared.value[1:])==nd[1:]).all()
assert (numpy.asarray(x_shared.get_value(borrow=True)[0])==nd[0]).all()
assert (numpy.asarray(x_shared.get_value(borrow=True)[1:])==nd[1:]).all()
#This should always share value!
assert may_share_memory(old_data, x_shared.container.storage[0])
assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))
......@@ -336,23 +337,31 @@ def makeSharedTester(shared_constructor_,
#sparse matrix don't support inplace affectation
nd += 1
#THIS DON't DO WHAT WE EXPECT the contain of a is not updated for CudaNdarray, but it is for ndarray
x_shared.value[:] = nd
#assert (numpy.asarray(x_shared.value)!=nd).all()
x_shared.get_value(borrow=True)[:] = nd
#assert (numpy.asarray(x_shared.get_value(borrow=True))!=nd).all()
assert may_share_memory(old_data, x_shared.container.storage[0])
x_shared.value
x_shared.get_value(borrow=True)
# Test by .value
# As we know that .value is deprecated, we filter out the warning
warnings.filterwarnings(
action='ignore',
message='The .value property of shared variables is deprecated.'
)
nd += 1
old_data = x_shared.container.storage[0]
x_shared.value = nd
assert numpy.allclose(self.ref_fct(x_shared.value), self.ref_fct(self.cast_value(nd)))
assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace
# restore the warning filters
warnings.resetwarnings()
# Test by set_value with borrow=False
nd += 1
old_data = x_shared.container.storage[0]
x_shared.set_value(nd, borrow=False)
assert numpy.allclose(self.ref_fct(x_shared.value), self.ref_fct(self.cast_value(nd)))
assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
self.ref_fct(self.cast_value(nd)))
assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace
# Test by set_value with borrow=False when new data casted.
......@@ -360,14 +369,16 @@ def makeSharedTester(shared_constructor_,
nd += 1
old_data = x_shared.container.storage[0]
x_shared.set_value(self.cast_value(nd), borrow=False)
assert numpy.allclose(self.ref_fct(x_shared.value), self.ref_fct(self.cast_value(nd)))
assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
self.ref_fct(self.cast_value(nd)))
assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_casted_value_inplace
# Test by set_value with borrow=True
nd += 1
old_data = x_shared.container.storage[0]
x_shared.set_value(nd.copy(), borrow=True)
assert numpy.allclose(self.ref_fct(x_shared.value), self.ref_fct(self.cast_value(nd)))
assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
self.ref_fct(self.cast_value(nd)))
assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace
# Test by set_value with borrow=True when new data casted.
......@@ -375,7 +386,7 @@ def makeSharedTester(shared_constructor_,
nd += 1
old_data = x_shared.container.storage[0]
x_shared.set_value(self.cast_value(nd.copy()), borrow=True)
assert numpy.allclose(self.ref_fct(x_shared.value), self.ref_fct(self.cast_value(nd)))
assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)), self.ref_fct(self.cast_value(nd)))
assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_casted_value_inplace
def test_specify_shape(self):
......@@ -395,7 +406,8 @@ def makeSharedTester(shared_constructor_,
x1_shared = self.shared_constructor(x1_1)
x1_specify_shape = tensor.specify_shape(x1_shared,x1_1.shape)
x1_shared.set_value(x1_2)
assert numpy.allclose(self.ref_fct(x1_shared.value), self.ref_fct( x1_2))
assert numpy.allclose(self.ref_fct(x1_shared.get_value(borrow=True)),
self.ref_fct( x1_2))
shape_op_fct = theano.function([],x1_shared.shape)
topo = shape_op_fct.maker.env.toposort()
if theano.config.mode!='FAST_COMPILE':
......@@ -460,7 +472,9 @@ def makeSharedTester(shared_constructor_,
(tensor.as_tensor_variable(x1_1.shape[0]),
x1_shared.shape[1]))
x1_shared.set_value(x1_2)
assert numpy.allclose(self.ref_fct(x1_shared.value), self.ref_fct( x1_2))
assert numpy.allclose(
self.ref_fct(x1_shared.get_value(borrow=True)),
self.ref_fct( x1_2))
shape_op_fct = theano.function([],x1_shared.shape)
topo = shape_op_fct.maker.env.toposort()
shape_op_fct()
......@@ -529,7 +543,7 @@ def makeSharedTester(shared_constructor_,
assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
#Their is no inplace gemm for sparse
#assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "StructuredDot")
s_shared_specify = tensor.specify_shape(s_shared,s_shared.value.shape)
s_shared_specify = tensor.specify_shape(s_shared, s_shared.get_value(borrow=True).shape)
#now test with the specify shape op in the output
f = theano.function([], s_shared.shape,
......@@ -544,8 +558,10 @@ def makeSharedTester(shared_constructor_,
assert all(node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op,tensor.blas.Gemm))
assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
#now test with the specify shape op in the inputs and outputs
a_shared = tensor.specify_shape(a_shared,a_shared.value.shape)
b_shared = tensor.specify_shape(b_shared,b_shared.value.shape)
a_shared = tensor.specify_shape(a_shared,
a_shared.get_value(borrow=True).shape)
b_shared = tensor.specify_shape(b_shared,
b_shared.get_value(borrow=True).shape)
f = theano.function([], s_shared.shape,
updates={s_shared:theano.dot(a_shared,b_shared)
......
......@@ -225,9 +225,11 @@ class T_Scan(unittest.TestCase):
v_x0 = asarrayX(rng.uniform())
# compute the output i numpy
v_out = numpy.zeros((4,))
v_out[0] = v_u[0]*W_in.value + v_x0*W.value
v_out[0] = (v_u[0] * W_in.get_value(borrow=True) +
v_x0*W.get_value(borrow=True))
for step in xrange(1,4):
v_out[step] = v_u[step]*W_in.value + v_out[step-1]*W.value
v_out[step] = (v_u[step] * W_in.get_value(borrow=True) +
v_out[step-1] * W.get_value(borrow=True))
theano_values = f3(v_u, v_x0)
assert numpy.allclose(theano_values, v_out)
......@@ -539,8 +541,8 @@ class T_Scan(unittest.TestCase):
assert numpy.allclose( theano_y0 , numpy_y0[3:])
assert numpy.allclose( theano_y1 , numpy_y1[1:])
assert numpy.allclose( theano_y2 , numpy_y2 )
assert numpy.allclose( W1.value , numpy_W1 )
assert numpy.allclose( W2.value , numpy_W2 )
assert numpy.allclose(W1.get_value(borrow=True), numpy_W1)
assert numpy.allclose(W2.get_value(borrow=True), numpy_W2)
......@@ -622,7 +624,7 @@ class T_Scan(unittest.TestCase):
n_steps = 3
this_f(n_steps)
numpy_state = v_state* (2**(n_steps))
assert numpy.allclose(state.value, numpy_state)
assert numpy.allclose(state.get_value(borrow=True), numpy_state)
def test_map_functionality(self):
def f_rnn(u_t):
......@@ -1005,7 +1007,7 @@ class T_Scan(unittest.TestCase):
f()
print X.value
print X.get_value(borrow=True)
'''
def test_scan_output_padding(self):
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论