提交 0faf7819 authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron

Add some tests for the load and dump.

上级 6ba34a31
...@@ -4,10 +4,29 @@ from nose.plugins.skip import SkipTest ...@@ -4,10 +4,29 @@ from nose.plugins.skip import SkipTest
from theano.sandbox.cuda.var import float32_shared_constructor as f32sc from theano.sandbox.cuda.var import float32_shared_constructor as f32sc
from theano.sandbox.cuda import CudaNdarrayType, cuda_available from theano.sandbox.cuda import CudaNdarrayType, cuda_available
import theano
# Skip test if cuda_ndarray is not available. # Skip test if cuda_ndarray is not available.
if cuda_available == False: if cuda_available == False:
raise SkipTest('Optional package cuda disabled') raise SkipTest('Optional package cuda disabled')
def test_shared_pickle():
    """Unpickle a fixed shared-variable pickle string and verify the result.

    This variant assumes the GPU backend is active: the unpickled value
    should be a HostFromGpu apply node wrapping a CudaNdarray shared
    variable, and re-pickling an equivalent shared variable must reproduce
    the exact same string.
    """
    import pickle
    picklestring = "ctheano.tensor.sharedvar\nload_shared_variable\np0\n(cnumpy.core.multiarray\n_reconstruct\np1\n(cnumpy\nndarray\np2\n(I0\ntp3\nS'b'\np4\ntp5\nRp6\n(I1\n(I2\ntp7\ncnumpy\ndtype\np8\n(S'f4'\np9\nI0\nI1\ntp10\nRp11\n(I3\nS'<'\np12\nNNNI-1\nI-1\nI0\ntp13\nbI00\nS'\\x00\\x00\\x80?\\x00\\x00\\x00@'\np14\ntp15\nbtp16\nRp17\n."
    expected = numpy.array([1.0, 2.0], dtype='float32')
    loaded = pickle.loads(picklestring)
    # This test will always be on the GPU.
    assert isinstance(loaded, theano.tensor.basic.TensorVariable)
    owner = loaded.owner
    assert isinstance(owner, theano.gof.graph.Apply)
    assert isinstance(owner.op, theano.sandbox.cuda.HostFromGpu)
    shared_input = owner.inputs[0]
    assert isinstance(shared_input, CudaNdarrayType.SharedVariable)
    assert (shared_input.get_value() == expected).all()
    # Round-trip: dumping an equivalent shared variable must yield the same
    # string, so that the assertions above are not bogus.
    round_tripped = theano.tensor.as_tensor_variable(theano.shared(expected))
    assert pickle.dumps(round_tripped) == picklestring
def test_float32_shared_constructor(): def test_float32_shared_constructor():
npy_row = numpy.zeros((1,10), dtype='float32') npy_row = numpy.zeros((1,10), dtype='float32')
......
...@@ -637,3 +637,25 @@ test_shared_options=makeSharedTester( ...@@ -637,3 +637,25 @@ test_shared_options=makeSharedTester(
cast_value_ = numpy.asarray, cast_value_ = numpy.asarray,
op_by_matrix_ = False, op_by_matrix_ = False,
name='test_shared_options') name='test_shared_options')
def test_shared_pickle():
    """Unpickle a fixed shared-variable pickle string and verify the result.

    Device-agnostic variant: on CPU the value is a TensorSharedVariable we
    can inspect directly; on GPU we only check the wrapper type (deeper
    checks live in the cuda-specific tests, to avoid importing
    theano.sandbox.cuda here). Re-pickling must reproduce the same string.
    """
    import pickle
    picklestring = "ctheano.tensor.sharedvar\nload_shared_variable\np0\n(cnumpy.core.multiarray\n_reconstruct\np1\n(cnumpy\nndarray\np2\n(I0\ntp3\nS'b'\np4\ntp5\nRp6\n(I1\n(I2\ntp7\ncnumpy\ndtype\np8\n(S'f4'\np9\nI0\nI1\ntp10\nRp11\n(I3\nS'<'\np12\nNNNI-1\nI-1\nI0\ntp13\nbI00\nS'\\x00\\x00\\x80?\\x00\\x00\\x00@'\np14\ntp15\nbtp16\nRp17\n."
    expected = numpy.array([1.0, 2.0], dtype='float32')
    loaded = pickle.loads(picklestring)
    assert loaded.type == theano.tensor.fvector
    device = theano.config.device
    if device.startswith('cpu'):
        assert isinstance(loaded, theano.tensor.sharedvar.TensorSharedVariable)
        assert (loaded.get_value() == expected).all()
    elif device.startswith('gpu'):
        assert isinstance(loaded, theano.tensor.basic.TensorVariable)
        # We don't go digging deeper because we don't want to import
        # theano.sandbox.cuda; some other tests cover that.
    # Round-trip: dumping an equivalent shared variable must yield the same
    # string, so that the assertions above are not bogus.
    round_tripped = theano.tensor.as_tensor_variable(theano.shared(expected))
    assert pickle.dumps(round_tripped) == picklestring
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论