提交 7f066345 authored 作者: Frederic Bastien's avatar Frederic Bastien

implement/test the proposal that created shared variable must return the same…

implement/test the proposal that created shared variables must return the same type as what they received as input.
上级 2a9bd646
......@@ -142,7 +142,7 @@ transparent. But when you are using a GPU (or in future perhaps a remote machin
is not the internal representation of your data.
If you really want Theano to return its internal representation *and never copy it*
then you should use the ``return_internal_type=True`` argument to
``get_value``. It will never copy the internal object (always return in
``get_value``. It will never cast the internal object (always return in
constant time), but might return various datatypes depending on contextual
factors (e.g. the compute device, the dtype of the numpy array).
......@@ -154,6 +154,12 @@ It is possible to use ``borrow=False`` in conjunction with
``return_internal_type=True``, which will return a deep copy of the internal object.
This is primarily for internal debugging, not for typical use.
For the transparent use of the different types of optimization Theano can make,
the policy is that get_value() by default always returns the same object type as
it received when the shared variable was created. So if you manually created data on
the GPU and created a shared variable on the GPU with this data, get_value will always
return GPU data even when return_internal_type=False.
*Take home message:*
It is safe (and sometimes much faster) to use ``get_value(borrow=True)`` when
......
......@@ -830,11 +830,12 @@ import theano.tensor.tests.test_sharedvar
test_shared_options = theano.tensor.tests.test_sharedvar.makeSharedTester(
shared_constructor_ = tcn.shared_constructor,
dtype_ = 'float32',
get_value_borrow_true_alias_ = False,
get_value_borrow_true_alias_ = True,
shared_borrow_true_alias_ = True,#True when the original value is already a CudaNdarray!
set_value_borrow_true_alias_ = False,
set_value_borrow_true_alias_ = True,
set_value_inplace_ = True,
set_casted_value_inplace_ = False,
shared_constructor_accept_ndarray_ = True,
internal_type_ = cuda_ndarray.CudaNdarray,
test_internal_type_ = lambda a: isinstance(a,cuda_ndarray.CudaNdarray),
theano_fct_ = theano.tensor.exp,
......@@ -851,6 +852,7 @@ test_shared_options2 = theano.tensor.tests.test_sharedvar.makeSharedTester(
set_value_borrow_true_alias_ = False,
set_value_inplace_ = True,
set_casted_value_inplace_ = True,
shared_constructor_accept_ndarray_ = True,
internal_type_ = cuda_ndarray.CudaNdarray,
test_internal_type_ = lambda a: isinstance(a,cuda_ndarray.CudaNdarray),
theano_fct_ = theano.tensor.exp,
......
......@@ -56,6 +56,8 @@ class CudaNdarraySharedVariable(SharedVariable, _operators):
Shared Variable interface to CUDA-allocated arrays
"""
get_value_return_ndarray = True
def get_value(self, borrow=False, return_internal_type=False):
"""
Return the value of this SharedVariable's internal array.
......@@ -76,7 +78,8 @@ class CudaNdarraySharedVariable(SharedVariable, _operators):
copying.
"""
if return_internal_type: # return a cuda_ndarray
if return_internal_type or not self.get_value_return_ndarray:
# return a cuda_ndarray
if borrow:
return self.container.value
else:
......@@ -183,7 +186,9 @@ def float32_shared_constructor(value, name=None, strict=False,
if broadcastable is None:
broadcastable = (False,) * len(value.shape)
type = CudaNdarrayType(broadcastable=broadcastable)
get_value_return_ndarray = True
if isinstance(value, theano.sandbox.cuda.CudaNdarray):
get_value_return_ndarray = False
if borrow:
deviceval = value
else:
......@@ -196,4 +201,7 @@ def float32_shared_constructor(value, name=None, strict=False,
except Exception, e:
print "ERROR", e
raise
rval.get_value_return_ndarray = get_value_return_ndarray
return rval
......@@ -508,6 +508,7 @@ test_shared_options=theano.tensor.tests.test_sharedvar.makeSharedTester(
set_value_borrow_true_alias_ = True,
set_value_inplace_ = False,
set_casted_value_inplace_ = False,
shared_constructor_accept_ndarray_ = False,
internal_type_ = scipy.sparse.csc_matrix,
test_internal_type_ = scipy.sparse.issparse,
theano_fct_ = lambda a: dense_from_sparse(a*2.),
......
......@@ -15,6 +15,7 @@ def makeSharedTester(shared_constructor_,
set_value_borrow_true_alias_,
set_value_inplace_,
set_casted_value_inplace_,
shared_constructor_accept_ndarray_,
internal_type_,
test_internal_type_,
theano_fct_,
......@@ -35,6 +36,7 @@ def makeSharedTester(shared_constructor_,
:param set_casted_value_inplace_: Should this shared variable overwrite the
current memory when the new value is of the same
type as the internal type.
:param shared_constructor_accept_ndarray_: Does the shared_constructor accept an ndarray as input?
:param internal_type_: The internal type used.
:param test_internal_type_: A function that tells whether its input is of the same
type as this shared variable's internal type.
......@@ -60,6 +62,7 @@ def makeSharedTester(shared_constructor_,
set_value_borrow_true_alias = set_value_borrow_true_alias_
set_value_inplace = set_value_inplace_
set_casted_value_inplace = set_casted_value_inplace_
shared_constructor_accept_ndarray = shared_constructor_accept_ndarray_
cast_value = staticmethod(cast_value_)
op_by_matrix = op_by_matrix_
......@@ -94,7 +97,7 @@ def makeSharedTester(shared_constructor_,
x = x_shared.get_value(borrow = False)
x /= .5
x /= values_to_div
total_val_3 = total_func()
......@@ -103,7 +106,7 @@ def makeSharedTester(shared_constructor_,
#in this case we can alias
x = x_shared.get_value(borrow = True)
x /= .5
x /= values_to_div
#this is not required by the contract but it is a feature we've
#implemented for some type of SharedVariable.
......@@ -194,6 +197,24 @@ def makeSharedTester(shared_constructor_,
#this is required by the contract
assert not numpy.allclose(self.ref_fct(x), total_func())
def test_get_value(self):
"""
Test that get_value returns the same type as the value that was given
when the shared variable was created.
"""
# Use the tester's dtype; fall back to the configured default float type.
dtype = self.dtype
if dtype is None:
dtype = theano.config.floatX
rng = numpy.random.RandomState(utt.fetch_seed())
x_orig = numpy.asarray(rng.uniform(0,1,[2,4]),dtype=dtype)
# cast_value converts the ndarray to the shared variable's internal type
x_cast = self.cast_value(x_orig)
if self.shared_constructor_accept_ndarray:
# constructed from an ndarray -> get_value must return an ndarray
x_shared = self.shared_constructor(x_orig, borrow = False)
assert isinstance(x_shared.get_value(), x_orig.__class__)
# constructed from the internal type -> get_value must return that type
x_shared = self.shared_constructor(x_cast, borrow = False)
assert isinstance(x_shared.get_value(), x_cast.__class__)
def test_set_value(self):
dtype = self.dtype
if dtype is None:
......@@ -211,10 +232,16 @@ def makeSharedTester(shared_constructor_,
total_func = theano.function([],total)
values_to_div = .5
if self.op_by_matrix:
#supported for cudandarray, but not ndarray.
values_to_div = self.internal_type(numpy.ones(x.shape,dtype=dtype)/2)
assert self.test_internal_type(values_to_div)
#test if that theano shared variable optimize set_value(borrow=True)
get_x = x_shared.get_value(borrow=True)
assert get_x is not x_orig#borrow=False to shared_constructor
get_x /= .5
get_x /= values_to_div
x_shared.set_value(get_x, borrow=True)
x = x_shared.get_value(borrow=True)
if self.set_value_borrow_true_alias:
......@@ -227,10 +254,6 @@ def makeSharedTester(shared_constructor_,
get_x = x_shared.get_value(borrow=True, return_internal_type=True)
assert get_x is not x_orig#borrow=False to shared_constructor
assert self.test_internal_type(get_x)
values_to_div = .5
if self.op_by_matrix:
values_to_div = self.internal_type(numpy.ones(x.shape,dtype=dtype)/2)#supported for cudandarray, but not ndarray.
assert self.test_internal_type(values_to_div)
get_x /= values_to_div#supported by ndarray and CudaNdarray
assert self.test_internal_type(get_x)
......@@ -531,6 +554,7 @@ test_shared_options=makeSharedTester(
set_value_borrow_true_alias_ = True,
set_value_inplace_ = False,
set_casted_value_inplace_ = False,
shared_constructor_accept_ndarray_ = True,
internal_type_ = numpy.ndarray,
test_internal_type_ = lambda a: isinstance(a,numpy.ndarray),
theano_fct_ = theano.tensor.sum,
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论