提交 4572371c authored 作者: Olivier Delalleau's avatar Olivier Delalleau

Deprecated tensor.shared (renamed tensor._shared)

Most of the time people who use tensor.shared do it because they mix it up with theano.shared. By renaming it with a leading underscore, this confusion should disappear. By the way this commit also changes a few calls in Theano code from tensor.shared to theano.shared, when I thought calling tensor.shared was not intentional.
上级 86163a52
...@@ -645,7 +645,7 @@ class Test_aliasing_rules(unittest.TestCase): ...@@ -645,7 +645,7 @@ class Test_aliasing_rules(unittest.TestCase):
""" """
def shared(self, x): def shared(self, x):
return tensor.shared(x) return tensor._shared(x)
def test_shared_constructor_copies(self): def test_shared_constructor_copies(self):
# shared constructor makes copy # shared constructor makes copy
......
...@@ -333,7 +333,7 @@ def speed_neibs_wrap_centered(): ...@@ -333,7 +333,7 @@ def speed_neibs_wrap_centered():
def test_neibs_grad(): def test_neibs_grad():
shape = (2,3,4,4) shape = (2,3,4,4)
images = T.shared(numpy.arange(numpy.prod(shape), dtype='float32').reshape(shape)) images = shared(numpy.arange(numpy.prod(shape), dtype='float32').reshape(shape))
cost = T.sum(T.sqr(images2neibs(images, (2,2))), axis=[0,1]) cost = T.sum(T.sqr(images2neibs(images, (2,2))), axis=[0,1])
......
...@@ -113,7 +113,7 @@ def test_consistency_cpu_serial(): ...@@ -113,7 +113,7 @@ def test_consistency_cpu_serial():
for i in range(n_streams): for i in range(n_streams):
stream_rstate = curr_rstate.copy() stream_rstate = curr_rstate.copy()
for j in range(n_substreams): for j in range(n_substreams):
rstate = tensor.shared(numpy.array([stream_rstate.copy()], dtype='int32')) rstate = theano.shared(numpy.array([stream_rstate.copy()], dtype='int32'))
new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None, dtype=config.floatX, size=(1,)) new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None, dtype=config.floatX, size=(1,))
# Not really necessary, just mimicking rng_mrg.MRG_RandomStreams' behavior # Not really necessary, just mimicking rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate sample.rstate = rstate
...@@ -152,7 +152,7 @@ def test_consistency_cpu_parallel(): ...@@ -152,7 +152,7 @@ def test_consistency_cpu_parallel():
for j in range(1, n_substreams): for j in range(1, n_substreams):
rstate.append(rng_mrg.ff_2p72(rstate[-1])) rstate.append(rng_mrg.ff_2p72(rstate[-1]))
rstate = numpy.asarray(rstate) rstate = numpy.asarray(rstate)
rstate = tensor.shared(rstate) rstate = theano.shared(rstate)
new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None, new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None,
dtype=config.floatX, size=(n_substreams,)) dtype=config.floatX, size=(n_substreams,))
......
...@@ -2597,7 +2597,7 @@ def test_speed(): ...@@ -2597,7 +2597,7 @@ def test_speed():
if 1: if 1:
r = numpy.arange(10000).astype(theano.config.floatX).reshape(-1,10) r = numpy.arange(10000).astype(theano.config.floatX).reshape(-1,10)
shared_r = theano.shared(r) shared_r = theano.shared(r)
s_i = tensor.shared(numpy.array(1)) s_i = theano.shared(numpy.array(1))
s_rinc = tensor.inc_subtensor(shared_r[s_i], shared_r[s_i-1], s_rinc = tensor.inc_subtensor(shared_r[s_i], shared_r[s_i-1],
tolerate_inplace_aliasing=True) tolerate_inplace_aliasing=True)
theano.printing.debugprint(s_rinc) theano.printing.debugprint(s_rinc)
......
"""Define the tensor toplevel""" """Define the tensor toplevel"""
__docformat__ = "restructuredtext en" __docformat__ = "restructuredtext en"
import warnings
from basic import * from basic import *
import opt import opt
...@@ -21,7 +24,28 @@ from elemwise import \ ...@@ -21,7 +24,28 @@ from elemwise import \
DimShuffle, Elemwise, CAReduce DimShuffle, Elemwise, CAReduce
import sharedvar # adds shared-variable constructors import sharedvar # adds shared-variable constructors
from sharedvar import tensor_constructor as shared
# We import as `_shared` instead of `shared` to avoid confusion between
# `theano.shared` and `tensor._shared`.
from sharedvar import tensor_constructor as _shared
def shared(*args, **kw):
    """
    Backward-compatibility wrapper around `tensor._shared`.

    Emits a warning steering callers toward `theano.shared` (or the renamed
    `tensor._shared`), then delegates unchanged. Once the deprecation warning
    has been around for long enough, this function can be deleted.
    """
    # Deliberately NOT a DeprecationWarning: that class is ignored by
    # default since Python 2.7, and we want users to actually see this.
    deprecation_msg = ('`tensor.shared` is deprecated. You should probably be using'
                       ' `theano.shared` instead (if you *really* intend to call '
                       '`tensor.shared`, you can get rid of this warning by using '
                       '`tensor._shared`).')
    # stacklevel=2 points the warning at the caller's line, not this wrapper.
    warnings.warn(deprecation_msg, stacklevel=2)
    return _shared(*args, **kw)
import nnet # used for softmax, sigmoid, etc. import nnet # used for softmax, sigmoid, etc.
......
...@@ -12,7 +12,7 @@ from itertools import izip ...@@ -12,7 +12,7 @@ from itertools import izip
import numpy, theano import numpy, theano
#from copy import copy as python_copy #from copy import copy as python_copy
from theano import gof, shared from theano import gof
from theano.gof import Apply, Constant, Op, Type, Value, Variable from theano.gof import Apply, Constant, Op, Type, Value, Variable
......
...@@ -10,6 +10,7 @@ from numpy.testing import dec ...@@ -10,6 +10,7 @@ from numpy.testing import dec
from numpy.testing.noseclasses import KnownFailureTest from numpy.testing.noseclasses import KnownFailureTest
from theano.tensor import * from theano.tensor import *
from theano.tensor import _shared
from theano.tensor import basic as tensor # for hidden symbols from theano.tensor import basic as tensor # for hidden symbols
from theano.tensor import inplace from theano.tensor import inplace
...@@ -1796,7 +1797,7 @@ class T_subtensor(unittest.TestCase): ...@@ -1796,7 +1797,7 @@ class T_subtensor(unittest.TestCase):
""" """
This is build in a way that allow to reuse it to test the equivalent gpu op. This is build in a way that allow to reuse it to test the equivalent gpu op.
""" """
def __init__(self, name, shared=shared, def __init__(self, name, shared=_shared,
sub=theano.tensor.basic.Subtensor, sub=theano.tensor.basic.Subtensor,
inc_sub=theano.tensor.basic.IncSubtensor, inc_sub=theano.tensor.basic.IncSubtensor,
adv_sub1=theano.tensor.basic.AdvancedSubtensor1, adv_sub1=theano.tensor.basic.AdvancedSubtensor1,
...@@ -2361,7 +2362,7 @@ class T_subtensor(unittest.TestCase): ...@@ -2361,7 +2362,7 @@ class T_subtensor(unittest.TestCase):
for idx in idxs: for idx in idxs:
# Should stay on the cpu. # Should stay on the cpu.
idx_ = shared(numpy.asarray(idx)) idx_ = _shared(numpy.asarray(idx))
t = n[idx_] t = n[idx_]
gn = grad(sum(exp(t)), n) gn = grad(sum(exp(t)), n)
f = function([], [gn, gn.shape], mode=self.mode) f = function([], [gn, gn.shape], mode=self.mode)
...@@ -5175,7 +5176,7 @@ class test_size(unittest.TestCase): ...@@ -5175,7 +5176,7 @@ class test_size(unittest.TestCase):
def test_shared(self): def test_shared(self):
# NB: we also test higher order tensors at the same time. # NB: we also test higher order tensors at the same time.
y = numpy.zeros((1, 2, 3, 4), dtype=config.floatX) y = numpy.zeros((1, 2, 3, 4), dtype=config.floatX)
x = tensor.shared(y) x = theano.shared(y)
assert y.size == function([], x.size)() assert y.size == function([], x.size)()
......
...@@ -622,7 +622,7 @@ def makeSharedTester(shared_constructor_, ...@@ -622,7 +622,7 @@ def makeSharedTester(shared_constructor_,
return SharedTester return SharedTester
test_shared_options=makeSharedTester( test_shared_options=makeSharedTester(
shared_constructor_ = tensor.shared, shared_constructor_ = tensor._shared,
dtype_ = theano.config.floatX, dtype_ = theano.config.floatX,
get_value_borrow_true_alias_ = True, get_value_borrow_true_alias_ = True,
shared_borrow_true_alias_ = True, shared_borrow_true_alias_ = True,
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论