提交 c8ed209a，作者：Frederic

New internal interface to the sparse grad of AdvancedSubtensor1

上级 9345964d
......@@ -176,10 +176,8 @@ def sparse_grad(var):
"""
assert isinstance(var.owner.op, tensor.AdvancedSubtensor1)
# TODO change the internal representation!!!
# It work, but bad as out.type is shared with var.type!!!
var.owner.inputs[0].tag.sparse_grad = True
return var
ret = var.owner.op.__class__(sparse_grad=True)(*var.owner.inputs)
return ret
import theano.tests
......
......@@ -450,6 +450,14 @@ class TestConstructSparseFromList(unittest.TestCase):
g = theano.grad(sub.sum(), m)
assert isinstance(g.owner.op, ConstructSparseFromList)
# Test that we create a sparse grad when asked
# Op INTERFACE
m = theano.tensor.matrix()
v = theano.tensor.ivector()
sub = theano.tensor.AdvancedSubtensor1(sparse_grad=True)(m, v)
g = theano.grad(sub.sum(), m)
assert isinstance(g.owner.op, ConstructSparseFromList)
# Test the sparse grad
valm = numpy.random.rand(5, 4).astype(config.floatX)
valv = numpy.random.random_integers(0, 4, 10)
......
......@@ -706,6 +706,11 @@ class TensorType(Type):
self.name = name
self.numpy_dtype = numpy.dtype(self.dtype)
self.sparse_grad = sparse_grad
if sparse_grad:
warnings.warn(
"DEPRECATION WARNING: You use an old interface to"
" AdvancedSubtensor1 sparse_grad. Now use"
" theano.sparse_grad(a_tensor[an_int_vector]).")
def filter(self, data, strict=False, allow_downcast=None):
"""Convert `data` to something which can be associated to a
......@@ -7153,6 +7158,9 @@ def inverse_permutation(perm):
class AdvancedSubtensor1(Op):
"""Implement x[ilist] where ilist is a vector of integers."""
def __init__(self, sparse_grad=False):
    # If True, grad() returns a sparse gradient (ConstructSparseFromList)
    # for x instead of a dense one (see the `if sparse or self.sparse_grad`
    # branch in grad). Normally enabled through
    # theano.sparse_grad(a_tensor[an_int_vector]) rather than directly.
    self.sparse_grad = sparse_grad
def __hash__(self):
    # NOTE(review): the hash ignores self.sparse_grad, so two ops differing
    # only in sparse_grad hash identically. That is legal only if __eq__
    # (not visible in this diff) also treats them as equal -- but then the
    # merge optimizer could conflate an op producing a dense grad with one
    # producing a sparse grad. Confirm __eq__/__hash__ both account for
    # sparse_grad, or neither does, and fix the pair together.
    return hash(type(self))
......@@ -7212,8 +7220,14 @@ class AdvancedSubtensor1(Op):
x, ilist = inputs
gz, = grads
assert len(inputs) == 2
if x.type.sparse_grad:
sparse = False
if getattr(x.type, 'sparse_grad', False):
sparse = True
warnings.warn(
"DEPRECATION WARNING: AdvancedSubtensor1, you are using"
" an old interface to the sparse grad. You should use"
" theano.sparse_grad(a_tensor[an_int_vector]). ")
if sparse or self.sparse_grad:
if x.type.ndim != 2:
raise TypeError(
"AdvancedSubtensor1: you can't take the sparse grad"
......
Markdown 格式
0%
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
请 注册 或者 登录 后发表评论