Commit 4e6ef4aa authored by Eric Larsen, committed by Frederic

testing infer_shape: op AdvancedIncSubtensor

Parent ca52a0fc
......@@ -4298,7 +4298,14 @@ def inc_subtensor(x, y, inplace=False, set_instead_of_inc=False,
the_op = AdvancedIncSubtensor1(inplace, set_instead_of_inc=False)
return the_op(real_x, y, ilist)
elif isinstance(x.owner.op, AdvancedSubtensor):
raise NotImplementedError()
real_x = x.owner.inputs[0]
coordvec_0 = x.owner.inputs[1]
coordvec_1 = x.owner.inputs[2]
if set_instead_of_inc:
the_op = AdvancedIncSubtensor(inplace, set_instead_of_inc=True)
else:
the_op = AdvancedIncSubtensor(inplace, set_instead_of_inc=False)
return the_op(real_x, y, coordvec_0, coordvec_1)
else:
raise TypeError('x must be result of a subtensor operation')
......@@ -6056,6 +6063,8 @@ class AdvancedSubtensor(Op):
def make_node(self, x, *inputs):
x = as_tensor_variable(x)
#FIXME
# Note (9 Jul 2012): what does this 'FIXME' mean? Possibly that the
# current implementation must be generalized? Please specify.
if x.ndim == 2 and len(inputs) == 2:
ind1 = as_tensor_variable(inputs[0])
ind2 = as_tensor_variable(inputs[1])
......@@ -6131,14 +6140,28 @@ class AdvancedIncSubtensor(Op):
"""Increments a subtensor using advanced indexing.
"""
def __eq__(self, other):
return self.__class__ == other.__class__
def __init__(self, inplace=False, set_instead_of_inc=False):
self.inplace = inplace
self.set_instead_of_inc = set_instead_of_inc
def __hash__(self):
return hash(self.__class__)
return hash((type(self), self.inplace, self.set_instead_of_inc))
def __eq__(self, other):
return (type(self) == type(other)
and self.inplace == other.inplace
and self.set_instead_of_inc == other.set_instead_of_inc)
def __str__(self):
return self.__class__.__name__
return "%s{%s, %s}" % (self.__class__.__name__,
"inplace=".join(str(self.inplace)),
" set_instead_of_inc".join(str(self. set_instead_of_inc)))
def props(self):
return (self.inplace, self.set_instead_of_inc)
def __repr__(self):
return 'AdvancedIncSubtensor{%s}' % str(self.props())
def make_node(self, x, y, *inputs):
x = as_tensor_variable(x)
......@@ -6153,21 +6176,31 @@ class AdvancedIncSubtensor(Op):
[tensor(dtype=x.type.dtype,
broadcastable=x.type.broadcastable)])
raise NotImplementedError(
'Advanced indexing increment of x (of dimension %i) by y'
'Advanced indexing increment/set of x (of dimension %i) by y'
' (of dimension %i) with these argument dimensions (%s) not'
' supported yet'
% (x.ndim, y.ndim,
','.join(str(input.ndim) for input in inputs)))
raise NotImplementedError(
'Advanced indexing increment of x (of dim %i) by y (of dim %i)'
'Advanced indexing increment/set of x (of dim %i) by y (of dim %i)'
' with arguments (%s) not supported yet'
% (x.ndim, y.ndim, ','.join(str(input) for input in inputs)))
def perform(self, node, inputs, out_):
# TODO: 1. opt to make this in place 2. generalize as described in
# AdvancedSubtensor's perform TODO
out, = out_
# TODO: same thing as in AdvancedSubtensor's perform TODO
if not self.inplace:
out[0] = inputs[0].copy()
else:
raise NotImplementedError('In place computation is not'
' implemented')
if self.set_instead_of_inc:
out[0][inputs[2:]] = inputs[1]
else:
out[0][inputs[2:]] += inputs[1]
if (numpy.__version__ <= '1.6.1' and
out[0].size != numpy.uint32(out[0].size)):
warnings.warn(
......
......@@ -37,7 +37,7 @@ from theano.tensor import (_shared, wvector, bvector, autocast_float_as,
tile, patternbroadcast, Eye, Shape, Default, Dot, PermuteRowElements,
ScalarFromTensor, TensorFromScalar, dtensor4, Rebroadcast, Alloc,
dtensor3, SpecifyShape, Mean, IncSubtensor, AdvancedIncSubtensor1,
itensor3, Tile)
itensor3, Tile, AdvancedIncSubtensor)
from theano.tests import unittest_tools as utt
from theano.printing import debugprint
......@@ -6511,95 +6511,107 @@ class TestInferShape(utt.InferShapeTester):
advec = dvector()
adscal = dscalar()
admat_val = rand(5, 4)
bdvec_val = [2, 3]
aivec_val = [2, 3]
self._compile_and_check([admat, bdmat],
[set_subtensor(admat[bdvec_val], bdmat)],
[set_subtensor(admat[aivec_val], bdmat)],
[admat_val, [[1, 2, 3, 4]]], AdvancedIncSubtensor1)
bdvec_val = [1, 3, 2]
aivec_val = [1, 3, 2]
self._compile_and_check([admat, advec],
[set_subtensor(admat[bdvec_val], advec)],
[set_subtensor(admat[aivec_val], advec)],
[admat_val, [1, 2, 3, 4]], AdvancedIncSubtensor1)
bdvec_val = [0, 3, 0]
aivec_val = [0, 3, 0]
self._compile_and_check([admat, adscal],
[set_subtensor(admat[bdvec_val], adscal)],
[set_subtensor(admat[aivec_val], adscal)],
[admat_val, 1], AdvancedIncSubtensor1)
bdtens4 = dtensor4()
adtens4_val = rand(4, 3, 2, 5)
bdvec_val = [2, 3]
aivec_val = [2, 3]
self._compile_and_check([adtens4, bdtens4],
[set_subtensor(adtens4[bdvec_val], bdtens4)],
[set_subtensor(adtens4[aivec_val], bdtens4)],
[adtens4_val, [[[[1, 2, 3, 4, 5]]]]],
AdvancedIncSubtensor1)
bdvec_val = [1, 3, 2]
aivec_val = [1, 3, 2]
self._compile_and_check([adtens4, advec],
[set_subtensor(adtens4[bdvec_val], advec)],
[set_subtensor(adtens4[aivec_val], advec)],
[adtens4_val, [1, 2, 3, 4, 5]],
AdvancedIncSubtensor1)
bdvec_val = [0, 3, 0]
aivec_val = [0, 3, 0]
self._compile_and_check([adtens4, adscal],
[set_subtensor(adtens4[bdvec_val], adscal)],
[set_subtensor(adtens4[aivec_val], adscal)],
[adtens4_val, 1],
AdvancedIncSubtensor1)
## TODO: (!!) function inc_subtensor fails on line 5784 in perform in basic.py
"""
bdvec_val = [2, 3]
aivec_val = [2, 3]
self._compile_and_check([admat, bdmat],
[inc_subtensor(admat[bdvec_val], bdmat)],
[admat_val, [[1, 2, 3, 4]]], AdvancedIncSubtensor1)
[inc_subtensor(admat[aivec_val], bdmat)],
bdvec_val = [1, 3, 2]
aivec_val = [1, 3, 2]
self._compile_and_check([admat, advec],
[inc_subtensor(admat[bdvec_val], advec)],
[inc_subtensor(admat[aivec_val], advec)],
[admat_val, [1, 2, 3, 4]], AdvancedIncSubtensor1)
bdvec_val = [0, 3, 0]
aivec_val = [0, 3, 0]
self._compile_and_check([admat, adscal],
[inc_subtensor(admat[bdvec_val], adscal)],
[inc_subtensor(admat[aivec_val], adscal)],
[admat_val, 1], AdvancedIncSubtensor1)
bdtens4 = dtensor4()
adtens4_val = rand(4, 3, 2, 5)
bdvec_val = [2, 3]
aivec_val = [2, 3]
self._compile_and_check([adtens4, bdtens4],
[inc_subtensor(adtens4[bdvec_val], bdtens4)],
[inc_subtensor(adtens4[aivec_val], bdtens4)],
[adtens4_val, [[[[1, 2, 3, 4, 5]]]]],
AdvancedIncSubtensor1)
bdvec_val = [1, 3, 2]
aivec_val = [1, 3, 2]
self._compile_and_check([adtens4, advec],
[inc_subtensor(adtens4[bdvec_val], advec)],
[inc_subtensor(adtens4[aivec_val], advec)],
[adtens4_val, [[1, 2, 3, 4, 5]]],
AdvancedIncSubtensor1)
bdvec_val = [0, 3, 0]
aivec_val = [0, 3, 0]
self._compile_and_check([adtens4, adscal],
[inc_subtensor(adtens4[bdvec_val], adscal)],
[inc_subtensor(adtens4[aivec_val], adscal)],
[adtens4_val, [[1, 2, 3, 4, 5]]],
AdvancedIncSubtensor1)
"""
# AdvancedIncSubtensor
# TODO: AdvancedIncSubtensor cannot be reached through the wrappers
# set_subtensor and advanced_subtensor (see comment in Git, issue 476)
# TODO: The shape is apparently generated correctly but the final
# result is abnormal:
"""
# as an example only:
bdvec_val = [1, 3, 2]
cdvec_val = [2]
self._compile_and_check([admat, advec],
[set_subtensor(admat[bdvec_val, cdvec_val], adscal)],
[admat_val, 1], AdvancedIncSubtensor)
topo_shape:
[AdvancedIncSubtensor{Finplace=ainplace=linplace=sinplace=e, T set_instead_of_incr set_instead_of_incu set_instead_of_ince}(<TensorType(float64, matrix)>, <TensorType(float64, vector)>, TensorConstant{[1 3 2]}, TensorConstant{[0 3 3]}), Shape_i{1}(AdvancedIncSubtensor{Finplace=ainplace=linplace=sinplace=e, T set_instead_of_incr set_instead_of_incu set_instead_of_ince}.0), Shape_i{0}(AdvancedIncSubtensor{Finplace=ainplace=linplace=sinplace=e, T set_instead_of_incr set_instead_of_incu set_instead_of_ince}.0), MakeVector(Shape_i{0}.0, Shape_i{1}.0)]
shapes_function:
MakeVector [@A] '' 3
|Shape_i{0} [@B] '' 2
| |AdvancedIncSubtensor{Finplace=ainplace=linplace=sinplace=e, T set_instead_of_incr set_instead_of_incu set_instead_of_ince} [@C] '' 0
| |<TensorType(float64, matrix)> [@D]
| |<TensorType(float64, vector)> [@E]
| |TensorConstant{[1 3 2]} [@F]
| |TensorConstant{[0 3 3]} [@G]
|Shape_i{1} [@H] '' 1
|AdvancedIncSubtensor{Finplace=ainplace=linplace=sinplace=e, T set_instead_of_incr set_instead_of_incu set_instead_of_ince} [@C] '' 0
remaining op as a class: AdvancedIncSubtensor{Finplace=ainplace=linplace=sinplace=e, T set_instead_of_incr set_instead_of_incu set_instead_of_ince}
(5, 4) [5 4]
"""
# Reshape: basic 5094
aivec_val = [1, 3, 2]
bivec_val = [0, 3, 3]
advec_val = [23, 24, 25]
self._compile_and_check([admat, advec],
[set_subtensor(admat[aivec_val, bivec_val], advec)],
[admat_val, advec_val], AdvancedIncSubtensor)
# Reshape
# TODO: The shape is apparently generated correctly but the final result is abnormal:
"""
topo_shape:
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment