提交 1adc5162 authored 作者: Xavier Bouthillier's avatar Xavier Bouthillier

Merge pull request #2814 from julianser/Fix_2613

Implemented fix for issue #2613: Concatenation bug for negative axes on ...
......@@ -3041,6 +3041,24 @@ class GpuJoin(tensor.Join, GpuOp):
as_tensor_variable_args = [as_cuda_ndarray_variable(x)
for x in tensors]
# Get joining axis as int
axis_int = 0
if not isinstance(axis, int):
try:
# Note : `get_scalar_constant_value` returns a ndarray not
# an int
axis_int = int(tensor.get_scalar_constant_value(axis))
except tensor.basic.NotScalarConstantError:
pass
else:
axis_int = axis
if (axis_int < 0):
# Since all tensors must have the same number of dimensions,
# we simply add the number of dimensions for the first tensor
axis = axis + as_tensor_variable_args[0].ndim
output_maker = \
lambda bcast: CudaNdarrayType(broadcastable=bcast)()
......
......@@ -285,6 +285,55 @@ def test_opt_gpujoin_joinvectors_elemwise_then_minusone():
assert numpy.allclose(numpy.asarray(f()), concat)
def test_opt_gpujoin_joinvectors_negativeaxes():
    """
    Test that concatenation along a negative axis works as expected.

    Covers the fix for issue #2613: ``T.concatenate`` with a negative
    ``axis`` must join correctly on 1-D and 2-D inputs, and must raise
    ``ValueError`` when the non-join dimensions mismatch or the axis is
    out of range.
    """
    rng = numpy.random.RandomState(22)

    # 1-D case: axis=-1 is the only valid join axis for vectors.
    x1 = rng.rand(5)
    x2 = rng.rand(10)
    t1 = shared(numpy.asarray(x1, theano.config.floatX))
    t2 = shared(numpy.asarray(x2, theano.config.floatX))
    t = T.concatenate([t1, t2], axis=-1)
    f = theano.function(inputs=[], outputs=t)
    assert numpy.allclose(f(), numpy.concatenate([x1, x2], axis=-1))

    # 2-D case: axis=-2 joins along the first dimension.
    x1 = rng.rand(5, 10)
    x2 = rng.rand(10, 10)
    t1 = shared(numpy.asarray(x1, theano.config.floatX))
    t2 = shared(numpy.asarray(x2, theano.config.floatX))
    t = T.concatenate([t1, t2], axis=-2)
    f = theano.function(inputs=[], outputs=t)
    assert numpy.allclose(f(), numpy.concatenate([x1, x2], axis=-2))

    # Error cases: axis=-1 has mismatched non-join dimensions (5 vs 10),
    # and axis=-3 exceeds the tensors' number of dimensions; both must
    # raise ValueError.  An explicit `raise AssertionError` is used
    # instead of `assert False` so the failure check is not stripped
    # when running under `python -O`.
    for bad_axis in (-1, -3):
        try:
            t = T.concatenate([t1, t2], axis=bad_axis)
            f = theano.function(inputs=[], outputs=t)
            f()
        except ValueError:
            pass
        else:
            raise AssertionError(
                "concatenate(axis=%d) should have raised ValueError"
                % bad_axis)
def test_local_gpu_subtensor():
# Test shared forced on CPU.
t = tensor._shared(numpy.zeros(20, "float32"))
......@@ -647,4 +696,5 @@ if __name__ == '__main__':
test_gpualloc()
test_opt_gpujoin_onlyajoin()
test_opt_gpujoin_joinvectors_elemwise_then_minusone()
test_opt_gpujoin_joinvectors_negativeaxes()
......@@ -3455,8 +3455,7 @@ class Join(Op):
# be broadcastable for the output.
for x in as_tensor_variable_args:
for current_axis, bflag in enumerate(x.type.broadcastable):
# Not sure if this Op supports/supported/will support
# negative indices, but just to be sure...
# This Op supports negative axes, so only consider modulo
if current_axis == axis % ndim:
continue
if bflag:
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论