提交 ae7c5a2a 作者: Ying Zhang 提交者: Arnaud Bergeron

make it work with new axis

上级 75e6dc8d
......@@ -491,7 +491,6 @@ class GpuAdvancedSubtensor(HideC, tensor.AdvancedSubtensor):
idx = inputs[1:]
assert len(idx) >= x.ndim
dims = len(idx)
# step 1: find smallest index
for k, i in enumerate(idx):
if isinstance(i, numpy.ndarray):
......@@ -510,15 +509,19 @@ class GpuAdvancedSubtensor(HideC, tensor.AdvancedSubtensor):
dimshuffle_info = []
new_ind = []
k = 0
new_axis = x.ndim
dimshuffle_info_append = []
new_ind_append = []
for i in range(0, a):
if isinstance(ind[i], slice):
dimshuffle_info.append(k)
new_ind.append(ind[i])
dimshuffle_info_append.append(k)
new_ind_append.append(ind[i])
k += 1
elif ind[i] is None:
dimshuffle_info.append('x')
new_ind.append(slice(None))
dimshuffle_info_append.append(new_axis)
new_axis += 1
new_ind_append.append(slice(None))
dimshuffle_info.append(k)
new_ind.append(ind[a])
......@@ -533,15 +536,19 @@ class GpuAdvancedSubtensor(HideC, tensor.AdvancedSubtensor):
idx_2.append(ind[i])
k += 1
elif ind[i] is None:
idx_3.append('x')
new_ind.append(slice(None))
idx_1.append(new_axis)
new_axis += 1
idx_2.append(slice(None))
else:
idx_3.append(k)
new_ind.append(ind[i])
k += 1
valid_end = a + len(idx_3) + 1
valid_end = len(new_ind)
dimshuffle_info.extend(idx_3)
dimshuffle_info.extend(dimshuffle_info_append)
new_ind.extend(new_ind_append)
new_ind += idx_2
dimshuffle_info.extend(idx_1)
......@@ -551,40 +558,41 @@ class GpuAdvancedSubtensor(HideC, tensor.AdvancedSubtensor):
new_ind.append(ind[i])
k += 1
elif ind[i] is None:
dimshuffle_info.append('x')
dimshuffle_info.append(new_axis)
new_axis += 1
new_ind.append(slice(None))
return dimshuffle_info, new_ind, valid_end
(dimshuffle_idx, new_ind,
end_) = get_indices(start, end, idx)
shape = x.shape + (1, ) * (len(dimshuffle_idx) - x.ndim)
x = x.reshape(shape)
x = x.transpose(*dimshuffle_idx)
# step 3: partial flattening
start_ = start
shape = (x.shape[: start_] +
(numpy.prod(x.shape[start: end_]),) +
shape = (x.shape[: 0] +
(numpy.prod(x.shape[0: end_]),) +
x.shape[end_:])
input_flat = numpy.reshape(x, shape)
input_flat = x.reshape(shape)
# step 4: build the strides
strides = [1]
for i in range(start_, end_ - 1)[::-1]:
for i in range(0, end_ - 1)[::-1]:
stride = x.shape[i + 1] * strides[-1]
strides.append(stride)
# step 5: build the indices into x_flat
items = [new_ind[i] if isinstance(new_ind[i], numpy.ndarray)
else 0 for i in range(start_, end_)]
else 0 for i in range(0, end_)]
new_idx = numpy.sum([i * j for i, j
in zip(items, strides[::-1])],
axis=0)
# step 6: advanced slicing
out_flat = input_flat.take1(new_idx.flatten())
out_flat = input_flat.take1(pygpu.asarray(new_idx.flatten(),
context=input_flat.context))
# step 7: reshape into right shape
out_flat_shp = (x.shape[:start_] +
new_idx.shape + x.shape[end_:]).astype('int32')
o = out_flat.reshape(out_flat_shp,
ndim=dims + new_idx.ndim - 2)
idx_ = (new_ind[:start_] + [slice(None)] *
(new_idx.ndim - 2 + end_ - start_) + new_ind[end_:])
out_flat_shp = new_idx.shape + x.shape[end_:]
o = out_flat.reshape(out_flat_shp)
idx_ = ([slice(None)] * (new_idx.ndim - 2 + end_) +
new_ind[end_:])
out[0] = o.__getitem__(idx_)
......
......@@ -13,6 +13,7 @@ from ..subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedSubtensor,
GpuAdvancedIncSubtensor1)
from ..type import gpuarray_shared_constructor
from .config import mode_with_gpu
......@@ -115,7 +116,7 @@ def test_adv_subtensor():
shared = gpuarray_shared_constructor
xval = numpy.arange(numpy.prod(shp), dtype=theano.config.floatX).reshape(shp)
idx1, idx2 = tensor.ivectors('idx1', 'idx2')
idxs = [idx1, slice(0, 2, 1), idx2]
idxs = [idx1, None, slice(0, 2, 1), idx2, None]
x = shared(xval, name='x')
expr = x[idxs]
f = theano.function([idx1, idx2], expr, mode=mode_with_gpu)
......@@ -124,5 +125,5 @@ def test_adv_subtensor():
idx1_val = [0, 1]
idx2_val = [0, 1]
rval = f(idx1_val, idx2_val)
rep = xval[idx1_val, slice(0, 2, 1), idx2_val]
rep = xval[idx1_val, None, slice(0, 2, 1), idx2_val, None]
assert numpy.allclose(rval, rep)
......@@ -20,7 +20,7 @@ from theano.tensor.basic import alloc
from theano.tensor.basic import (addbroadcast, clip, get_scalar_constant_value,
ARange, TensorType, NotScalarConstantError)
from theano.tensor.elemwise import DimShuffle
from theano.tensor.type_other import NoneConst, SliceType, make_slice
from theano.tensor.type_other import NoneConst, SliceType, NoneTypeT, make_slice
from theano import config
inplace_increment = None
......@@ -2077,6 +2077,8 @@ def as_index_variable(idx):
return make_slice(idx)
if isinstance(idx, gof.Variable) and isinstance(idx.type, SliceType):
return idx
if isinstance(idx, gof.Variable) and isinstance(idx.type, NoneTypeT):
return idx
idx = theano.tensor.as_tensor_variable(idx)
if idx.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('index must be integers')
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论