Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
19515020
提交
19515020
authored
5月 06, 2015
作者:
Frédéric Bastien
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #2871 from bouthilx/concat_symbolic_axis
(#2613, #2747) Add handling of negative axis for Join and GpuJoin
上级
72cf3c79
3b3a33f9
隐藏空白字符变更
内嵌
并排
正在显示
3 个修改的文件
包含
119 行增加
和
28 行删除
+119
-28
basic_ops.py
theano/sandbox/cuda/basic_ops.py
+22
-21
basic.py
theano/tensor/basic.py
+26
-5
test_basic.py
theano/tensor/tests/test_basic.py
+71
-2
没有找到文件。
theano/sandbox/cuda/basic_ops.py
浏览文件 @
19515020
...
...
@@ -3058,24 +3058,6 @@ class GpuJoin(tensor.Join, GpuOp):
as_tensor_variable_args
=
[
as_cuda_ndarray_variable
(
x
)
for
x
in
tensors
]
# Get joining axis as int
axis_int
=
0
if
not
isinstance
(
axis
,
int
):
try
:
# Note : `get_scalar_constant_value` returns a ndarray not
# an int
axis_int
=
int
(
tensor
.
get_scalar_constant_value
(
axis
))
except
tensor
.
basic
.
NotScalarConstantError
:
pass
else
:
axis_int
=
axis
if
(
axis_int
<
0
):
# Since all tensors must have the same number of dimensions,
# we simply add the number of dimensions for the first tensor
axis
=
axis
+
as_tensor_variable_args
[
0
]
.
ndim
output_maker
=
\
lambda
bcast
:
CudaNdarrayType
(
broadcastable
=
bcast
)()
...
...
@@ -3088,6 +3070,12 @@ class GpuJoin(tensor.Join, GpuOp):
axis
,
cndas
=
axis_and_tensors
[
0
],
axis_and_tensors
[
1
:]
# In case axis is numpy.int8 and has no __index__() method
axis
=
int
(
axis
)
ndim
=
tensors
[
0
]
.
ndim
if
axis
<
-
ndim
:
raise
IndexError
(
"Join axis
%
d out of bounds [0,
%
d)"
%
(
axis
,
ndim
))
if
axis
<
0
:
axis
+=
ndim
# compute size/shape
width_sum
=
0
...
...
@@ -3151,7 +3139,7 @@ class GpuJoin(tensor.Join, GpuOp):
# getting the shapes of all the involved tensors (input[0]+out)
str
=
"""
const
int axis = PyInt_AsLong((PyObject*)
%(axis)
s);
int axis = PyInt_AsLong((PyObject*)
%(axis)
s);
const int nd =
%(nd)
s;
int shape_out[nd];
int width_sum = 0;
...
...
@@ -3167,9 +3155,22 @@ class GpuJoin(tensor.Join, GpuOp):
"""
%
locals
()
# Test negative axis
str
+=
"""
if( axis < -nd ){
PyErr_Format(PyExc_IndexError,
"Join axis
%%
d out of bounds [0,
%%
d)", axis, nd);
%(fail)
s
}
if( axis < 0 ){
axis = axis + nd;
}
"""
%
locals
()
# getting the shapes of all the involved tensors (input[1:])
# + check: all input tensors have same shape as final out
# ex
e
cept for "axis" dimension
# except for "axis" dimension
# shape_%(cdna)s[nd] is initialized before, to prevent following
# error: jump to label __label_9 crosses initialization of
# shape_%(cdna)s[nd]
...
...
@@ -3283,7 +3284,7 @@ class GpuJoin(tensor.Join, GpuOp):
return
str
def c_code_cache_version(self):
    """Version key for the generated C code of this Op.

    Bumped whenever ``c_code`` changes so stale compiled modules are
    not reused from the cache.
    """
    version = (6,)
    return version
gpu_join
=
GpuJoin
()
...
...
theano/tensor/basic.py
浏览文件 @
19515020
...
...
@@ -3453,10 +3453,18 @@ class Join(Op):
# that broadcastable flag was False had length 1 along
# this dimension, and therefore this dimension should
# be broadcastable for the output.
if
axis
<
-
ndim
:
raise
IndexError
(
"Join axis
%
d out of bounds [0,
%
d)"
%
(
axis
,
ndim
))
if
axis
<
0
:
axis
+=
ndim
for
x
in
as_tensor_variable_args
:
for
current_axis
,
bflag
in
enumerate
(
x
.
type
.
broadcastable
):
# This Op supports negative axes, so only consider modulo
if
current_axis
==
axis
%
ndim
:
# Constant negative axis can no longer be negative at
# this point. It safe to compare this way.
if
current_axis
==
axis
:
continue
if
bflag
:
bcastable
[
current_axis
]
=
True
...
...
@@ -3489,14 +3497,20 @@ class Join(Op):
def perform(self, node, axis_and_tensors, out_):
    """Concatenate the input tensors along ``axis`` into the output.

    ``axis_and_tensors`` is the flat input list of this node: element 0
    is the join axis (a scalar), the remaining elements are the tensors
    to join.  Negative axes down to ``-ndim`` are accepted and passed
    through to ``numpy.concatenate`` unchanged.
    """
    out, = out_
    # Split the flat input list back into (axis, tensors).
    axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
    ndim = tensors[0].ndim
    if axis < -ndim:
        # An axis below -ndim cannot be normalized to a valid dimension.
        # NOTE(review): the message's "[0, %d)" range understates the
        # accepted negative axes ([-ndim, ndim)) -- confirm/reword.
        raise IndexError("Join axis %d out of bounds [0, %d)" % (axis,
                                                                 ndim))
    # Cast the concatenation result to the dtype declared on the
    # output variable of this node.
    out[0] = theano._asarray(numpy.concatenate(tensors, axis=axis),
                             dtype=node.outputs[0].type.dtype)
def c_code_cache_version(self):
    """Version key for the generated C code of this Op.

    Bumped whenever ``c_code`` changes so stale compiled modules are
    not reused from the cache.
    """
    version = (3,)
    return version
def
c_code
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
axis
,
tensors
=
inputs
[
0
],
inputs
[
1
:]
input_1
=
tensors
[
0
]
l
=
len
(
tensors
)
out
,
=
outputs
fail
=
sub
[
'fail'
]
...
...
@@ -3511,9 +3525,16 @@ class Join(Op):
"""
%
locals
()
code
+=
"""
//PyObject* PyArray_Concatenate(PyObject* obj, int axis)
int axis = ((
%(adtype)
s *)PyArray_DATA(
%(axis)
s))[0];
int ndim = PyArray_NDIM(
%(input_1)
s);
if( axis < -ndim ){
PyErr_Format(PyExc_IndexError,
"Join axis
%%
d out of bounds [0,
%%
d)", axis, ndim);
%(fail)
s
}
Py_XDECREF(
%(out)
s);
%(out)
s = (PyArrayObject *)PyArray_Concatenate(list,
((
%(adtype)
s *)PyArray_DATA(
%(axis)
s))[0]);
%(out)
s = (PyArrayObject *)PyArray_Concatenate(list, axis);
Py_DECREF(list);
if(!
%(out)
s){
...
...
theano/tensor/tests/test_basic.py
浏览文件 @
19515020
...
...
@@ -3565,8 +3565,8 @@ class T_Join_and_Split(unittest.TestCase):
def
test_join_matrixV
(
self
):
"""variable join axis"""
v
=
numpy
.
array
([[
.
1
,
.
2
,
.
3
],
[
.
4
,
.
5
,
.
6
]],
dtype
=
self
.
floatX
)
a
=
self
.
shared
(
v
.
copy
()
)
b
=
as_tensor_variable
(
v
.
copy
()
)
a
=
self
.
shared
(
v
)
b
=
as_tensor_variable
(
v
)
ax
=
lscalar
()
s
=
join
(
ax
,
a
,
b
)
...
...
@@ -3588,6 +3588,75 @@ class T_Join_and_Split(unittest.TestCase):
utt
.
verify_grad
(
lambda
a
,
b
:
join
(
0
,
a
,
b
),
[
v
,
2
*
v
],
mode
=
self
.
mode
)
utt
.
verify_grad
(
lambda
a
,
b
:
join
(
1
,
a
,
b
),
[
v
,
2
*
v
],
mode
=
self
.
mode
)
def test_join_matrixV_negative_axis(self):
    """Join two matrices along a symbolic (variable) negative axis.

    Builds ``join(ax, a, b)`` with ``ax`` a free lscalar, then checks
    that ax=-1 concatenates along columns, ax=-2 along rows, and that
    an out-of-bounds axis (-3 for 2-d inputs) raises IndexError at
    execution time.
    """
    v = numpy.array([[.1, .2, .3], [.4, .5, .6]],
                    dtype=self.floatX)
    a = self.shared(v)
    b = as_tensor_variable(v)
    ax = lscalar()
    s = join(ax, a, b)

    f = inplace_func([ax], [s], mode=self.mode)
    topo = f.maker.fgraph.toposort()
    # The compiled graph must still contain a Join op.
    assert [True for node in topo
            if isinstance(node.op, type(self.join_op))]

    # axis=-1 joins along the last (column) dimension.
    want = numpy.array([[.1, .2, .3, .1, .2, .3],
                        [.4, .5, .6, .4, .5, .6]])
    got = f(-1)
    assert numpy.allclose(got, want)

    # axis=-2 joins along the row dimension.
    want = numpy.array([[.1, .2, .3], [.4, .5, .6],
                        [.1, .2, .3], [.4, .5, .6]])
    got = f(-2)
    assert numpy.allclose(got, want)

    # axis=-3 is out of bounds for 2-d inputs: must raise at runtime.
    # (assertRaises instead of try/assert False: `assert` is stripped
    # under `python -O` and gives no useful failure message.)
    self.assertRaises(IndexError, f, -3)
def test_join_matrixC_negative_axis(self):
    """Join two matrices along a constant negative axis.

    Checks axis=-1 (columns) and axis=-2 (rows), verifies that a
    constant out-of-bounds axis (-3 for 2-d inputs) is rejected at
    graph-construction time, and validates the gradient of a
    negative-axis join.
    """
    v = numpy.array([[.1, .2, .3], [.4, .5, .6]],
                    dtype=self.floatX)
    a = self.shared(v)
    b = as_tensor_variable(v)

    # axis=-1 joins along the last (column) dimension.
    s = join(-1, a, b)
    f = theano.function([], [s], mode=self.mode)
    topo = f.maker.fgraph.toposort()
    # The compiled graph must still contain a Join op.
    assert [True for node in topo
            if isinstance(node.op, type(self.join_op))]
    want = numpy.array([[.1, .2, .3, .1, .2, .3],
                        [.4, .5, .6, .4, .5, .6]])
    got = f()
    assert numpy.allclose(got, want)

    # axis=-2 joins along the row dimension.
    s = join(-2, a, b)
    f = theano.function([], [s], mode=self.mode)
    topo = f.maker.fgraph.toposort()
    assert [True for node in topo
            if isinstance(node.op, type(self.join_op))]
    want = numpy.array([[.1, .2, .3], [.4, .5, .6],
                        [.1, .2, .3], [.4, .5, .6]])
    got = f()
    assert numpy.allclose(got, want)

    # A constant out-of-bounds axis must be rejected when the graph
    # is built.  (assertRaises instead of try/assert False: `assert`
    # is stripped under `python -O`.)
    self.assertRaises(IndexError, join, -3, a, b)

    utt.verify_grad(lambda a, b: join(-1, a, b), [v, 2 * v],
                    mode=self.mode)
def
test_vector_len
(
self
):
x
=
lscalar
(
'x'
)
y
=
dscalar
(
'y'
)
...
...
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论