testgroup / pytensor · Commits

Commit bada8e14
Authored Nov 18, 2010 by David Warde-Farley

Merged with Fred's commit.

Parents: 894668f3, d7fa47aa

Showing 4 changed files with 118 additions and 45 deletions
MANIFEST.in                          +4   -0
setup.py                             +3   -3
theano/tensor/basic.py               +41  -23
theano/tensor/tests/test_basic.py    +70  -19
MANIFEST.in (new file, mode 0 → 100644)

+global-include *.txt
+global-include *.cu
+global-include *.cuh
+global-include *.sh
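For context, these global-include directives pull every matching file anywhere in the source tree into the source distribution. A minimal sketch of what the patterns match (illustration only, not part of the commit; it simply walks the checkout and applies the same glob patterns):

import fnmatch
import os

# The four patterns added above; global-include matches them at any depth.
patterns = ['*.txt', '*.cu', '*.cuh', '*.sh']
matched = [os.path.join(root, name)
           for root, _dirs, names in os.walk('.')
           for name in names
           if any(fnmatch.fnmatch(name, pat) for pat in patterns)]
print('\n'.join(sorted(matched)))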
setup.py

@@ -36,8 +36,8 @@ PLATFORMS = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"]
 MAJOR = 0
 MINOR = 3
 MICRO = 0
-SUFFIX = "rc1"  # Should be blank except for rc's, betas, etc.
-ISRELEASED = True
+SUFFIX = "rc3"  # Should be blank except for rc's, betas, etc.
+ISRELEASED = False

 if MICRO > 0:
     VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)

@@ -90,7 +90,7 @@ if not release:
         HG_REVISION = hg_version()
     elif os.path.exists(filename):
         # must be a source distribution, use existing version file
-        from theano.version import hg_revision as HG_REVISION
+        HG_REVISION = "RELEASE"
     else:
         HG_REVISION = "unknown-hg"
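The first hunk only shows the MICRO > 0 branch of the version assembly; the rest lives outside the hunk. A hedged sketch of how these constants typically combine (the else branch and the FULLVERSION name are assumptions for illustration, not shown in this diff):

# Values as of this commit (new side of the hunk above).
MAJOR, MINOR, MICRO = 0, 3, 0
SUFFIX = "rc3"  # Should be blank except for rc's, betas, etc.

if MICRO > 0:
    VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
else:
    # Assumed fallback for MICRO == 0; not visible in the hunk.
    VERSION = '%d.%d' % (MAJOR, MINOR)

FULLVERSION = VERSION + SUFFIX  # would give '0.3rc3' here
print(FULLVERSION)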
theano/tensor/basic.py

@@ -2995,32 +2995,50 @@ class Join(Op):
     def _make_node_internal(self, axis, tensors,
             as_tensor_variable_args, output_maker):
-        orig = as_tensor_variable_args
         if not all(targs.type.ndim for targs in as_tensor_variable_args):
             raise TypeError('Join cannot handle arguments of dimension 0. For joining scalar values, see @stack');
-
-        # When the axis may vary, no dimension can be guaranteed to be
-        # broadcastable.
-        bcastable = [False] * len(as_tensor_variable_args[0].type.broadcastable)
-
-        # When the axis is fixed, the broadcastable dimensions remain, except
-        # for the axis dimension.
-        # All concatenated elements must also have the same broadcastable
-        # dimensions.
-        if isinstance(axis, int):
-            bcasts = [x.type.broadcastable[0:axis] + \
-                      x.type.broadcastable[axis + 1:] for x in as_tensor_variable_args]
-            if not all([bcasts[0] == bc for bc in bcasts[1:]]):
-                raise ValueError('Dimensions other than the given axis must'
-                        ' have the same broadcast behavior', tensors)
-            bcastable[:] = as_tensor_variable_args[0].type.broadcastable
-            try:
-                bcastable[axis] = False
-            except IndexError, e:
-                raise ValueError('Join argument "axis" is out of range (given input dimensions)')
-            as_tensor_variable_args = [unbroadcast(x, axis) for x in as_tensor_variable_args]
-        else:
-            as_tensor_variable_args = [unbroadcast(x, *range(x.type.ndim)) for x in as_tensor_variable_args]
+        # Handle single-tensor joins immediately.
+        if len(as_tensor_variable_args) == 1:
+            bcastable = list(as_tensor_variable_args[0].type.broadcastable)
+        else:
+            # When the axis is fixed, the broadcastable dimensions remain, except
+            # for the axis dimension.
+            # All concatenated elements must also have the same broadcastable
+            # dimensions.
+            orig = as_tensor_variable_args
+            # initialize bcastable all false, and then fill in some trues with
+            # the loops -- a dimension should be broadcastable if at least one
+            # of the inputs is broadcastable on that dimension (see
+            # justification below)
+            bcastable = [False] * len(as_tensor_variable_args[0].type.broadcastable)
+            ndim = len(bcastable)
+            if isinstance(axis, int):
+                # Basically, broadcastable -> length 1, but the converse does not
+                # hold. So we permit e.g. T/F/T joins, and if they fail at runtime
+                # they fail, but if they don't then it means that the argument
+                # where that broadcastable flag was False had length 1 along this
+                # dimension, and therefore this dimension should be broadcastable
+                # for the output.
+                for x in as_tensor_variable_args:
+                    for current_axis, bflag in enumerate(x.type.broadcastable):
+                        # Not sure if this Op supports/supported/will support
+                        # negative indices, but just to be sure...
+                        if current_axis == axis % ndim:
+                            continue
+                        if bflag:
+                            bcastable[current_axis] = True
+                try:
+                    bcastable[axis] = False
+                except IndexError, e:
+                    raise ValueError('Join argument "axis" is out of range (given input dimensions)')
+                as_tensor_variable_args = [unbroadcast(x, axis) for x in as_tensor_variable_args]
+            else:
+                # These unbroadcasts are for the gradient... not sure exactly
+                # why...
+                as_tensor_variable_args = [unbroadcast(x, *range(x.type.ndim)) for x in as_tensor_variable_args]
+                # When the axis may vary, no dimension can be guaranteed to be
+                # broadcastable.
+                bcastable = [False] * len(as_tensor_variable_args[0].type.broadcastable)
         inputs = [as_tensor_variable(axis)] + as_tensor_variable_args
         if inputs[0].type not in int_types:
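The net effect of the rewritten _make_node_internal is that, for a fixed join axis, an off-axis output dimension is marked broadcastable whenever at least one input is broadcastable there, while the join axis itself is never broadcastable. A minimal sketch mirroring the tests added in this commit (assumes this revision of theano is importable):

from theano.tensor import TensorType, join

# Two 3-d inputs with mixed broadcastable flags, as in the new tests.
a = TensorType(dtype='int8', broadcastable=[0, 0, 1])()
b = TensorType(dtype='int8', broadcastable=[1, 0, 1])()

c = join(1, a, b)
# Join axis (1) is never broadcastable; axes 0 and 2 become broadcastable
# because at least one input is broadcastable on them.
assert not c.type.broadcastable[1]
assert c.type.broadcastable[0] and c.type.broadcastable[2]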
theano/tensor/tests/test_basic.py

@@ -1507,19 +1507,6 @@ class T_Join_and_Split(unittest.TestCase):
     """
     Split is tested by each verify_grad method.
     """
-    class Join1(Op):
-        def make_node(self, *inputs):
-            inputs = [as_tensor_variable(t) for t in inputs]
-            outputs = [lscalar()] + [i.type() for i in inputs]
-            return Apply(self, inputs, outputs)
-
-        def perform(self, node, inputs, outputs):
-            outputs[0][0] = 1
-            for i, o in zip(inputs, outputs[1:]):
-                o[0] = i.copy()
-
-        def grad(self, inputs, g_outputs):
-            return g_outputs[1:]
     def setUp(self):
         Join.debug = False
@@ -1654,6 +1641,70 @@ class T_Join_and_Split(unittest.TestCase):
         f = function([x, y], [b, c, a])
         assert numpy.allclose(f(4, 5), [5, 9, 4])

+    def test_broadcastable_flag_assignment_mixed_otheraxes(self):
+        """
+        Test that the broadcastable flags for the output of
+        a join operation on non-join axes are True if one or
+        more inputs is broadcastable on that dimension.
+        """
+        a = TensorType(dtype='int8', broadcastable=[0, 0, 1])()
+        b = TensorType(dtype='int8', broadcastable=[1, 0, 1])()
+        c = join(1, a, b)
+        assert c.type.broadcastable[0] and c.type.broadcastable[2]
+        assert not c.type.broadcastable[1]
+
+    def test_broadcastable_flag_assignment_mixed_thisaxes(self):
+        """
+        Test that the broadcastable flag of the join axis
+        is False when some inputs are broadcastable on that
+        dimension.
+        """
+        a = TensorType(dtype='int8', broadcastable=[0, 0, 1])()
+        b = TensorType(dtype='int8', broadcastable=[1, 0, 1])()
+        c = join(0, a, b)
+        assert not c.type.broadcastable[0]
+
+    def test_broadcastable_flags_all_broadcastable_on_joinaxis(self):
+        """
+        Test that joining together several inputs which are all
+        broadcastable on the join dimension results in the output
+        being non-broadcastable on the join dimension.
+        """
+        a = TensorType(dtype='int8', broadcastable=[1, 0, 1])()
+        b = TensorType(dtype='int8', broadcastable=[1, 0, 1])()
+        c = join(0, a, b)
+        assert not c.type.broadcastable[0]
+
+    def test_broadcastable_single_input_broadcastable_dimension(self):
+        """
+        Test that all broadcastable flags are preserved by a
+        single-input join.
+        """
+        a = join(0, TensorType(dtype='int8', broadcastable=[1, 0, 1])())
+        assert a.type.broadcastable[0]
+        assert a.type.broadcastable[2]
+        assert not a.type.broadcastable[1]
+
+    def test_broadcastable_flags_many_dims_and_inputs(self):
+        """
+        Test that the right broadcastable flags get set for a join
+        with many inputs and many input dimensions.
+        """
+        a = TensorType(dtype='int8', broadcastable=[1, 0, 1, 0, 0, 0])()
+        b = TensorType(dtype='int8', broadcastable=[1, 1, 1, 0, 0, 0])()
+        c = TensorType(dtype='int8', broadcastable=[1, 0, 0, 0, 0, 0])()
+        d = TensorType(dtype='int8', broadcastable=[1, 0, 1, 1, 0, 1])()
+        e = TensorType(dtype='int8', broadcastable=[1, 0, 1, 0, 0, 1])()
+        f = join(0, a, b, c, d, e)
+        fb = f.type.broadcastable
+        assert not fb[0] and fb[1] and fb[2] and fb[3] and not fb[4] and fb[5]
+        g = join(1, a, b, c, d, e)
+        gb = g.type.broadcastable
+        assert gb[0] and not gb[1] and gb[2] and gb[3] and not gb[4] and gb[5]
+        h = join(4, a, b, c, d, e)
+        hb = h.type.broadcastable
+        assert hb[0] and hb[1] and hb[2] and hb[3] and not hb[4] and hb[5]
+
 class test_comparison(unittest.TestCase):
     def test_gt(self):
         x, y = fvector(), fvector()
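For reference, one way to run just the Join/Split cases touched by this commit (a hypothetical convenience runner, not part of the commit; it assumes theano is on sys.path):

import unittest
from theano.tensor.tests import test_basic

# Load and run only the T_Join_and_Split test case named in the hunks above.
suite = unittest.TestLoader().loadTestsFromTestCase(test_basic.T_Join_and_Split)
unittest.TextTestRunner(verbosity=2).run(suite)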
@@ -3458,7 +3509,7 @@ def makeSharedTester(shared_constructor_,
         x = x_shared.get_value(borrow=True)
         x /= .5

         #this is not required by the contract but it is a feature we've
         #implemented for some type of SharedVariable.
         if self.get_value_borrow_true_alias:
             assert numpy.allclose(self.ref_fct(x), total_func())
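This hunk exercises the aliasing behaviour of get_value(borrow=True): for shared-variable types that alias their internal storage, an in-place update of the borrowed array shows through to the shared value. A minimal sketch, not part of the commit, assuming a plain numpy-backed shared variable:

import numpy
import theano

s = theano.shared(numpy.ones(3, dtype='float64'))
x = s.get_value(borrow=True)   # may be the internal ndarray itself
x /= .5                        # in-place change...
print(s.get_value())           # ...visible here for aliasing SharedVariable types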
@@ -3484,16 +3535,16 @@ def makeSharedTester(shared_constructor_,
             #in this case we can alias with the internal value
             x = x_shared.get_value(borrow=True, return_internal_type=True)
             assert self.test_internal_type(x)

             values_to_add = .5
             if self.add_matrix:
                 values_to_add = self.internal_type(numpy.ones(x.shape, dtype=dtype)/2) #supported for cudandarray, but not ndarray.
             x /= values_to_add #supported by ndarray and CudaNdarray

             #this is not required by the contract but it is a feature we can
             #implement for some type of SharedVariable.
             assert numpy.allclose(self.ref_fct(x), total_func())

             x = x_shared.get_value(borrow=False, return_internal_type=True)
             assert self.test_internal_type(x)
             assert x is not x_shared.container.value
@@ -3582,8 +3633,8 @@ test_shared_options=makeSharedTester(tensor.shared, 'float64',
                                      True, True, True,
                                      numpy.ndarray,
                                      lambda a: isinstance(a, numpy.ndarray),
                                      theano.tensor.sum, numpy.sum)

 if __name__ == '__main__':
     if 1: