pytensor · Commit 8e46eac6
Authored Feb 07, 2012 by Olivier Delalleau

A few PEP8 fixes

Parent: 9dec43a3

Showing 2 changed files with 61 additions and 50 deletions:
    theano/tensor/basic.py              +43 -33
    theano/tensor/tests/test_basic.py   +18 -17
theano/tensor/basic.py
@@ -5097,6 +5097,7 @@ class AdvancedSubtensor1(Op):
     def __hash__(self):
         return hash(type(self))

     def __eq__(self, other):
         return type(self) == type(other)
@@ -5115,7 +5116,7 @@ class AdvancedSubtensor1(Op):
         x, i = inp
         out, = out_
         # Copy always implied by numpy advanced indexing semantic.
         if out[0] is not None and out[0].shape == (len(i),) + x.shape[1:]:
             o = out[0]
         else:
             o = None
@@ -5131,8 +5132,9 @@ class AdvancedSubtensor1(Op):
     def grad(self, inputs, grads):
         gz, = grads
         assert len(inputs) == 2
-        return [advanced_inc_subtensor1(zeros_like(inputs[0]), gz, inputs[1])] + [None] * (len(inputs) - 1)
+        rval1 = [advanced_inc_subtensor1(zeros_like(inputs[0]), gz, inputs[1])]
+        return rval1 + [None] * (len(inputs) - 1)

     def R_op(self, inputs, eval_points):
         if eval_points[0] is None:
@@ -5141,10 +5143,11 @@ class AdvancedSubtensor1(Op):
     def infer_shape(self, node, ishapes):
         x, ilist = ishapes
         return [ilist + x[1:]]

 advanced_subtensor1 = AdvancedSubtensor1()

 class AdvancedIncSubtensor1(Op):
     """Increments a subtensor using advanced slicing (list of index)"""
     def __init__(self, inplace=False, set_instead_of_inc=False):
@@ -5178,8 +5181,8 @@ class AdvancedIncSubtensor1(Op):
             else:
                 opname = 'increment'
             raise TypeError('cannot %s x subtensor with ndim=%s'
                             ' by y with ndim=%s to x subtensor with ndim=%s ' % (
                             opname, x_.type.ndim, y_.type.ndim))
         return Apply(self, [x_, y_, ilist_], [x_.type()])
@@ -5218,7 +5221,6 @@ class AdvancedIncSubtensor1(Op):
         return self.make_node(eval_points[0], eval_points[1],
                               *inputs[2:]).outputs

     def grad(self, inputs, grads):
         g_output, = grads
         x, y = inputs[:2]
@@ -5231,6 +5233,7 @@ class AdvancedIncSubtensor1(Op):
 advanced_inc_subtensor1 = AdvancedIncSubtensor1()

 class AdvancedSubtensor(Op):
     """Return a subtensor copy, using advanced indexing.
     """
@@ -5238,10 +5241,10 @@ class AdvancedSubtensor(Op):
     # AdvancedSubtensor(args)(self, *args),
     # if args contains and advanced indexing pattern
     def __init__(self, args):  # idx_list?
         # For the moment, __init__ will be passed the whole list of arguments
         #TODO: see what's the best solution
         self.args = args  # ?
         #FIXME: do not store variables in the class instance
@@ -5593,6 +5596,7 @@ class TensorDotGrad(Op):
 tensordot_grad = TensorDotGrad

 class TensorDot(Op):
     """Compute tensor-tensor products over the given axes.
     See numpy documentation for details.
@@ -5603,21 +5607,23 @@ class TensorDot(Op):
     @classmethod
     def parse_axes(cls, axes):
         if not numpy.isscalar(axes) and len(axes) != 2:
-            raise ValueError("Axes should be scalar valued or a list/tuple of len 2.")
+            raise ValueError("Axes should be scalar valued or a list/tuple of "
+                             "len 2.")
         if isinstance(axes, (list, tuple)):
             axes_out = []
             # cast axes[0] and axes[1] to tuples
             for i, a in enumerate(axes):
                 if numpy.isscalar(a):
                     axes_out.append((a,))
                 else:
                     axes_out.append(tuple(a))
             # these should be of same length
             if len(axes_out[0]) != len(axes_out[1]):
-                raise ValueError("Elements of the axes list/tuple need to be of the same size.")
+                raise ValueError("Elements of the axes list/tuple need to be "
+                                 "of the same size.")
             axes = tuple(axes_out)
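
Several of the fixes above split an over-long message into two adjacent string literals. A quick illustrative sketch (not part of the commit) of why this does not change the runtime message: Python joins adjacent literals at compile time.

    # Adjacent string literals are concatenated before the code runs.
    msg = ("Axes should be scalar valued or a list/tuple of "
           "len 2.")
    assert msg == "Axes should be scalar valued or a list/tuple of len 2."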
@@ -5634,22 +5640,23 @@ class TensorDot(Op):
     def make_node(self, x, y):
         op = self
         if isinstance(self.axes, int):
             axes = [range(x.ndim - self.axes, x.ndim), range(self.axes)]
             op = TensorDot(axes)
         axesdim = numpy.size(op.axes) / 2
         x, y = map(as_tensor_variable, [x, y])
         if axesdim > x.type.ndim or axesdim > y.type.ndim:
-            raise TypeError('Cannot sum over more dimensions than input. %i > %i,%i' %
-                    axesdim, x.type.ndim, y.type.ndim)
+            raise TypeError('Cannot sum over more dimensions than input. '
+                            '%i > %i,%i' %
+                            (axesdim, x.type.ndim, y.type.ndim))
         outdim = x.type.ndim + y.type.ndim - 2 * axesdim
         output = tensor(dtype=scal.upcast(x.dtype, y.dtype),
-                        broadcastable=[False] * outdim);
+                        broadcastable=[False] * outdim)
         return Apply(op, inputs=[x, y], outputs=[output, ])

     def perform(self, node, inp, out):
         x, y = inp
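
The TypeError change above is more than a line wrap. In the old code the `%` operator bound only to the format string and `axesdim`, so the remaining values became extra arguments to TypeError and, with three `%i` placeholders but a single operand, the formatting itself would fail whenever this error path ran. A minimal sketch of the difference (illustrative values, not from the commit):

    # '%' binds tighter than the comma: without parentheses only the first
    # value reaches the format operator, which then complains.
    try:
        'Cannot sum: %i > %i,%i' % 3             # old pattern
    except TypeError as e:
        print(e)                                  # not enough arguments for format string

    print('Cannot sum: %i > %i,%i' % (3, 2, 2))   # new pattern -> Cannot sum: 3 > 2,2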
@@ -5657,7 +5664,8 @@ class TensorDot(Op):
         try:
             z[0] = numpy.asarray(numpy.tensordot(x, y, self.axes))
         except ValueError, e:
-            # The error raised by numpy has no shape information, we mean to add that
+            # The error raised by numpy has no shape information, we mean to
+            # add that.
             e.args = e.args + (x.shape, y.shape, self.axes)
             raise
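
For context, the code above appends the operand shapes to the NumPy ValueError and re-raises it with a bare `raise`, which keeps the original traceback. A standalone sketch of that pattern (function name is illustrative, not Theano's):

    import numpy

    def tensordot_with_shapes(x, y, axes):
        # Illustrative helper, not part of the commit.
        try:
            return numpy.tensordot(x, y, axes)
        except ValueError as e:
            # NumPy's message carries no shape information; append it so the
            # re-raised error is easier to debug.  A bare `raise` preserves
            # the original traceback.
            e.args = e.args + (x.shape, y.shape, axes)
            raise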
@@ -5670,13 +5678,15 @@ class TensorDot(Op):
     def __str__(self):
         return "tensordot"

 def tensordot(x, y=None, axes=2):
-    if y == None:
-        raise NotImplementedError('The interface to tensordot has changed from '\
-            'tensor.tensordot(axes)(x,y) to tensor.tensordot(x,y,axes). Please '\
-            'modify your code accordingly.')
+    if y is None:
+        raise NotImplementedError(
+            'The interface to tensordot has changed from '
+            'tensor.tensordot(axes)(x,y) to tensor.tensordot(x,y,axes). '
+            'Please modify your code accordingly.')
     if x.ndim == 0 or y.ndim == 0:
         raise ValueError('Cannot perform tensordot of 0-d inputs.')
     axes = TensorDot.parse_axes(axes)
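
The `y == None` to `y is None` change is the PEP8-recommended identity test: `==` dispatches to `__eq__`, which a class is free to override, while `is` always compares against the single None object. A small illustrative sketch (not from the commit):

    class AlwaysEqual(object):
        # Deliberately pathological __eq__, to show why identity tests are safer.
        def __eq__(self, other):
            return True

    w = AlwaysEqual()
    print(w == None)   # True  -- __eq__ decides
    print(w is None)   # False -- w is not the None singleton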
@@ -5685,16 +5695,16 @@ def tensordot(x, y=None, axes=2):
     if numpy.isscalar(axes):
         if axes >= x.ndim or axes >= y.ndim:
             raise ValueError('axes should be smaller than the dimension of '\
                     'x and y (x.ndim=%i, y.ndim=%i)' % (x.ndim, y.ndim))
     elif isinstance(axes, (list, tuple)):
         if isinstance(axes[0], (list, tuple)) and \
                 (len(axes[0]) > x.ndim or (numpy.array(axes[0]) >= x.ndim).any()):
             raise ValueError('axes[0] should be array_like, of length smaller'\
                     ' than the dimension of x (x.ndim=%i, len(axes[0])=%i).' %
                     (x.ndim, len(axes[0])))
         if isinstance(axes[1], (list, tuple)) and \
                 (len(axes[1]) > y.ndim or (numpy.array(axes[1]) >= y.ndim).any()):
             raise ValueError('axes[1] should be array_like, of length smaller'\
                     'than the dimension of y (y.ndim=%i, len(axes[1])=%i).' %
...
theano/tensor/tests/test_basic.py
@@ -2674,7 +2674,7 @@ class T_subtensor(unittest.TestCase):
         #single element
         utt.verify_grad(
             inc_slice(2, 1),
             (numpy.asarray([[0, 1], [2, 3], [4, 5.]]), numpy.asarray(9.),))

     def test_advanced_inc_and_set(self):
         """
@@ -5257,7 +5257,7 @@ class test_broadcast(unittest.TestCase):
 def test_len():
     for shape in [(5,), (3, 4), (7, 4, 6)]:
         x = tensor.tensor(dtype='floatX', broadcastable=(False,) * len(shape))
         try:
             len(x)
             assert False, "Expected an error"
@@ -5272,12 +5272,12 @@ def test_mod():
     as Python. That is what we want.
     """
     x, y = fscalars('xy')
     fn = gof.DualLinker().accept(gof.Env([x, y], [x % y])).make_function()
     for a, b in ((0, 1), (1, 1), (0, -1), (1, -1), (-1, -1),
                  (1, 2), (-1, 2), (1, -2), (-1, -2),
                  (5, 3), (-5, 3), (5, -3), (-5, -3)
                  ):
         assert fn(a, b) == a % b, (a,)

 def test_mod_compile():
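
test_mod checks that the compiled `%` agrees with Python on every sign combination; in Python the result of `%` takes the sign of the divisor, which is why the negative pairs are the interesting cases. A quick illustrative check (not part of the test file):

    # Python's % result carries the sign of the divisor.
    pairs = ((5, 3), (-5, 3), (5, -3), (-5, -3))
    print([a % b for a, b in pairs])   # [2, 1, -1, -2]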
@@ -5301,14 +5301,14 @@ def test_mod_compile():
     shape = x.shape
     out = tensor.switch(tensor.eq(3 % x.shape[0], 0), y, y[:-1])
     f = theano.function([x, y], out)

 def test_unalign():
     if config.floatX == 'float64':
         dtype = "b1,f8"
     else:
         dtype = "b1,f4"
     a = numpy.empty(1e4, dtype=dtype)['f1']
     b = numpy.empty(1e4, dtype=dtype)['f1']
@@ -5316,24 +5316,25 @@ def test_unalign():
     assert not b.flags.aligned
     a[:] = rand(len(a))
     b[:] = rand(len(b))
     out_numpy = 2 * a + 3 * b

     av, bv = tensor.vectors('ab')
     f = theano.function([av, bv], 2 * av + 3 * bv)
     f.maker.env.toposort()
     # FAST_COMPILE use the python code that support unaligned data
     # The DebugMode make a copy of the inputs, so they will be aligned.
     should_raise = theano.config.mode not in ["FAST_COMPILE", "DebugMode", "DEBUG_MODE"]
     try:
         out_theano = f(a, b)
         assert not a.flags.aligned
         assert not b.flags.aligned
         assert numpy.allclose(out_numpy, out_theano)
         if should_raise:
             raise Exception("Expected an error from Theano!")
     except NotImplementedError, e:
         if not should_raise:
-            raise Exception("Theano raised an exception when none was expected")
+            raise Exception("Theano raised an unexpected exception")

 def test_dimshuffle_duplicate():
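
test_unalign obtains unaligned inputs by viewing the 'f1' field of a packed structured array: the leading one-byte field pushes the float64 field to offset 1, off its natural 8-byte boundary. A hedged sketch of that construction (array size and printout are illustrative, not from the test):

    import numpy

    # Packed record: 1 byte ('b1') followed by a float64 ('f8'), so the
    # float field starts at offset 1 and every element is misaligned.
    a = numpy.empty(100, dtype="b1,f8")['f1']
    print(a.flags.aligned)   # typically False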