testgroup / pytensor · Commits

Commit c71d9d5c
authored Mar 24, 2017 by amrithasuresh

1. Updated numpy as np
2. Fixed indentation

Parent: cc612fde

Showing 1 changed file with 111 additions and 111 deletions:

theano/tensor/tests/test_elemwise.py (+111, -111)
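Every hunk below applies the same two-part change named in the commit message: the bare `import numpy` becomes the conventional `import numpy as np` alias, each qualified `numpy.` reference is shortened to `np.`, and continuation lines aligned under an opening parenthesis are re-indented where the shorter name shifts that parenthesis (the "Fixed indentation" half). A minimal before/after sketch of the pattern; the `ones` call here is illustrative, not a specific line of this commit:

    # Before this commit
    import numpy
    x = numpy.ones((2, 3), dtype='float32')

    # After this commit
    import numpy as np
    x = np.ones((2, 3), dtype='float32')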
@@ -3,7 +3,7 @@ from copy import copy
 import unittest
 import math
-import numpy
+import numpy as np
 from nose.plugins.skip import SkipTest
 from nose.tools import raises
 from six.moves import xrange
@@ -47,13 +47,13 @@ class test_DimShuffle(unittest_tools.InferShapeTester):
                 x = self.type(self.dtype, ib)('x')
                 e = self.op(ib, shuffle)(x)
                 f = copy(linker).accept(FunctionGraph([x], [e])).make_function()
-                assert f(numpy.ones(xsh, dtype=self.dtype)).shape == zsh
+                assert f(np.ones(xsh, dtype=self.dtype)).shape == zsh
                 # test that DimShuffle.infer_shape work correctly
                 x = self.type(self.dtype, ib)('x')
                 e = self.op(ib, shuffle)(x)
                 f = copy(linker).accept(FunctionGraph([x],
                                                       [e.shape])).make_function()
-                assert all(f(numpy.ones(xsh, dtype=self.dtype))) == all(zsh)
+                assert all(f(np.ones(xsh, dtype=self.dtype))) == all(zsh)

             # Test when we drop a axis that is not broadcastable
             ib = [False, True, False]
@@ -65,7 +65,7 @@ class test_DimShuffle(unittest_tools.InferShapeTester):
             x = self.type(self.dtype, ib)('x')
             e = self.op(ib, (1, 2))(x)
             f = copy(linker).accept(FunctionGraph([x], [e.shape])).make_function()
-            self.assertRaises(TypeError, f, numpy.ones((2, 1, 4)))
+            self.assertRaises(TypeError, f, np.ones((2, 1, 4)))

             # Test that we can't take a dimensions multiple time
             xsh, shuffle, zsh = ((1, 1, 4), (0, 1, 2, 0), (1, 4))
@@ -94,7 +94,7 @@ class test_DimShuffle(unittest_tools.InferShapeTester):
                           ((1,), ('x', 'x'))]:
             ib = [(entry == 1) for entry in xsh]
             adtens = self.type(self.dtype, ib)('x')
-            adtens_val = numpy.ones(xsh, dtype=self.dtype)
+            adtens_val = np.ones(xsh, dtype=self.dtype)
             self._compile_and_check([adtens],
                                     [self.op(ib, shuffle)(adtens)],
                                     [adtens_val], self.op,
@@ -102,50 +102,50 @@ class test_DimShuffle(unittest_tools.InferShapeTester):

     def test_too_big_rank(self):
         x = self.type(self.dtype, broadcastable=())()
-        y = x.dimshuffle(('x',) * (numpy.MAXDIMS + 1))
+        y = x.dimshuffle(('x',) * (np.MAXDIMS + 1))
         self.assertRaises(ValueError, y.eval, {x: 0})


 class test_reduce_axes(unittest.TestCase):

     def test_sum_axes(self):
-        axes = [None, 0, 1, [0, 1], numpy.array(1),
-                [numpy.array(0), numpy.array(1)]]
+        axes = [None, 0, 1, [0, 1], np.array(1),
+                [np.array(0), np.array(1)]]
         for a in axes:
             x = tensor.matrix()
             x.sum(a)

     def test_mean_axes(self):
-        axes = [None, 0, 1, [0, 1], numpy.array(1),
-                [numpy.array(0), numpy.array(1)]]
+        axes = [None, 0, 1, [0, 1], np.array(1),
+                [np.array(0), np.array(1)]]
         for a in axes:
             x = tensor.matrix()
             x.mean(a)

     def test_max_axes(self):
-        axes = [None, 0, 1, [0, 1], numpy.array(1),
-                [numpy.array(0), numpy.array(1)]]
+        axes = [None, 0, 1, [0, 1], np.array(1),
+                [np.array(0), np.array(1)]]
         for a in axes:
             x = tensor.matrix()
             x.max(a)

     def test_min_axes(self):
-        axes = [None, 0, 1, [0, 1], numpy.array(1),
-                [numpy.array(0), numpy.array(1)]]
+        axes = [None, 0, 1, [0, 1], np.array(1),
+                [np.array(0), np.array(1)]]
         for a in axes:
             x = tensor.matrix()
             x.min(a)

     def test_argmax_axes(self):
-        axes = [None, 0, 1, [0, 1], numpy.array(1),
-                [numpy.array(0), numpy.array(1)]]
+        axes = [None, 0, 1, [0, 1], np.array(1),
+                [np.array(0), np.array(1)]]
         for a in axes:
             x = tensor.matrix()
             x.argmax(a)

     def test_var_axes(self):
-        axes = [None, 0, 1, [0, 1], numpy.array(1),
-                [numpy.array(0), numpy.array(1)]]
+        axes = [None, 0, 1, [0, 1], np.array(1),
+                [np.array(0), np.array(1)]]
         for a in axes:
             x = tensor.matrix()
             x.var(a)
@@ -166,12 +166,12 @@ class test_Broadcast(unittest.TestCase):
     linkers = [gof.PerformLinker, gof.CLinker]

     def rand_val(self, shp):
-        return numpy.asarray(numpy.random.rand(*shp),
-                             dtype=theano.config.floatX)
+        return np.asarray(np.random.rand(*shp),
+                          dtype=theano.config.floatX)

     def rand_cval(self, shp):
-        return numpy.asarray(numpy.random.rand(*shp),
-                             dtype=theano.config.floatX)
+        return np.asarray(np.random.rand(*shp),
+                          dtype=theano.config.floatX)

     def setUp(self):
         unittest_tools.seed_rng()
@@ -331,19 +331,19 @@ class test_Broadcast(unittest.TestCase):

 def reduce_bitwise_and(x, axis=-1, dtype='int8'):
-    identity = numpy.array((-1,), dtype=dtype)[0]
+    identity = np.array((-1,), dtype=dtype)[0]

     shape_without_axis = tuple([s for i, s in enumerate(x.shape) if i != axis])
     if 0 in shape_without_axis:
-        return numpy.empty(shape=shape_without_axis, dtype=x.dtype)
+        return np.empty(shape=shape_without_axis, dtype=x.dtype)

     def custom_reduce(a):
         out = identity
         for i in range(a.size):
-            out = numpy.bitwise_and(a[i], out)
+            out = np.bitwise_and(a[i], out)
         return out
-    return numpy.apply_along_axis(custom_reduce, axis, x)
+    return np.apply_along_axis(custom_reduce, axis, x)


 class test_CAReduce(unittest_tools.InferShapeTester):
@@ -384,20 +384,20 @@ class test_CAReduce(unittest_tools.InferShapeTester):
             tosum = list(range(len(xsh)))
         f = copy(linker).accept(FunctionGraph([x], [e])).make_function()
-        xv = numpy.asarray(numpy.random.rand(*xsh))
+        xv = np.asarray(np.random.rand(*xsh))

         if dtype not in tensor.discrete_dtypes:
-            xv = numpy.asarray(xv, dtype=dtype)
+            xv = np.asarray(xv, dtype=dtype)
         else:
-            xv = numpy.asarray(xv < 0.5, dtype=dtype)
+            xv = np.asarray(xv < 0.5, dtype=dtype)

         if test_nan and xv.size > 0:
             if len(xsh) > 0:
                 xv = xv.flatten()
-                xv[0] = numpy.nan
+                xv[0] = np.nan
                 xv = xv.reshape(*xsh)
             else:
-                xv = numpy.asarray(numpy.nan, dtype=dtype)
+                xv = np.asarray(np.nan, dtype=dtype)
         zv = xv
         if pre_scalar_op is not None:
             zv = Elemwise(scalar_op=pre_scalar_op)(x).eval({x: xv})
@@ -415,48 +415,48 @@ class test_CAReduce(unittest_tools.InferShapeTester):
             tosum = tuple(axis2)

         if tensor_op == tensor.all:
             for axis in reversed(sorted(tosum)):
-                zv = numpy.all(zv, axis)
+                zv = np.all(zv, axis)
             if len(tosum) == 0:
                 zv = zv != 0
         elif tensor_op == tensor.any:
             for axis in reversed(sorted(tosum)):
-                zv = numpy.any(zv, axis)
+                zv = np.any(zv, axis)
             if len(tosum) == 0:
                 zv = zv != 0
         elif scalar_op == scalar.add:
             for axis in reversed(sorted(tosum)):
-                zv = numpy.add.reduce(zv, axis)
+                zv = np.add.reduce(zv, axis)
             if dtype == 'bool':
-                # numpy.add of a bool upcast, while CAReduce don't
+                # np.add of a bool upcast, while CAReduce don't
                 zv = zv.astype(dtype)
         elif scalar_op == scalar.mul:
             for axis in reversed(sorted(tosum)):
-                zv = numpy.multiply.reduce(zv, axis)
+                zv = np.multiply.reduce(zv, axis)
         elif scalar_op == scalar.maximum:
             try:
                 for axis in reversed(sorted(tosum)):
-                    zv = numpy.maximum.reduce(zv, axis)
+                    zv = np.maximum.reduce(zv, axis)
             except ValueError:
                 numpy_raised = True
         elif scalar_op == scalar.minimum:
             try:
                 for axis in reversed(sorted(tosum)):
-                    zv = numpy.minimum.reduce(zv, axis)
+                    zv = np.minimum.reduce(zv, axis)
             except ValueError:
                 numpy_raised = True
         elif scalar_op == scalar.or_:
             for axis in reversed(sorted(tosum)):
-                zv = numpy.bitwise_or.reduce(zv, axis)
+                zv = np.bitwise_or.reduce(zv, axis)
         elif scalar_op == scalar.and_:
             for axis in reversed(sorted(tosum)):
                 zv = reduce_bitwise_and(zv, axis, dtype=dtype)
         elif scalar_op == scalar.xor:
             # There is no identity value for the xor function
             # So we can't support shape of dimensions 0.
-            if numpy.prod(zv.shape) == 0:
+            if np.prod(zv.shape) == 0:
                 continue
             for axis in reversed(sorted(tosum)):
-                zv = numpy.bitwise_xor.reduce(zv, axis)
+                zv = np.bitwise_xor.reduce(zv, axis)
         else:
             raise Exception(
                 "Test for CAReduce with scalar_op %s not implemented" %
@@ -482,7 +482,7 @@ class test_CAReduce(unittest_tools.InferShapeTester):
             try:
                 f_xv = f(xv)
                 self.assertTrue((f_xv.shape == zv.shape), (f_xv, zv))
-                self.assertTrue(numpy.allclose(f_xv, zv),
-                                (f_xv, zv, xsh, tosum))
+                self.assertTrue(np.allclose(f_xv, zv),
+                                (f_xv, zv, xsh, tosum))
             except NotImplementedError:
                 # GpuCAReduce don't implement all cases when size is 0
@@ -498,7 +498,7 @@ class test_CAReduce(unittest_tools.InferShapeTester):
             f = copy(linker).accept(FunctionGraph([x],
                                                   [e.shape])).make_function()
             if not(scalar_op in [scalar.maximum, scalar.minimum] and
-                   ((xsh == () or numpy.prod(xsh) == 0))):
+                   ((xsh == () or np.prod(xsh) == 0))):
                 try:
                     assert all(f(xv) == zv.shape)
                 except NotImplementedError:
@@ -579,7 +579,7 @@ class test_CAReduce(unittest_tools.InferShapeTester):
             x = pre_scalar_op(x)
         if tosum is None:
             tosum = list(range(len(xsh)))
-        xv = numpy.asarray(numpy.random.rand(*xsh), dtype=dtype)
+        xv = np.asarray(np.random.rand(*xsh), dtype=dtype)
         d = {}
         if pre_scalar_op is not None:
             xv = x.eval({x.owner.inputs[0]: xv})
@@ -608,8 +608,8 @@ class test_Prod(unittest.TestCase):
         # including zeros, as the case with zeros is important
         # (and special cases: 1 zero in the row, more than 1 zero in the row)
-        x_val = numpy.asarray([[.1, .2, .3], [.4, .5, .6], [.7, .8, .9]],
-                              dtype='float32')
+        x_val = np.asarray([[.1, .2, .3], [.4, .5, .6], [.7, .8, .9]],
+                           dtype='float32')

         # now with verify_grad
         unittest_tools.verify_grad(Prod(axis=1), [x_val], mode=self.mode)
@@ -623,8 +623,8 @@ class test_Prod(unittest.TestCase):
     def test_verify_grad_with_zeros(self):
         # including zeros, as the case with zeros is important
         # (and special cases: 1 zero in the row, more than 1 zero in the row)
-        x_val = numpy.asarray([[1., 2., 3.], [0., 5., 6.], [0., 0., 9.]],
-                              dtype='float32')
+        x_val = np.asarray([[1., 2., 3.], [0., 5., 6.], [0., 0., 9.]],
+                           dtype='float32')
         x = theano.tensor.dmatrix()

         # sanity check
@@ -635,7 +635,7 @@ class test_Prod(unittest.TestCase):
         # p2 = Prod(axis=1)(x2)
         # fn = theano.function([x, x2], [p - p2], mode=self.mode)
         # print("hand computed diff for each row")
-        # x2_val = numpy.asarray([[1., 2., 3.003], [0.003, 5., 6], [
+        # x2_val = np.asarray([[1., 2., 3.003], [0.003, 5., 6], [
         #     0., 0., 9.01]])
         # print(fn(x_val, x2_val))
         # fn2 = theano.function([x], [theano.tensor.grad(p.sum(), x)],
@@ -643,7 +643,7 @@ class test_Prod(unittest.TestCase):
         # print("real grad")
         # print(fn2(x_val))
         fn3 = theano.function([x], [p], mode=self.mode)
-        assert numpy.allclose(fn3(x_val), [6., 0., 0.])
+        assert np.allclose(fn3(x_val), [6., 0., 0.])

         # now with verify_grad
         unittest_tools.verify_grad(Prod(axis=1), [x_val], mode=self.mode)
@@ -665,25 +665,25 @@ class test_Prod(unittest.TestCase):
     @attr('slow')
     def test_prod_no_zeros_in_input(self):
         x = theano.tensor.dmatrix()
-        x_val = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype='float32')
+        x_val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype='float32')
         pwz = Prod(axis=1, no_zeros_in_input=True)(x)
         fn = theano.function([x], pwz, mode=self.mode)

-        assert numpy.allclose(fn(x_val), [6, 120, 504])
+        assert np.allclose(fn(x_val), [6, 120, 504])

         pwz = Prod(no_zeros_in_input=True)(x)
         g = theano.grad(pwz, x)
         gg = theano.grad(g.sum(), x)
         fn = theano.function([x], g, mode=self.mode)
-        assert numpy.allclose(fn(x_val),
-                              [[362880., 181440., 120960.],
-                               [90720., 72576., 60480.],
-                               [51840., 45360., 40320.]])
+        assert np.allclose(fn(x_val),
+                           [[362880., 181440., 120960.],
+                            [90720., 72576., 60480.],
+                            [51840., 45360., 40320.]])
         fn = theano.function([x], gg, mode=self.mode)
-        assert numpy.allclose(fn(x_val),
-                              [[663696., 422568., 301872.],
-                               [233964., 190800., 161016.],
-                               [139248., 122652., 109584.]])
+        assert np.allclose(fn(x_val),
+                           [[663696., 422568., 301872.],
+                            [233964., 190800., 161016.],
+                            [139248., 122652., 109584.]])
         unittest_tools.verify_grad(Prod(axis=1, no_zeros_in_input=True),
                                    [x_val],
                                    mode=self.mode)
@@ -697,14 +697,14 @@ class test_Prod(unittest.TestCase):
     def test_prod_without_zeros(self):
         x = theano.tensor.dmatrix()
-        x_val = numpy.array([[1, 2, 3], [0, 5, 6], [0, 0, 9]], dtype='float32')
+        x_val = np.array([[1, 2, 3], [0, 5, 6], [0, 0, 9]], dtype='float32')
         pwz = ProdWithoutZeros(axis=1)(x)
         fn = theano.function([x], pwz, mode=self.mode)
-        assert numpy.allclose(fn(x_val), [6, 30, 9])
+        assert np.allclose(fn(x_val), [6, 30, 9])

         pwz_a0 = ProdWithoutZeros(axis=0)(x)
         fn_a0 = theano.function([x], pwz_a0, mode=self.mode)
-        assert numpy.allclose(fn_a0(x_val), [1, 10, 162])
+        assert np.allclose(fn_a0(x_val), [1, 10, 162])

     @raises(theano.gradient.NullTypeGradError)
     def test_prod_without_zeros_grad(self):
@@ -716,33 +716,33 @@ class test_Prod(unittest.TestCase):
     @attr('slow')
     def test_other_grad_tests(self):
         x = theano.tensor.dmatrix()
-        x_val1 = numpy.array([[1, 2, 3], [0, 5, 6], [0, 0, 9]],
-                             dtype='float32')
-        x_val2 = numpy.array([[1, 2, 0], [0, 5, 6], [7, 8, 9], [9, 10, 0]],
-                             dtype='float32')
-        rng = rng = numpy.random.RandomState(43)
+        x_val1 = np.array([[1, 2, 3], [0, 5, 6], [0, 0, 9]],
+                          dtype='float32')
+        x_val2 = np.array([[1, 2, 0], [0, 5, 6], [7, 8, 9], [9, 10, 0]],
+                          dtype='float32')
+        rng = rng = np.random.RandomState(43)

         p = Prod(axis=1)
         grad_p = theano.tensor.grad(p(x).sum(), x)
         grad_fn = theano.function([x], grad_p, mode=self.mode)
-        assert numpy.allclose(
+        assert np.allclose(
             grad_fn(x_val1),
             [[6., 3., 2.], [30., 0., 0.], [0., 0., 0.]])
-        assert numpy.allclose(
+        assert np.allclose(
             grad_fn(x_val2),
             [[0., 0., 2.], [30., 0., 0.], [72., 63., 56.], [0., 0., 90.]])

         p_axis0 = Prod(axis=0)
         grad_p_axis0 = theano.tensor.grad(p_axis0(x).sum(), x)
         grad_fn_axis0 = theano.function([x], grad_p_axis0, mode=self.mode)
-        assert numpy.allclose(
+        assert np.allclose(
             grad_fn_axis0(x_val2),
             [[0., 400., 0.], [63., 160., 0.], [0., 100., 0.], [0., 80., 0.]])
         tensor.verify_grad(p, [x_val1], rng=rng, mode=self.mode)

     def test_mul_without_zeros_zeros(self):
-        a = numpy.zeros((3, 3))
+        a = np.zeros((3, 3))
         x = theano.tensor.dmatrix()
@@ -763,13 +763,13 @@ class test_Prod(unittest.TestCase):

 class test_IsInf_IsNan(unittest.TestCase):

     def setUp(self):
-        self.test_vals = [numpy.array(x, dtype=config.floatX) for x in [
+        self.test_vals = [np.array(x, dtype=config.floatX) for x in [
             0,
             1,
-            numpy.nan,
-            numpy.inf,
-            -numpy.inf,
-            [numpy.nan, numpy.inf, -numpy.inf, 0, 1, -1],
+            np.nan,
+            np.inf,
+            -np.inf,
+            [np.nan, np.inf, -np.inf, 0, 1, -1],
             ]]
         self.scalar = tensor.scalar()
         self.vector = tensor.vector()
@@ -784,7 +784,7 @@ class test_IsInf_IsNan(unittest.TestCase):
         theano_isfunc = theano.function([input],
                                         getattr(tensor, isfunc)(input),
                                         mode=self.mode)
-        numpy_isfunc = getattr(numpy, isfunc)
+        numpy_isfunc = getattr(np, isfunc)
         for x in self.test_vals:
             if ((x.ndim == 0 and input is not self.scalar) or
                     (x.ndim == 1 and input is not self.vector)):
@@ -830,7 +830,7 @@ class T_reduce_dtype(unittest.TestCase):
             topo = f.maker.fgraph.toposort()
             assert [n for n in topo if isinstance(n.op, self.op)], (topo,
                                                                     dtype)
-            data = numpy.random.rand(3, 4) * 10
+            data = np.random.rand(3, 4) * 10
             data = data.astype(dtype)
             f(data)
@@ -859,7 +859,7 @@ class T_reduce_dtype(unittest.TestCase):
             topo = f.maker.fgraph.toposort()
             assert [n for n in topo if isinstance(n.op, self.op)], (topo,
                                                                     dtype)
-            data = numpy.random.rand(3, 4) * 10
+            data = np.random.rand(3, 4) * 10
             data = data.astype(dtype)
             f(data)
@@ -887,7 +887,7 @@ class T_reduce_dtype(unittest.TestCase):
             topo = f.maker.fgraph.toposort()
             assert [n for n in topo if isinstance(n.op, self.op)], \
                 (topo, output_dtype)
-            data = numpy.random.rand(3, 4) * 10
+            data = np.random.rand(3, 4) * 10
             data = data.astype(input_dtype)
             if output_dtype == 'float16' and method == 'prod':
                 # We will likely get something infinite,
@@ -943,17 +943,17 @@ class T_reduce_dtype(unittest.TestCase):
     def test_reduce_precision(self):
         # Check that the default accumulator precision is sufficient
         for method in self.methods:
-            x = theano.shared(numpy.asarray([1e8, 1, -1e8],
-                                            dtype='float32'))
+            x = theano.shared(np.asarray([1e8, 1, -1e8],
+                                         dtype='float32'))
             s = getattr(x, method)()
             f = theano.function([], s, mode=self.mode)
             topo = f.maker.fgraph.toposort()
             assert [n for n in topo if isinstance(n.op, self.op)], topo
             s_val = f()
             # Use extra precision in NumPy to compute the good answer.
-            ret = getattr(numpy.asarray([1e8, 1, -1e8], dtype='float64'),
-                          method)()
-            assert numpy.allclose(s_val, ret), (s_val, ret)
+            ret = getattr(np.asarray([1e8, 1, -1e8], dtype='float64'),
+                          method)()
+            assert np.allclose(s_val, ret), (s_val, ret)


 class T_mean_dtype(unittest.TestCase):
@@ -971,7 +971,7 @@ class T_mean_dtype(unittest.TestCase):
             else:
                 assert m.dtype == dtype, (m, m.dtype, dtype)
             f = theano.function([x], m)
-            data = numpy.random.rand(3, 4) * 10
+            data = np.random.rand(3, 4) * 10
             data = data.astype(dtype)
             f(data)
@@ -1005,7 +1005,7 @@ class T_mean_dtype(unittest.TestCase):
                     input_dtype != sum_dtype):
                 continue
             f = theano.function([x], mean_var)
-            data = numpy.random.rand(3, 4) * 10
+            data = np.random.rand(3, 4) * 10
             data = data.astype(input_dtype)
             f(data)

             # Check that we can take the gradient, when implemented
@@ -1026,11 +1026,11 @@ class T_mean_dtype(unittest.TestCase):
     def test_mean_precision(self):
         # Check that the default accumulator precision is sufficient
-        x = theano.shared(numpy.asarray([1e8, 1, -1e8], dtype='float32'))
+        x = theano.shared(np.asarray([1e8, 1, -1e8], dtype='float32'))
         m = x.mean()
         f = theano.function([], m)
         m_val = f()
-        assert numpy.allclose(m_val, 1. / 3)
+        assert np.allclose(m_val, 1. / 3)


 class T_prod_without_zeros_dtype(unittest.TestCase):
@@ -1077,7 +1077,7 @@ class T_prod_without_zeros_dtype(unittest.TestCase):
             if 'complex' in dtype:
                 continue
             f = theano.function([x], p)
-            data = numpy.random.rand(2, 3) * 3
+            data = np.random.rand(2, 3) * 3
             data = data.astype(dtype)
             f(data)
@@ -1100,7 +1100,7 @@ class T_prod_without_zeros_dtype(unittest.TestCase):
                     'complex' in input_dtype):
                 continue
             f = theano.function([x], prod_woz_var)
-            data = numpy.random.rand(2, 3) * 3
+            data = np.random.rand(2, 3) * 3
             data = data.astype(input_dtype)
             f(data)
@@ -1129,7 +1129,7 @@ class T_prod_without_zeros_dtype(unittest.TestCase):
                         input_dtype != acc_dtype):
                     continue
                 f = theano.function([x], prod_woz_var)
-                data = numpy.random.rand(2, 3) * 3
+                data = np.random.rand(2, 3) * 3
                 data = data.astype(input_dtype)
                 f(data)
             else:
@@ -1143,7 +1143,7 @@ class T_prod_without_zeros_dtype(unittest.TestCase):

 class TestBitOpReduceGrad(unittest.TestCase):
     def setUp(self):
-        self.rng = numpy.random.RandomState(unittest_tools.fetch_seed())
+        self.rng = np.random.RandomState(unittest_tools.fetch_seed())

     def test_all_grad(self):
         x = tensor.bmatrix('x')
@@ -1152,11 +1152,11 @@ class TestBitOpReduceGrad(unittest.TestCase):
         f = theano.function([x], gx)
         x_random = self.rng.binomial(n=1, p=0.5, size=(5, 7)).astype('int8')
         for x_val in (x_random,
-                      numpy.zeros_like(x_random),
-                      numpy.ones_like(x_random)):
+                      np.zeros_like(x_random),
+                      np.ones_like(x_random)):
             gx_val = f(x_val)
             assert gx_val.shape == x_val.shape
-            assert numpy.all(gx_val == 0)
+            assert np.all(gx_val == 0)

     def test_any_grad(self):
         x = tensor.bmatrix('x')
@@ -1165,11 +1165,11 @@ class TestBitOpReduceGrad(unittest.TestCase):
         f = theano.function([x], gx)
         x_random = self.rng.binomial(n=1, p=0.5, size=(5, 7)).astype('int8')
         for x_val in (x_random,
-                      numpy.zeros_like(x_random),
-                      numpy.ones_like(x_random)):
+                      np.zeros_like(x_random),
+                      np.ones_like(x_random)):
             gx_val = f(x_val)
             assert gx_val.shape == x_val.shape
-            assert numpy.all(gx_val == 0)
+            assert np.all(gx_val == 0)


 class TestElemwise(unittest_tools.InferShapeTester):
@@ -1195,8 +1195,8 @@ class TestElemwise(unittest_tools.InferShapeTester):
                 dtype = theano.config.floatX
                 t_left = TensorType(dtype, [(entry == 1) for entry in s_left])()
                 t_right = TensorType(dtype, [(entry == 1) for entry in s_right])()
-                t_left_val = numpy.zeros(s_left, dtype=dtype)
-                t_right_val = numpy.zeros(s_right, dtype=dtype)
+                t_left_val = np.zeros(s_left, dtype=dtype)
+                t_right_val = np.zeros(s_right, dtype=dtype)
                 self._compile_and_check(
                     [t_left, t_right],
                     [Elemwise(scalar.add)(t_left, t_right)],
@@ -1210,7 +1210,7 @@ class TestElemwise(unittest_tools.InferShapeTester):
         s = a + b + c + d + e + f
         g = theano.function([a, b, c, d, e, f], s,
                             mode=theano.compile.Mode(linker='py'))
-        g(*[numpy.zeros(2 ** 11, config.floatX) for i in xrange(6)])
+        g(*[np.zeros(2 ** 11, config.floatX) for i in xrange(6)])


 def test_gt_grad():
@@ -1226,9 +1226,9 @@ def test_gt_grad():
     T = theano.tensor
     input_ = T.vector(dtype=floatX)
-    random_values = numpy.random.RandomState(1234).uniform(
-        low=-1, high=1, size=(2, 2))
-    W_values = numpy.asarray(random_values, dtype=floatX)
+    random_values = np.random.RandomState(1234).uniform(
+        low=-1, high=1, size=(2, 2))
+    W_values = np.asarray(random_values, dtype=floatX)
     W = theano.shared(value=W_values, name='weights')
     correct_score = T.dot(input_, W)
     wrong_input = T.vector(dtype=floatX)
@@ -1258,7 +1258,7 @@ def test_clip_grad():
     # use an x value less than y, an x value between y and z, and an x value
     # greater than z
     unittest_tools.verify_grad(func,
-                               [numpy.asarray([-1., 0.5, 2.]), 0., 1.])
+                               [np.asarray([-1., 0.5, 2.]), 0., 1.])


 def test_grad_useless_sum():
@@ -1287,16 +1287,16 @@ def test_grad_useless_sum():
         tensor.type.values_eq_approx_remove_nan)
     try:
         for test_value in test_values:
-            outputs.append(f(numpy.array([test_value]).astype('float32')))
+            outputs.append(f(np.array([test_value]).astype('float32')))
     finally:
         TensorType.values_eq_approx = old_values_eq_approx

     assert not any([isinstance(node.op, theano.tensor.elemwise.Sum)
                     for node in nodes])
-    assert numpy.allclose(outputs, [[-3.72007598e-44],
-                                    [-0.26894142],
-                                    [-0.5],
-                                    [-0.73105858],
-                                    [-1.]])
+    assert np.allclose(outputs, [[-3.72007598e-44],
+                                 [-0.26894142],
+                                 [-0.5],
+                                 [-0.73105858],
+                                 [-1.]])


 def test_elemwise_grad_broadcast():
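A rename of this shape is mechanical enough that it is often scripted rather than typed by hand. The helper below is a hypothetical sketch, not part of this commit: it rewrites the bare import and every dotted reference, and the word-boundary guard keeps identifiers such as numpy_isfunc and numpy_raised intact, just as the commit does. It also rewrites comments that mention numpy. (which the commit did too, in the bool-upcast hunk), but it does not re-align continuation lines, so the "Fixed indentation" half of the change would still need a separate pass.

    import re

    def alias_numpy(source):
        # Rewrite the bare import first...
        source = re.sub(r'^import numpy$', 'import numpy as np',
                        source, flags=re.MULTILINE)
        # ...then shorten every qualified reference. The word boundary plus
        # the lookahead for '.' matches numpy.ones but not numpy_raised.
        return re.sub(r'\bnumpy\b(?=\.)', 'np', source)

    with open('theano/tensor/tests/test_elemwise.py') as fh:
        print(alias_numpy(fh.read()))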