testgroup / pytensor / Commits

Commit 8910e6eb
Authored Mar 01, 2013 by lamblin

Merge pull request #1261 from nouiz/adv_sub1

Adv sub1: allow broadcasted index vector.

Parents: ad8a1755 477370da
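In effect, the merged change relaxes the index check in AdvancedSubtensor1 (and its GPU counterpart GpuAdvancedSubtensor1) from "the index type must be a non-broadcastable vector" to "the index must be 1-dimensional", so a broadcastable (statically length-1) integer vector is now accepted. A minimal sketch of the newly allowed case, modeled on the test added below (variable names are illustrative, and it assumes a Theano build that includes this commit):

    import numpy
    import theano
    import theano.tensor as tensor

    # An int64 vector whose single dimension is broadcastable, i.e.
    # statically known to have length 1.  Before this change, indexing
    # with it raised TypeError('index must be vector').
    idx = tensor.TensorType(dtype='int64', broadcastable=(True,))()
    x = theano.shared(numpy.ones((4, 3)) * 5)

    y = x[idx]                     # builds an AdvancedSubtensor1 node
    f = theano.function([idx], y)
    print(f([0]))                  # row 0 of x, returned with shape (1, 3)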
Showing 3 changed files with 86 additions and 62 deletions.
theano/sandbox/cuda/basic_ops.py   +1 -1
theano/tensor/basic.py             +1 -1
theano/tensor/tests/test_basic.py  +84 -60
theano/sandbox/cuda/basic_ops.py
@@ -2322,7 +2322,7 @@ class GpuAdvancedSubtensor1(tensor.AdvancedSubtensor1, GpuOp):
         ilist_ = tensor.as_tensor_variable(ilist)
         if ilist_.type.dtype[:3] not in ('int', 'uin'):
             raise TypeError('index must be integers')
-        if ilist_.type.broadcastable != (False,):
+        if ilist_.type.ndim != 1:
             raise TypeError('index must be vector')
         if x_.type.ndim == 0:
             raise TypeError('cannot index into a scalar')
...
theano/tensor/basic.py
@@ -6896,7 +6896,7 @@ class AdvancedSubtensor1(Op):
         ilist_ = as_tensor_variable(ilist)
         if ilist_.type.dtype[:3] not in ('int', 'uin'):
             raise TypeError('index must be integers')
-        if ilist_.type.broadcastable != (False,):
+        if ilist_.type.ndim != 1:
             raise TypeError('index must be vector')
         if x_.type.ndim == 0:
             raise TypeError('cannot index into a scalar')
...
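The CPU and GPU ops get the identical relaxation: the old test ilist_.type.broadcastable != (False,) rejected any 1-d index whose dimension was marked broadcastable, while the new test ilist_.type.ndim != 1 only rules out non-vectors. A small sketch of the distinction at the TensorType level (illustrative only):

    from theano import tensor

    vec = tensor.TensorType('int64', broadcastable=(False,))()   # general vector
    bvec = tensor.TensorType('int64', broadcastable=(True,))()   # length-1 vector

    assert vec.type.ndim == 1 and bvec.type.ndim == 1   # both pass the new check
    assert bvec.type.broadcastable != (False,)          # bvec failed the old check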
theano/tensor/tests/test_basic.py
@@ -39,13 +39,12 @@ from theano.tensor import (_shared, wvector, bvector, autocast_float_as,
                           get_scalar_constant_value, ivector, reshape, scalar_from_tensor, scal,
                           iscalars, arange, dscalars, fvector, imatrix, numeric_grad,
                           opt, ComplexError, lvector, true_div, max, min, Split, roll,
-                          tile, patternbroadcast, Eye, Shape, Default, Dot, PermuteRowElements,
+                          tile, patternbroadcast, Eye, Shape, Dot, PermuteRowElements,
                           ScalarFromTensor, TensorFromScalar, dtensor4, Rebroadcast, Alloc,
                           dtensor3, SpecifyShape, Mean, IncSubtensor, AdvancedIncSubtensor1,
                           itensor3, Tile, AdvancedIncSubtensor, switch, Diagonal, Diag,
                           nonzero, flatnonzero, nonzero_values)
 from theano.tests import unittest_tools as utt
-from theano.printing import debugprint
 imported_scipy_special = False
...
@@ -617,10 +616,10 @@ SubTester = makeBroadcastTester(
 SubInplaceTester = makeBroadcastTester(op=inplace.sub_inplace,
                                        expected=lambda x, y: x - y,
                                        good=_good_broadcast_binary_normal,
                                        bad_build=_bad_build_broadcast_binary_normal,
                                        bad_runtime=_bad_runtime_broadcast_binary_normal,
                                        grad=_grad_broadcast_binary_normal,
                                        inplace=True)

 SwitchTester = makeBroadcastTester(
...
@@ -902,7 +901,8 @@ _good_broadcast_unary_normal_float_no_complex = copymod(
     without=['complex'])

 _good_broadcast_unary_normal = dict(
     normal=[numpy.asarray(rand_ranged(-5, 5, (2, 3)), dtype=config.floatX)],
     integers=[randint_ranged(-5, 5, (2, 3))],
     corner_case=[corner_case],
     complex=[randcomplex(2, 3)],
...
@@ -944,9 +944,9 @@ _good_broadcast_unary_normal_abs = copy(_good_broadcast_unary_normal)
 del _good_broadcast_unary_normal_abs['complex']

 AbsInplaceTester = makeBroadcastTester(op=inplace.abs__inplace,
                                        expected=lambda x: numpy.abs(x),
                                        good=_good_broadcast_unary_normal_abs,
                                        grad=_grad_broadcast_unary_normal,
                                        inplace=True)

 NegTester = makeBroadcastTester(op=tensor.neg,
                                 expected=lambda x: -x,
...
@@ -964,9 +964,9 @@ SgnTester = makeBroadcastTester(op=tensor.sgn,
                                 grad=_grad_broadcast_unary_normal,)

 SgnInplaceTester = makeBroadcastTester(op=inplace.sgn_inplace,
                                        expected=numpy.sign,
                                        good=_good_broadcast_unary_normal_no_complex,
                                        grad=_grad_broadcast_unary_normal,
                                        inplace=True)

 IntDivTester = makeBroadcastTester(
...
@@ -1602,16 +1602,18 @@ DotTester = makeTester(name='DotTester',
                         rand(5, 7)),
                 mixed2=(rand(5).astype('float64'),
                         rand(5, 7)),
-                complex1=(randcomplex(5, 7), randcomplex(7)),
-                complex2=(rand(5, 7), randcomplex(7)),
-                complex3=(randcomplex(5, 7), rand(7)),
-                empty1=(numpy.asarray([]), numpy.asarray([])),
-                empty2=(rand(5, 0), rand(0, 2)),
-                empty3=(rand(0, 5), rand(5, 0)),
+                complex1=(randcomplex(5, 7),
+                          randcomplex(7)),
+                complex2=(rand(5, 7), randcomplex(7)),
+                complex3=(randcomplex(5, 7), rand(7)),
+                empty1=(numpy.asarray([]),
+                        numpy.asarray([])),
+                empty2=(rand(5, 0), rand(0, 2)),
+                empty3=(rand(0, 5), rand(5, 0)),
                 ),
         bad_build=dict(),
         bad_runtime=dict(bad1=(rand(5, 7), rand(5, 7)),
                          bad2=(rand(5, 7), rand(8, 3))))

 def _numpy_second(x, y):
...
@@ -1678,7 +1680,8 @@ SecondSameRankTester = makeTester(
                  multi_dtype_checks((4, 5), (5, 4)),
                  multi_dtype_checks((1, 5), (5, 4)),
              )),
     mode=get_default_mode().excluding('local_fill_to_alloc',
                                       'local_useless_fill')
 )
...
@@ -1869,7 +1872,6 @@ class test_triangle(unittest.TestCase):
             yield check, dtype, 5, 3, 1
             yield check, dtype, 5, 3, -1

     def test_tril_triu(self):
         def check_l(m, k=0):
             m_symb = matrix(dtype=m.dtype)
...
@@ -1911,12 +1913,13 @@ class test_nonzero(unittest.TestCase):
     def test_nonzero(self):
         def check(m):
             m_symb = theano.tensor.tensor(dtype=m.dtype,
                                           broadcastable=(False,) * m.ndim)
             f_tuple = function([m_symb], nonzero(m_symb, return_matrix=False))
             f_matrix = function([m_symb], nonzero(m_symb, return_matrix=True))
             self.assertTrue(numpy.allclose(f_matrix(m), numpy.vstack(numpy.nonzero(m))))
             for i, j in zip(f_tuple(m), numpy.nonzero(m)):
                 self.assertTrue(numpy.allclose(i, j))
...
@@ -1939,11 +1942,10 @@ class test_nonzero(unittest.TestCase):
         rand4d[:4] = 0
         check(rand4d)

     def test_flatnonzero(self):
         def check(m):
             m_symb = theano.tensor.tensor(dtype=m.dtype,
                                           broadcastable=(False,) * m.ndim)
             f = function([m_symb], flatnonzero(m_symb))
             result = f(m)
             assert numpy.allclose(result, numpy.flatnonzero(m))
...
@@ -1970,7 +1972,7 @@ class test_nonzero(unittest.TestCase):
     def test_nonzero_values(self):
         def check(m):
             m_symb = theano.tensor.tensor(dtype=m.dtype,
                                           broadcastable=(False,) * m.ndim)
             f = function([m_symb], nonzero_values(m_symb))
             result = f(m)
             assert numpy.allclose(result, m[numpy.nonzero(m)])
...
@@ -2104,6 +2106,7 @@ def _approx_eq(a, b, eps=1.0e-4):
     return True
 _approx_eq.debug = 0

 def test_batched_dot():
     first = theano.tensor.tensor3("first")
     second = theano.tensor.tensor3("second")
...
@@ -2126,6 +2129,7 @@ def test_batched_dot():
     assert result.shape[0] == first_val.shape[0]

 def test_tensor_values_eq_approx():
     #test, inf, -inf and nan equal themself
     a = numpy.asarray([-numpy.inf, -1, 0, 1, numpy.inf, numpy.nan])
...
@@ -3168,6 +3172,23 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
         self.assertTrue(numpy.allclose(f([0]), ones[0] * 5))
         self.assertRaises(IndexError, f, [0, 1])

+    def test_adv_sub1_idx_broadcast(self):
+        # The idx can be a broadcastable vector.
+        ones = numpy.ones((4, 3), dtype=self.dtype)
+        n = self.shared(ones * 5)
+        idx = tensor.TensorType(dtype='int64', broadcastable=(True,))()
+        assert idx.type.broadcastable == (True,)
+
+        t = n[idx]
+        self.assertTrue(isinstance(t.owner.op, tensor.AdvancedSubtensor1))
+
+        f = self.function([idx], t, op=self.adv_sub1)
+        topo = f.maker.fgraph.toposort()
+        topo_ = [node for node in topo
+                 if not isinstance(node.op, self.ignore_topo)]
+        assert len(topo_) == 1
+        self.assertTrue(isinstance(topo_[0].op, self.adv_sub1))
+        self.assertTrue(numpy.allclose(f([0]), ones[0] * 5))
+
     def test_shape_i_const(self):
         # Each axis is treated independently by shape_i/shape operators
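The numerical assertion in test_adv_sub1_idx_broadcast above mirrors plain NumPy fancy indexing with a length-1 index list: the result keeps a leading axis of size 1, which then broadcasts against the unindexed row inside allclose. A standalone sketch of the same arithmetic (illustrative only):

    import numpy

    ones = numpy.ones((4, 3))
    out = (ones * 5)[[0]]                      # fancy index -> shape (1, 3)
    assert numpy.allclose(out, ones[0] * 5)    # compares against shape (3,)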
...
@@ -3179,8 +3200,8 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
             for stop in [None] + [-8, -5, -1, 0, 1, 5, 8]:
                 for step in [None] + [-3, -1, 2]:
                     outs += [data[start:stop:step].shape]
-                    shapes += [data.get_value(borrow=
-                                              True)[start:stop:step].shape]
+                    shapes += [data.get_value(
+                        borrow=True)[start:stop:step].shape]
         f = self.function([], outs, mode=mode_opt,
                           op=self.ops, N=0)
         t_shapes = f()
...
@@ -3209,8 +3230,8 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
         for start in [-8, -5, -4, -1, 0, 1, 4, 5, 8]:
             for stop in [-8, -5, -4, -1, 0, 1, 4, 5, 8]:
                 for step in [-3, -1, 2, 5]:
                     assert numpy.all(
                         f(start, stop, step) ==
                         v_data[start:stop:step].shape)

     def test_slice_canonical_form_0(self):
         start = tensor.iscalar('b')
...
@@ -4477,10 +4498,10 @@ class t_dot(unittest.TestCase):
         self.cmp_dot(rand(), rand(5))

     def test_dot_0d_2d(self):
         self.cmp_dot(rand(), rand(6, 7))

     def test_dot_0d_3d(self):
         self.cmp_dot(rand(), rand(8, 6, 7))

     def test_dot_1d_0d(self):
         self.cmp_dot(rand(5), rand())
...
@@ -4512,10 +4533,10 @@ class t_dot(unittest.TestCase):
         self.cmp_dot(rand(0), rand(0, 0))

     def test_dot_1d_3d(self):
         self.cmp_dot(rand(6), rand(8, 6, 7))

     def test_dot_2d_0d(self):
         self.cmp_dot(rand(5, 6), rand())

     def test_dot_2d_1d(self):
         self.cmp_dot(rand(5, 6), rand(6))
...
@@ -4548,19 +4569,19 @@ class t_dot(unittest.TestCase):
         self.cmp_dot(rand(0, 6), rand(6, 0))

     def test_dot_2d_3d(self):
         self.cmp_dot(rand(5, 6), rand(8, 6, 7))

     def test_dot_3d_0d(self):
         self.cmp_dot(rand(4, 5, 6), rand())

     def test_dot_3d_1d(self):
         self.cmp_dot(rand(4, 5, 6), rand(6))

     def test_dot_3d_2d(self):
         self.cmp_dot(rand(4, 5, 6), rand(6, 7))

     def test_dot_3d_3d(self):
         self.cmp_dot(rand(4, 5, 6), rand(8, 6, 7))

     def not_aligned(self, x, y):
         ctv_backup = config.compute_test_value
...
@@ -4602,7 +4623,7 @@ class t_dot(unittest.TestCase):
         self.not_aligned(rand(5), rand(6, 4))

     def test_align_1_3(self):
         self.not_aligned(rand(5), rand(6, 4, 7))

     def test_align_2_1(self):
         self.not_aligned(rand(5, 4), rand(6))
...
@@ -4611,16 +4632,16 @@ class t_dot(unittest.TestCase):
         self.not_aligned(rand(5, 4), rand(6, 7))

     def test_align_2_3(self):
         self.not_aligned(rand(5, 4), rand(6, 7, 8))

     def test_align_3_1(self):
         self.not_aligned(rand(5, 4, 3), rand(6))

     def test_align_3_2(self):
         self.not_aligned(rand(5, 4, 3), rand(6, 7))

     def test_align_3_3(self):
         self.not_aligned(rand(5, 4, 3), rand(6, 7, 8))

     def test_grad(self):
         utt.verify_grad(dot, [rand(2, 3), rand(3, 2)])
...
@@ -4628,14 +4649,14 @@ class t_dot(unittest.TestCase):
         utt.verify_grad(dot, [rand(3, 2), rand(2)])
         utt.verify_grad(dot, [rand(2), rand(2)])
         utt.verify_grad(dot, [rand(), rand(2)])
         utt.verify_grad(dot, [rand(), rand(2, 5)])
         utt.verify_grad(dot, [rand(2), rand()])
         utt.verify_grad(dot, [rand(2, 5), rand()])
         utt.verify_grad(dot, [rand(2, 3, 4), rand(4)])
         utt.verify_grad(dot, [rand(3), rand(2, 3, 4)])
         utt.verify_grad(dot, [rand(4, 3), rand(2, 3, 4)])
         utt.verify_grad(dot, [rand(2, 3, 4), rand(4, 5)])
         utt.verify_grad(dot, [rand(2, 3, 4), rand(3, 4, 5)])

     def test_broadcastable_patterns(self):
...
@@ -4650,7 +4671,8 @@ class t_dot(unittest.TestCase):
             # This strange way of doing things is the only way that worked on
             # numpy 1.4.1
             if r.ndim == 0:
                 return numpy.asarray(numpy.complex(1.1, 2.1), dtype=r.dtype)
             if r.ndim == 1:
                 if r.dtype == 'complex64':
                     return numpy.complex64([numpy.complex(1.2, 2.2)])
...
@@ -4680,8 +4702,8 @@ class t_dot(unittest.TestCase):
         x = TensorType(dtype=dtype0, broadcastable=bc0)()
         y = TensorType(dtype=dtype1, broadcastable=bc1)()
         z = dot(x, y)
         t = TensorType(dtype=
                        dtype0,
                        broadcastable=z.broadcastable)()
         rval = z * 3 + 2 * t
         f = function([x, y, t], rval)
...
@@ -5004,8 +5026,10 @@ def test_make_column_matrix_broadcastable():
 def test_flatten_outdimNone():
-    """ Flatten always returns a copy of the array. There is no danger with in-place
-    operations and thus no need to test it."""
+    """Flatten always returns a copy of the array. There is no danger
+    with in-place operations and thus no need to test it.
+    """
     a = dmatrix()
     c = flatten(a)
...
@@ -5612,7 +5636,7 @@ class test_tensordot(unittest.TestCase):
         Since tensordot is no longer an op, mimic the old op signature
         to allow easy use of verify_grad.
         """
         return lambda a, b: tensordot(a, b, axes)

     def setUp(self):
         utt.seed_rng()
...