Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
7ce2ab47
提交
7ce2ab47
authored
5月 02, 2015
作者:
David Warde-Farley
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
PY3K: Respect PEP 3113 (no more tuple unpacking arguments).
上级
10d68dd8
全部展开
隐藏空白字符变更
内嵌
并排
正在显示
15 个修改的文件
包含
152 行增加
和
61 行删除
+152
-61
rng_mrg.py
theano/sandbox/rng_mrg.py
+6
-2
basic.py
theano/scalar/basic.py
+0
-0
basic_scipy.py
theano/scalar/basic_scipy.py
+6
-2
basic.py
theano/sparse/basic.py
+0
-0
opt.py
theano/sparse/opt.py
+25
-12
sp.py
theano/sparse/sandbox/sp.py
+9
-5
sp2.py
theano/sparse/sandbox/sp2.py
+12
-4
test_basic.py
theano/sparse/tests/test_basic.py
+6
-2
basic.py
theano/tensor/basic.py
+12
-5
extra_ops.py
theano/tensor/extra_ops.py
+3
-1
nlinalg.py
theano/tensor/nlinalg.py
+27
-9
slinalg.py
theano/tensor/slinalg.py
+13
-5
test_elemwise.py
theano/tensor/tests/test_elemwise.py
+3
-1
test_opt.py
theano/tensor/tests/test_opt.py
+3
-1
basic.py
theano/typed_list/basic.py
+27
-12
没有找到文件。
theano/sandbox/rng_mrg.py
浏览文件 @
7ce2ab47
...
@@ -84,7 +84,9 @@ class DotModulo(Op):
...
@@ -84,7 +84,9 @@ class DotModulo(Op):
def
make_node
(
self
,
A
,
s
,
m
,
A2
,
s2
,
m2
):
def
make_node
(
self
,
A
,
s
,
m
,
A2
,
s2
,
m2
):
return
Apply
(
self
,
[
A
,
s
,
m
,
A2
,
s2
,
m2
],
[
s
.
type
()])
return
Apply
(
self
,
[
A
,
s
,
m
,
A2
,
s2
,
m2
],
[
s
.
type
()])
def
perform
(
self
,
node
,
(
A
,
s
,
m
,
A2
,
s2
,
m2
),
(
out
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
A
,
s
,
m
,
A2
,
s2
,
m2
)
=
inputs
(
out
,)
=
outputs
o1
=
matVecModM
(
A
,
s
,
m
)
o1
=
matVecModM
(
A
,
s
,
m
)
o2
=
matVecModM
(
A2
,
s2
,
m2
)
o2
=
matVecModM
(
A2
,
s2
,
m2
)
out
[
0
]
=
numpy
.
concatenate
((
o1
,
o2
))
out
[
0
]
=
numpy
.
concatenate
((
o1
,
o2
))
...
@@ -92,7 +94,9 @@ class DotModulo(Op):
...
@@ -92,7 +94,9 @@ class DotModulo(Op):
def
c_code_cache_version
(
self
):
def
c_code_cache_version
(
self
):
return
(
6
,)
return
(
6
,)
def
c_code
(
self
,
node
,
name
,
(
_A
,
_s
,
_m
,
_A2
,
_s2
,
_m2
),
(
_z
,
),
sub
):
def
c_code
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
(
_A
,
_s
,
_m
,
_A2
,
_s2
,
_m2
)
=
inputs
(
_z
,)
=
outputs
return
"""
return
"""
int osize = -1;
int osize = -1;
if (PyArray_NDIM(
%(_A)
s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(A) != 2");
%(fail)
s;}
if (PyArray_NDIM(
%(_A)
s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(A) != 2");
%(fail)
s;}
...
...
theano/scalar/basic.py
浏览文件 @
7ce2ab47
差异被折叠。
点击展开。
theano/scalar/basic_scipy.py
浏览文件 @
7ce2ab47
...
@@ -171,7 +171,9 @@ class Gamma(UnaryScalarOp):
...
@@ -171,7 +171,9 @@ class Gamma(UnaryScalarOp):
else
:
else
:
super
(
Gamma
,
self
)
.
impl
(
x
)
super
(
Gamma
,
self
)
.
impl
(
x
)
def
grad
(
self
,
(
x
,
),
(
gz
,
)):
def
grad
(
self
,
inputs
,
gout
):
(
x
,)
=
inputs
(
gz
,)
=
gout
if
x
.
type
in
complex_types
:
if
x
.
type
in
complex_types
:
raise
NotImplementedError
()
raise
NotImplementedError
()
if
self
(
x
)
.
type
in
discrete_types
:
if
self
(
x
)
.
type
in
discrete_types
:
...
@@ -182,7 +184,9 @@ class Gamma(UnaryScalarOp):
...
@@ -182,7 +184,9 @@ class Gamma(UnaryScalarOp):
return
gz
*
gamma
(
x
)
*
psi
(
x
),
return
gz
*
gamma
(
x
)
*
psi
(
x
),
def
c_code
(
self
,
node
,
name
,
(
x
,
),
(
z
,
),
sub
):
def
c_code
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
(
x
,)
=
inputs
(
z
,)
=
outputs
if
node
.
inputs
[
0
]
.
type
in
float_types
:
if
node
.
inputs
[
0
]
.
type
in
float_types
:
return
"""
%(z)
s = tgamma(
%(x)
s);"""
%
locals
()
return
"""
%(z)
s = tgamma(
%(x)
s);"""
%
locals
()
raise
NotImplementedError
(
'only floating point is implemented'
)
raise
NotImplementedError
(
'only floating point is implemented'
)
...
...
theano/sparse/basic.py
浏览文件 @
7ce2ab47
差异被折叠。
点击展开。
theano/sparse/opt.py
浏览文件 @
7ce2ab47
...
@@ -105,7 +105,9 @@ class AddSD_ccode(gof.op.Op):
...
@@ -105,7 +105,9 @@ class AddSD_ccode(gof.op.Op):
[
data
,
indices
,
indptr
,
y
],
[
data
,
indices
,
indptr
,
y
],
[
out
])
[
out
])
def
c_code
(
self
,
node
,
name
,
(
_data
,
_indices
,
_indptr
,
y
),
(
z
,
),
sub
):
def
c_code
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
(
_data
,
_indices
,
_indptr
,
y
)
=
inputs
(
z
,)
=
outputs
inplace
=
int
(
self
.
inplace
)
inplace
=
int
(
self
.
inplace
)
format
=
{
'csc'
:
0
,
'csr'
:
1
}[
self
.
format
]
format
=
{
'csc'
:
0
,
'csr'
:
1
}[
self
.
format
]
out_typenum
=
node
.
outputs
[
0
]
.
type
.
dtype_specs
()[
2
]
out_typenum
=
node
.
outputs
[
0
]
.
type
.
dtype_specs
()[
2
]
...
@@ -236,7 +238,9 @@ class StructuredDotCSC(gof.Op):
...
@@ -236,7 +238,9 @@ class StructuredDotCSC(gof.Op):
[
tensor
.
tensor
(
dtype_out
,
(
False
,
b
.
type
.
broadcastable
[
1
]))])
[
tensor
.
tensor
(
dtype_out
,
(
False
,
b
.
type
.
broadcastable
[
1
]))])
return
r
return
r
def
perform
(
self
,
node
,
(
a_val
,
a_ind
,
a_ptr
,
a_nrows
,
b
),
(
out
,)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
a_val
,
a_ind
,
a_ptr
,
a_nrows
,
b
)
=
inputs
(
out
,)
=
outputs
a
=
scipy
.
sparse
.
csc_matrix
((
a_val
,
a_ind
,
a_ptr
),
a
=
scipy
.
sparse
.
csc_matrix
((
a_val
,
a_ind
,
a_ptr
),
(
a_nrows
,
b
.
shape
[
0
]),
(
a_nrows
,
b
.
shape
[
0
]),
copy
=
False
)
copy
=
False
)
...
@@ -244,7 +248,7 @@ class StructuredDotCSC(gof.Op):
...
@@ -244,7 +248,7 @@ class StructuredDotCSC(gof.Op):
out
[
0
]
=
theano
.
_asarray
(
a
*
b
,
dtype
=
node
.
outputs
[
0
]
.
type
.
dtype
)
out
[
0
]
=
theano
.
_asarray
(
a
*
b
,
dtype
=
node
.
outputs
[
0
]
.
type
.
dtype
)
assert
_is_dense
(
out
[
0
])
# scipy 0.7 automatically converts to dense
assert
_is_dense
(
out
[
0
])
# scipy 0.7 automatically converts to dense
def
c_code
(
self
,
node
,
name
,
(
a_val
,
a_ind
,
a_ptr
,
a_nrows
,
b
),
(
z
,)
,
sub
):
def
c_code
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
# C-implementation of the dot product of the sparse matrix A and matrix
# C-implementation of the dot product of the sparse matrix A and matrix
# B.
# B.
# @param a_val: non-zero values of the sparse matrix
# @param a_val: non-zero values of the sparse matrix
...
@@ -257,6 +261,8 @@ class StructuredDotCSC(gof.Op):
...
@@ -257,6 +261,8 @@ class StructuredDotCSC(gof.Op):
# @param z: return value
# @param z: return value
# @param sub: TODO, not too sure, something to do with weave probably
# @param sub: TODO, not too sure, something to do with weave probably
(
a_val
,
a_ind
,
a_ptr
,
a_nrows
,
b
)
=
inputs
(
z
,)
=
outputs
if
node
.
inputs
[
0
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
if
node
.
inputs
[
0
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
raise
NotImplementedError
(
'Complex types are not supported for a_val'
)
raise
NotImplementedError
(
'Complex types are not supported for a_val'
)
if
node
.
inputs
[
4
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
if
node
.
inputs
[
4
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
...
@@ -426,7 +432,9 @@ class StructuredDotCSR(gof.Op):
...
@@ -426,7 +432,9 @@ class StructuredDotCSR(gof.Op):
b
.
type
.
broadcastable
[
1
]))])
b
.
type
.
broadcastable
[
1
]))])
return
r
return
r
def
perform
(
self
,
node
,
(
a_val
,
a_ind
,
a_ptr
,
b
),
(
out
,)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
a_val
,
a_ind
,
a_ptr
,
b
)
=
inputs
(
out
,)
=
outputs
a
=
scipy
.
sparse
.
csr_matrix
((
a_val
,
a_ind
,
a_ptr
),
a
=
scipy
.
sparse
.
csr_matrix
((
a_val
,
a_ind
,
a_ptr
),
(
len
(
a_ptr
)
-
1
,
b
.
shape
[
0
]),
(
len
(
a_ptr
)
-
1
,
b
.
shape
[
0
]),
copy
=
True
)
# use view_map before setting this to False
copy
=
True
)
# use view_map before setting this to False
...
@@ -435,7 +443,7 @@ class StructuredDotCSR(gof.Op):
...
@@ -435,7 +443,7 @@ class StructuredDotCSR(gof.Op):
# scipy 0.7 automatically converts to dense, but not .6 sometimes
# scipy 0.7 automatically converts to dense, but not .6 sometimes
assert
_is_dense
(
out
[
0
])
assert
_is_dense
(
out
[
0
])
def
c_code
(
self
,
node
,
name
,
(
a_val
,
a_ind
,
a_ptr
,
b
),
(
z
,)
,
sub
):
def
c_code
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
"""
"""
C-implementation of the dot product of the sparse matrix A and matrix
C-implementation of the dot product of the sparse matrix A and matrix
B.
B.
...
@@ -449,7 +457,8 @@ class StructuredDotCSR(gof.Op):
...
@@ -449,7 +457,8 @@ class StructuredDotCSR(gof.Op):
@param z: return value
@param z: return value
@param sub: TODO, not too sure, something to do with weave probably
@param sub: TODO, not too sure, something to do with weave probably
"""
"""
# retrieve dtype number
(
a_val
,
a_ind
,
a_ptr
,
b
)
=
inputs
(
z
,)
=
outputs
typenum_z
=
tensor
.
TensorType
(
self
.
dtype_out
,
[])
.
dtype_specs
()[
2
]
typenum_z
=
tensor
.
TensorType
(
self
.
dtype_out
,
[])
.
dtype_specs
()[
2
]
if
node
.
inputs
[
0
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
if
node
.
inputs
[
0
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
raise
NotImplementedError
(
'Complex types are not supported for a_val'
)
raise
NotImplementedError
(
'Complex types are not supported for a_val'
)
...
@@ -890,9 +899,11 @@ class CSMGradC(gof.Op):
...
@@ -890,9 +899,11 @@ class CSMGradC(gof.Op):
return
gof
.
Apply
(
self
,
[
a_val
,
a_ind
,
a_ptr
,
a_dim
,
return
gof
.
Apply
(
self
,
[
a_val
,
a_ind
,
a_ptr
,
a_dim
,
b_val
,
b_ind
,
b_ptr
,
b_dim
],
[
b_val
.
type
()])
b_val
,
b_ind
,
b_ptr
,
b_dim
],
[
b_val
.
type
()])
def
c_code
(
self
,
node
,
name
,
(
a_val
,
a_ind
,
a_ptr
,
a_dim
,
def
c_code
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
b_val
,
b_ind
,
b_ptr
,
b_dim
),
(
z
,),
sub
):
# retrieve dtype number
# retrieve dtype number
(
a_val
,
a_ind
,
a_ptr
,
a_dim
,
b_val
,
b_ind
,
b_ptr
,
b_dim
)
=
inputs
(
z
,)
=
outputs
typenum_z
=
node
.
outputs
[
0
]
.
type
.
dtype_specs
()[
2
]
typenum_z
=
node
.
outputs
[
0
]
.
type
.
dtype_specs
()[
2
]
if
node
.
inputs
[
0
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
if
node
.
inputs
[
0
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
raise
NotImplementedError
(
'Complex types are not supported for a_val'
)
raise
NotImplementedError
(
'Complex types are not supported for a_val'
)
...
@@ -1047,9 +1058,10 @@ class MulSDCSC(gof.Op):
...
@@ -1047,9 +1058,10 @@ class MulSDCSC(gof.Op):
# def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):
# def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):
# return NotImplementedError()
# return NotImplementedError()
def
c_code
(
self
,
node
,
name
,
(
_data
,
_indices
,
_indptr
,
_b
,),
def
c_code
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
(
_zout
,
),
sub
):
(
_data
,
_indices
,
_indptr
,
_b
,)
=
inputs
(
_zout
,)
=
outputs
if
node
.
inputs
[
0
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
if
node
.
inputs
[
0
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
raise
NotImplementedError
(
'Complex types are not supported for a'
)
raise
NotImplementedError
(
'Complex types are not supported for a'
)
if
node
.
inputs
[
3
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
if
node
.
inputs
[
3
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
...
@@ -1163,9 +1175,10 @@ class MulSDCSR(gof.Op):
...
@@ -1163,9 +1175,10 @@ class MulSDCSR(gof.Op):
# def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):
# def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):
# return NotImplemented()
# return NotImplemented()
def
c_code
(
self
,
node
,
name
,
(
_data
,
_indices
,
_indptr
,
_b
,),
def
c_code
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
(
_zout
,
),
sub
):
(
_data
,
_indices
,
_indptr
,
_b
,)
=
inputs
(
_zout
,)
=
outputs
if
node
.
inputs
[
0
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
if
node
.
inputs
[
0
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
raise
NotImplementedError
(
'Complex types are not supported for a'
)
raise
NotImplementedError
(
'Complex types are not supported for a'
)
if
node
.
inputs
[
3
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
if
node
.
inputs
[
3
]
.
type
.
dtype
in
(
'complex64'
,
'complex128'
):
...
...
theano/sparse/sandbox/sp.py
浏览文件 @
7ce2ab47
...
@@ -42,18 +42,20 @@ class ConvolutionIndices(Op):
...
@@ -42,18 +42,20 @@ class ConvolutionIndices(Op):
"""
"""
@staticmethod
@staticmethod
def
sparse_eval
(
inshp
,
kshp
,
nkern
,
(
dx
,
dy
)
=
(
1
,
1
),
mode
=
'valid'
):
def
sparse_eval
(
inshp
,
kshp
,
nkern
,
strides
=
(
1
,
1
),
mode
=
'valid'
):
(
dx
,
dy
)
=
strides
return
convolution_indices
.
evaluate
(
inshp
,
kshp
,
(
dx
,
dy
),
return
convolution_indices
.
evaluate
(
inshp
,
kshp
,
(
dx
,
dy
),
nkern
,
mode
=
mode
,
ws
=
False
)
nkern
,
mode
=
mode
,
ws
=
False
)
@staticmethod
@staticmethod
def
conv_eval
(
inshp
,
kshp
,
(
dx
,
dy
)
=
(
1
,
1
),
mode
=
'valid'
):
def
conv_eval
(
inshp
,
kshp
,
strides
=
(
1
,
1
),
mode
=
'valid'
):
(
dx
,
dy
)
=
strides
return
convolution_indices
.
evaluate
(
inshp
,
kshp
,
(
dx
,
dy
),
return
convolution_indices
.
evaluate
(
inshp
,
kshp
,
(
dx
,
dy
),
mode
=
mode
,
ws
=
True
)
mode
=
mode
,
ws
=
True
)
# img_shape and ker_shape are (height,width)
# img_shape and ker_shape are (height,width)
@staticmethod
@staticmethod
def
evaluate
(
inshp
,
kshp
,
(
dx
,
dy
)
=
(
1
,
1
),
nkern
=
1
,
mode
=
'valid'
,
ws
=
True
):
def
evaluate
(
inshp
,
kshp
,
strides
=
(
1
,
1
),
nkern
=
1
,
mode
=
'valid'
,
ws
=
True
):
"""Build a sparse matrix which can be used for performing...
"""Build a sparse matrix which can be used for performing...
* convolution: in this case, the dot product of this matrix
* convolution: in this case, the dot product of this matrix
with the input images will generate a stack of images
with the input images will generate a stack of images
...
@@ -79,6 +81,7 @@ class ConvolutionIndices(Op):
...
@@ -79,6 +81,7 @@ class ConvolutionIndices(Op):
:returns: the structure of a sparse matrix, and the logical dimensions
:returns: the structure of a sparse matrix, and the logical dimensions
of the image which will be the result of filtering.
of the image which will be the result of filtering.
"""
"""
(
dx
,
dy
)
=
strides
N
=
numpy
N
=
numpy
# inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
# inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
...
@@ -251,8 +254,9 @@ class ConvolutionIndices(Op):
...
@@ -251,8 +254,9 @@ class ConvolutionIndices(Op):
return
rval
return
rval
def
perform
(
self
,
node
,
(
inshp
,
kshp
),
\
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
out_indices
,
out_indptr
,
spmat_shape
)):
(
inshp
,
kshp
)
=
inputs
(
out_indices
,
out_indptr
,
spmat_shape
)
=
outputs
indices
,
indptr
,
spmatshp
,
outshp
=
self
.
evaluate
(
inshp
,
kshp
)
indices
,
indptr
,
spmatshp
,
outshp
=
self
.
evaluate
(
inshp
,
kshp
)
out_indices
[
0
]
=
indices
out_indices
[
0
]
=
indices
out_indptr
[
0
]
=
indptr
out_indptr
[
0
]
=
indptr
...
...
theano/sparse/sandbox/sp2.py
浏览文件 @
7ce2ab47
...
@@ -71,7 +71,9 @@ class Poisson(gof.op.Op):
...
@@ -71,7 +71,9 @@ class Poisson(gof.op.Op):
x
=
as_sparse_variable
(
x
)
x
=
as_sparse_variable
(
x
)
return
gof
.
Apply
(
self
,
[
x
],
[
x
.
type
()])
return
gof
.
Apply
(
self
,
[
x
],
[
x
.
type
()])
def
perform
(
self
,
node
,
(
x
,
),
(
out
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,)
=
inputs
(
out
,)
=
outputs
assert
_is_sparse
(
x
)
assert
_is_sparse
(
x
)
assert
x
.
format
in
[
"csr"
,
"csc"
]
assert
x
.
format
in
[
"csr"
,
"csc"
]
out
[
0
]
=
x
.
copy
()
out
[
0
]
=
x
.
copy
()
...
@@ -130,7 +132,9 @@ class Binomial(gof.op.Op):
...
@@ -130,7 +132,9 @@ class Binomial(gof.op.Op):
[
SparseType
(
dtype
=
self
.
dtype
,
[
SparseType
(
dtype
=
self
.
dtype
,
format
=
self
.
format
)
.
make_variable
()])
format
=
self
.
format
)
.
make_variable
()])
def
perform
(
self
,
node
,
(
n
,
p
,
shape
,
),
(
out
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
n
,
p
,
shape
)
=
inputs
(
out
,)
=
outputs
binomial
=
numpy
.
random
.
binomial
(
n
,
p
,
size
=
shape
)
binomial
=
numpy
.
random
.
binomial
(
n
,
p
,
size
=
shape
)
csx_matrix
=
getattr
(
scipy
.
sparse
,
self
.
format
+
'_matrix'
)
csx_matrix
=
getattr
(
scipy
.
sparse
,
self
.
format
+
'_matrix'
)
out
[
0
]
=
csx_matrix
(
binomial
,
dtype
=
self
.
dtype
)
out
[
0
]
=
csx_matrix
(
binomial
,
dtype
=
self
.
dtype
)
...
@@ -138,7 +142,9 @@ class Binomial(gof.op.Op):
...
@@ -138,7 +142,9 @@ class Binomial(gof.op.Op):
def
connection_pattern
(
self
,
node
):
def
connection_pattern
(
self
,
node
):
return
[[
True
],
[
True
],
[
False
]]
return
[[
True
],
[
True
],
[
False
]]
def
grad
(
self
,
(
n
,
p
,
shape
,
),
(
gz
,)):
def
grad
(
self
,
inputs
,
gout
):
(
n
,
p
,
shape
)
=
inputs
(
gz
,)
=
gout
comment_n
=
"No gradient exists for the number of samples in class
\
comment_n
=
"No gradient exists for the number of samples in class
\
Binomial of theano/sparse/sandbox/sp2.py"
Binomial of theano/sparse/sandbox/sp2.py"
comment_p
=
"No gradient exists for the prob of success in class
\
comment_p
=
"No gradient exists for the prob of success in class
\
...
@@ -196,7 +202,9 @@ class Multinomial(gof.op.Op):
...
@@ -196,7 +202,9 @@ class Multinomial(gof.op.Op):
return
gof
.
Apply
(
self
,
[
n
,
p
],
[
p
.
type
()])
return
gof
.
Apply
(
self
,
[
n
,
p
],
[
p
.
type
()])
def
perform
(
self
,
node
,
(
n
,
p
),
(
out
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
n
,
p
)
=
inputs
(
out
,)
=
outputs
assert
_is_sparse
(
p
)
assert
_is_sparse
(
p
)
if
p
.
format
!=
'csr'
:
if
p
.
format
!=
'csr'
:
...
...
theano/sparse/tests/test_basic.py
浏览文件 @
7ce2ab47
...
@@ -186,11 +186,15 @@ class T_verify_grad_sparse(unittest.TestCase):
...
@@ -186,11 +186,15 @@ class T_verify_grad_sparse(unittest.TestCase):
x
=
as_sparse_variable
(
x
)
x
=
as_sparse_variable
(
x
)
return
gof
.
Apply
(
self
,
[
x
],
[
x
.
type
()])
return
gof
.
Apply
(
self
,
[
x
],
[
x
.
type
()])
def
perform
(
self
,
node
,
(
x
,
),
(
out
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,)
=
inputs
(
out
,)
=
outputs
assert
_is_sparse
(
x
)
assert
_is_sparse
(
x
)
out
[
0
]
=
-
x
out
[
0
]
=
-
x
def
grad
(
self
,
(
x
,),
(
gz
,)):
def
grad
(
self
,
inputs
,
gout
):
(
x
,)
=
inputs
(
gz
,)
=
gout
assert
_is_sparse_variable
(
x
)
and
_is_sparse_variable
(
gz
)
assert
_is_sparse_variable
(
x
)
and
_is_sparse_variable
(
gz
)
if
self
.
structured
:
if
self
.
structured
:
return
sp_ones_like
(
x
)
*
dense_from_sparse
(
gz
),
return
sp_ones_like
(
x
)
*
dense_from_sparse
(
gz
),
...
...
theano/tensor/basic.py
浏览文件 @
7ce2ab47
...
@@ -5159,10 +5159,14 @@ class Diagonal(Op):
...
@@ -5159,10 +5159,14 @@ class Diagonal(Op):
return
Apply
(
self
,
[
x
],
[
tensor
(
dtype
=
x
.
dtype
,
return
Apply
(
self
,
[
x
],
[
tensor
(
dtype
=
x
.
dtype
,
broadcastable
=
[
False
]
*
(
x
.
ndim
-
1
))])
broadcastable
=
[
False
]
*
(
x
.
ndim
-
1
))])
def
perform
(
self
,
node
,
(
x
,),
(
z
,)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,)
=
inputs
(
z
,)
=
outputs
z
[
0
]
=
x
.
diagonal
(
self
.
offset
,
self
.
axis1
,
self
.
axis2
)
z
[
0
]
=
x
.
diagonal
(
self
.
offset
,
self
.
axis1
,
self
.
axis2
)
def
grad
(
self
,
(
x
,),
(
gz
,)):
def
grad
(
self
,
inputs
,
gout
):
(
x
,)
=
inputs
(
gz
,)
=
gout
return
[
grad_not_implemented
(
self
,
0
,
x
)]
return
[
grad_not_implemented
(
self
,
0
,
x
)]
def
infer_shape
(
self
,
node
,
shapes
):
def
infer_shape
(
self
,
node
,
shapes
):
...
@@ -5207,10 +5211,12 @@ class Diag(Op):
...
@@ -5207,10 +5211,12 @@ class Diag(Op):
return
Apply
(
self
,
[
diag
],
[
matrix
(
dtype
=
diag
.
dtype
)])
return
Apply
(
self
,
[
diag
],
[
matrix
(
dtype
=
diag
.
dtype
)])
def
perform
(
self
,
node
,
inputs
,
(
z
,)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
z
,)
=
outputs
z
[
0
]
=
numpy
.
diag
(
inputs
[
0
])
z
[
0
]
=
numpy
.
diag
(
inputs
[
0
])
def
grad
(
self
,
inputs
,
(
gz
,)):
def
grad
(
self
,
inputs
,
gout
):
(
gz
,)
=
gout
return
[
diagonal
(
gz
)]
return
[
diagonal
(
gz
)]
def
infer_shape
(
self
,
nodes
,
shapes
):
def
infer_shape
(
self
,
nodes
,
shapes
):
...
@@ -5435,7 +5441,8 @@ class Choose(Op):
...
@@ -5435,7 +5441,8 @@ class Choose(Op):
o
=
TensorType
(
choice
.
dtype
,
bcast
)
o
=
TensorType
(
choice
.
dtype
,
bcast
)
return
Apply
(
self
,
[
a
,
choice
],
[
o
()])
return
Apply
(
self
,
[
a
,
choice
],
[
o
()])
def
perform
(
self
,
node
,
inputs
,
(
z
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
z
,)
=
outputs
a
=
inputs
[
0
]
a
=
inputs
[
0
]
choice
=
inputs
[
1
]
choice
=
inputs
[
1
]
# TODO reuse out?
# TODO reuse out?
...
...
theano/tensor/extra_ops.py
浏览文件 @
7ce2ab47
...
@@ -593,7 +593,9 @@ class RepeatOp(theano.Op):
...
@@ -593,7 +593,9 @@ class RepeatOp(theano.Op):
return
[[
True
],
[
False
]]
return
[[
True
],
[
False
]]
def
grad
(
self
,
(
x
,
repeats
),
(
gz
,
)):
def
grad
(
self
,
inputs
,
gout
):
(
x
,
repeats
)
=
inputs
(
gz
,)
=
gout
if
repeats
.
ndim
==
0
:
if
repeats
.
ndim
==
0
:
if
self
.
axis
is
None
:
if
self
.
axis
is
None
:
axis
=
x
.
ndim
axis
=
x
.
ndim
...
...
theano/tensor/nlinalg.py
浏览文件 @
7ce2ab47
...
@@ -42,7 +42,9 @@ class MatrixPinv(Op):
...
@@ -42,7 +42,9 @@ class MatrixPinv(Op):
assert
x
.
ndim
==
2
assert
x
.
ndim
==
2
return
Apply
(
self
,
[
x
],
[
x
.
type
()])
return
Apply
(
self
,
[
x
],
[
x
.
type
()])
def
perform
(
self
,
node
,
(
x
,),
(
z
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,)
=
inputs
(
z
,)
=
outputs
z
[
0
]
=
numpy
.
linalg
.
pinv
(
x
)
.
astype
(
x
.
dtype
)
z
[
0
]
=
numpy
.
linalg
.
pinv
(
x
)
.
astype
(
x
.
dtype
)
pinv
=
MatrixPinv
()
pinv
=
MatrixPinv
()
...
@@ -69,7 +71,9 @@ class MatrixInverse(Op):
...
@@ -69,7 +71,9 @@ class MatrixInverse(Op):
assert
x
.
ndim
==
2
assert
x
.
ndim
==
2
return
Apply
(
self
,
[
x
],
[
x
.
type
()])
return
Apply
(
self
,
[
x
],
[
x
.
type
()])
def
perform
(
self
,
node
,
(
x
,),
(
z
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,)
=
inputs
(
z
,)
=
outputs
z
[
0
]
=
numpy
.
linalg
.
inv
(
x
)
.
astype
(
x
.
dtype
)
z
[
0
]
=
numpy
.
linalg
.
inv
(
x
)
.
astype
(
x
.
dtype
)
def
grad
(
self
,
inputs
,
g_outputs
):
def
grad
(
self
,
inputs
,
g_outputs
):
...
@@ -149,7 +153,9 @@ class AllocDiag(Op):
...
@@ -149,7 +153,9 @@ class AllocDiag(Op):
def
grad
(
self
,
inputs
,
g_outputs
):
def
grad
(
self
,
inputs
,
g_outputs
):
return
[
extract_diag
(
g_outputs
[
0
])]
return
[
extract_diag
(
g_outputs
[
0
])]
def
perform
(
self
,
node
,
(
x
,),
(
z
,)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,)
=
inputs
(
z
,)
=
outputs
if
x
.
ndim
!=
1
:
if
x
.
ndim
!=
1
:
raise
TypeError
(
x
)
raise
TypeError
(
x
)
z
[
0
]
=
numpy
.
diag
(
x
)
z
[
0
]
=
numpy
.
diag
(
x
)
...
@@ -264,7 +270,9 @@ class Det(Op):
...
@@ -264,7 +270,9 @@ class Det(Op):
o
=
theano
.
tensor
.
scalar
(
dtype
=
x
.
dtype
)
o
=
theano
.
tensor
.
scalar
(
dtype
=
x
.
dtype
)
return
Apply
(
self
,
[
x
],
[
o
])
return
Apply
(
self
,
[
x
],
[
o
])
def
perform
(
self
,
node
,
(
x
,),
(
z
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,)
=
inputs
(
z
,)
=
outputs
try
:
try
:
z
[
0
]
=
numpy
.
asarray
(
numpy
.
linalg
.
det
(
x
),
dtype
=
x
.
dtype
)
z
[
0
]
=
numpy
.
asarray
(
numpy
.
linalg
.
det
(
x
),
dtype
=
x
.
dtype
)
except
Exception
:
except
Exception
:
...
@@ -298,7 +306,9 @@ class Eig(Op):
...
@@ -298,7 +306,9 @@ class Eig(Op):
v
=
theano
.
tensor
.
matrix
(
dtype
=
x
.
dtype
)
v
=
theano
.
tensor
.
matrix
(
dtype
=
x
.
dtype
)
return
Apply
(
self
,
[
x
],
[
w
,
v
])
return
Apply
(
self
,
[
x
],
[
w
,
v
])
def
perform
(
self
,
node
,
(
x
,),
(
w
,
v
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,)
=
inputs
(
w
,
v
)
=
outputs
w
[
0
],
v
[
0
]
=
[
z
.
astype
(
x
.
dtype
)
for
z
in
self
.
_numop
(
x
)]
w
[
0
],
v
[
0
]
=
[
z
.
astype
(
x
.
dtype
)
for
z
in
self
.
_numop
(
x
)]
def
infer_shape
(
self
,
node
,
shapes
):
def
infer_shape
(
self
,
node
,
shapes
):
...
@@ -333,7 +343,9 @@ class Eigh(Eig):
...
@@ -333,7 +343,9 @@ class Eigh(Eig):
v
=
theano
.
tensor
.
matrix
(
dtype
=
x
.
dtype
)
v
=
theano
.
tensor
.
matrix
(
dtype
=
x
.
dtype
)
return
Apply
(
self
,
[
x
],
[
w
,
v
])
return
Apply
(
self
,
[
x
],
[
w
,
v
])
def
perform
(
self
,
node
,
(
x
,),
(
w
,
v
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,)
=
inputs
(
w
,
v
)
=
outputs
w
[
0
],
v
[
0
]
=
self
.
_numop
(
x
,
self
.
UPLO
)
w
[
0
],
v
[
0
]
=
self
.
_numop
(
x
,
self
.
UPLO
)
def
grad
(
self
,
inputs
,
g_outputs
):
def
grad
(
self
,
inputs
,
g_outputs
):
...
@@ -466,7 +478,9 @@ class QRFull(Op):
...
@@ -466,7 +478,9 @@ class QRFull(Op):
return
Apply
(
self
,
[
x
],
[
q
,
r
])
return
Apply
(
self
,
[
x
],
[
q
,
r
])
def
perform
(
self
,
node
,
(
x
,),
(
q
,
r
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,)
=
inputs
(
q
,
r
)
=
outputs
assert
x
.
ndim
==
2
,
"The input of qr function should be a matrix."
assert
x
.
ndim
==
2
,
"The input of qr function should be a matrix."
q
[
0
],
r
[
0
]
=
self
.
_numop
(
x
,
self
.
mode
)
q
[
0
],
r
[
0
]
=
self
.
_numop
(
x
,
self
.
mode
)
...
@@ -489,7 +503,9 @@ class QRIncomplete(Op):
...
@@ -489,7 +503,9 @@ class QRIncomplete(Op):
q
=
theano
.
tensor
.
matrix
(
dtype
=
x
.
dtype
)
q
=
theano
.
tensor
.
matrix
(
dtype
=
x
.
dtype
)
return
Apply
(
self
,
[
x
],
[
q
])
return
Apply
(
self
,
[
x
],
[
q
])
def
perform
(
self
,
node
,
(
x
,),
(
q
,)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,)
=
inputs
(
q
,)
=
outputs
assert
x
.
ndim
==
2
,
"The input of qr function should be a matrix."
assert
x
.
ndim
==
2
,
"The input of qr function should be a matrix."
q
[
0
]
=
self
.
_numop
(
x
,
q
[
0
]
=
self
.
_numop
(
x
,
self
.
mode
)
self
.
mode
)
...
@@ -594,7 +610,9 @@ class SVD(Op):
...
@@ -594,7 +610,9 @@ class SVD(Op):
v
=
theano
.
tensor
.
matrix
(
dtype
=
x
.
dtype
)
v
=
theano
.
tensor
.
matrix
(
dtype
=
x
.
dtype
)
return
Apply
(
self
,
[
x
],
[
w
,
u
,
v
])
return
Apply
(
self
,
[
x
],
[
w
,
u
,
v
])
def
perform
(
self
,
node
,
(
x
,),
(
w
,
u
,
v
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,)
=
inputs
(
w
,
u
,
v
)
=
outputs
assert
x
.
ndim
==
2
,
"The input of svd function should be a matrix."
assert
x
.
ndim
==
2
,
"The input of svd function should be a matrix."
w
[
0
],
u
[
0
],
v
[
0
]
=
self
.
_numop
(
x
,
w
[
0
],
u
[
0
],
v
[
0
]
=
self
.
_numop
(
x
,
self
.
full_matrices
,
self
.
full_matrices
,
...
...
theano/tensor/slinalg.py
浏览文件 @
7ce2ab47
...
@@ -232,7 +232,8 @@ class Eigvalsh(Op):
...
@@ -232,7 +232,8 @@ class Eigvalsh(Op):
w
=
theano
.
tensor
.
vector
(
dtype
=
out_dtype
)
w
=
theano
.
tensor
.
vector
(
dtype
=
out_dtype
)
return
Apply
(
self
,
[
a
,
b
],
[
w
])
return
Apply
(
self
,
[
a
,
b
],
[
w
])
def
perform
(
self
,
node
,
inputs
,
(
w
,)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
w
,)
=
outputs
if
len
(
inputs
)
==
2
:
if
len
(
inputs
)
==
2
:
w
[
0
]
=
scipy
.
linalg
.
eigvalsh
(
a
=
inputs
[
0
],
b
=
inputs
[
1
],
lower
=
self
.
lower
)
w
[
0
]
=
scipy
.
linalg
.
eigvalsh
(
a
=
inputs
[
0
],
b
=
inputs
[
1
],
lower
=
self
.
lower
)
else
:
else
:
...
@@ -288,7 +289,8 @@ class EigvalshGrad(Op):
...
@@ -288,7 +289,8 @@ class EigvalshGrad(Op):
out2
=
theano
.
tensor
.
matrix
(
dtype
=
out_dtype
)
out2
=
theano
.
tensor
.
matrix
(
dtype
=
out_dtype
)
return
Apply
(
self
,
[
a
,
b
,
gw
],
[
out1
,
out2
])
return
Apply
(
self
,
[
a
,
b
,
gw
],
[
out1
,
out2
])
def
perform
(
self
,
node
,
(
a
,
b
,
gw
),
outputs
):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
a
,
b
,
gw
)
=
inputs
w
,
v
=
scipy
.
linalg
.
eigh
(
a
,
b
,
lower
=
self
.
lower
)
w
,
v
=
scipy
.
linalg
.
eigh
(
a
,
b
,
lower
=
self
.
lower
)
gA
=
v
.
dot
(
numpy
.
diag
(
gw
)
.
dot
(
v
.
T
))
gA
=
v
.
dot
(
numpy
.
diag
(
gw
)
.
dot
(
v
.
T
))
gB
=
-
v
.
dot
(
numpy
.
diag
(
gw
*
w
)
.
dot
(
v
.
T
))
gB
=
-
v
.
dot
(
numpy
.
diag
(
gw
*
w
)
.
dot
(
v
.
T
))
...
@@ -353,10 +355,14 @@ class Expm(Op):
...
@@ -353,10 +355,14 @@ class Expm(Op):
expm
=
theano
.
tensor
.
matrix
(
dtype
=
A
.
dtype
)
expm
=
theano
.
tensor
.
matrix
(
dtype
=
A
.
dtype
)
return
Apply
(
self
,
[
A
,
],
[
expm
,
])
return
Apply
(
self
,
[
A
,
],
[
expm
,
])
def
perform
(
self
,
node
,
(
A
,),
(
expm
,)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
A
,)
=
inputs
(
expm
,)
=
outputs
expm
[
0
]
=
scipy
.
linalg
.
expm
(
A
)
expm
[
0
]
=
scipy
.
linalg
.
expm
(
A
)
def
grad
(
self
,
(
A
,),
(
g_out
,)):
def
grad
(
self
,
inputs
,
outputs
):
(
A
,)
=
inputs
(
g_out
,)
=
outputs
return
[
ExpmGrad
()(
A
,
g_out
)]
return
[
ExpmGrad
()(
A
,
g_out
)]
def
infer_shape
(
self
,
node
,
shapes
):
def
infer_shape
(
self
,
node
,
shapes
):
...
@@ -378,10 +384,12 @@ class ExpmGrad(Op):
...
@@ -378,10 +384,12 @@ class ExpmGrad(Op):
def
infer_shape
(
self
,
node
,
shapes
):
def
infer_shape
(
self
,
node
,
shapes
):
return
[
shapes
[
0
]]
return
[
shapes
[
0
]]
def
perform
(
self
,
node
,
(
A
,
gA
),
(
out
,)
):
def
perform
(
self
,
node
,
inputs
,
outputs
):
# Kalbfleisch and Lawless, J. Am. Stat. Assoc. 80 (1985) Equation 3.4
# Kalbfleisch and Lawless, J. Am. Stat. Assoc. 80 (1985) Equation 3.4
# Kind of... You need to do some algebra from there to arrive at
# Kind of... You need to do some algebra from there to arrive at
# this expression.
# this expression.
(
A
,
gA
)
=
inputs
(
out
,)
=
outputs
w
,
V
=
scipy
.
linalg
.
eig
(
A
,
right
=
True
)
w
,
V
=
scipy
.
linalg
.
eig
(
A
,
right
=
True
)
U
=
scipy
.
linalg
.
inv
(
V
)
.
T
U
=
scipy
.
linalg
.
inv
(
V
)
.
T
...
...
theano/tensor/tests/test_elemwise.py
浏览文件 @
7ce2ab47
...
@@ -1233,7 +1233,9 @@ def test_not_implemented_elemwise_grad():
...
@@ -1233,7 +1233,9 @@ def test_not_implemented_elemwise_grad():
def
impl
(
self
,
n
,
x
):
def
impl
(
self
,
n
,
x
):
return
x
*
n
return
x
*
n
def
grad
(
self
,
(
n
,
x
),
(
gz
,)):
def
grad
(
self
,
inputs
,
gout
):
(
n
,
x
)
=
inputs
(
gz
,)
=
gout
dy_dx
=
n
dy_dx
=
n
return
[
theano
.
gradient
.
grad_not_implemented
(
self
,
0
,
n
),
return
[
theano
.
gradient
.
grad_not_implemented
(
self
,
0
,
n
),
gz
*
dy_dx
]
gz
*
dy_dx
]
...
...
theano/tensor/tests/test_opt.py
浏览文件 @
7ce2ab47
...
@@ -1421,7 +1421,9 @@ class TimesN(theano.scalar.basic.UnaryScalarOp):
...
@@ -1421,7 +1421,9 @@ class TimesN(theano.scalar.basic.UnaryScalarOp):
float
%(nodename)
s_timesn(float x) { return x *
%(n)
s; }
float
%(nodename)
s_timesn(float x) { return x *
%(n)
s; }
"""
%
locals
()
"""
%
locals
()
def
c_code
(
self
,
node
,
name
,
(
x
,
),
(
z
,
),
sub
):
def
c_code
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
(
x
,)
=
inputs
(
z
,)
=
outputs
return
"
%(z)
s =
%(name)
s_timesn(
%(x)
s);"
%
locals
()
return
"
%(z)
s =
%(name)
s_timesn(
%(x)
s);"
%
locals
()
...
...
theano/typed_list/basic.py
浏览文件 @
7ce2ab47
...
@@ -80,7 +80,9 @@ class GetItem(Op):
...
@@ -80,7 +80,9 @@ class GetItem(Op):
else
:
else
:
raise
TypeError
(
'Expected scalar or slice as index.'
)
raise
TypeError
(
'Expected scalar or slice as index.'
)
def
perform
(
self
,
node
,
(
x
,
index
),
(
out
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,
index
)
=
inputs
(
out
,)
=
outputs
if
not
isinstance
(
index
,
slice
):
if
not
isinstance
(
index
,
slice
):
index
=
int
(
index
)
index
=
int
(
index
)
out
[
0
]
=
x
[
index
]
out
[
0
]
=
x
[
index
]
...
@@ -137,7 +139,9 @@ class Append(Op):
...
@@ -137,7 +139,9 @@ class Append(Op):
assert
x
.
ttype
==
toAppend
.
type
,
(
x
.
ttype
,
toAppend
.
type
)
assert
x
.
ttype
==
toAppend
.
type
,
(
x
.
ttype
,
toAppend
.
type
)
return
Apply
(
self
,
[
x
,
toAppend
],
[
x
.
type
()])
return
Apply
(
self
,
[
x
,
toAppend
],
[
x
.
type
()])
def
perform
(
self
,
node
,
(
x
,
toAppend
),
(
out
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,
toAppend
)
=
inputs
(
out
,)
=
outputs
if
not
self
.
inplace
:
if
not
self
.
inplace
:
out
[
0
]
=
list
(
x
)
out
[
0
]
=
list
(
x
)
else
:
else
:
...
@@ -209,7 +213,9 @@ class Extend(Op):
...
@@ -209,7 +213,9 @@ class Extend(Op):
assert
x
.
type
==
toAppend
.
type
assert
x
.
type
==
toAppend
.
type
return
Apply
(
self
,
[
x
,
toAppend
],
[
x
.
type
()])
return
Apply
(
self
,
[
x
,
toAppend
],
[
x
.
type
()])
def
perform
(
self
,
node
,
(
x
,
toAppend
),
(
out
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,
toAppend
)
=
inputs
(
out
,)
=
outputs
if
not
self
.
inplace
:
if
not
self
.
inplace
:
out
[
0
]
=
list
(
x
)
out
[
0
]
=
list
(
x
)
else
:
else
:
...
@@ -292,7 +298,9 @@ class Insert(Op):
...
@@ -292,7 +298,9 @@ class Insert(Op):
assert
isinstance
(
index
,
T
.
TensorVariable
)
and
index
.
ndim
==
0
assert
isinstance
(
index
,
T
.
TensorVariable
)
and
index
.
ndim
==
0
return
Apply
(
self
,
[
x
,
index
,
toInsert
],
[
x
.
type
()])
return
Apply
(
self
,
[
x
,
index
,
toInsert
],
[
x
.
type
()])
def
perform
(
self
,
node
,
(
x
,
index
,
toInsert
),
(
out
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,
index
,
toInsert
)
=
inputs
(
out
,)
=
outputs
if
not
self
.
inplace
:
if
not
self
.
inplace
:
out
[
0
]
=
list
(
x
)
out
[
0
]
=
list
(
x
)
else
:
else
:
...
@@ -360,8 +368,9 @@ class Remove(Op):
...
@@ -360,8 +368,9 @@ class Remove(Op):
assert
x
.
ttype
==
toRemove
.
type
assert
x
.
ttype
==
toRemove
.
type
return
Apply
(
self
,
[
x
,
toRemove
],
[
x
.
type
()])
return
Apply
(
self
,
[
x
,
toRemove
],
[
x
.
type
()])
def
perform
(
self
,
node
,
(
x
,
toRemove
),
(
out
,
)):
def
perform
(
self
,
node
,
inputs
,
outputs
):
(
x
,
toRemove
)
=
inputs
(
out
,)
=
outputs
if
not
self
.
inplace
:
if
not
self
.
inplace
:
out
[
0
]
=
list
(
x
)
out
[
0
]
=
list
(
x
)
else
:
else
:
...
@@ -413,8 +422,8 @@ class Reverse(Op):
...
@@ -413,8 +422,8 @@ class Reverse(Op):
assert
isinstance
(
x
.
type
,
TypedListType
)
assert
isinstance
(
x
.
type
,
TypedListType
)
return
Apply
(
self
,
[
x
],
[
x
.
type
()])
return
Apply
(
self
,
[
x
],
[
x
.
type
()])
def
perform
(
self
,
node
,
inp
,
(
out
,
)
):
def
perform
(
self
,
node
,
inp
,
outputs
):
(
out
,)
=
outputs
if
not
self
.
inplace
:
if
not
self
.
inplace
:
out
[
0
]
=
list
(
inp
[
0
])
out
[
0
]
=
list
(
inp
[
0
])
else
:
else
:
...
@@ -470,12 +479,14 @@ class Index(Op):
...
@@ -470,12 +479,14 @@ class Index(Op):
assert
x
.
ttype
==
elem
.
type
assert
x
.
ttype
==
elem
.
type
return
Apply
(
self
,
[
x
,
elem
],
[
T
.
scalar
()])
return
Apply
(
self
,
[
x
,
elem
],
[
T
.
scalar
()])
def
perform
(
self
,
node
,
(
x
,
elem
),
(
out
,
)
):
def
perform
(
self
,
node
,
inputs
,
outputs
):
"""
"""
inelegant workaround for ValueError: The truth value of an
inelegant workaround for ValueError: The truth value of an
array with more than one element is ambiguous. Use a.any() or a.all()
array with more than one element is ambiguous. Use a.any() or a.all()
being thrown when trying to remove a matrix from a matrices list
being thrown when trying to remove a matrix from a matrices list
"""
"""
(
x
,
elem
)
=
inputs
(
out
,)
=
outputs
for
y
in
range
(
len
(
x
)):
for
y
in
range
(
len
(
x
)):
if
node
.
inputs
[
0
]
.
ttype
.
values_eq
(
x
[
y
],
elem
):
if
node
.
inputs
[
0
]
.
ttype
.
values_eq
(
x
[
y
],
elem
):
out
[
0
]
=
numpy
.
asarray
(
y
,
dtype
=
theano
.
config
.
floatX
)
out
[
0
]
=
numpy
.
asarray
(
y
,
dtype
=
theano
.
config
.
floatX
)
...
@@ -500,12 +511,14 @@ class Count(Op):
...
@@ -500,12 +511,14 @@ class Count(Op):
assert
x
.
ttype
==
elem
.
type
assert
x
.
ttype
==
elem
.
type
return
Apply
(
self
,
[
x
,
elem
],
[
T
.
scalar
()])
return
Apply
(
self
,
[
x
,
elem
],
[
T
.
scalar
()])
def
perform
(
self
,
node
,
(
x
,
elem
),
(
out
,
)
):
def
perform
(
self
,
node
,
inputs
,
outputs
):
"""
"""
inelegant workaround for ValueError: The truth value of an
inelegant workaround for ValueError: The truth value of an
array with more than one element is ambiguous. Use a.any() or a.all()
array with more than one element is ambiguous. Use a.any() or a.all()
being thrown when trying to remove a matrix from a matrices list
being thrown when trying to remove a matrix from a matrices list
"""
"""
(
x
,
elem
)
=
inputs
(
out
,)
=
outputs
out
[
0
]
=
0
out
[
0
]
=
0
for
y
in
range
(
len
(
x
)):
for
y
in
range
(
len
(
x
)):
if
node
.
inputs
[
0
]
.
ttype
.
values_eq
(
x
[
y
],
elem
):
if
node
.
inputs
[
0
]
.
ttype
.
values_eq
(
x
[
y
],
elem
):
...
@@ -543,7 +556,8 @@ class Length(Op):
...
@@ -543,7 +556,8 @@ class Length(Op):
assert
isinstance
(
x
.
type
,
TypedListType
)
assert
isinstance
(
x
.
type
,
TypedListType
)
return
Apply
(
self
,
[
x
],
[
T
.
scalar
(
dtype
=
'int64'
)])
return
Apply
(
self
,
[
x
],
[
T
.
scalar
(
dtype
=
'int64'
)])
def perform(self, node, x, outputs):
    """Store the number of elements of the typed-list input as an
    int64 scalar in the output cell.
    """
    (out,) = outputs
    # x is the runtime inputs sequence; x[0] is the list itself.
    the_list = x[0]
    out[0] = numpy.asarray(len(the_list), 'int64')
def
__str__
(
self
):
def
__str__
(
self
):
...
@@ -593,7 +607,8 @@ class MakeList(Op):
...
@@ -593,7 +607,8 @@ class MakeList(Op):
return
Apply
(
self
,
a2
,
[
tl
])
return
Apply
(
self
,
a2
,
[
tl
])
def perform(self, node, inputs, outputs):
    """Collect all runtime inputs into a fresh Python list and store it
    in the single output cell.
    """
    (out,) = outputs
    # A new list is built so callers never alias the inputs container.
    out[0] = [elem for elem in inputs]
make_list
=
MakeList
()
make_list
=
MakeList
()
...
...
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论