Commit c89e1bc2
Authored Jul 16, 2015 by Frédéric Bastien

Merge pull request #3107 from harlouci/flake8_v3

Flake8 tensor

Parents: de826376, cceee86e

Showing 11 changed files with 224 additions and 248 deletions (+224 -248)
Changed files:

  theano/sandbox/linalg/tests/test_linalg.py  +10   -0
  theano/tensor/blas.py                       +68  -70
  theano/tensor/blas_c.py                      +7   -9
  theano/tensor/blas_scipy.py                 +11  -11
  theano/tensor/elemwise_cgen.py              +23  -30
  theano/tensor/extra_ops.py                   +1   -0
  theano/tensor/fourier.py                     +6   -5
  theano/tensor/nlinalg.py                    +10  -25
  theano/tensor/opt_uncanonicalize.py          +4   -3
  theano/tensor/type.py                       +84  -85
  theano/tests/test_flake8.py                  +0  -10
theano/sandbox/linalg/tests/test_linalg.py

@@ -174,3 +174,13 @@ def test_tag_solve_triangular():
     for node in f.maker.fgraph.toposort():
         if isinstance(node.op, Solve):
             assert node.op.A_structure == 'upper_triangular'
+
+
+def test_matrix_inverse_solve():
+    if not imported_scipy:
+        raise SkipTest("Scipy needed for the Solve op.")
+    A = theano.tensor.dmatrix('A')
+    b = theano.tensor.dmatrix('b')
+    node = matrix_inverse(A).dot(b).owner
+    [out] = inv_as_solve.transform(node)
+    assert isinstance(out.owner.op, Solve)
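The relocated test exercises Theano's inv_as_solve rewrite: a graph computing matrix_inverse(A).dot(b) is replaced by a single Solve op, which is cheaper and numerically safer than forming the inverse. A minimal NumPy/SciPy sketch of the identity behind the rewrite (illustrative only, not part of the commit):

import numpy as np
from scipy.linalg import solve

rng = np.random.default_rng(0)
A = rng.normal(size=(4, 4)) + 4 * np.eye(4)  # keep A comfortably invertible
b = rng.normal(size=(4, 2))

x_inv = np.linalg.inv(A).dot(b)  # what the unoptimized graph computes
x_solve = solve(A, b)            # what the rewritten graph computes

assert np.allclose(x_inv, x_solve)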
theano/tensor/blas.py

@@ -164,7 +164,7 @@ def default_blas_ldflags():
     global numpy
     try:
         if (hasattr(numpy.distutils, '__config__') and
-            numpy.distutils.__config__):
+                numpy.distutils.__config__):
             # If the old private interface is available use it as it
             # don't print information to the user.
             blas_info = numpy.distutils.__config__.blas_opt_info
@@ -177,7 +177,7 @@ def default_blas_ldflags():
             # ignored"
             # This happen with Python 2.7.3 |EPD 7.3-1 and numpy 1.8.1
-            import numpy.distutils.system_info # noqa
+            import numpy.distutils.system_info  # noqa
             # We need to catch warnings as in some cases NumPy print
             # stuff that we don't want the user to see like this:
@@ -276,7 +276,7 @@ SOMEPATH/Canopy_64bit/User/lib/python2.7/site-packages/numpy/distutils/system_in
     # Using "conda install mkl" will install both, as well as
     # optimized versions of numpy and scipy.
     try:
-        import mkl # noqa
+        import mkl  # noqa
     except ImportError as e:
         _logger.info('Conda mkl is not available: %s', e)
     else:
@@ -319,8 +319,8 @@ SOMEPATH/Canopy_64bit/User/lib/python2.7/site-packages/numpy/distutils/system_in
 AddConfigVar('blas.ldflags',
-        "lib[s] to include for [Fortran] level-3 blas implementation",
-        StrParam(default_blas_ldflags))
+             "lib[s] to include for [Fortran] level-3 blas implementation",
+             StrParam(default_blas_ldflags))
 
 try:
@@ -333,12 +333,10 @@ try:
     # `scipy.linalg.blas.fblas` with `scipy.linalg.blas`.
     # See http://github.com/scipy/scipy/pull/358
     fblas = scipy.linalg.blas
-    _blas_gemv_fns = {
-            numpy.dtype('float32'): fblas.sgemv,
-            numpy.dtype('float64'): fblas.dgemv,
-            numpy.dtype('complex64'): fblas.cgemv,
-            numpy.dtype('complex128'): fblas.zgemv,
-            }
+    _blas_gemv_fns = {numpy.dtype('float32'): fblas.sgemv,
+                      numpy.dtype('float64'): fblas.dgemv,
+                      numpy.dtype('complex64'): fblas.cgemv,
+                      numpy.dtype('complex128'): fblas.zgemv}
 except ImportError as e:
     have_fblas = False
     # This is used in Gemv and ScipyGer. We use CGemv and CGer
@@ -400,8 +398,8 @@ class Gemv(Op):
         # The following is not grounds for error because as long as
         # sizes are 1 at time of perform() there is no problem
         # if x.broadcastable[0] != A.broadcastable[1]:
-        # raise TypeError('broadcastable mismatch between x and A',
-        #         (x.type, A.type))
+        #     raise TypeError('broadcastable mismatch between x and A',
+        #                     (x.type, A.type))
         return Apply(self, [y, alpha, A, x, beta], [y.type()])
 
     def perform(self, node, inputs, out_storage):
@@ -411,9 +409,10 @@ class Gemv(Op):
         gemv = _blas_gemv_fns[y.dtype]
 
         if (A.shape[0] != y.shape[0] or A.shape[1] != x.shape[0]):
-            raise ValueError('Incompatible shapes for gemv '
-                    '(beta * y + alpha * dot(A, x)). y: %s, A: %s, x: %s '
-                    % (y.shape, A.shape, x.shape))
+            raise ValueError(
+                'Incompatible shapes for gemv '
+                '(beta * y + alpha * dot(A, x)). y: %s, A: %s, x: %s '
+                % (y.shape, A.shape, x.shape))
 
         # Here I suppose that A is in c order. If we don't make it
         # explicitly as fortran order, scipy 0.7.2 seam to create
@@ -479,7 +478,7 @@ class Ger(Op):
         alpha = T.as_tensor_variable(alpha)
         if len(set([A.dtype, alpha.dtype, x.dtype, y.dtype])) != 1:
             raise TypeError('ger requires matching dtypes',
-                    (A.dtype, alpha.dtype, x.dtype, y.dtype))
+                            (A.dtype, alpha.dtype, x.dtype, y.dtype))
         if alpha.ndim != 0:
             raise TypeError('ger requires scalar alpha', alpha.type)
         if A.ndim != 2:
@@ -567,13 +566,14 @@ def _ldflags(ldflags_str, libs, flags, libs_dir, include_dir):
         for d in dirs:
             for f in os.listdir(d):
                 if (f.endswith('.so') or f.endswith('.dylib') or
-                    f.endswith('.dll')):
+                        f.endswith('.dll')):
                     if any([f.find(ll) >= 0 for ll in l]):
                         found_dyn = True
         if not found_dyn and dirs:
-            _logger.warning("We did not found a dynamic library into the "
-                    "library_dir of the library we use for blas. If you use "
-                    "ATLAS, make sure to compile it with dynamics library."
-                    )
+            _logger.warning(
+                "We did not found a dynamic library into the "
+                "library_dir of the library we use for blas. If you use "
+                "ATLAS, make sure to compile it with dynamics library.")
     for t in ldflags_str.split():
         # Remove extra quote.
@@ -644,7 +644,7 @@ class GemmRelated(Op):
         return ldflags()
 
     # code_cache_version is built by subclasses from
-    #build_gemm_version
+    # build_gemm_version
 
     def c_compile_args(self):
         return ldflags(libs=False, flags=True)
@@ -673,7 +673,7 @@ class GemmRelated(Op):
     int sx_0, sx_1, sy_0, sy_1, sz_0, sz_1;
     """
 
-    #setup_z_Nz_Sz = None
+    # setup_z_Nz_Sz = None
 
     check_xyz_rank2 = """
         if (PyArray_NDIM(%(_x)s) != 2) {
@@ -823,7 +823,7 @@ class GemmRelated(Op):
         {
     """
 
-    #case_float_ab_constants = None
+    # case_float_ab_constants = None
 
     case_float_gemm = """
         float* x = (float*)PyArray_DATA(%(_x)s);
@@ -856,7 +856,7 @@ class GemmRelated(Op):
         {
     """
 
-    #case_double_ab_constants = None
+    # case_double_ab_constants = None
 
     case_double_gemm = """
         double* x = (double*)PyArray_DATA(%(_x)s);
@@ -1028,10 +1028,10 @@ class Gemm(GemmRelated):
         if not (z.dtype == a.dtype == x.dtype == y.dtype == b.dtype):
             raise TypeError(Gemm.E_mixed,
-                    (z.dtype, a.dtype, x.dtype, y.dtype, b.dtype))
+                            (z.dtype, a.dtype, x.dtype, y.dtype, b.dtype))
 
-        if (not z.dtype.startswith('float')
-                and not z.dtype.startswith('complex')):
+        if (not z.dtype.startswith('float') and
+                not z.dtype.startswith('complex')):
             raise TypeError(Gemm.E_float, (z.dtype))
 
         output = z.type()
@@ -1173,8 +1173,8 @@ class Gemm(GemmRelated):
         _z, _a, _x, _y, _b = inp
         _zout, = out
         if node.inputs[0].type.dtype.startswith('complex'):
-            raise utils.MethodNotDefined('%s.c_code' \
-                    % self.__class__.__name__)
+            raise utils.MethodNotDefined('%s.c_code'
+                                         % self.__class__.__name__)
         if not config.blas.ldflags:
             return super(Gemm, self).c_code(node, name,
                                             (_z, _a, _x, _y, _b), (_zout, ),
@@ -1203,9 +1203,9 @@ def res_is_a(node, op, maxclients=None):
     else:
         retval = True
 
-    return node.owner \
-            and node.owner.op == op \
-            and retval
+    return (node.owner and
+            node.owner.op == op and
+            retval)
 
 
 def _as_scalar(res, dtype=None):
@@ -1235,16 +1235,16 @@ def _as_scalar(res, dtype=None):
 
 def _is_real_matrix(res):
-    return res.type.dtype in ('float32', 'float64') \
-            and res.type.ndim == 2 \
-            and res.type.broadcastable[0] == False \
-            and res.type.broadcastable[1] == False  # cope with tuple vs. list
+    return (res.type.dtype in ('float32', 'float64') and
+            res.type.ndim == 2 and
+            res.type.broadcastable[0] == False and
+            res.type.broadcastable[1] == False)  # cope with tuple vs. list
 
 
 def _is_real_vector(res):
-    return res.type.dtype in ('float32', 'float64') \
-            and res.type.ndim == 1 \
-            and res.type.broadcastable[0] == False
+    return (res.type.dtype in ('float32', 'float64') and
+            res.type.ndim == 1 and
+            res.type.broadcastable[0] == False)
 
 
 def _beta_L_plus_alpha_M(beta, L, alpha, M, recurse_flip=True):
@@ -1262,8 +1262,8 @@ def _beta_L_plus_alpha_M(beta, L, alpha, M, recurse_flip=True):
     # it also might be the case that there is a dimshuffle between the +
     # and the dot22. local_dot_to_dot22 in particular will put in such things.
     if (M.owner and isinstance(M.owner.op, T.DimShuffle) and
-        M.owner.inputs[0].owner and
-        isinstance(M.owner.inputs[0].owner.op, Dot22)):
+            M.owner.inputs[0].owner and
+            isinstance(M.owner.inputs[0].owner.op, Dot22)):
         MM = M.owner.inputs[0]
         if M.owner.op.new_order == (0,):
             # it is making a column MM into a vector
@@ -1493,7 +1493,7 @@ def _gemm_from_factored_list(lst):
             assert len(gemm_of_sM_list) == 1
             add_inputs = [item_to_var(input)
-                    for k, input in enumerate(lst) if k not in (i, j)]
+                          for k, input in enumerate(lst) if k not in (i, j)]
             add_inputs.extend(gemm_of_sM_list)
             if len(add_inputs) > 1:
                 rval = [T.add(*add_inputs)]
@@ -1583,7 +1583,7 @@ class GemmOptimizer(Optimizer):
                                (theano.scalar.Add, theano.scalar.Sub,
                                 theano.scalar.Neg, theano.scalar.Mul))):
                 continue
-            if not node in fgraph.apply_nodes:
+            if node not in fgraph.apply_nodes:
                 # This mean that we already removed this node from
                 # the graph
                 continue
@@ -1592,7 +1592,7 @@ class GemmOptimizer(Optimizer):
                 time_canonicalize += time1
                 time_factor_can += time2
                 time_factor_list += time3
-            except InconsistencyError as e:
+            except InconsistencyError:
                 nb_inconsistency_make += 1
                 continue
             if new_outputs:
@@ -1725,8 +1725,8 @@ class Dot22(GemmRelated):
         _x, _y = inp
         _zout, = out
         if node.inputs[0].type.dtype.startswith('complex'):
-            raise utils.MethodNotDefined('%s.c_code' \
-                    % self.__class__.__name__)
+            raise utils.MethodNotDefined('%s.c_code'
+                                         % self.__class__.__name__)
         if len(self.c_libraries()) <= 0:
             return super(Dot22, self).c_code(node, name, (_x, _y),
                                              (_zout, ), sub)
@@ -1895,17 +1895,16 @@ blas_optdb.register('local_dot_to_dot22',
                     in2out(local_dot_to_dot22),
                     0, 'fast_run', 'fast_compile')
 blas_optdb.register('gemm_optimizer',
-        GemmOptimizer(),
-        10, 'fast_run')
+                    GemmOptimizer(),
+                    10, 'fast_run')
 blas_optdb.register('local_gemm_to_gemv',
-        EquilibriumOptimizer([
-            local_gemm_to_gemv,
-            local_gemm_to_ger,
-            local_dot22_to_ger_or_gemv,
-            local_dimshuffle_lift],
-            max_use_ratio=5,
-            ignore_newtrees=False),
-        15, 'fast_run')
+                    EquilibriumOptimizer([local_gemm_to_gemv,
+                                          local_gemm_to_ger,
+                                          local_dot22_to_ger_or_gemv,
+                                          local_dimshuffle_lift],
+                                         max_use_ratio=5,
+                                         ignore_newtrees=False),
+                    15, 'fast_run')
 
 # After destroyhandler(49.5) but before we try to make elemwise things
@@ -1936,12 +1935,12 @@ class Dot22Scalar(GemmRelated):
         if not (a.dtype == x.dtype == y.dtype):
             raise TypeError('Dot22Scalar requires matching dtypes',
-                    (a.dtype, x.dtype, y.dtype))
+                            (a.dtype, x.dtype, y.dtype))
 
-        if (not a.dtype.startswith('float')
-                and not a.dtype.startswith('complex')):
+        if (not a.dtype.startswith('float') and
+                not a.dtype.startswith('complex')):
             raise TypeError('Dot22Scalar requires float or complex args',
-                    a.dtype)
+                            a.dtype)
 
         bz = [x.type.broadcastable[0], y.type.broadcastable[1]]
         outputs = [T.tensor(x.type.dtype, bz)]
@@ -1992,8 +1991,8 @@ class Dot22Scalar(GemmRelated):
         _x, _y, _a = inp
         _zout, = out
         if node.inputs[0].type.dtype.startswith('complex'):
-            raise utils.MethodNotDefined('%s.c_code' \
-                    % self.__class__.__name__)
+            raise utils.MethodNotDefined('%s.c_code'
+                                         % self.__class__.__name__)
         if len(self.c_libraries()) <= 0:
             return super(Dot22Scalar, self).c_code(node, name, (_x, _y),
                                                    (_zout, ), sub)
@@ -2051,7 +2050,7 @@ def local_dot22_to_dot22scalar(node):
     # The canonizer should have merged those mul together.
     i_mul = [x.owner and
              x.owner.op == T.mul and
             any([_as_scalar(x_i, dtype=d.dtype)
-                 for x_i in x.owner.inputs])
+                  for x_i in x.owner.inputs])
             for x in node.inputs]
     if not any(i_mul):
         # no scalar in input and no multiplication
@@ -2065,8 +2064,7 @@ def local_dot22_to_dot22scalar(node):
     scalar_idx = -1
     for i, x in enumerate(m.owner.inputs):
         if (_as_scalar(x, dtype=d.dtype) and
-                (theano.scalar.upcast(x.type.dtype,
-                                      d.type.dtype)
-                 == d.type.dtype)):
+                (theano.scalar.upcast(x.type.dtype, d.type.dtype) ==
+                 d.type.dtype)):
             scalar_idx = i
             break
@@ -2103,8 +2101,8 @@ def local_dot22_to_dot22scalar(node):
             break
     if scalar_idx < 0:
         _logger.info('Not optimizing dot22 with inputs %s %s, as the type '
-                'of the scalar cannot be upcasted to the matrix type',
-                node.inputs, [x.type for x in node.inputs])
+                     'of the scalar cannot be upcasted to the matrix type',
+                     node.inputs, [x.type for x in node.inputs])
         return False
     assert scalar_idx < len(node.inputs)
     s = node.inputs[scalar_idx]
@@ -2128,8 +2126,8 @@ blas_optdb.register('local_dot22_to_dot22scalar',
                     11, 'fast_run')
 
 
-#from opt import register_specialize, register_canonicalize
-#@register_specialize
+# from opt import register_specialize, register_canonicalize
+# @register_specialize
 @local_optimizer([T.sub, T.add])
 def local_print_as_we_go_along(node):
     if node.op in (T.sub, T.add):
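The reformatted shape check in Gemv.perform encodes the usual BLAS level-2 contract: gemv computes z = beta * y + alpha * dot(A, x), so A must be (M, N), x of length N, and y of length M. A small NumPy sketch of that rule (illustrative only, not part of the commit; the helper name is made up):

import numpy as np

def gemv(y, alpha, A, x, beta):
    # Same precondition the Op enforces before calling BLAS.
    if A.shape[0] != y.shape[0] or A.shape[1] != x.shape[0]:
        raise ValueError('Incompatible shapes for gemv '
                         '(beta * y + alpha * dot(A, x)). y: %s, A: %s, x: %s '
                         % (y.shape, A.shape, x.shape))
    return beta * y + alpha * A.dot(x)

A = np.arange(6.0).reshape(2, 3)
x = np.ones(3)
y = np.zeros(2)
print(gemv(y, 0.5, A, x, 1.0))  # [1.5 6. ]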
theano/tensor/blas_c.py

@@ -4,8 +4,7 @@ from theano import config
 from theano.tensor.opt import in2out
 from theano.tensor.blas import ldflags, blas_header_text, blas_header_version
-from theano.tensor.blas import (blas_optdb, optdb, local_optimizer,
-                                EquilibriumOptimizer)
+from theano.tensor.blas import blas_optdb, optdb, local_optimizer
 from theano.tensor.blas import Ger, ger, ger_destructive
 from theano.tensor.blas import Gemv, gemv_inplace, gemv_no_inplace
 from theano.tensor import basic as T
@@ -268,7 +267,7 @@ def ger_c_code(A, a, x, y, Z, destructive, fail):
                 (double*)x_data, &Sx,
                 (double*)y_data, &Sy,
-                (double*)(PyArray_DATA(%(Z)s)), &Sz1); 
+                (double*)(PyArray_DATA(%(Z)s)), &Sz1);
             }
             else {
@@ -610,7 +609,7 @@ def gemv_c_code(aa, xx, yy, zz, alpha, beta, destructive, fail,
         // so Sx1 == 1 is required for safety.
         if (Nx0 == 1 && Sx1 == 1)
         {
-            zz_data[0] = fbeta*zz_data[0] + alpha*sdot_(&Nx1, 
+            zz_data[0] = fbeta*zz_data[0] + alpha*sdot_(&Nx1,
                 (float*)(PyArray_DATA(%(xx)s)), &Sx1,
                 (float*)yy_data, &Sy);
         }
@@ -633,7 +632,7 @@ def gemv_c_code(aa, xx, yy, zz, alpha, beta, destructive, fail,
         // so Sx1 == 1 is required for safety.
         if (Nx0 == 1 && Sx1 == 1)
         {
-            zz_data[0] = dbeta*zz_data[0] + alpha*ddot_(&Nx1, 
+            zz_data[0] = dbeta*zz_data[0] + alpha*ddot_(&Nx1,
                 (double*)(PyArray_DATA(%(xx)s)), &Sx1,
                 (double*)yy_data, &Sy);
         }
@@ -732,8 +731,7 @@ def check_force_gemv_init():
             gemv_no_inplace(aa, 1., xx, yy, 0.),
             theano.compile.Mode(optimizer='fast_compile').excluding(
                 'gpu', 'gpuarray'),
-            profile=False
-            )
+            profile=False)
     finally:
         theano.config.compute_test_value = tv
         theano.config.compute_test_value_opt = tvo
@@ -742,11 +740,11 @@ def check_force_gemv_init():
     # then we want gemv_c_code to initiliaze the memory to 0 so that we
    # don't inadvertantly introduce NaNs to the users data.
     aa_data = numpy.array(float('NaN') * numpy.ones((2,)),
-            dtype=theano.config.floatX)
+                          dtype=theano.config.floatX)
     yy_data = numpy.array(numpy.ones((2,)) * 2,
-            dtype=theano.config.floatX)
+                          dtype=theano.config.floatX)
     xx_data = numpy.array(
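The two gemv_c_code hunks sit in the degenerate branch where the matrix has a single row (Nx0 == 1); there gemv collapses to a single dot product, which the C code hands to sdot_/ddot_. A plain NumPy restatement of that case (illustrative only, not part of the commit):

import numpy as np

A = np.array([[1.0, 2.0, 3.0]])  # Nx0 == 1: a one-row matrix
y = np.array([2.0, 0.5, 1.0])
z = np.array([10.0])
alpha, beta = 2.0, 0.5

# gemv on a one-row matrix is a single dot product:
z[0] = beta * z[0] + alpha * np.dot(A[0], y)
print(z)  # [17.]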
theano/tensor/blas_scipy.py

@@ -12,11 +12,11 @@ from theano.tensor.opt import in2out
 
 if have_fblas:
     from theano.tensor.blas import fblas
     _blas_ger_fns = {
-            numpy.dtype('float32'): fblas.sger,
-            numpy.dtype('float64'): fblas.dger,
-            numpy.dtype('complex64'): fblas.cgeru,
-            numpy.dtype('complex128'): fblas.zgeru,
-            }
+        numpy.dtype('float32'): fblas.sger,
+        numpy.dtype('float64'): fblas.dger,
+        numpy.dtype('complex64'): fblas.cgeru,
+        numpy.dtype('complex128'): fblas.zgeru,
+    }
 
 
 class ScipyGer(Ger):
@@ -47,10 +47,10 @@ class ScipyGer(Ger):
                 A = A.copy()
             elif A.flags['C_CONTIGUOUS']:
                 A = local_ger(calpha[0], cy[0], cx[0], a=A.T,
-                        overwrite_a=int(self.destructive)).T
+                              overwrite_a=int(self.destructive)).T
             else:
                 A = local_ger(calpha[0], cx[0], cy[0], a=A,
-                        overwrite_a=int(self.destructive))
+                              overwrite_a=int(self.destructive))
             cZ[0] = A
             for o in node_output_compute:
                 o[0] = True
@@ -87,10 +87,10 @@ if have_fblas:
     # precedence. Once the original Ger is replaced, then these optimizations
     # have no effect.
     blas_optdb.register('scipy_blas',
-            use_scipy_blas, 100, 'fast_run')
+                        use_scipy_blas, 100, 'fast_run')
 
     # this matches the InplaceBlasOpt defined in blas.py
     optdb.register('make_scipy_blas_destructive',
-            make_scipy_blas_destructive, 70.0, 'fast_run', 'inplace')
+                   make_scipy_blas_destructive, 70.0, 'fast_run', 'inplace')
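For reference, the rank-1 update ScipyGer performs is A + alpha * outer(x, y); the C_CONTIGUOUS branch above applies the update to A.T and transposes back because BLAS ger expects Fortran (column-major) storage. A tiny NumPy sketch of the update itself (illustrative only, not part of the commit):

import numpy as np

alpha = 2.0
A = np.zeros((2, 3))
x = np.array([1.0, 2.0])
y = np.array([3.0, 4.0, 5.0])

# ger: A <- A + alpha * x y^T
print(A + alpha * np.outer(x, y))  # [[ 6.  8. 10.], [12. 16. 20.]]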
theano/tensor/elemwise_cgen.py

@@ -276,7 +276,7 @@ def make_reordered_loop(init_loop_orders, olv_index, dtypes, inner_task, sub, op
         if index != 'x':
             order_loops += """
         %(ovar)s_loops_it->first = abs(PyArray_STRIDES(%(ovar)s)[%(index)i]);
-            """ % locals()
+        """ % locals()
         else:
             # Stride is 0 when dimension is broadcastable
             order_loops += """
@@ -311,15 +311,13 @@ def make_reordered_loop(init_loop_orders, olv_index, dtypes, inner_task, sub, op
                 total = "%(var)s_n%(candidate)s" % locals()
                 break
         else:
-            total = '1';
+            total = '1'
 
         totals.append(total)
 
     declare_totals = """
     int init_totals[%(nnested)s] = {%(totals)s};
-    """ % dict(nnested=nnested,
-               totals=', '.join(totals)
-               )
+    """ % dict(nnested=nnested, totals=', '.join(totals))
 
     # Sort totals to match the new order that was computed by sorting
     # the loop vector. One integer variable per loop is declared.
@@ -355,13 +353,11 @@ def make_reordered_loop(init_loop_orders, olv_index, dtypes, inner_task, sub, op
     declare_strides = """
     int init_strides[%(nvars)i][%(nnested)i] = {
         %(strides)s
-    };""" % dict(nvars=nvars,
-                 nnested=nnested,
-                 strides=', \n'.join(', '.join(get_loop_strides(lo, i))
-                                     for i, lo in enumerate(init_loop_orders)
-                                     if len(lo) > 0)
-                 )
+    };""" % dict(nvars=nvars, nnested=nnested,
+                 strides=', \n'.join(', '.join(get_loop_strides(lo, i))
+                                     for i, lo in enumerate(init_loop_orders)
+                                     if len(lo) > 0))
 
     # Declare (sorted) stride and for each variable
     # we iterate from innermost loop to outermost loop
@@ -385,9 +381,9 @@ def make_reordered_loop(init_loop_orders, olv_index, dtypes, inner_task, sub, op
         declare_iter += "%(var)s_iter = (%(dtype)s*)(PyArray_DATA(%(var)s));\n" % locals()
 
     pointer_update = ''
-    for j, dtype in enumerate(dtypes): 
+    for j, dtype in enumerate(dtypes):
         var = sub["lv%i" % j]
-        pointer_update += "%(dtype)s &%(var)s_i = * ( %(var)s_iter" % locals() 
+        pointer_update += "%(dtype)s &%(var)s_i = * ( %(var)s_iter" % locals()
 
     tot_jump = ''
     for i in reversed(range(nnested)):
         iterv = 'ITER_%i' % i
@@ -401,7 +397,7 @@ def make_reordered_loop(init_loop_orders, olv_index, dtypes, inner_task, sub, op
         update = ''
         forloop = ''
         # The pointers are defined only in the most inner loop
-        if i == nnested - 1: 
+        if i == nnested - 1:
             update = pointer_update
         if i == 0:
             if openmp:
@@ -413,19 +409,17 @@ def make_reordered_loop(init_loop_orders, olv_index, dtypes, inner_task, sub, op
         %(forloop)s
         { // begin loop %(i)i
             %(update)s
-            %(loop)s 
+            %(loop)s
         } // end loop %(i)i
         """ % locals()
 
-    return '\n'.join(['{',
-                      order_loops,
-                      declare_totals,
-                      declare_strides,
-                      declare_iter,
-                      loop,
-                      '}\n',
-                      ])
+    return '\n'.join(['{',
+                      order_loops,
+                      declare_totals,
+                      declare_strides,
+                      declare_iter,
+                      loop,
+                      '}\n'])
 
 # print make_declare(((0, 1, 2, 3), ('x', 1, 0, 3), ('x', 'x', 'x', 0)),
 #                    ('double', 'int', 'float'),
@@ -451,16 +445,16 @@ def make_reordered_loop(init_loop_orders, olv_index, dtypes, inner_task, sub, op
 ##################
-### DimShuffle ###
+#   DimShuffle   #
 ##################
 
 
 #################
-### Broadcast ###
+#   Broadcast   #
 #################
 
 
 ################
-### CAReduce ###
+#   CAReduce   #
 ################
@@ -527,4 +521,3 @@ def make_loop_careduce(loop_orders, dtypes, loop_tasks, sub):
     s += loop_tasks[-1]
 
     return "{%s}" % s
-
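The code reindented in make_reordered_loop implements a cache-friendliness heuristic: each loop records the absolute stride of the output variable (the PyArray_STRIDES lookup above) and the loops are then sorted so the innermost one walks the most contiguous memory. A small NumPy sketch of the ordering rule (illustrative only, not part of the commit):

import numpy as np

a = np.zeros((256, 256))  # C-contiguous: strides are (2048, 8) bytes

# Outermost loop gets the largest stride, innermost the smallest.
order = np.argsort([-abs(s) for s in a.strides])
print([int(i) for i in order])  # [0, 1]: rows outside, columns inside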
theano/tensor/extra_ops.py

@@ -5,6 +5,7 @@ from six.moves import xrange
 
 import theano
 from theano.tensor import basic
+from theano.tensor import nlinalg  # noqa
 from theano import gof, scalar
 from theano.gradient import DisconnectedType
 
 tensor = basic
theano/tensor/fourier.py

@@ -62,7 +62,7 @@ class Fourier(gof.Op):
                     (axis.data < 0 or axis.data > a.ndim - 1)):
                 raise TypeError('%s: index of the transformed axis must be'
                                 ' a scalar not smaller than 0 and smaller than'
-                        ' dimension of array' % self.__class__.__name__)
+                                ' dimension of array' % self.__class__.__name__)
         if n is None:
             n = a.shape[axis]
         n = tensor.as_tensor_variable(n)
@@ -78,7 +78,7 @@ class Fourier(gof.Op):
                                 ' strictly positive scalar'
                                 % self.__class__.__name__)
         return gof.Apply(self, [a, n, axis], [tensor.TensorType('complex128',
-                         a.type.broadcastable)()])
+                                              a.type.broadcastable)()])
 
     def infer_shape(self, node, in_shapes):
         shape_a = in_shapes[0]
@@ -87,8 +87,8 @@ class Fourier(gof.Op):
         if len(shape_a) == 1:
             return [(n,)]
         elif isinstance(axis, tensor.TensorConstant):
-            out_shape = list(shape_a[0:axis.data.item()]) + [n] + \
-                list(shape_a[axis.data + 1:])
+            out_shape = (list(shape_a[0:axis.data.item()]) + [n] +
+                         list(shape_a[axis.data + 1:]))
         else:
             l = len(shape_a)
             shape_a = tensor.stack(*shape_a)
@@ -136,7 +136,8 @@ class Fourier(gof.Op):
         flip_shape = list(numpy.arange(0, a.ndim)[::-1])
         res = res.dimshuffle(flip_shape)
         res = tensor.switch(tensor.lt(n, tensor.shape(a)[axis]),
-                  tensor.set_subtensor(res[n::, ], 0, False, False), res)
+                            tensor.set_subtensor(res[n::, ], 0, False, False),
+                            res)
         res = res.dimshuffle(flip_shape)
 
         # insures that gradient shape conforms to input shape:
theano/tensor/nlinalg.py

 from __future__ import print_function
 import logging
-import theano
-logger = logging.getLogger(__name__)
 
 import numpy
 from six.moves import xrange
 
+import theano
+from theano.tensor import as_tensor_variable
 from theano.gof import Op, Apply
-from theano.tensor import as_tensor_variable, dot, DimShuffle, Dot
-from theano.tensor.blas import Dot22
-from theano.tensor.opt import (register_stabilize,
-                               register_specialize, register_canonicalize)
-from theano.gof import local_optimizer
-from theano.gof.opt import Optimizer
 from theano.gradient import DisconnectedType
 from theano.tensor import basic as tensor
 
+logger = logging.getLogger(__name__)
+
 
 class MatrixPinv(Op):
     """Computes the pseudo-inverse of a matrix :math:`A`.
@@ -427,8 +423,10 @@ class EighGrad(Op):
         N = x.shape[0]
         outer = numpy.outer
-        G = lambda n: sum(v[:, m] * V.T[n].dot(v[:, m]) / (w[n] - w[m])
-                          for m in xrange(N) if m != n)
+
+        def G(n):
+            return sum(v[:, m] * V.T[n].dot(v[:, m]) / (w[n] - w[m])
+                       for m in xrange(N) if m != n)
+
         g = sum(outer(v[:, n], v[:, n] * W[n] + G(n))
                 for n in xrange(N))
@@ -641,16 +639,6 @@ def svd(a, full_matrices=1, compute_uv=1):
     return SVD(full_matrices, compute_uv)(a)
 
 
-def test_matrix_inverse_solve():
-    if not imported_scipy:
-        raise SkipTest("Scipy needed for the Solve op.")
-    A = theano.tensor.dmatrix('A')
-    b = theano.tensor.dmatrix('b')
-    node = matrix_inverse(A).dot(b).owner
-    [out] = inv_as_solve.transform(node)
-    assert isinstance(out.owner.op, Solve)
-
-
 class lstsq(Op):
 
     def __eq__(self, other):
         return type(self) == type(other)
@@ -670,9 +658,6 @@ class lstsq(Op):
                                  theano.tensor.lscalar(),
                                  theano.tensor.dvector()])
 
     def perform(self, node, inputs, outputs):
-        x = inputs[0]
-        y = inputs[1]
-        rcond = inputs[2]
         zz = numpy.linalg.lstsq(inputs[0], inputs[1], inputs[2])
         outputs[0][0] = zz[0]
         outputs[1][0] = zz[1]
@@ -703,7 +688,7 @@ def norm(x, ord):
             return x[x.nonzero()].shape[0]
         else:
             try:
-                z = tensor.sum(abs(x ** ord)) ** (1. / ord) 
+                z = tensor.sum(abs(x ** ord)) ** (1. / ord)
             except TypeError:
                 raise ValueError("Invalid norm order for vectors.")
             return z
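The norm(x, ord) change touches the general vector case, z = sum(abs(x ** ord)) ** (1. / ord). For positive even ord this matches the usual p-norm; a quick NumPy check for ord = 2 (illustrative only, not part of the commit):

import numpy as np

x = np.array([3.0, -4.0])
z = np.sum(np.abs(x ** 2)) ** (1. / 2)
assert np.isclose(z, np.linalg.norm(x, 2))  # both give 5.0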
theano/tensor/opt_uncanonicalize.py

@@ -33,7 +33,6 @@ supposed to be canonical.
 # TODO: intelligent merge for mul/add
 # TODO: 0*x -> 0
 import logging
-_logger = logging.getLogger('theano.tensor.opt')
 
 from theano import gof
 from theano.tensor.elemwise import CAReduce
@@ -44,6 +43,8 @@ from theano.tensor.basic import (get_scalar_constant_value,
 from theano.tensor.opt import register_uncanonicalize
 from theano import scalar as scal
 
+_logger = logging.getLogger('theano.tensor.opt')
+
 
 @register_uncanonicalize
 @gof.local_optimizer([T._max_and_argmax])
@@ -81,8 +82,8 @@ def local_max_to_min(node):
     if node.op == T.neg and node.inputs[0].owner:
         max = node.inputs[0]
         if (max.owner and
-            isinstance(max.owner.op, CAReduce) and
-            max.owner.op.scalar_op == scal.maximum):
+                isinstance(max.owner.op, CAReduce) and
+                max.owner.op.scalar_op == scal.maximum):
             neg = max.owner.inputs[0]
             if neg.owner and neg.owner.op == T.neg:
                 return [CAReduce(scal.minimum,
theano/tensor/type.py

 import logging
-_logger = logging.getLogger("theano.tensor.type")
 
 import warnings
 import numpy
 import theano
 from theano import config
+from theano.gof import Constant, hashtype, Type, Variable
 from theano.gof.utils import MethodNotDefined
-from theano.gof import hashtype, Type, Variable
 from theano import scalar as scal
 
+_logger = logging.getLogger("theano.tensor.type")
+
 
 class TensorType(Type):
     """Symbolic `Type` representing a numpy.ndarray value."""
@@ -39,7 +40,7 @@ class TensorType(Type):
         if self.dtype == 'floatX':
             self.dtype = config.floatX
         # broadcastable is immutable, and all elements are either
-        ## True or False
+        # True or False
         self.broadcastable = tuple(bool(b) for b in broadcastable)
         self.dtype_specs()  # error checking is done there
         self.name = name
@@ -74,17 +75,17 @@ class TensorType(Type):
         # input (typical mistake, especially with shared variables).
         if isinstance(data, Variable):
             raise TypeError(
-                    'Expected an array-like object, but found a Variable: '
-                    'maybe you are trying to call a function on a (possibly '
-                    'shared) variable instead of a numeric array?')
+                'Expected an array-like object, but found a Variable: '
+                'maybe you are trying to call a function on a (possibly '
+                'shared) variable instead of a numeric array?')
 
-        if ((type(data) is numpy.ndarray)
-                and (data.dtype == self.numpy_dtype)):
+        if ((type(data) is numpy.ndarray) and
+                (data.dtype == self.numpy_dtype)):
             if data.dtype.num != self.numpy_dtype.num:
                 data = theano._asarray(data, dtype=self.dtype)
             # -- now fall through to ndim check
-        elif ((type(data) is numpy.memmap)
-                and (data.dtype == self.numpy_dtype)):
+        elif ((type(data) is numpy.memmap) and
+                (data.dtype == self.numpy_dtype)):
             # numpy.memmap is a "safe" subclass of ndarray,
             # so we can use it whereever we expect a base ndarray.
             # however, casting it would defeat the purpose of not
@@ -95,11 +96,11 @@ class TensorType(Type):
                 # we raise a meaningful TypeError.
                 if not (type(data) is numpy.ndarray):
                     raise TypeError("%s expected a ndarray object." % self,
-                            data, type(data))
+                                    data, type(data))
                 if data.dtype != self.numpy_dtype:
                     raise TypeError(("%s expected a ndarray object with "
-                            "dtype = %s (got %s).") % (
-                            self, self.numpy_dtype, data.dtype))
+                                     "dtype = %s (got %s).")
+                                    % (self, self.numpy_dtype, data.dtype))
                 assert False, "This point should never be reached."
         else:
             if allow_downcast:
@@ -185,7 +186,7 @@ class TensorType(Type):
                         " dimension.", data.shape, self.broadcastable)
                 i += 1
         if (self.filter_checks_isfinite and
-            not numpy.all(numpy.isfinite(data))):
+                not numpy.all(numpy.isfinite(data))):
             raise ValueError("non-finite elements not allowed")
         return data
@@ -208,14 +209,12 @@ class TensorType(Type):
             return other
         raise TypeError(
-                'Cannot convert Type %(othertype)s '
-                '(of Variable %(other)s) into Type %(self)s. '
-                'You can try to manually convert %(other)s into a %(self)s.'
-                % dict(othertype=other.type,
-                       other=other,
-                       self=self)
-                )
+            'Cannot convert Type %(othertype)s '
+            '(of Variable %(other)s) into Type %(self)s. '
+            'You can try to manually convert %(other)s into a %(self)s.'
+            % dict(othertype=other.type, other=other, self=self))
 
     def value_validity_msg(self, a):
         try:
@@ -247,10 +246,10 @@ class TensorType(Type):
                 'int64': (int, 'npy_int64', 'NPY_INT64'),
                 'complex128': (complex, 'theano_complex128', 'NPY_COMPLEX128'),
                 'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64')
-                }[self.dtype]
+            }[self.dtype]
         except KeyError:
             raise TypeError("Unsupported dtype for %s: %s"
-                    % (self.__class__.__name__, self.dtype))
+                            % (self.__class__.__name__, self.dtype))
 
     def to_scalar_type(self):
         return scal.get_scalar_type(dtype=self.dtype)
@@ -261,11 +260,11 @@ class TensorType(Type):
                 and other.broadcastable == self.broadcastable
 
     def convert_variable(self, var):
-        if (type(self) == type(var.type) and
+        if (type(self) == type(var.type) and  # noqa
                 self.dtype == var.type.dtype and
                 self.ndim == var.type.ndim and
                 all(sb == ob or ob for sb, ob in zip(self.broadcastable,
-                    var.type.broadcastable))):
+                                                     var.type.broadcastable))):
             return theano.tensor.patternbroadcast(var, self.broadcastable)
 
     @staticmethod
@@ -351,7 +350,7 @@ class TensorType(Type):
                 rtol = 1.0000000000000001e-05
             atol = 1e-8
             cmp_elemwise = (numpy.absolute(a - b) <=
-                    (atol + rtol * numpy.absolute(b)))
+                            (atol + rtol * numpy.absolute(b)))
 
             # Find places where both a and b have missing values.
             both_missing = a_missing * numpy.isnan(b)
@@ -361,9 +360,9 @@ class TensorType(Type):
             # cmp_elemwise is weird when we have inf and -inf.
             # set it to False
             cmp_elemwise = numpy.where(
-                    both_inf & cmp_elemwise,
-                    a == b,
-                    cmp_elemwise)
+                both_inf & cmp_elemwise,
+                a == b,
+                cmp_elemwise)
 
             # check the sign of the inf
             both_inf = numpy.where(both_inf, (a == b), both_inf)
@@ -383,7 +382,7 @@ class TensorType(Type):
         return hashtype(self) ^ hash(self.dtype) ^ hash(self.broadcastable)
 
     ndim = property(lambda self: len(self.broadcastable),
-            doc="number of dimensions")
+                    doc="number of dimensions")
     """Number of dimensions
 
     This read-only property is the preferred way to get the number of
@@ -407,10 +406,10 @@ class TensorType(Type):
         else:
             b = self.broadcastable
         named_broadcastable = {(): 'scalar',
-                (False,): 'vector',
-                (False, True): 'col',
-                (True, False): 'row',
-                (False, False): 'matrix'}
+                               (False,): 'vector',
+                               (False, True): 'col',
+                               (True, False): 'row',
+                               (False, False): 'matrix'}
         if b in named_broadcastable:
             bcast = named_broadcastable[b]
         else:
@@ -422,7 +421,7 @@ class TensorType(Type):
     def __repr__(self):
         return str(self)
-        #"TensorType{%s, %s}" % (str(self.dtype), str(self.broadcastable))
+        # "TensorType{%s, %s}" % (str(self.dtype), str(self.broadcastable))
 
     def c_declare(self, name, sub, check_input=True):
         """Override `CLinkerType.c_declare` """
@@ -636,13 +635,13 @@ def values_eq_approx_always_true(a, b):
 # Register TensorType C code for ViewOp.
 theano.compile.register_view_op_c_code(
-        TensorType,
-        """
-        Py_XDECREF(%(oname)s);
-        %(oname)s = %(iname)s;
-        Py_XINCREF(%(oname)s);
-        """,
-        version=1)
+    TensorType,
+    """
+    Py_XDECREF(%(oname)s);
+    %(oname)s = %(iname)s;
+    Py_XINCREF(%(oname)s);
+    """,
+    version=1)
 
 
 # Register TensorType C code for Shape Op.
@@ -665,51 +664,51 @@ theano.compile.register_shape_c_code(
 # Register TensorType C code for ViewOp.
 theano.compile.register_shape_i_c_code(
-        TensorType,
-        """
-        if(!%(oname)s)
-            %(oname)s=(PyArrayObject*)PyArray_EMPTY(0, NULL, NPY_INT64, 0);
-        ((npy_int64*)PyArray_DATA(%(oname)s))[0]=PyArray_DIMS(%(iname)s)[%(i)s];
-        """,
-        """
-        if (%(i)s>=PyArray_NDIM(%(iname)s)){
-            PyErr_SetString(PyExc_TypeError,
-                "Number of dimensions lower than expected");
-            %(fail)s
-        }
-        """,
-        version=3)
+    TensorType,
+    """
+    if(!%(oname)s)
+        %(oname)s=(PyArrayObject*)PyArray_EMPTY(0, NULL, NPY_INT64, 0);
+    ((npy_int64*)PyArray_DATA(%(oname)s))[0]=PyArray_DIMS(%(iname)s)[%(i)s];
+    """,
+    """
+    if (%(i)s>=PyArray_NDIM(%(iname)s)){
+        PyErr_SetString(PyExc_TypeError,
+            "Number of dimensions lower than expected");
+        %(fail)s
+    }
+    """,
+    version=3)
 
 # Register TensorType C code for DeepCopyOp
 theano.compile.register_deep_copy_op_c_code(
-        TensorType,
-        """
-        int alloc = %(oname)s == NULL;
-        for(int i=0; !alloc && i<PyArray_NDIM(%(oname)s); i++) {
-            if(PyArray_DIMS(%(iname)s)[i] != PyArray_DIMS(%(oname)s)[i]) {
-                alloc = true;
-                break;
-            }
-        }
-        if(alloc) {
-            Py_XDECREF(%(oname)s);
-            %(oname)s = (PyArrayObject*)PyArray_NewCopy(%(iname)s,
-                                                        NPY_ANYORDER);
-            if (!%(oname)s)
-            {
-                PyErr_SetString(PyExc_ValueError,
-                                "DeepCopyOp: the copy failed!");
-                %(fail)s;
-            }
-        } else {
-            if(PyArray_CopyInto(%(oname)s, %(iname)s)){
-                PyErr_SetString(PyExc_ValueError,
-            "DeepCopyOp: the copy failed into already allocated space!");
-                %(fail)s;
-            }
-        }
-        """,
-        version=2)
+    TensorType,
+    """
+    int alloc = %(oname)s == NULL;
+    for(int i=0; !alloc && i<PyArray_NDIM(%(oname)s); i++) {
+        if(PyArray_DIMS(%(iname)s)[i] != PyArray_DIMS(%(oname)s)[i]) {
+            alloc = true;
+            break;
+        }
+    }
+    if(alloc) {
+        Py_XDECREF(%(oname)s);
+        %(oname)s = (PyArrayObject*)PyArray_NewCopy(%(iname)s,
+                                                    NPY_ANYORDER);
+        if (!%(oname)s)
+        {
+            PyErr_SetString(PyExc_ValueError,
+                            "DeepCopyOp: the copy failed!");
+            %(fail)s;
+        }
+    } else {
+        if(PyArray_CopyInto(%(oname)s, %(iname)s)){
+            PyErr_SetString(PyExc_ValueError,
+        "DeepCopyOp: the copy failed into already allocated space!");
+            %(fail)s;
+        }
+    }
+    """,
+    version=2)
@@ -723,7 +722,7 @@ theano.compile.register_rebroadcast_c_code(
             %(fail)s
         }
     """,
-        version=1)
+    version=1)
 
 
 theano.compile.register_specify_shape_c_code(
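The elementwise comparison reindented in TensorType's approximate-equality code follows the same |a - b| <= atol + rtol * |b| rule that numpy.allclose uses, with NaN and inf given special handling in the following hunks. A direct NumPy sketch of the rule (illustrative only, not part of the commit):

import numpy as np

a = np.array([1.0, 2.0])
b = np.array([1.0 + 5e-9, 2.0])
rtol = 1.0000000000000001e-05
atol = 1e-8

cmp_elemwise = (np.absolute(a - b) <=
                (atol + rtol * np.absolute(b)))
print(cmp_elemwise.all())  # True, same verdict as np.allclose(a, b, rtol, atol)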
theano/tests/test_flake8.py

@@ -57,17 +57,7 @@ whitelist_flake8 = [
     "typed_list/tests/test_type.py",
     "typed_list/tests/test_opt.py",
     "typed_list/tests/test_basic.py",
     "tensor/blas_headers.py",
-    "tensor/type.py",
-    "tensor/fourier.py",
-    "tensor/__init__.py",
-    "tensor/opt_uncanonicalize.py",
-    "tensor/blas.py",
-    "tensor/extra_ops.py",
-    "tensor/nlinalg.py",
-    "tensor/blas_c.py",
-    "tensor/elemwise_cgen.py",
-    "tensor/blas_scipy.py",
     "tensor/tests/test_subtensor.py",
     "tensor/tests/test_utils.py",
     "tensor/tests/test_nlinalg.py",