testgroup / pytensor · Commits · bd11e130

Commit bd11e130, authored Jul 02, 2015 by Frédéric Bastien

    Merge pull request #3074 from harlouci/flake8_v2

    flake8

Parents: cb08bc11, fc6d2310

Showing 15 changed files with 115 additions and 128 deletions (+115, -128)
theano/tensor/basic.py                  +35  -36
theano/tensor/blas.py                    +3   -3
theano/tensor/elemwise.py                +0   -0
theano/tensor/inplace.py                 +0   -1
theano/tensor/opt.py                     +0   -0
theano/tensor/raw_random.py             +18  -17
theano/tensor/shared_randomstreams.py    +3   -1
theano/tensor/sharedvar.py               +3   -1
theano/tensor/slinalg.py                 +8  -15
theano/tensor/sort.py                    +9   -9
theano/tensor/subtensor.py              +22  -24
theano/tensor/utils.py                   +0   -0
theano/tensor/var.py                     +6   -8
theano/tensor/xlogx.py                   +8   -0
theano/tests/test_flake8.py              +0  -13
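This commit is a batch of flake8 cleanups. flake8 runs the pep8 style checker and pyflakes over the code base, and the hunks below are the mechanical fixes those tools suggest: comment spacing, lambda assignments, unused variables and imports, backslash continuations, and the placement of line breaks around binary operators. A hedged orientation in hypothetical code (not taken from the diffs below):

```python
# Hypothetical before/after pairs for the kinds of checks fixed here.

#no space after the hash             (E265: block comment should start with '# ')
# fixed: one space after the hash

square = lambda x: x * x  # E731: do not assign a lambda expression, use a def


def square(x):  # a def keeps a real __name__ for tracebacks and pickling
    return x * x


def parse(text):
    try:
        return int(text)
    except ValueError as e:  # F841: local variable 'e' assigned but never used
        return None


def parse_fixed(text):
    try:
        return int(text)
    except ValueError:  # fixed: drop the unused binding
        return None


print(square(4), parse("x"), parse_fixed("3"))
```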
theano/tensor/basic.py

 """A `Type` and `Op` classes to work with numpy.ndarrays symbolically."""
-__docformat__ = "restructuredtext en"
-
 import sys
 import warnings

@@ -29,7 +27,6 @@ from theano.printing import pprint, min_informative_str
 # For history
 from theano.compile import Rebroadcast, Shape, shape
-
 # We use these exceptions as well.
 import theano.scalar.sharedvar
 from theano.gradient import grad_undefined

@@ -42,6 +39,8 @@ from theano.tensor.elemwise import Elemwise, DimShuffle, CAReduce, Sum
 import logging
 _logger = logging.getLogger("theano.tensor.basic")

+__docformat__ = "restructuredtext en"
+
 # This is needed as we will hide it later
 python_complex = complex
 python_any = any
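Several of these files move the `__docformat__` assignment from above the imports to below them. A module-level statement that precedes imports makes the style checker flag every following import as "module level import not at top of file" (E402 in today's pycodestyle numbering), and relocating the assignment is the cheapest fix. A minimal sketch of the pattern, assuming that is indeed the motivation here:

```python
# Before: the assignment sits above the imports, so the checker flags
# both imports below it.
__docformat__ = "restructuredtext en"
import sys
import warnings

# After: imports first, module metadata afterwards.
import sys
import warnings

__docformat__ = "restructuredtext en"
```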
@@ -620,8 +619,8 @@ def get_scalar_constant_value(orig_v, elemwise=True,
             ret = [[None]]
             v.owner.op.perform(v.owner, const, ret)
             return ret[0][0]
-        elif (isinstance(v.owner.op, theano.tensor.subtensor.Subtensor)
-                and v.ndim == 0):
+        elif (isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and
+              v.ndim == 0):
             if isinstance(v.owner.inputs[0], TensorConstant):
                 cdata = tuple(v.owner.op.get_constant_idx(v.owner.inputs))
                 try:
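This hunk, like many below, only moves a binary operator from the start of a continuation line to the end of the previous one. The pep8 tool of this period warned when a line break came before a binary operator (W503), so the commit appears to standardize on operator-at-end-of-line continuations; either layout computes the same thing. For instance:

```python
x_is_small = True
x_is_even = True

# Break before the operator (the style being removed):
ok = (x_is_small
      and x_is_even)

# Break after the operator (the style the commit converges on):
ok = (x_is_small and
      x_is_even)

print(ok)
```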
@@ -1090,7 +1089,7 @@ scalar_from_tensor = ScalarFromTensor()
 # to be removed as we get the epydoc routine-documenting thing going
-#-JB 20080924
+# -JB 20080924
 def _conversion(real_value, name):
     __oplist_tag(real_value, 'casting')
     real_value.__module__ = 'tensor.basic'
@@ -1235,8 +1234,8 @@ class MaxAndArgmax(Op):
             raise TypeError(
                 "MaxAndArgmax needs a constant axis. Got %s" % axis)
         else:
-            assert (axis.dtype.startswith("int")
-                    or axis.dtype.startswith("uint"))
+            assert (axis.dtype.startswith("int") or
+                    axis.dtype.startswith("uint"))
             axis = int(axis.data)
         # we make the axis all positive to make the infer_shape work
         # with negative axis

@@ -1373,13 +1372,13 @@ class MaxAndArgmax(Op):
         # Lebesgue measure, the result may be interpreted as weak gradient.
         # @note: This function should work correctly for L{vector}s.
-        #(x, y), (gz, gw)
-        #gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy
-        #gMax * dMax/dx + gArgMax * dArgMax/dx,
-        #gMax * dMax/daxis + gArgMax * dArgMax/daxis
-        #g_max has one less dimension than x, so you need to complete
-        #g_max to x's shape when axis=0 the broadcasting mechanism
-        #does it automatically
+        # (x, y), (gz, gw)
+        # gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy
+        # gMax * dMax/dx + gArgMax * dArgMax/dx,
+        # gMax * dMax/daxis + gArgMax * dArgMax/daxis
+        # g_max has one less dimension than x, so you need to complete
+        # g_max to x's shape when axis=0 the broadcasting mechanism
+        # does it automatically
         x, axis = inp
         g_max, g_max_idx = grads
@@ -2078,7 +2077,7 @@ def chi2sf(x, k):
 # numpy.real(float32) return a view on the inputs.
-#@_scal_elemwise_with_nfunc('real', 1, 1)
+# @_scal_elemwise_with_nfunc('real', 1, 1)
 @_scal_elemwise
 def real(z):
     """Return real component of complex-valued tensor `z`"""

@@ -2116,7 +2115,7 @@ def complex_from_polar(abs, angle):
 # fill, _fill_inplace = _elemwise(scal.second, 'fill',
-#"""fill WRITEME (elemwise)""")
+#     """fill WRITEME (elemwise)""")
 @_scal_elemwise
 def second(a, b):
     """Create a matrix by filling the shape of a with b"""
@@ -3540,8 +3539,8 @@ class Join(Op):
         dtypes = [x.type.dtype for x in as_tensor_variable_args]
         out_dtype = scal.upcast(*dtypes)

-        output_maker = lambda bcastable: tensor(dtype=out_dtype,
-                                                broadcastable=bcastable)
+        def output_maker(bcastable):
+            return tensor(dtype=out_dtype, broadcastable=bcastable)

         return self._make_node_internal(
             axis, tensors, as_tensor_variable_args, output_maker)
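The real change in this hunk replaces a lambda bound to a name with an equivalent def, which is flake8's E731 ("do not assign a lambda expression, use a def"). Behaviour is unchanged, but the def carries a proper `__name__` for tracebacks and pickling. A standalone sketch with a stand-in `tensor` factory (hypothetical, only there to make the snippet runnable):

```python
def tensor(dtype, broadcastable):  # stand-in for theano.tensor.tensor
    return (dtype, broadcastable)

out_dtype = "float64"

# Before (E731: do not assign a lambda expression, use a def):
output_maker = lambda bcastable: tensor(dtype=out_dtype,
                                        broadcastable=bcastable)

# After: same behaviour, but the function now has a useful __name__.
def output_maker(bcastable):
    return tensor(dtype=out_dtype, broadcastable=bcastable)

print(output_maker((False, False)), output_maker.__name__)
```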
@@ -4361,8 +4360,7 @@ class Tile(Op):
     def make_node(self, x, reps):
-        warnings.warn((
-            "Tile op is deprecated, use tile function instead."),
+        warnings.warn(("Tile op is deprecated, use tile function instead."),
                       stacklevel=3)
         x = as_tensor_variable(x)
         reps = as_tensor_variable(reps)
         return gof.Apply(self, [x, reps], [tensor(x.type.dtype, [False] *

@@ -4428,7 +4426,8 @@ def tile(x, reps, ndim=None):
         raise ValueError("reps must be iterable")
-    if not numpy.all([isinstance(r, integer_types) or
-                      (isinstance(r, TensorVariable) and
-                       r.dtype in ["int8", "int16", "int32", "int64"]) for r in reps]):
+    if not numpy.all([isinstance(r, integer_types) or
+                      (isinstance(r, TensorVariable) and
+                       r.dtype in ["int8", "int16", "int32", "int64"])
+                      for r in reps]):
         raise ValueError("elements of reps must be scalars of integer dtype")
     elif len(reps) != x.ndim:
         raise ValueError("len(reps) != x.ndim not currently supported")

@@ -4442,10 +4441,10 @@ def tile(x, reps, ndim=None):
     shape = [x.shape[i] for i in xrange(ndim)]
     alloc_shape = reps + shape
     y = alloc(x, *alloc_shape)
-    shuffle_ind = numpy.arange(ndim*2).reshape(2, ndim)
+    shuffle_ind = numpy.arange(ndim * 2).reshape(2, ndim)
     shuffle_ind = shuffle_ind.transpose().flatten()
     y = y.dimshuffle(*shuffle_ind)
-    new_shapes = [sh*reps[i] for i, sh in enumerate(shape)]
+    new_shapes = [sh * reps[i] for i, sh in enumerate(shape)]
     y = y.reshape(new_shapes)
     return y

@@ -4512,8 +4511,8 @@ class ARange(Op):
         else:
             stop = upcast(stop)
             start = upcast(start)
-        return [(maximum(cast(ceil(cast((stop - start), 'float64')
-                / step), 'int64'), 0),)]
+        return [(maximum(cast(ceil(cast((stop - start), 'float64')
+                              / step), 'int64'), 0),)]

     def perform(self, node, inp, out_):
         start, stop, step = inp

@@ -4742,8 +4741,8 @@ class PermuteRowElements(Op):
         # the gradient over these axes, but keep the dimension (as
         # broadcastable)
-        broadcasted_dims = [dim for dim in xrange(gz.type.ndim)
-                            if x.type.broadcastable[dim]
-                            and not gz.type.broadcastable[dim]]
+        broadcasted_dims = [dim for dim in xrange(gz.type.ndim)
+                            if x.type.broadcastable[dim] and
+                            not gz.type.broadcastable[dim]]
         gx = Sum(axis=broadcasted_dims)(gx)
         # Sum(...) removed the dimensions in broadcasted_dims,

@@ -4876,17 +4875,17 @@ class Dot(Op):
             xgrad = gz * y
             ygrad = gz * x
-        #x is vector, y is matrix, grad is vector
+        # x is vector, y is matrix, grad is vector
         elif xdim == 1 and ydim == 2:
             xgrad = dot(gz, y.T)
             ygrad = outer(x.T, gz)
-        #x is matrix, y is vector, grad is vector
+        # x is matrix, y is vector, grad is vector
         elif xdim == 2 and ydim == 1:
             xgrad = outer(gz, y.T)
             ygrad = dot(x.T, gz)
-        #x is matrix, y is matrix, grad is matrix
+        # x is matrix, y is matrix, grad is matrix
         elif xdim == ydim == 2:
             xgrad = dot(gz, y.T)
             ygrad = dot(x.T, gz)
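Comment fixes aside, the code in this hunk is the standard reverse-mode rule for a matrix product: for Z = dot(X, Y) with upstream gradient gZ, the input gradients are dot(gZ, Y.T) and dot(X.T, gZ), with outer products covering the vector cases. A small numpy check of the matrix-matrix identity (illustrative only, not part of the diff):

```python
import numpy

rng = numpy.random.RandomState(0)
x = rng.randn(3, 4)
y = rng.randn(4, 2)
gz = rng.randn(3, 2)          # upstream gradient dL/dZ for Z = x.dot(y)

xgrad = gz.dot(y.T)           # dL/dX = gZ . Y^T, as in Dot.grad above
ygrad = x.T.dot(gz)           # dL/dY = X^T . gZ

# Finite-difference check of one entry of dL/dX for L = sum(gz * x.dot(y)).
eps = 1e-6
x2 = x.copy()
x2[0, 0] += eps
numeric = (numpy.sum(gz * x2.dot(y)) - numpy.sum(gz * x.dot(y))) / eps
assert abs(numeric - xgrad[0, 0]) < 1e-4
print("dot gradient identities check out")
```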
@@ -4958,8 +4957,8 @@ class Dot(Op):
             if eval_point_values[i] is not None and \
                input_values[i].shape != eval_point_values[i].shape:
                 raise ValueError(
-                    'input ' + str(i) + ' and eval_point ' + str(i)
-                    + ' to Dot.R_op should have the same shape, but '
+                    'input ' + str(i) + ' and eval_point ' + str(i) +
+                    ' to Dot.R_op should have the same shape, but '
                     'their shapes are %s and %s, respectively' % (
                         str(input_values[i].shape),
                         str(eval_point_values[i].shape)))

@@ -5230,8 +5229,8 @@ def tensordot(a, b, axes=2):
                 'equal to b.ndim (b.ndim=%i, max(axes[1])=%i).' %
                 (b.ndim, numpy.max(numpy.array(b_axes))))
-    a_order = (tuple(x for x in tuple(xrange(a.ndim)) if x not in a_axes)
-               + a_axes)
+    a_order = (tuple(x for x in tuple(xrange(a.ndim)) if x not in a_axes) +
+               a_axes)
     b_order = (b_axes + tuple(x
                               for x in tuple(xrange(b.ndim))
                               if x not in b_axes))

@@ -5635,7 +5634,7 @@ class AllocEmpty(gof.Op):
         out[0] = numpy.empty(sh, dtype=self.dtype)

     def c_code(self, node, name, inputs, out_, sub):
-        dtype = "NPY_"+self.dtype.upper()
+        dtype = "NPY_" + self.dtype.upper()
         out, = out_
         fail = sub['fail']
         shps = inputs
theano/tensor/blas.py

@@ -266,7 +266,7 @@ SOMEPATH/Canopy_64bit/User/lib/python2.7/site-packages/numpy/distutils/system_in
 # Using "conda install mkl" will install both, as well as
 # optimized versions of numpy and scipy.
 try:
-    import mkl
+    import mkl  # noqa
 except ImportError as e:
     _logger.info('Conda mkl is not available: %s', e)
 else:

@@ -1599,11 +1599,11 @@ class GemmOptimizer(Optimizer):
                 )
                 did_something = True
                 nb_replacement += 1
-            except InconsistencyError as e:
+            except InconsistencyError:
                 # TODO: retry other applications of gemm (see comment
                 # in _gemm_from_node)
                 nb_inconsistency_replace += 1
-            except ReplacementDidntRemovedError as e:
+            except ReplacementDidntRemovedError:
                 nb_replacement_didn_t_remove += 1
                 self.warned = True
         fgraph.remove_feature(u)
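Two patterns recur in this file: exception handlers that never use the bound exception drop the `as e` (pyflakes reports the unused name as F841), while the bare `import mkl`, whose only purpose is to probe for the package, gains a `# noqa` marker so pyflakes stops reporting it as an unused import. The handler that actually logs `e` keeps its binding. A hedged sketch of both idioms:

```python
def has_mkl():
    """Probe for the optional mkl package (sketch of the blas.py idiom)."""
    try:
        import mkl  # noqa  # the import itself is the test; hush "unused"
    except ImportError:     # no "as e": the exception object is never used
        return False
    return True

print(has_mkl())
```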
theano/tensor/elemwise.py: diff collapsed (whitespace-only changes; +0 -0).
theano/tensor/inplace.py

@@ -28,7 +28,6 @@ def _scal_inplace(symbol):
     def chk(pstate, r):
         if not r.owner:
             return False
-        op = r.owner.op
         return r.owner.op == rval
     pprint.assign(chk, printing.FunctionPrinter(symbolname.replace('_inplace', '=')))
theano/tensor/opt.py: diff collapsed (whitespace-only changes; +0 -0).
theano/tensor/raw_random.py

 """Define random number Type (`RandomStateType`) and Op (`RandomFunction`)."""
 from __future__ import print_function
-__docformat__ = "restructuredtext en"
-
 import sys
 from copy import copy

@@ -15,6 +15,8 @@ from theano import gof
 from six import string_types
 from theano.compile import optdb

+__docformat__ = "restructuredtext en"
+

 class RandomStateType(gof.Type):
     """A Type wrapper for numpy.random.RandomState

@@ -135,9 +137,8 @@ class RandomFunction(gof.Op):
                 and self.ndim_added == other.ndim_added

     def __hash__(self):
-        return hash(type(self)) ^ hash(self.fn) \
-            ^ hash(self.outtype) \
-            ^ hash(self.inplace) ^ hash(self.ndim_added)
+        return (hash(type(self)) ^ hash(self.fn) ^
+                hash(self.outtype) ^
+                hash(self.inplace) ^ hash(self.ndim_added))

     def __getstate__(self):
         return self.state

@@ -233,7 +234,6 @@ class RandomFunction(gof.Op):
         # copy of r if self.inplace is False
         r, shape, args = inputs[0], inputs[1], inputs[2:]
         assert type(r) == numpy.random.RandomState, (type(r), r)
-        r_orig = r
         # If shape == [], that means no shape is enforced, and numpy is
         # trusted to draw the appropriate number of samples, numpy uses

@@ -253,8 +253,8 @@ class RandomFunction(gof.Op):
             r = copy(r)
         rout[0] = r
         rval = self.fn(r, *(args + [shape]))
-        if not isinstance(rval, numpy.ndarray) \
-                or str(rval.dtype) != node.outputs[1].type.dtype:
+        if (not isinstance(rval, numpy.ndarray) or
+                str(rval.dtype) != node.outputs[1].type.dtype):
             rval = theano._asarray(rval, dtype=node.outputs[1].type.dtype)
         # When shape is None, numpy has a tendency to unexpectedly

@@ -353,7 +353,8 @@ def _infer_ndim_bcast(ndim, shape, *args):
                 break
         else:
             if n_a_i == 0:
-                raise ValueError(('Auto-shape of -1 must overlap'
-                        'with the shape of one of the broadcastable'
-                        'inputs'))
+                raise ValueError(('Auto-shape of -1 must overlap'
+                                  'with the shape of one of the broadcastable'
+                                  'inputs'))
             else:

@@ -517,7 +518,8 @@ def binomial(random_state, size=None, n=1, p=0.5, ndim=None,
     # p=numpy.asarray([.1, .2, .3], dtype='float64'))
     n = tensor.cast(n, 'int32')
-    op = RandomFunction('binomial',
-                        tensor.TensorType(dtype=dtype, broadcastable=(False,) * ndim))
+    op = RandomFunction('binomial',
+                        tensor.TensorType(dtype=dtype,
+                                          broadcastable=(False,) * ndim))
     return op(random_state, size, n, p)

@@ -719,7 +721,8 @@ def permutation(random_state, size=None, n=1, ndim=None, dtype='int64'):
     ndim, size, bcast = _infer_ndim_bcast(ndim, size)
     # print "NDIM", ndim, size
-    op = RandomFunction(permutation_helper,
-                        tensor.TensorType(dtype=dtype, broadcastable=bcast + (False,)),
-                        ndim_added=1)
+    op = RandomFunction(permutation_helper,
+                        tensor.TensorType(dtype=dtype,
+                                          broadcastable=bcast + (False,)),
+                        ndim_added=1)
     return op(random_state, size, n)

@@ -738,14 +741,11 @@ def multinomial_helper(random_state, n, pvals, size):
         ndim = len(size)
     else:
         ndim = max(n.ndim, pvals.ndim - 1)
-
-    out_ndim = ndim + 1
-
     # broadcast n to ndim dimensions and pvals to ndim+1
     if n.ndim > ndim:
         raise ValueError(
             'n.ndim (%i) should not be larger than len(size) (%i)'
             % (n.ndim, ndim), n, size)
     if n.ndim < ndim:
         n = n.reshape((1,) * (ndim - n.ndim) + n.shape)

@@ -788,7 +788,7 @@ def multinomial_helper(random_state, n, pvals, size):
         # because mtrand.pyx has a ValueError that will trigger if
         # sum(pvals[:-1]) > 1.0
         pvi = pvi * (1.0 - 5e-5)
-        #pvi = pvi * .9
+        # pvi = pvi * .9
         pisum = numpy.sum(pvi)
     elif pvi[-1] < 5e-5:  # will this even work?
         pvi = pvi * (1.0 - 5e-5)

@@ -859,7 +859,8 @@ def multinomial(random_state, size=None, n=1, pvals=[0.5, 0.5],
     ndim, size, bcast = _infer_ndim_bcast(ndim, size, n, tmp)
     bcast = bcast + (pvals.type.broadcastable[-1],)
-    op = RandomFunction(multinomial_helper,
-                        tensor.TensorType(dtype=dtype, broadcastable=bcast),
-                        ndim_added=1)
+    op = RandomFunction(multinomial_helper,
+                        tensor.TensorType(dtype=dtype,
+                                          broadcastable=bcast),
+                        ndim_added=1)
     return op(random_state, size, n, pvals)
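Two hunks above trade backslash line continuations for a single parenthesized expression, the continuation style PEP 8 recommends; the computed value is identical. A self-contained illustration:

```python
class Key(object):
    def __init__(self, fn, outtype, inplace):
        self.fn, self.outtype, self.inplace = fn, outtype, inplace

    def hash_old(self):
        # Before: backslash continuations.
        return hash(type(self)) ^ hash(self.fn) \
            ^ hash(self.outtype) \
            ^ hash(self.inplace)

    def hash_new(self):
        # After: one parenthesized expression, no backslashes.
        return (hash(type(self)) ^ hash(self.fn) ^
                hash(self.outtype) ^
                hash(self.inplace))

k = Key("fn", "outtype", False)
assert k.hash_old() == k.hash_new()
print("same value either way")
```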
theano/tensor/shared_randomstreams.py

 """Define RandomStreams, providing random number variables for Theano
 graphs.
 """
-__docformat__ = "restructuredtext en"
-
 import copy
 import numpy

 from theano.compile.sharedvalue import (SharedVariable, shared_constructor,
                                         shared)
 from theano.tensor import raw_random

+__docformat__ = "restructuredtext en"
+

 class RandomStateSharedVariable(SharedVariable):
     pass
theano/tensor/sharedvar.py

@@ -86,7 +86,9 @@ def scalar_constructor(value, name=None, strict=False, allow_downcast=None,
         # strict is True and the types do not match.
-        rval = ScalarSharedVariable(type=tensor_type,
-                                    value=numpy.array(value, copy=True),
-                                    name=name, strict=strict, allow_downcast=allow_downcast)
+        rval = ScalarSharedVariable(type=tensor_type,
+                                    value=numpy.array(value, copy=True),
+                                    name=name,
+                                    strict=strict,
+                                    allow_downcast=allow_downcast)
         return rval
     except Exception:
         traceback.print_exc()
theano/tensor/slinalg.py

 import logging
-logger = logging.getLogger(__name__)
+import numpy
 import warnings

 from six.moves import xrange

-from theano.gof import Op, Apply
-import numpy
-from theano.tensor import as_tensor_variable, dot, DimShuffle, Dot
-from theano.tensor.blas import Dot22
-from theano import tensor
-import theano.tensor
-from theano.tensor.opt import (register_stabilize,
-                               register_specialize, register_canonicalize)
-from theano.gof import local_optimizer
-from theano.gof.opt import Optimizer
-from theano.gradient import DisconnectedType
-
 try:
     import scipy.linalg

@@ -24,6 +11,13 @@ except ImportError:
     # some ops (e.g. Cholesky, Solve, A_Xinv_b) won't work
     imported_scipy = False

+from theano import tensor
+import theano.tensor
+from theano.tensor import as_tensor_variable
+from theano.gof import Op, Apply
+
+logger = logging.getLogger(__name__)
+
 MATRIX_STRUCTURES = (
     'general',
     'symmetric',

@@ -123,7 +117,6 @@ class CholeskyGrad(Op):
                     F[k, k] /= (2 * L[k, k])
         else:
             F = numpy.triu(dz)
-            M = N - 1
             for k in xrange(N - 1, -1, -1):
                 for j in xrange(k + 1, N):
                     for i in xrange(j, N):
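slinalg.py gets the largest reshuffle: imports the module never uses (dot, DimShuffle, Dot, Dot22, the optimizer registrations, local_optimizer, Optimizer, DisconnectedType) are deleted, which is pyflakes' unused-import warning (F401), and the surviving theano imports plus the logger move below the scipy try/except. A minimal sketch of the F401 fix:

```python
# Before: pyflakes flags names that are imported but never used (F401).
from math import sqrt, floor, ceil   # 'floor' and 'ceil' never referenced

print(sqrt(2))

# After: keep only what the module actually uses.
from math import sqrt

print(sqrt(2))
```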
theano/tensor/sort.py

@@ -64,7 +64,7 @@ class SortOp(theano.Op):
                              " matrix (and axis is None or 0) and tensor3")
         if a.ndim == 1:
             idx = argsort(*inputs, kind=self.kind, order=self.order)
-            #rev_idx = numpy.where(idx[None, :]==numpy.arange(5)[:,None])[1]
+            # rev_idx = numpy.where(idx[None, :]==numpy.arange(5)[:,None])[1]
             rev_idx = theano.tensor.eq(idx[None, :],
                                        arange(a.shape[0])[:, None]).nonzero()[1]
             inp_grad = output_grads[0][rev_idx]

@@ -72,8 +72,9 @@ class SortOp(theano.Op):
         if (axis is None or
                 (isinstance(axis, theano.Constant) and axis.data is None)):
             idx = argsort(*inputs, kind=self.kind, order=self.order)
-            rev_idx = theano.tensor.eq(idx[None, :],
-                                       arange(a.shape[0] * a.shape[1])[:, None]).nonzero()[1]
+            rev_idx = theano.tensor.eq(
+                idx[None, :],
+                arange(a.shape[0] * a.shape[1])[:, None]).nonzero()[1]
             inp_grad = output_grads[0][rev_idx].reshape(a.shape)
         elif (axis == 0 or
               (isinstance(axis, theano.Constant) and axis.data == 0)):

@@ -178,8 +179,8 @@ class ArgSortOp(theano.Op):
         return hash(type(self)) ^ hash(self.order) ^ hash(self.kind)

     def __str__(self):
-        return (self.__class__.__name__
-                + "{%s, %s}" % (self.kind, str(self.order)))
+        return (self.__class__.__name__ +
+                "{%s, %s}" % (self.kind, str(self.order)))

     def make_node(self, input, axis=-1):
         input = theano.tensor.as_tensor_variable(input)

@@ -190,15 +191,14 @@ class ArgSortOp(theano.Op):
         else:
             axis = theano.tensor.as_tensor_variable(axis)
         bcast = input.type.broadcastable
         return theano.Apply(self, [input, axis],
                             [theano.tensor.TensorType(
                                 dtype="int64", broadcastable=bcast)()])
-

     def perform(self, node, inputs, output_storage):
         a = inputs[0]
         axis = inputs[1]
         z = output_storage[0]
         z[0] = theano._asarray(
             np.argsort(a, axis, self.kind, self.order),
             dtype=node.outputs[0].dtype)

     def infer_shape(self, node, inputs_shapes):
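The `rev_idx` expression these hunks reflow computes the inverse of the permutation returned by argsort: broadcasting `idx` against `arange` yields a boolean matrix, and the column indices of its nonzero entries undo the permutation, which is what routing a gradient backwards through a sort requires. The same trick in plain numpy:

```python
import numpy

a = numpy.array([30.0, 10.0, 40.0, 20.0])
idx = numpy.argsort(a)                # permutation that sorts a

# Mirror of the theano.tensor.eq(...).nonzero()[1] construction above:
rev_idx = (idx[None, :] == numpy.arange(a.shape[0])[:, None]).nonzero()[1]

assert (a[idx][rev_idx] == a).all()   # rev_idx undoes the sort
print(idx, rev_idx)
```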
theano/tensor/subtensor.py

 from copy import copy
-import os
 import sys
 from textwrap import dedent
 import warnings
 import logging
-_logger = logging.getLogger("theano.tensor.subtensor")

 import numpy
 from six.moves import xrange

@@ -32,6 +30,7 @@ if config.cxx:
 except ImportError:
     pass

+_logger = logging.getLogger("theano.tensor.subtensor")
+
 # Do a lazy import of the sparse module
 sparse_module_ref = None

@@ -336,9 +335,9 @@ class Subtensor(Op):
                               theano.tensor.wscalar, theano.tensor.bscalar]
         invalid_tensor_types = [theano.tensor.fscalar, theano.tensor.dscalar,
                                 theano.tensor.cscalar, theano.tensor.zscalar]
-        if (isinstance(entry, gof.Variable)
-                and (entry.type in invalid_scal_types
-                     or entry.type in invalid_tensor_types)):
+        if (isinstance(entry, gof.Variable) and
+                (entry.type in invalid_scal_types or
+                 entry.type in invalid_tensor_types)):
             raise TypeError("Expected an integer")
         if isinstance(entry, gof.Variable) and entry.type in scal_types:

@@ -346,13 +345,13 @@ class Subtensor(Op):
         elif isinstance(entry, gof.Type) and entry in scal_types:
             return entry
-        if (isinstance(entry, gof.Variable)
-                and entry.type in tensor_types
-                and numpy.all(entry.type.broadcastable)):
+        if (isinstance(entry, gof.Variable) and
+                entry.type in tensor_types and
+                numpy.all(entry.type.broadcastable)):
             return scal.get_scalar_type(entry.type.dtype)
-        elif (isinstance(entry, gof.Type)
-              and entry in tensor_types
-              and numpy.all(entry.broadcastable)):
+        elif (isinstance(entry, gof.Type) and
+              entry in tensor_types and
+              numpy.all(entry.broadcastable)):
             return scal.get_scalar_type(entry.dtype)
         elif slice_ok and isinstance(entry, slice):
             a = entry.start

@@ -425,7 +424,8 @@ class Subtensor(Op):
                          conv(val.step))
         else:
             try:
-                return get_scalar_constant_value(val, only_process_constants=only_process_constants)
+                return get_scalar_constant_value(
+                    val, only_process_constants=only_process_constants)
             except theano.tensor.NotScalarConstantError:
                 if allow_partial:

@@ -477,8 +477,8 @@ class Subtensor(Op):
                             % (input.type, expected_type))

         # infer the broadcasting pattern
-        padded = (self.get_constant_idx((None,) + inputs, allow_partial=True)
-                  + [slice(None, None, None)] * (x.type.ndim - len(idx_list)))
+        padded = (self.get_constant_idx((None,) + inputs, allow_partial=True) +
+                  [slice(None, None, None)] * (x.type.ndim - len(idx_list)))
         broadcastable = []
         for i, (p, bc) in enumerate(izip(padded, x.type.broadcastable)):
             if isinstance(p, slice):

@@ -528,9 +528,9 @@ class Subtensor(Op):
             if isinstance(idx, slice):
                 # If it is the default (None, None, None) slice, or a variant,
                 # the shape will be xl
-                if ((idx.start in [None, 0])
-                        and (idx.stop in [None, sys.maxsize])
-                        and (idx.step is None or idx.step == 1)):
+                if ((idx.start in [None, 0]) and
+                        (idx.stop in [None, sys.maxsize]) and
+                        (idx.step is None or idx.step == 1)):
                     outshp.append(xl)
                 else:
                     cnf = get_canonical_form_slice(idx, xl)[0]

@@ -556,8 +556,7 @@ class Subtensor(Op):
             first = x.zeros_like().astype(theano.config.floatX)
         else:
             first = IncSubtensor(self.idx_list)(x.zeros_like(), gz, *rest)
-        return ([first]
-                + [DisconnectedType()()] * len(rest))
+        return ([first] + [DisconnectedType()()] * len(rest))

     def connection_pattern(self, node):

@@ -1034,8 +1033,7 @@ def inc_subtensor(x, y, inplace=False, set_instead_of_inc=False,
         dim_offset = x.ndim - y.ndim
         for dim in xrange(y.ndim):
-            if (x.broadcastable[dim + dim_offset]
-                    and not y.broadcastable[dim]):
+            if (x.broadcastable[dim + dim_offset] and
+                    not y.broadcastable[dim]):
                 # It is acceptable to try to increment a subtensor with a
                 # broadcastable dim with a tensor that is not broadcastable
                 # on that dimension. However, its length must then be 1.

@@ -2133,9 +2131,9 @@ class AdvancedIncSubtensor(Op):
         return hash((type(self), self.inplace, self.set_instead_of_inc))

     def __eq__(self, other):
-        return (type(self) == type(other)
-                and self.inplace == other.inplace
-                and self.set_instead_of_inc == other.set_instead_of_inc)
+        return (type(self) == type(other) and
+                self.inplace == other.inplace and
+                self.set_instead_of_inc == other.set_instead_of_inc)

     def __str__(self):
         return "%s{%s, %s}" % (self.__class__.__name__,
theano/tensor/utils.py: no visible diff (whitespace-only changes; +0 -0).
theano/tensor/var.py

 import copy
-import pdb
-import sys
 import traceback as tb
 import warnings

@@ -41,9 +39,9 @@ class _tensor_py_operators:
     # CASTS
     # REMOVED THESE BECAUSE PYTHON appears to require __int__ to return
     # an int. -JB 20081112
-    #def __int__(self): return convert_to_int32(self)
-    #def __float__(self): return convert_to_float64(self)
-    #def __complex__(self): return convert_to_complex128(self)
+    # def __int__(self): return convert_to_int32(self)
+    # def __float__(self): return convert_to_float64(self)
+    # def __complex__(self): return convert_to_complex128(self)

     # COMPARISONS
     _is_nonzero = True

@@ -68,7 +66,6 @@ class _tensor_py_operators:
         rval._is_nonzero = False
         return rval
-

     def __nonzero__(self):
         # Python 2.x
         return self.__bool__()

@@ -215,7 +212,7 @@ class _tensor_py_operators:
     # DO NOT USE THESE BECAUSE INPLACE OPS SHOULD BE INSERTED
     # BY OPTIMIZATIONS ONLY
-    #ARITHMETIC - INPLACE
+    # ARITHMETIC - INPLACE
     # def __iadd__(self, other):
     #    return _add_inplace(self, other)
     # def __isub__(self, other):

@@ -642,7 +639,8 @@ class TensorVariable(_tensor_py_operators, Variable):
         elif config.warn_float64 == "raise":
             raise Exception(msg)
         elif config.warn_float64 == 'pdb':
-            import pdb; pdb.set_trace()
+            import pdb
+            pdb.set_trace()

 TensorType.Variable = TensorVariable
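The last hunk above splits `import pdb; pdb.set_trace()` into two statements, the fix for pep8's "multiple statements on one line (semicolon)" check (E702). For example:

```python
# Before (E702: statement ends with a semicolon):
import sys; print(sys.platform)

# After: one statement per line.
import sys
print(sys.platform)
```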
theano/tensor/xlogx.py

@@ -13,12 +13,15 @@ class XlogX(scalar.UnaryScalarOp):
         if x == 0.0:
             return 0.0
         return x * numpy.log(x)
+
     def impl(self, x):
         return XlogX.st_impl(x)
+
     def grad(self, inputs, grads):
         x, = inputs
         gz, = grads
         return [gz * (1 + scalar.log(x))]
+
     def c_code(self, node, name, inputs, outputs, sub):
         x, = inputs
         z, = outputs

@@ -28,6 +31,7 @@ class XlogX(scalar.UnaryScalarOp):
                    ? 0.0
                    : %(x)s * log(%(x)s);""" % locals()
         raise NotImplementedError('only floatingpoint is implemented')
+
 scalar_xlogx = XlogX(scalar.upgrade_to_float, name='scalar_xlogx')
 xlogx = Elemwise(scalar_xlogx, name='xlogx')

@@ -41,12 +45,15 @@ class XlogY0(scalar.BinaryScalarOp):
         if x == 0.0:
             return 0.0
         return x * numpy.log(y)
+
     def impl(self, x, y):
         return XlogY0.st_impl(x, y)
+
     def grad(self, inputs, grads):
         x, y = inputs
         gz, = grads
         return [gz * scalar.log(y), gz * x / y]
+
     def c_code(self, node, name, inputs, outputs, sub):
         x, y = inputs
         z, = outputs

@@ -56,5 +63,6 @@ class XlogY0(scalar.BinaryScalarOp):
                    ? 0.0
                    : %(x)s * log(%(y)s);""" % locals()
         raise NotImplementedError('only floatingpoint is implemented')
+
 scalar_xlogy0 = XlogY0(scalar.upgrade_to_float, name='scalar_xlogy0')
 xlogy0 = Elemwise(scalar_xlogy0, name='xlogy0')
theano/tests/test_flake8.py

@@ -57,30 +57,17 @@ whitelist_flake8 = [
     "typed_list/tests/test_type.py",
     "typed_list/tests/test_opt.py",
     "typed_list/tests/test_basic.py",
-    "tensor/var.py",
-    "tensor/sharedvar.py",
-    "tensor/inplace.py",
-    "tensor/slinalg.py",
-    "tensor/shared_randomstreams.py",
-    "tensor/subtensor.py",
-    "tensor/elemwise.py",
-    "tensor/xlogx.py",
     "tensor/blas_headers.py",
-    "tensor/utils.py",
     "tensor/type.py",
     "tensor/fourier.py",
-    "tensor/sort.py",
     "tensor/__init__.py",
     "tensor/opt_uncanonicalize.py",
-    "tensor/opt.py",
     "tensor/blas.py",
     "tensor/extra_ops.py",
     "tensor/nlinalg.py",
     "tensor/blas_c.py",
     "tensor/elemwise_cgen.py",
-    "tensor/raw_random.py",
     "tensor/blas_scipy.py",
-    "tensor/basic.py",
     "tensor/tests/test_subtensor.py",
     "tensor/tests/test_utils.py",
     "tensor/tests/test_nlinalg.py",
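theano/tests/test_flake8.py keeps a whitelist of files that are exempt from the flake8 check, and every file cleaned by this commit is removed from that list, so any style regression in them now fails the test suite. A hedged miniature of how such a gate test typically works (names hypothetical; assumes the flake8 command is on PATH):

```python
import subprocess

# Files still exempt from the check; cleaned files get removed from here.
whitelist_flake8 = [
    "tensor/blas_headers.py",
]

def check_flake8(files):
    for path in files:
        if path in whitelist_flake8:
            continue                      # still whitelisted: skip
        rc = subprocess.call(["flake8", path])
        assert rc == 0, "flake8 errors in %s" % path
```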