testgroup / pytensor / Commits / bd11e130

Merge pull request #3074 from harlouci/flake8_v2

flake8

Authored July 02, 2015 by Frédéric Bastien
Parents: cb08bc11 fc6d2310
Showing 15 changed files with 126 additions and 138 deletions.
theano/tensor/basic.py                  +0   -0
theano/tensor/blas.py                   +3   -3
theano/tensor/elemwise.py               +0   -0
theano/tensor/inplace.py                +0   -1
theano/tensor/opt.py                    +0   -0
theano/tensor/raw_random.py             +36  -35
theano/tensor/shared_randomstreams.py   +4   -2
theano/tensor/sharedvar.py              +8   -6
theano/tensor/slinalg.py                +9   -16
theano/tensor/sort.py                   +20  -20
theano/tensor/subtensor.py              +23  -25
theano/tensor/utils.py                  +5   -5
theano/tensor/var.py                    +8   -10
theano/tensor/xlogx.py                  +10  -2
theano/tests/test_flake8.py             +0   -13
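The merge is a pure style cleanup: it brings these modules into compliance with flake8 (pyflakes + pycodestyle) so that their entries can be dropped from the whitelist in theano/tests/test_flake8.py. The recurring fixes in the diffs below are unused imports and variables, backslash line continuations replaced by parenthesized expressions, comment spacing, and continuation-line indentation. A minimal sketch of these patterns (illustrative only; the names here are made up, not taken from the diff):

# Illustrative sketch of the kinds of flake8 fixes applied in this merge.

# Before: backslash continuation and an unused local (pyflakes F841).
def total_before(a, b):
    unused = a - b
    return a + \
        b

# After: implicit continuation inside parentheses, dead code removed.
def total_after(a, b):
    return (a +
            b)

# An import kept only as an availability probe trips F401 ("imported but
# unused"); '# noqa' tells flake8 to skip that line, as done for mkl in
# blas.py below.
try:
    import mkl  # noqa
except ImportError:
    mkl = None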
theano/tensor/basic.py @ bd11e130
(diff collapsed)
theano/tensor/blas.py @ bd11e130

@@ -266,7 +266,7 @@ SOMEPATH/Canopy_64bit/User/lib/python2.7/site-packages/numpy/distutils/system_in
 # Using "conda install mkl" will install both, as well as
 # optimized versions of numpy and scipy.
 try:
-    import mkl
+    import mkl  # noqa
 except ImportError as e:
     _logger.info('Conda mkl is not available: %s', e)
 else:

@@ -1599,11 +1599,11 @@ class GemmOptimizer(Optimizer):
                         )
                         did_something = True
                         nb_replacement += 1
-                    except InconsistencyError as e:
+                    except InconsistencyError:
                         # TODO: retry other applications of gemm (see comment
                         # in _gemm_from_node)
                         nb_inconsistency_replace += 1
-                    except ReplacementDidntRemovedError as e:
+                    except ReplacementDidntRemovedError:
                         nb_replacement_didn_t_remove += 1
                         self.warned = True
         fgraph.remove_feature(u)
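The GemmOptimizer change above drops the `as e` binding when the caught exception object is never used, which pyflakes reports as an unused local. A minimal sketch of the pattern, using a stand-in exception class rather than Theano's real InconsistencyError:

# Stand-in for theano.gof.fg.InconsistencyError, for illustration only.
class InconsistencyError(Exception):
    pass

def try_replacement():
    raise InconsistencyError("replacement would break the graph")

nb_inconsistency_replace = 0
try:
    try_replacement()
except InconsistencyError:  # no 'as e': the exception object is unused
    nb_inconsistency_replace += 1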
theano/tensor/elemwise.py @ bd11e130
(diff collapsed)
theano/tensor/inplace.py @ bd11e130

@@ -28,7 +28,6 @@ def _scal_inplace(symbol):
     def chk(pstate, r):
         if not r.owner:
             return False
-        op = r.owner.op
         return r.owner.op == rval
     pprint.assign(chk, printing.FunctionPrinter(symbolname.replace('_inplace', '=')))
theano/tensor/opt.py @ bd11e130
(diff collapsed)
theano/tensor/raw_random.py @ bd11e130

 """Define random number Type (`RandomStateType`) and Op (`RandomFunction`)."""
 from __future__ import print_function
-__docformat__ = "restructuredtext en"

 import sys
 from copy import copy

@@ -15,6 +15,8 @@ from theano import gof
 from six import string_types
 from theano.compile import optdb

+__docformat__ = "restructuredtext en"
+

 class RandomStateType(gof.Type):
     """A Type wrapper for numpy.random.RandomState

@@ -85,13 +87,13 @@ class RandomStateType(gof.Type):
 # Register RandomStateType's C code for ViewOp.
-theano.compile.register_view_op_c_code(RandomStateType,
-        """
-        Py_XDECREF(%(oname)s);
-        %(oname)s = %(iname)s;
-        Py_XINCREF(%(oname)s);
-        """,
-        1)
+theano.compile.register_view_op_c_code(
+    RandomStateType,
+    """
+    Py_XDECREF(%(oname)s);
+    %(oname)s = %(iname)s;
+    Py_XINCREF(%(oname)s);
+    """,
+    1)

 random_state_type = RandomStateType()

@@ -135,9 +137,8 @@ class RandomFunction(gof.Op):
                 and self.ndim_added == other.ndim_added

     def __hash__(self):
-        return hash(type(self)) ^ hash(self.fn) \
-            ^ hash(self.outtype) \
-            ^ hash(self.inplace) ^ hash(self.ndim_added)
+        return (hash(type(self)) ^ hash(self.fn) ^ hash(self.outtype) ^
+                hash(self.inplace) ^ hash(self.ndim_added))

     def __getstate__(self):
         return self.state

@@ -233,7 +234,6 @@ class RandomFunction(gof.Op):
         # copy of r if self.inplace is False
         r, shape, args = inputs[0], inputs[1], inputs[2:]
-        assert type(r) == numpy.random.RandomState, (type(r), r)
         r_orig = r

         # If shape == [], that means no shape is enforced, and numpy is
         # trusted to draw the appropriate number of samples, numpy uses

@@ -245,16 +245,16 @@ class RandomFunction(gof.Op):
             shape = tuple(shape)

         if (shape is not None and
-            self.outtype.ndim != len(shape) + self.ndim_added):
+                self.outtype.ndim != len(shape) + self.ndim_added):
             raise ValueError('Shape mismatch: self.outtype.ndim (%i) !='
                              ' len(shape) (%i) + self.ndim_added (%i)'
-                    % (self.outtype.ndim, len(shape), self.ndim_added))
+                             % (self.outtype.ndim, len(shape), self.ndim_added))
         if not self.inplace:
             r = copy(r)
         rout[0] = r
         rval = self.fn(r, *(args + [shape]))
-        if not isinstance(rval, numpy.ndarray) \
-                or str(rval.dtype) != node.outputs[1].type.dtype:
+        if (not isinstance(rval, numpy.ndarray) or
+                str(rval.dtype) != node.outputs[1].type.dtype):
             rval = theano._asarray(rval, dtype=node.outputs[1].type.dtype)

         # When shape is None, numpy has a tendency to unexpectedly

@@ -288,7 +288,7 @@ class RandomFunction(gof.Op):
     def grad(self, inputs, outputs):
         return [theano.gradient.grad_undefined(self, k, inp,
-                'No gradient defined through raw random numbers op')
+                                               'No gradient defined through raw random numbers op')
                 for k, inp in enumerate(inputs)]

     def R_op(self, inputs, eval_points):

@@ -325,8 +325,8 @@ def _infer_ndim_bcast(ndim, shape, *args):
     else:
         if shape_ndim != ndim:
             raise ValueError('ndim should be equal to len(shape), but\n',
-                    'ndim = %s, len(shape) = %s, shape = %s'
-                    % (ndim, shape_ndim, shape))
+                             'ndim = %s, len(shape) = %s, shape = %s'
+                             % (ndim, shape_ndim, shape))

     bcast = []
     pre_v_shape = []

@@ -353,7 +353,8 @@ def _infer_ndim_bcast(ndim, shape, *args):
                     break
             else:
                 if n_a_i == 0:
-                    raise ValueError(('Auto-shape of -1 must overlap'
+                    raise ValueError((
+                        'Auto-shape of -1 must overlap'
                         'with the shape of one of the broadcastable'
                         'inputs'))
                 else:

@@ -373,7 +374,7 @@ def _infer_ndim_bcast(ndim, shape, *args):
         # but we need to know ndim
         if not args:
             raise TypeError(('_infer_ndim_bcast cannot infer shape without'
-                ' either shape or args'))
+                             ' either shape or args'))
         template = reduce(lambda a, b: a + b, args)
         v_shape = template.shape
         bcast = template.broadcastable

@@ -463,7 +464,7 @@ def uniform(random_state, size=None, low=0.0, high=1.0, ndim=None, dtype=None):
         dtype = tensor.scal.upcast(theano.config.floatX, low.dtype, high.dtype)
     ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)
     op = RandomFunction('uniform',
-            tensor.TensorType(dtype=dtype, broadcastable=bcast))
+                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
     return op(random_state, size, low, high)

@@ -487,7 +488,7 @@ def normal(random_state, size=None, avg=0.0, std=1.0, ndim=None, dtype=None):
         dtype = tensor.scal.upcast(theano.config.floatX, avg.dtype, std.dtype)
     ndim, size, bcast = _infer_ndim_bcast(ndim, size, avg, std)
     op = RandomFunction('normal',
-            tensor.TensorType(dtype=dtype, broadcastable=bcast))
+                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
     return op(random_state, size, avg, std)

@@ -517,7 +518,8 @@ def binomial(random_state, size=None, n=1, p=0.5, ndim=None,
     # p=numpy.asarray([.1, .2, .3], dtype='float64'))
     n = tensor.cast(n, 'int32')
     op = RandomFunction('binomial',
-            tensor.TensorType(dtype=dtype, broadcastable=(False,) * ndim))
+                        tensor.TensorType(dtype=dtype,
+                                          broadcastable=(False,) * ndim))
     return op(random_state, size, n, p)

@@ -583,7 +585,7 @@ def random_integers(random_state, size=None, low=0, high=1, ndim=None,
     high = tensor.as_tensor_variable(high)
     ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)
     op = RandomFunction(random_integers_helper,
-            tensor.TensorType(dtype=dtype, broadcastable=bcast))
+                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
     return op(random_state, size, low, high)

@@ -719,8 +721,9 @@ def permutation(random_state, size=None, n=1, ndim=None, dtype='int64'):
     ndim, size, bcast = _infer_ndim_bcast(ndim, size)
     # print "NDIM", ndim, size
     op = RandomFunction(permutation_helper,
-            tensor.TensorType(dtype=dtype, broadcastable=bcast + (False,)),
-            ndim_added=1)
+                        tensor.TensorType(dtype=dtype,
+                                          broadcastable=bcast + (False,)),
+                        ndim_added=1)
     return op(random_state, size, n)

@@ -738,14 +741,11 @@ def multinomial_helper(random_state, n, pvals, size):
         ndim = len(size)
     else:
         ndim = max(n.ndim, pvals.ndim - 1)
     out_ndim = ndim + 1

     # broadcast n to ndim dimensions and pvals to ndim+1
     if n.ndim > ndim:
-        raise ValueError('n.ndim (%i) should not be larger than len(size) (%i)'
-                % (n.ndim, ndim),
-                n, size)
+        raise ValueError('n.ndim (%i) should not be larger than len(size) (%i)'
+                         % (n.ndim, ndim), n, size)
     if n.ndim < ndim:
         n = n.reshape((1,) * (ndim - n.ndim) + n.shape)

@@ -788,7 +788,7 @@ def multinomial_helper(random_state, n, pvals, size):
             # because mtrand.pyx has a ValueError that will trigger if
             # sum(pvals[:-1]) > 1.0
             pvi = pvi * (1.0 - 5e-5)
-            #pvi = pvi * .9
+            # pvi = pvi * .9
             pisum = numpy.sum(pvi)
         elif pvi[-1] < 5e-5:
             # will this even work?
             pvi = pvi * (1.0 - 5e-5)

@@ -859,8 +859,9 @@ def multinomial(random_state, size=None, n=1, pvals=[0.5, 0.5],
     ndim, size, bcast = _infer_ndim_bcast(ndim, size, n, tmp)
     bcast = bcast + (pvals.type.broadcastable[-1],)
     op = RandomFunction(multinomial_helper,
-            tensor.TensorType(dtype=dtype, broadcastable=bcast),
-            ndim_added=1)
+                        tensor.TensorType(dtype=dtype,
+                                          broadcastable=bcast),
+                        ndim_added=1)
     return op(random_state, size, n, pvals)
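Most of the raw_random.py hunks only re-indent continuation lines, but the `__hash__` rewrite also replaces backslash continuations with a single parenthesized expression, the form PEP 8 recommends. A toy sketch of the same pattern (not the real RandomFunction class; the attributes are placeholders):

# Toy class illustrating the parenthesized, XOR-combined __hash__ used in
# RandomFunction above.
class ToyOp(object):
    def __init__(self, fn, outtype, inplace, ndim_added):
        self.fn = fn
        self.outtype = outtype
        self.inplace = inplace
        self.ndim_added = ndim_added

    def __hash__(self):
        # One bracketed expression instead of backslash-continued lines.
        return (hash(type(self)) ^ hash(self.fn) ^ hash(self.outtype) ^
                hash(self.inplace) ^ hash(self.ndim_added))

assert isinstance(hash(ToyOp('uniform', 'float64', False, 0)), int)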
theano/tensor/shared_randomstreams.py @ bd11e130

 """Define RandomStreams, providing random number variables for Theano
 graphs.
 """
-__docformat__ = "restructuredtext en"
 import copy
+
 import numpy

 from theano.compile.sharedvalue import (SharedVariable, shared_constructor,
                                         shared)
 from theano.tensor import raw_random

+__docformat__ = "restructuredtext en"
+

 class RandomStateSharedVariable(SharedVariable):
     pass

@@ -77,7 +79,7 @@ class RandomStreams(raw_random.RandomStreamsBase):
         for old_r, new_r in self.state_updates:
             old_r_seed = seedgen.randint(2 ** 30)
             old_r.set_value(numpy.random.RandomState(int(old_r_seed)),
-                    borrow=True)
+                            borrow=True)

     def __getitem__(self, item):
         """Retrieve the numpy RandomState instance associated with a
theano/tensor/sharedvar.py @ bd11e130

@@ -41,10 +41,10 @@ def tensor_constructor(value, name=None, strict=False, allow_downcast=None,
         broadcastable = (False,) * len(value.shape)
     type = TensorType(value.dtype, broadcastable=broadcastable)
-    return TensorSharedVariable(type=type,
-            value=numpy.array(value, copy=(not borrow)),
-            name=name,
-            strict=strict,
-            allow_downcast=allow_downcast)
+    return TensorSharedVariable(type=type,
+                                value=numpy.array(value, copy=(not borrow)),
+                                name=name,
+                                strict=strict,
+                                allow_downcast=allow_downcast)

 # TensorSharedVariable brings in the tensor operators, is not ideal, but works

@@ -85,8 +85,10 @@ def scalar_constructor(value, name=None, strict=False, allow_downcast=None,
         # Do not pass the dtype to asarray because we want this to fail if
         # strict is True and the types do not match.
-        rval = ScalarSharedVariable(type=tensor_type,
-                value=numpy.array(value, copy=True),
-                name=name, strict=strict, allow_downcast=allow_downcast)
+        rval = ScalarSharedVariable(type=tensor_type,
+                                    value=numpy.array(value, copy=True),
+                                    name=name,
+                                    strict=strict,
+                                    allow_downcast=allow_downcast)
         return rval
     except Exception:
         traceback.print_exc()
theano/tensor/slinalg.py @ bd11e130

 import logging
-logger = logging.getLogger(__name__)
-import numpy

 import warnings

 from six.moves import xrange

-from theano.gof import Op, Apply
-from theano.tensor import as_tensor_variable, dot, DimShuffle, Dot
-from theano.tensor.blas import Dot22
-from theano import tensor
-import theano.tensor
-from theano.tensor.opt import (register_stabilize,
-        register_specialize, register_canonicalize)
-from theano.gof import local_optimizer
-from theano.gof.opt import Optimizer
-from theano.gradient import DisconnectedType
+import numpy

 try:
     import scipy.linalg

@@ -24,6 +11,13 @@ except ImportError:
     # some ops (e.g. Cholesky, Solve, A_Xinv_b) won't work
     imported_scipy = False

+from theano import tensor
+import theano.tensor
+from theano.tensor import as_tensor_variable
+from theano.gof import Op, Apply
+
+logger = logging.getLogger(__name__)
+

 MATRIX_STRUCTURES = (
     'general',
     'symmetric',

@@ -123,7 +117,6 @@ class CholeskyGrad(Op):
                     F[k, k] /= (2 * L[k, k])
         else:
             F = numpy.triu(dz)
-            M = N - 1
             for k in xrange(N - 1, -1, -1):
                 for j in xrange(k + 1, N):
                     for i in xrange(j, N):

@@ -182,7 +175,7 @@ class Solve(Op):
         else:
             rval = scipy.linalg.solve(A, b)
         output_storage[0][0] = rval

     # computes shape of x where x = inv(A) * b
     def infer_shape(self, node, shapes):
         Ashape, Bshape = shapes
theano/tensor/sort.py @ bd11e130

@@ -28,7 +28,7 @@ class SortOp(theano.Op):
     def make_node(self, input, axis=-1):
         input = theano.tensor.as_tensor_variable(input)
         if (axis is None or
-            (isinstance(axis, theano.Constant) and axis.data is None)):
+                (isinstance(axis, theano.Constant) and axis.data is None)):
             axis = theano.Constant(theano.gof.generic, None)
             # axis=None flattens the array before sorting
             out_type = tensor(dtype=input.dtype, broadcastable=[False])

@@ -45,7 +45,7 @@ class SortOp(theano.Op):
     def infer_shape(self, node, inputs_shapes):
         if (isinstance(node.inputs[1], theano.Constant) and
-            node.inputs[1].data is None):
+                node.inputs[1].data is None):
             # That means axis = None,
             # So the array is flattened before being sorted
             return [(mul(*inputs_shapes[0]),)]

@@ -64,16 +64,17 @@ class SortOp(theano.Op):
                 " matrix (and axis is None or 0) and tensor3")
         if a.ndim == 1:
             idx = argsort(*inputs, kind=self.kind, order=self.order)
-            #rev_idx = numpy.where(idx[None, :]==numpy.arange(5)[:,None])[1]
+            # rev_idx = numpy.where(idx[None, :]==numpy.arange(5)[:,None])[1]
             rev_idx = theano.tensor.eq(idx[None, :],
                                        arange(a.shape[0])[:, None]).nonzero()[1]
             inp_grad = output_grads[0][rev_idx]
         elif a.ndim == 2:
             if (axis is None or
-                (isinstance(axis, theano.Constant) and axis.data is None)):
+                    (isinstance(axis, theano.Constant) and axis.data is None)):
                 idx = argsort(*inputs, kind=self.kind, order=self.order)
-                rev_idx = theano.tensor.eq(idx[None, :],
-                        arange(a.shape[0] * a.shape[1])[:, None]).nonzero()[1]
+                rev_idx = theano.tensor.eq(
+                    idx[None, :],
+                    arange(a.shape[0] * a.shape[1])[:, None]).nonzero()[1]
                 inp_grad = output_grads[0][rev_idx].reshape(a.shape)
         elif (axis == 0 or
               (isinstance(axis, theano.Constant) and axis.data == 0)):

@@ -85,7 +86,7 @@ class SortOp(theano.Op):
             indices = self.__get_argsort_indices(a, axis)
             inp_grad = output_grads[0][indices[0], indices[1], indices[2]]
         elif (axis is None or
-              (isinstance(axis, theano.Constant) and axis.data is None)):
+                (isinstance(axis, theano.Constant) and axis.data is None)):
             rev_idx = self.__get_argsort_indices(a, axis)
             inp_grad = output_grads[0][rev_idx].reshape(a.shape)
         axis_grad = theano.gradient.grad_undefined(

@@ -103,13 +104,13 @@ class SortOp(theano.Op):
             list of lenght len(a.shape) otherwise
         """
         # The goal is to get gradient wrt input from gradient
         # wrt sort(input, axis)
         idx = argsort(a, axis, kind=self.kind, order=self.order)
         # rev_idx is the reverse of previous argsort operation
         rev_idx = argsort(idx, axis, kind=self.kind, order=self.order)
         if (axis is None or
-            (isinstance(axis, theano.Constant) and axis.data is None)):
+                (isinstance(axis, theano.Constant) and axis.data is None)):
             return rev_idx
         indices = []
         if axis.data >= 0:

@@ -120,7 +121,7 @@ class SortOp(theano.Op):
             if i == axis_data:
                 indices.append(rev_idx)
             else:
                 index_shape = [1] * a.ndim
                 index_shape[i] = a.shape[i]
                 # it's a way to emulate numpy.ogrid[0: a.shape[0], 0: a.shape[1], 0: a.shape[2]]
                 indices.append(theano.tensor.arange(a.shape[i]).reshape(index_shape))

@@ -178,28 +179,27 @@ class ArgSortOp(theano.Op):
         return hash(type(self)) ^ hash(self.order) ^ hash(self.kind)

     def __str__(self):
-        return (self.__class__.__name__
-                + "{%s, %s}" % (self.kind, str(self.order)))
+        return (self.__class__.__name__ +
+                "{%s, %s}" % (self.kind, str(self.order)))

     def make_node(self, input, axis=-1):
         input = theano.tensor.as_tensor_variable(input)
         if (axis is None or
-            (isinstance(axis, theano.Constant) and axis.data is None)):
+                (isinstance(axis, theano.Constant) and axis.data is None)):
             axis = theano.Constant(theano.gof.generic, None)
             bcast = [False]
         else:
             axis = theano.tensor.as_tensor_variable(axis)
             bcast = input.type.broadcastable
-        return theano.Apply(self, [input, axis],
-            [theano.tensor.TensorType(dtype="int64", broadcastable=bcast)()])
+        return theano.Apply(
+            self, [input, axis],
+            [theano.tensor.TensorType(dtype="int64", broadcastable=bcast)()])

     def perform(self, node, inputs, output_storage):
         a = inputs[0]
         axis = inputs[1]
         z = output_storage[0]
-        z[0] = theano._asarray(np.argsort(a, axis, self.kind, self.order),
-                dtype=node.outputs[0].dtype)
+        z[0] = theano._asarray(np.argsort(a, axis, self.kind, self.order),
+                               dtype=node.outputs[0].dtype)

     def infer_shape(self, node, inputs_shapes):
         if (isinstance(node.inputs[1], theano.Constant) and
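Beyond the indentation fixes, the grad code above relies on a standard trick: argsorting an argsort yields the inverse permutation, which maps gradients at sorted positions back to their original positions. A small numpy check of that identity:

import numpy

# If idx sorts a, then argsort(idx) is the inverse permutation, so
# indexing the sorted values with it restores the original order.
a = numpy.array([3.0, 1.0, 2.0])
idx = numpy.argsort(a)        # [1, 2, 0]: positions that sort a
rev_idx = numpy.argsort(idx)  # [2, 0, 1]: inverse of idx
assert (a[idx][rev_idx] == a).all()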
theano/tensor/subtensor.py @ bd11e130

 from copy import copy
 import os
 import sys
 from textwrap import dedent
 import warnings
 import logging
-_logger = logging.getLogger("theano.tensor.subtensor")

 import numpy
 from six.moves import xrange

@@ -32,6 +30,7 @@ if config.cxx:
     except ImportError:
         pass

+_logger = logging.getLogger("theano.tensor.subtensor")

 # Do a lazy import of the sparse module
 sparse_module_ref = None

@@ -336,9 +335,9 @@ class Subtensor(Op):
                               theano.tensor.wscalar, theano.tensor.bscalar]
         invalid_tensor_types = [theano.tensor.fscalar, theano.tensor.dscalar,
                                 theano.tensor.cscalar, theano.tensor.zscalar]
-        if (isinstance(entry, gof.Variable)
-                and (entry.type in invalid_scal_types
-                     or entry.type in invalid_tensor_types)):
+        if (isinstance(entry, gof.Variable) and
+                (entry.type in invalid_scal_types or
+                 entry.type in invalid_tensor_types)):
             raise TypeError("Expected an integer")

         if isinstance(entry, gof.Variable) and entry.type in scal_types:

@@ -346,13 +345,13 @@ class Subtensor(Op):
         elif isinstance(entry, gof.Type) and entry in scal_types:
             return entry

-        if (isinstance(entry, gof.Variable)
-                and entry.type in tensor_types
-                and numpy.all(entry.type.broadcastable)):
+        if (isinstance(entry, gof.Variable) and
+                entry.type in tensor_types and
+                numpy.all(entry.type.broadcastable)):
             return scal.get_scalar_type(entry.type.dtype)
-        elif (isinstance(entry, gof.Type)
-                and entry in tensor_types
-                and numpy.all(entry.broadcastable)):
+        elif (isinstance(entry, gof.Type) and
+              entry in tensor_types and
+              numpy.all(entry.broadcastable)):
             return scal.get_scalar_type(entry.dtype)
         elif slice_ok and isinstance(entry, slice):
             a = entry.start

@@ -425,8 +424,9 @@ class Subtensor(Op):
                          conv(val.step))
         else:
             try:
-                return get_scalar_constant_value(val, only_process_constants=only_process_constants)
+                return get_scalar_constant_value(
+                    val, only_process_constants=only_process_constants)
             except theano.tensor.NotScalarConstantError:
                 if allow_partial:
                     return val

@@ -477,8 +477,8 @@ class Subtensor(Op):
                 % (input.type, expected_type))

         # infer the broadcasting pattern
-        padded = (self.get_constant_idx((None,) + inputs, allow_partial=True)
-                  + [slice(None, None, None)] * (x.type.ndim - len(idx_list)))
+        padded = (self.get_constant_idx((None,) + inputs, allow_partial=True) +
+                  [slice(None, None, None)] * (x.type.ndim - len(idx_list)))
         broadcastable = []
         for i, (p, bc) in enumerate(izip(padded, x.type.broadcastable)):
             if isinstance(p, slice):

@@ -528,9 +528,9 @@ class Subtensor(Op):
             if isinstance(idx, slice):
                 # If it is the default (None, None, None) slice, or a variant,
                 # the shape will be xl
-                if ((idx.start in [None, 0])
-                        and (idx.stop in [None, sys.maxsize])
-                        and (idx.step is None or idx.step == 1)):
+                if ((idx.start in [None, 0]) and
+                        (idx.stop in [None, sys.maxsize]) and
+                        (idx.step is None or idx.step == 1)):
                     outshp.append(xl)
                 else:
                     cnf = get_canonical_form_slice(idx, xl)[0]

@@ -556,8 +556,7 @@ class Subtensor(Op):
             first = x.zeros_like().astype(theano.config.floatX)
         else:
             first = IncSubtensor(self.idx_list)(x.zeros_like(), gz, *rest)
-        return ([first]
-                + [DisconnectedType()()] * len(rest))
+        return ([first] + [DisconnectedType()()] * len(rest))

     def connection_pattern(self, node):

@@ -1034,8 +1033,7 @@ def inc_subtensor(x, y, inplace=False, set_instead_of_inc=False,
         dim_offset = x.ndim - y.ndim
         for dim in xrange(y.ndim):
-            if (x.broadcastable[dim + dim_offset]
-                    and not y.broadcastable[dim]):
+            if (x.broadcastable[dim + dim_offset] and
+                    not y.broadcastable[dim]):
                 # It is acceptable to try to increment a subtensor with a
                 # broadcastable dim with a tensor that is not broadcastable
                 # on that dimension. However, its length must then be 1.

@@ -2133,9 +2131,9 @@ class AdvancedIncSubtensor(Op):
         return hash((type(self), self.inplace, self.set_instead_of_inc))

     def __eq__(self, other):
-        return (type(self) == type(other)
-                and self.inplace == other.inplace
-                and self.set_instead_of_inc == other.set_instead_of_inc)
+        return (type(self) == type(other) and
+                self.inplace == other.inplace and
+                self.set_instead_of_inc == other.set_instead_of_inc)

     def __str__(self):
         return "%s{%s, %s}" % (self.__class__.__name__,
theano/tensor/utils.py @ bd11e130

@@ -79,11 +79,11 @@ def shape_of_variables(fgraph, input_shapes):
     if not hasattr(fgraph, 'shape_feature'):
         fgraph.attach_feature(theano.tensor.opt.ShapeFeature())

-    input_dims = [dimension for inp in fgraph.inputs
-                            for dimension in fgraph.shape_feature.shape_of[inp]]
+    input_dims = [dimension for inp in fgraph.inputs
+                  for dimension in fgraph.shape_feature.shape_of[inp]]

     output_dims = [dimension for shape in fgraph.shape_feature.shape_of.values()
-                             for dimension in shape]
+                   for dimension in shape]

     compute_shapes = theano.function(input_dims, output_dims)

@@ -93,8 +93,8 @@ def shape_of_variables(fgraph, input_shapes):
         " interface changed. Now by default, it clones the graph it receives."
         " To have the old behavior, give it this new parameter `clone=False`.")

-    numeric_input_dims = [dim for inp in fgraph.inputs
-                              for dim in input_shapes[inp]]
+    numeric_input_dims = [dim for inp in fgraph.inputs
+                          for dim in input_shapes[inp]]
     numeric_output_dims = compute_shapes(*numeric_input_dims)

     sym_to_num_dict = dict(izip(output_dims, numeric_output_dims))
theano/tensor/var.py @ bd11e130

 import copy
-import pdb
 import sys
 import traceback as tb
 import warnings

@@ -41,9 +39,9 @@ class _tensor_py_operators:
     # CASTS
     # REMOVED THESE BECAUSE PYTHON appears to require __int__ to return
     # an int. -JB 20081112
-    #def __int__(self): return convert_to_int32(self)
-    #def __float__(self): return convert_to_float64(self)
-    #def __complex__(self): return convert_to_complex128(self)
+    # def __int__(self): return convert_to_int32(self)
+    # def __float__(self): return convert_to_float64(self)
+    # def __complex__(self): return convert_to_complex128(self)

     # COMPARISONS
     _is_nonzero = True

@@ -68,7 +66,6 @@ class _tensor_py_operators:
         rval._is_nonzero = False
         return rval
-

     def __nonzero__(self):
         # Python 2.x
         return self.__bool__()

@@ -215,7 +212,7 @@ class _tensor_py_operators:
     # DO NOT USE THESE BECAUSE INPLACE OPS SHOULD BE INSERTED
     # BY OPTIMIZATIONS ONLY
-    ## ARITHMETIC - INPLACE
+    # ARITHMETIC - INPLACE
     # def __iadd__(self, other):
     #    return _add_inplace(self, other)
     # def __isub__(self, other):

@@ -642,7 +639,8 @@ class TensorVariable(_tensor_py_operators, Variable):
         elif config.warn_float64 == "raise":
             raise Exception(msg)
         elif config.warn_float64 == 'pdb':
-            import pdb; pdb.set_trace()
+            import pdb
+            pdb.set_trace()

 TensorType.Variable = TensorVariable

@@ -744,8 +742,8 @@ class TensorConstant(_tensor_py_operators, Constant):
     def __init__(self, type, data, name=None):
         Constant.__init__(self, type, data, name)
         if (isinstance(data, numpy.ndarray) and
-            data.ndim > 0 and
-            len(numpy.unique(data)) == 1):
+                data.ndim > 0 and
+                len(numpy.unique(data)) == 1):
             self.tag.unique_value = numpy.unique(data)[0]
         else:
             self.tag.unique_value = None
theano/tensor/xlogx.py @ bd11e130

@@ -13,12 +13,15 @@ class XlogX(scalar.UnaryScalarOp):
         if x == 0.0:
             return 0.0
         return x * numpy.log(x)
+
     def impl(self, x):
         return XlogX.st_impl(x)
+
     def grad(self, inputs, grads):
         x, = inputs
         gz, = grads
         return [gz * (1 + scalar.log(x))]
+
     def c_code(self, node, name, inputs, outputs, sub):
         x, = inputs
         z, = outputs

@@ -28,7 +31,8 @@ class XlogX(scalar.UnaryScalarOp):
                 ? 0.0
                 : %(x)s * log(%(x)s);""" % locals()
         raise NotImplementedError('only floatingpoint is implemented')

-scalar_xlogx = XlogX(scalar.upgrade_to_float, name='scalar_xlogx')
+scalar_xlogx = XlogX(scalar.upgrade_to_float,
+                     name='scalar_xlogx')
 xlogx = Elemwise(scalar_xlogx, name='xlogx')

@@ -41,12 +45,15 @@ class XlogY0(scalar.BinaryScalarOp):
         if x == 0.0:
             return 0.0
         return x * numpy.log(y)
+
     def impl(self, x, y):
         return XlogY0.st_impl(x, y)
+
     def grad(self, inputs, grads):
         x, y = inputs
         gz, = grads
         return [gz * scalar.log(y), gz * x / y]
+
     def c_code(self, node, name, inputs, outputs, sub):
         x, y = inputs
         z, = outputs

@@ -56,5 +63,6 @@ class XlogY0(scalar.BinaryScalarOp):
                 ? 0.0
                 : %(x)s * log(%(y)s);""" % locals()
         raise NotImplementedError('only floatingpoint is implemented')

-scalar_xlogy0 = XlogY0(scalar.upgrade_to_float, name='scalar_xlogy0')
+scalar_xlogy0 = XlogY0(scalar.upgrade_to_float,
+                       name='scalar_xlogy0')
 xlogy0 = Elemwise(scalar_xlogy0, name='xlogy0')
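The xlogx.py changes are only blank-line and line-length fixes, but the op encodes a convention worth noting: `XlogX.st_impl` defines 0 * log(0) as 0, the limit of x*log(x) as x approaches 0 from above (the convention used in entropy computations), where plain numpy would produce nan. A standalone numpy sketch of the same rule:

import numpy

def xlogx_scalar(x):
    # Same rule as XlogX.st_impl above: 0 * log(0) is defined as 0.0
    # instead of the nan that 0.0 * numpy.log(0.0) would give.
    if x == 0.0:
        return 0.0
    return x * numpy.log(x)

assert xlogx_scalar(0.0) == 0.0
assert abs(xlogx_scalar(1.0)) < 1e-12  # log(1) == 0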
theano/tests/test_flake8.py @ bd11e130

@@ -57,30 +57,17 @@ whitelist_flake8 = [
     "typed_list/tests/test_type.py",
     "typed_list/tests/test_opt.py",
     "typed_list/tests/test_basic.py",
-    "tensor/var.py",
-    "tensor/sharedvar.py",
-    "tensor/inplace.py",
-    "tensor/slinalg.py",
-    "tensor/shared_randomstreams.py",
-    "tensor/subtensor.py",
-    "tensor/elemwise.py",
-    "tensor/xlogx.py",
     "tensor/blas_headers.py",
-    "tensor/utils.py",
     "tensor/type.py",
     "tensor/fourier.py",
-    "tensor/sort.py",
     "tensor/__init__.py",
     "tensor/opt_uncanonicalize.py",
     "tensor/opt.py",
-    "tensor/blas.py",
     "tensor/extra_ops.py",
     "tensor/nlinalg.py",
     "tensor/blas_c.py",
     "tensor/elemwise_cgen.py",
-    "tensor/raw_random.py",
     "tensor/blas_scipy.py",
-    "tensor/basic.py",
     "tensor/tests/test_subtensor.py",
     "tensor/tests/test_utils.py",
     "tensor/tests/test_nlinalg.py",
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论