testgroup / pytensor / Commits / 395b5938

Commit 395b5938
Authored Nov 15, 2011 by nouiz

Merge pull request #210 from pascanur/all_any

All any

Parents: 39350199, 48aa9e84

Showing 5 changed files with 95 additions and 14 deletions (+95 −14)
Changed files:

  doc/library/tensor/basic.txt           +22  −0
  theano/tensor/__init__.py               +5  −5
  theano/tensor/basic.py                 +26  −9
  theano/tensor/elemwise.py              +38  −0
  theano/tensor/tests/test_elemwise.py    +4  −0
doc/library/tensor/basic.txt

@@ -710,6 +710,28 @@ Reductions
     * an *int* - computed along this axis
     * a *list of ints* - computed along these axes
 
+.. function:: all(x, axis=None)
+
+    :Parameter: *x* - symbolic Tensor (or compatible)
+    :Parameter: *axis* - axis or axes along which to apply bitwise and
+    :Returns: bitwise and of *x* along *axis*
+
+    axis can be:
+        * *None* - variance computed along all axes (like numpy)
+        * an *int* - computed along this axis
+        * a *list of ints* - computed along these axes
+
+.. function:: any(x, axis=None)
+
+    :Parameter: *x* - symbolic Tensor (or compatible)
+    :Parameter: *axis* - axis or axes along which to apply bitwise or
+    :Returns: bitwise or of *x* along *axis*
+
+    axis can be:
+        * *None* - variance computed along all axes (like numpy)
+        * an *int* - computed along this axis
+        * a *list of ints* - computed along these axes
 
 Indexing
 ========
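To make the documented signatures concrete, here is a minimal usage sketch (the variable names and the int8 `bmatrix` input are illustrative assumptions, not part of the commit):

    import theano
    import theano.tensor as T

    flags = T.bmatrix('flags')        # hypothetical matrix of 0/1 flags

    row_all = T.all(flags, axis=1)    # bitwise-and reduction over each row
    any_flag = T.any(flags)           # axis=None: reduce over every axis, like numpy

    check = theano.function([flags], [row_all, any_flag])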
theano/tensor/__init__.py

@@ -12,7 +12,8 @@ import blas
 import blas_scipy
 import xlogx
-import raw_random, randomstreams
+import raw_random
+import randomstreams
 import shared_randomstreams
 from randomstreams import \
     RandomStreams
@@ -23,7 +24,7 @@ random = RandomStreams(seed=0xBAD5EED)
 from elemwise import \
     DimShuffle, Elemwise, CAReduce
 import sharedvar  # adds shared-variable constructors
 # We import as `_shared` instead of `shared` to avoid confusion between
 # `theano.shared` and `tensor._shared`.
@@ -47,8 +48,7 @@ def shared(*args, **kw):
     return _shared(*args, **kw)
 import nnet  # used for softmax, sigmoid, etc.
 from tensor_grad import Rop, Lop, grad, numeric_grad, verify_grad
theano/tensor/basic.py

@@ -32,6 +32,8 @@ _logger=logging.getLogger("theano.tensor.basic")
 #This is needed as we will hide it later
 python_complex = complex
+python_any = any
+python_all = all
 
 # Define common subsets of dtypes (as strings).
 int_dtypes = map(str, scal.int_types)
@@ -52,7 +54,7 @@ def check_equal_numpy(x, y):
     if isinstance(x, numpy.ndarray) and isinstance(y, numpy.ndarray):
         return x.dtype == y.dtype and x.shape == y.shape and numpy.any(abs(x - y) < 1e-10)
     elif isinstance(x, numpy.random.RandomState) and isinstance(y, numpy.random.RandomState):
-        return all(numpy.all(a == b) for a, b in zip(x.__getstate__(), y.__getstate__()))
+        return python_all(numpy.all(a == b) for a, b in zip(x.__getstate__(), y.__getstate__()))
     else:
         return x == y
@@ -140,7 +142,7 @@ def as_tensor_variable(x, name=None, ndim=None):
             return shape_padleft(x, n_ones=(ndim - x.type.ndim))
         else:
             return x
-    if isinstance(x, (tuple, list)) and any(isinstance(xi, Variable) for xi in x):
+    if isinstance(x, (tuple, list)) and python_any(isinstance(xi, Variable) for xi in x):
        try:
            return stack(*x)
        except (TypeError, ValueError):
@@ -463,7 +465,7 @@ def get_constant_value(v):
             # Ensure the Join is joining only scalar variables (so that
             # the constant value can be found at the same index as the one
             # used in the sub-tensor).
-            all(var.ndim == 0 for var in v.owner.inputs[0].owner.inputs) and
+            python_all(var.ndim == 0 for var in v.owner.inputs[0].owner.inputs) and
             len(v.owner.op.idx_list) == 1):
             # Note the '+ 1' is because the first argument to Join is the
@@ -477,7 +479,7 @@ def get_constant_value(v):
             theano.tensor.opt.MakeVector) and
             # MakeVector normally accept only scalar as input.
             # We put this check in case there is change in the future
-            all(var.ndim == 0 for var in v.owner.inputs[0].owner.inputs) and
+            python_all(var.ndim == 0 for var in v.owner.inputs[0].owner.inputs) and
             len(v.owner.op.idx_list) == 1):
             ret = v.owner.inputs[0].owner.inputs[v.owner.op.idx_list[0]]
@@ -818,7 +820,7 @@ class TensorType(Type):
         if b in named_broadcastable:
             bcast = named_broadcastable[b]
         else:
-            if any(b):
+            if python_any(b):
                 bcast = str(b)
             else:
                 bcast = '%iD' % len(b)
@@ -1238,6 +1240,12 @@ class _tensor_py_operators:
     size = property(lambda self: prod(self.shape))
 
     # We can't implement __len__ to provide a better error message.
+
+    def any(self, axis=None):
+        return elemwise.Any(axis)(self)
+
+    def all(self, axis=None):
+        return elemwise.All(axis)(self)
 
     # Otherwise TensorVariable[:-1] does not work as Python 2.5.1 calls
     # __len__ before calling __getitem__. It also does not catch the raised
     # Exception!
@@ -3935,7 +3943,7 @@ class Split(Op):
         if numpy.sum(splits) != len_along_axis:
             raise ValueError('The splits sum to %s, expected %s' % (numpy.sum(splits), len_along_axis))
-        if not all(splits):
+        if not python_all(splits):
             raise ValueError('Cannot have a split of zero.')
 
         # Checking is done, let's roll the splitting algorithm!
@@ -4108,7 +4116,7 @@ class Join(Op):
     def _make_node_internal(self, axis, tensors,
                             as_tensor_variable_args, output_maker):
         orig = as_tensor_variable_args
-        if not all(targs.type.ndim for targs in as_tensor_variable_args):
+        if not python_all(targs.type.ndim for targs in as_tensor_variable_args):
             raise TypeError('Join cannot handle arguments of dimension 0. For joining scalar values, see @stack');
         # Handle single-tensor joins immediately.
         if len(as_tensor_variable_args) == 1:
@@ -4166,7 +4174,7 @@ class Join(Op):
         outputs = [output_maker(bcastable)]
 
         node = Apply(self, inputs, outputs)
-        if any(not x.type.broadcastable[0] for x in orig):
+        if python_any(not x.type.broadcastable[0] for x in orig):
             node.tag.shape_zero = None
         else:
             node.tag.shape_zero = len(orig)
@@ -4759,7 +4767,7 @@ def arange(start, stop=None, step=1, dtype=None):
         if (config.floatX == 'float32' and
             numpy_dtype == 'float64' and
             # No explicit float64 in the three arguments?
-            all(dt != 'float64'
+            python_all(dt != 'float64'
                 for dt in [s.dtype for s in (start, stop, step)])):
             # We use float32 instead.
             assert dtype != 'float64'
@@ -5531,8 +5539,17 @@ def tensordot(x, y=None, axes=2):
 #TODO: tensordot should be function as described in rst docs.
 
 def outer(x, y):
     """Return vector-vector outer product."""
     return dot(
             x.dimshuffle(0, 'x'),
             y.dimshuffle('x', 0))
+
+
+def any(x, axis=None):
+    return elemwise.Any(axis)(x)
+
+
+def all(x, axis=None):
+    return elemwise.All(axis)(x)
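The `python_all` / `python_any` aliases are captured because this commit makes `theano.tensor.basic` define its own module-level `all` and `any` (the symbolic reductions above), which shadow the Python builtins for the rest of the module. A standalone sketch of that pattern, independent of Theano (names are illustrative):

    python_any = any          # capture the builtins before they get shadowed
    python_all = all

    def any(x, axis=None):    # module-level symbolic version now shadows builtins.any
        return ('Any', axis, x)

    def all(x, axis=None):
        return ('All', axis, x)

    def only_scalars(values):
        # Eager, in-module checks must go through the captured builtins.
        return python_all(isinstance(v, (int, float)) for v in values)

    print(only_scalars([1, 2.5]))    # True
    print(any([0, 1], axis=0))       # ('Any', 0, [0, 1])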
theano/tensor/elemwise.py

@@ -1258,6 +1258,44 @@ for(int i=0;i<%(iname)s->nd;i++){
         return ()
 
+
+class All(CAReduce):
+    """ Applies `bitwise and` to all the values of a tensor along the
+    specified axis(es).
+
+    Equivalent to CAReduce(scalar.and_, axis=axis)
+    """
+    def __init__(self, axis=None):
+        CAReduce.__init__(self, scalar.and_, axis)
+
+    def _output_dtype(self, idtype):
+        return "int8"
+
+    def __str__(self):
+        if self.axis is None:
+            return "All"
+        else:
+            return "All{%s}" % ", ".join(map(str, self.axis))
+
+
+class Any(CAReduce):
+    """ Applies `bitwise or` to all the values of a tensor along the
+    specified axis(es).
+
+    Equivalent to CAReduce(scalar.or_, axis=axis)
+    """
+    def __init__(self, axis=None):
+        CAReduce.__init__(self, scalar.or_, axis)
+
+    def _output_dtype(self, idtype):
+        return "int8"
+
+    def __str__(self):
+        if self.axis is None:
+            return "Any"
+        else:
+            return "Any{%s}" % ", ".join(map(str, self.axis))
+
+
 class Sum(CAReduce):
     """
     Sums all the values of a tensor along the specified axis(es).
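`All` and `Any` are thin `CAReduce` specializations: the scalar op is `and_` or `or_` and `_output_dtype` pins the result to int8. As a rough behavioural reference only (numpy, not the Theano ops themselves), the reductions act like this:

    import numpy as np

    def all_reduce(x, axis=None):
        # logical-and reduction, result cast to int8 to mirror _output_dtype
        return np.all(x, axis=axis).astype('int8')

    def any_reduce(x, axis=None):
        return np.any(x, axis=axis).astype('int8')

    x = np.array([[1, 0, 1],
                  [1, 1, 1]], dtype='int8')
    print(all_reduce(x, axis=1))   # [0 1]
    print(any_reduce(x))           # 1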
theano/tensor/tests/test_elemwise.py

@@ -312,6 +312,10 @@ class test_CAReduce(unittest.TestCase):
                                  test_nan=True)
                 self.with_linker(gof.PerformLinker(), minimum, dtype=dtype,
                                  test_nan=True)
+                self.with_linker(gof.PerformLinker(), or_, dtype=dtype,
+                                 test_nan=True)
+                self.with_linker(gof.PerformLinker(), and_, dtype=dtype,
+                                 test_nan=True)
 
     def test_c(self):
         for dtype in ["floatX", "complex64", "complex128", "int8", "uint8"]:
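The new cases extend the existing NaN coverage in test_CAReduce to the `or_` and `and_` reductions. For orientation only, numpy treats NaN as truthy under these reductions, which is presumably the behaviour being exercised:

    import numpy as np

    x = np.array([np.nan, 0.0, 1.0])
    print(np.any(x))   # True: NaN counts as nonzero
    print(np.all(x))   # False: the 0.0 entry fails the conjunction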