Commit 395b5938 authored by nouiz

Merge pull request #210 from pascanur/all_any

All any
@@ -710,6 +710,28 @@ Reductions
* an *int* - computed along this axis
* a *list of ints* - computed along these axes
.. function:: all(x, axis=None)

   :Parameter: *x* - symbolic Tensor (or compatible)
   :Parameter: *axis* - axis or axes along which to apply bitwise and
   :Returns: bitwise and of *x* along *axis*

   axis can be:

   * *None* - bitwise and computed along all axes (like numpy)
   * an *int* - computed along this axis
   * a *list of ints* - computed along these axes

.. function:: any(x, axis=None)

   :Parameter: *x* - symbolic Tensor (or compatible)
   :Parameter: *axis* - axis or axes along which to apply bitwise or
   :Returns: bitwise or of *x* along *axis*

   axis can be:

   * *None* - bitwise or computed along all axes (like numpy)
   * an *int* - computed along this axis
   * a *list of ints* - computed along these axes
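A minimal usage sketch of the two reductions above (the variable names and data values are illustrative only)::

   import numpy
   import theano
   import theano.tensor as tensor

   x = tensor.matrix('x', dtype='int8')
   f = theano.function([x], [tensor.all(x, axis=0), tensor.any(x, axis=1)])

   col_all, row_any = f(numpy.array([[1, 0], [1, 1]], dtype='int8'))
   # col_all == [1, 0] (and down each column), row_any == [1, 1] (or across each row)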
Indexing
========
...
@@ -12,7 +12,8 @@ import blas
import blas_scipy
import xlogx
import raw_random
import randomstreams
import shared_randomstreams
from randomstreams import \
    RandomStreams
@@ -23,7 +24,7 @@ random = RandomStreams(seed=0xBAD5EED)
from elemwise import \
    DimShuffle, Elemwise, CAReduce
import sharedvar  # adds shared-variable constructors
# We import as `_shared` instead of `shared` to avoid confusion between
# `theano.shared` and `tensor._shared`.
@@ -47,8 +48,7 @@ def shared(*args, **kw):
    return _shared(*args, **kw)
import nnet  # used for softmax, sigmoid, etc.
from tensor_grad import Rop, Lop, grad, numeric_grad, verify_grad
@@ -32,6 +32,8 @@ _logger=logging.getLogger("theano.tensor.basic")
# This is needed as we will hide it later
python_complex=complex
python_any = any
python_all = all
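These aliases are needed because this module later defines symbolic `any` and `all` at module level (see the `def any` / `def all` additions further down), which shadow the Python builtins for all code in this file. A minimal sketch of the pattern, using only names that appear in this diff:

    python_all = all  # capture the Python builtin before it is shadowed

    def all(x, axis=None):
        # symbolic reduction; from here on, bare `all` means this function
        return elemwise.All(axis)(x)

    # Plain-Python checks inside the module must therefore use the alias:
    # `python_all(splits)` returns a bool, while `all(splits)` builds a graph.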
# Define common subsets of dtypes (as strings).
int_dtypes = map(str, scal.int_types)
@@ -52,7 +54,7 @@ def check_equal_numpy(x, y):
    if isinstance(x, numpy.ndarray) and isinstance(y, numpy.ndarray):
        return x.dtype == y.dtype and x.shape == y.shape and numpy.any(abs(x - y) < 1e-10)
    elif isinstance(x, numpy.random.RandomState) and isinstance(y, numpy.random.RandomState):
        return python_all(numpy.all(a==b) for a, b in zip(x.__getstate__(), y.__getstate__()))
    else:
        return x == y
@@ -140,7 +142,7 @@ def as_tensor_variable(x, name=None, ndim=None):
            return shape_padleft(x, n_ones=(ndim - x.type.ndim))
        else:
            return x
    if isinstance(x, (tuple, list)) and python_any(isinstance(xi, Variable) for xi in x):
        try:
            return stack(*x)
        except (TypeError, ValueError):
@@ -463,7 +465,7 @@ def get_constant_value(v):
        # Ensure the Join is joining only scalar variables (so that
        # the constant value can be found at the same index as the one
        # used in the sub-tensor).
        python_all(var.ndim==0 for var in v.owner.inputs[0].owner.inputs) and
        len(v.owner.op.idx_list) == 1):
        # Note the '+ 1' is because the first argument to Join is the
@@ -477,7 +479,7 @@ def get_constant_value(v):
        theano.tensor.opt.MakeVector) and
        # MakeVector normally accepts only scalars as input.
        # We keep this check in case that changes in the future.
        python_all(var.ndim==0 for var in v.owner.inputs[0].owner.inputs) and
        len(v.owner.op.idx_list) == 1):
        ret = v.owner.inputs[0].owner.inputs[v.owner.op.idx_list[0]]
@@ -818,7 +820,7 @@ class TensorType(Type):
    if b in named_broadcastable:
        bcast = named_broadcastable[b]
    else:
        if python_any(b):
            bcast = str(b)
        else:
            bcast = '%iD' % len(b)
@@ -1238,6 +1240,12 @@ class _tensor_py_operators:
    size = property(lambda self: prod(self.shape))
    # We can't implement __len__ to provide a better error message.
    def any(self, axis=None):
        return elemwise.Any(axis)(self)

    def all(self, axis=None):
        return elemwise.All(axis)(self)
    # Otherwise TensorVariable[:-1] does not work as Python 2.5.1 calls
    # __len__ before calling __getitem__. It also does not catch the raised
    # Exception!
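The two methods added above expose the same reductions in NumPy's method style. A brief sketch, assuming the imports of this module (the variable names are illustrative):

    import theano.tensor as tensor

    x = tensor.bmatrix('x')   # int8 matrix
    rows_ok = x.all(axis=1)   # builds the same graph as elemwise.All(1)(x)
    anything = x.any()        # axis=None reduces over every axis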
@@ -3935,7 +3943,7 @@ class Split(Op):
    if numpy.sum(splits) != len_along_axis:
        raise ValueError('The splits sum to %s, expected %s' % (numpy.sum(splits), len_along_axis))
    if not python_all(splits):
        raise ValueError('Cannot have a split of zero.')
    # Checking is done, let's roll the splitting algorithm!
@@ -4108,7 +4116,7 @@ class Join(Op):
    def _make_node_internal(self, axis, tensors,
                            as_tensor_variable_args, output_maker):
        orig = as_tensor_variable_args
        if not python_all(targs.type.ndim for targs in as_tensor_variable_args):
            raise TypeError('Join cannot handle arguments of dimension 0. For joining scalar values, see @stack')
        # Handle single-tensor joins immediately.
        if len(as_tensor_variable_args) == 1:
@@ -4166,7 +4174,7 @@ class Join(Op):
    outputs = [output_maker(bcastable)]
    node = Apply(self, inputs, outputs)
    if python_any(not x.type.broadcastable[0] for x in orig):
        node.tag.shape_zero = None
    else:
        node.tag.shape_zero = len(orig)
@@ -4759,7 +4767,7 @@ def arange(start, stop=None, step=1, dtype=None):
        config.floatX == 'float32' and
        numpy_dtype == 'float64' and
        # No explicit float64 in the three arguments?
        python_all(dt != 'float64'
                   for dt in [s.dtype for s in (start, stop, step)])):
        # We use float32 instead.
        assert dtype != 'float64'
@@ -5531,8 +5539,17 @@ def tensordot(x, y=None, axes=2):
# TODO: tensordot should be a function as described in the rst docs.
def outer(x, y):
    """Return vector-vector outer product."""
    return dot(
        x.dimshuffle(0, 'x'),
        y.dimshuffle('x', 0))
def any(x, axis=None):
    return elemwise.Any(axis)(x)

def all(x, axis=None):
    return elemwise.All(axis)(x)
@@ -1258,6 +1258,44 @@ for(int i=0;i<%(iname)s->nd;i++){
    return ()
class All(CAReduce):
    """Applies `bitwise and` to all the values of a tensor along the
    specified axis(es).

    Equivalent to CAReduce(scalar.and_, axis=axis)
    """
    def __init__(self, axis=None):
        CAReduce.__init__(self, scalar.and_, axis)

    def _output_dtype(self, idtype):
        return "int8"

    def __str__(self):
        if self.axis is None:
            return "All"
        else:
            return "All{%s}" % ", ".join(map(str, self.axis))
class Any(CAReduce):
    """Applies `bitwise or` to all the values of a tensor along the
    specified axis(es).

    Equivalent to CAReduce(scalar.or_, axis=axis)
    """
    def __init__(self, axis=None):
        CAReduce.__init__(self, scalar.or_, axis)

    def _output_dtype(self, idtype):
        return "int8"

    def __str__(self):
        if self.axis is None:
            return "Any"
        else:
            return "Any{%s}" % ", ".join(map(str, self.axis))
class Sum(CAReduce):
    """
    Sums all the values of a tensor along the specified axis(es).
...
@@ -312,6 +312,10 @@ class test_CAReduce(unittest.TestCase):
                        test_nan=True)
        self.with_linker(gof.PerformLinker(), minimum, dtype=dtype,
                         test_nan=True)
        self.with_linker(gof.PerformLinker(), or_, dtype=dtype,
                         test_nan=True)
        self.with_linker(gof.PerformLinker(), and_, dtype=dtype,
                         test_nan=True)

    def test_c(self):
        for dtype in ["floatX", "complex64", "complex128", "int8", "uint8"]:
...