提交 bf9413c8 authored 作者: Pascal Lamblin's avatar Pascal Lamblin 提交者: GitHub

Merge pull request #5091 from abergeron/bool

Add bool dtype in scalar and tensor.
......@@ -124,7 +124,7 @@ do is to loop over the entries in *y* and compute the gradient of
>>> import theano.tensor as T
>>> x = T.dvector('x')
>>> y = x ** 2
>>> J, updates = theano.scan(lambda i, y,x : T.grad(y[i], x), sequences=T.arange(y.shape[0]), non_sequences=[y,x])
>>> J, updates = theano.scan(lambda i, y, x : T.grad(y[i], x), sequences=T.arange(y.shape[0]), non_sequences=[y, x])
>>> f = theano.function([x], J, updates=updates)
>>> f([4, 4])
array([[ 8., 0.],
......
......@@ -53,7 +53,7 @@ PLATFORMS = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"]
MAJOR = 0
MINOR = 9
MICRO = 0
SUFFIX = "dev3" # Should be blank except for rc's, betas, etc.
SUFFIX = "dev4" # Should be blank except for rc's, betas, etc.
ISRELEASED = False
VERSION = '%d.%d.%d%s' % (MAJOR, MINOR, MICRO, SUFFIX)
......
from __future__ import absolute_import, print_function, division
import os
import pickle
import sys
import unittest
from nose.plugins.skip import SkipTest
import theano
from theano.compat import PY3
from theano.gof import CachedConstantError, FunctionGraph
......@@ -32,14 +29,11 @@ class TFunctionGraph(unittest.TestCase):
pickle.loads(s)
def test_node_outputs_not_used(self):
"""In the past, we where removing some not used variable from
fgraph.variables event if the apply had other output used in
the graph. This caused a crash.
This test run the pickle that reproduce this case.
"""
if sys.version_info[:2] < (2, 7):
raise SkipTest("This test need python 2.7 or more recent.")
# In the past, we were removing some unused variables from
# fgraph.variables even if the apply had other outputs used in
# the graph. This caused a crash.
# This test runs the pickle that reproduces this case.
with open(os.path.join(os.path.dirname(__file__),
'test_fg_old_crash.pkl'),
'rb') as f:
......
......@@ -129,7 +129,8 @@ class GpuElemwise(HideC, Elemwise):
support_code += fake_node.op.c_support_code()
except MethodNotDefined:
pass
for npy, ga in [("npy_uint8", "ga_ubyte"),
for npy, ga in [("npy_bool", "ga_bool"),
("npy_uint8", "ga_ubyte"),
("npy_uint16", "ga_ushort"),
("npy_uint32", "ga_uint"),
("npy_uint64", "ga_ulong"),
......
......@@ -408,6 +408,7 @@ class GpuArrayType(Type):
'float16': (float, 'npy_float16', 'NPY_FLOAT16'),
'float32': (float, 'npy_float32', 'NPY_FLOAT32'),
'float64': (float, 'npy_float64', 'NPY_FLOAT64'),
'bool': (int, 'npy_bool', 'NPY_BOOL'),
'uint8': (int, 'npy_uint8', 'NPY_UINT8'),
'int8': (int, 'npy_int8', 'NPY_INT8'),
'uint16': (int, 'npy_uint16', 'NPY_UINT16'),
......
......@@ -477,7 +477,7 @@ def grad(cost, wrt, consider_constant=None,
# function, sure, but nonetheless one we can and should support.
# So before we try to cast it make sure it even has a dtype
if (hasattr(g_cost.type, 'dtype') and
cost.type.dtype not in tensor.discrete_dtypes):
cost.type.dtype in tensor.continuous_dtypes):
# Here we enforce the constraint that floating point variables
# have the same dtype as their gradient.
g_cost = g_cost.astype(cost.type.dtype)
......@@ -485,7 +485,7 @@ def grad(cost, wrt, consider_constant=None,
# This is to be enforced by the Op.grad method for the
# Op that outputs cost.
if hasattr(g_cost.type, 'dtype'):
assert g_cost.type.dtype not in tensor.discrete_dtypes
assert g_cost.type.dtype in tensor.continuous_dtypes
grad_dict[cost] = g_cost
......@@ -1335,12 +1335,11 @@ def _float_ones_like(x):
""" Like ones_like, but forces the object to have a
floating point dtype """
rval = tensor.ones_like(x)
dtype = x.type.dtype
if dtype not in tensor.float_dtypes:
dtype = theano.config.floatX
if rval.type.dtype.find('float') != -1:
return rval
return rval.astype(theano.config.floatX)
return tensor.ones_like(x, dtype=dtype)
class numeric_grad(object):
......
......@@ -237,8 +237,11 @@ optdb['canonicalize'].register('local_cut_gpu_host_gpu',
# 'float64', 'complex128' and 'complex64' are not supported in elemwise
# on the gpu.
elemwise_cuda_dtype_supported = ['float32', 'uint8', 'int8', 'uint16', 'int16',
'uint32', 'int32', 'uint64', 'int64']
elemwise_cuda_dtype_supported = ['float32', 'bool',
'uint8', 'int8',
'uint16', 'int16',
'uint32', 'int32',
'uint64', 'int64']
def dtype_in_elemwise_supported(op):
......@@ -298,8 +301,8 @@ def local_gpu_elemwise_0(node):
return False
# first establish that float32 can store all inputs
upcastable = set(['float32', 'int8', 'int16', 'uint8',
'uint16'])
upcastable = set(['float32', 'bool', 'int8', 'int16',
'uint8', 'uint16'])
# case 1 - all inputs are already float32
if all([i.type.dtype == 'float32' for i in node.inputs]):
# TODO: change this when fusion makes Elemwise with
......
......@@ -28,7 +28,7 @@ class CURAND_Base(GpuOp):
CURAND. This Op uses a generic-typed shared variable to point to a CObject
that encapsulates this opaque reference.
Each random variable is created with a generator of False.
Each random variable is created with a generator of None.
The actual random number generator is allocated from the seed, on the first
call to allocate random numbers (see c_code).
......@@ -210,7 +210,7 @@ class CURAND_Base(GpuOp):
%(fail)s;
}
%(o_generator)s = PyCObject_FromVoidPtr(gen, &free_generator);
assert (%(i_generator)s == Py_False);
assert (%(i_generator)s == Py_None);
}
else if (%(destructive)s)
{
......@@ -244,7 +244,7 @@ class CURAND_Base(GpuOp):
return code
def c_code_cache_version(self):
return (4,)
return (5,)
class CURAND_Normal(CURAND_Base):
......@@ -328,7 +328,7 @@ class CURAND_RandomStreams(object):
else:
msg = "size must be a tuple of int or a Theano variable"
assert isinstance(size, Variable) and size.ndim == 1, msg
generator = theano.shared(False) # makes a generic
generator = theano.shared(None) # makes a generic
s_size = theano.tensor.as_tensor_variable(size)
u = CURAND_Uniform.new_auto_update(generator, ndim, dtype, s_size,
self.next_seed())
......@@ -360,7 +360,7 @@ class CURAND_RandomStreams(object):
else:
msg = "size must be a tuple of int or a Theano variable"
assert isinstance(size, Variable) and size.ndim == 1, msg
generator = theano.shared(False) # makes a generic
generator = theano.shared(None) # makes a generic
s_size = theano.tensor.as_tensor_variable(size)
u = CURAND_Normal.new_auto_update(generator, ndim, dtype, s_size,
self.next_seed())
......
差异被折叠。
......@@ -155,17 +155,6 @@ class test_composite(unittest.TestCase):
si3 = theano.scalar.float32()
sop.make_node(si0 * si3, si1, si2)
def test_composite_neg_bool(self):
# Check that taking the negation of a Boolean intermediate value
# works correctly with Python code. It used to be an issue because
# `-numpy.bool_(True)` is False and `-numpy.bool_(False)` is True.
x = floats('x')
y = - (x > 0)
z = Composite([x], [y]).make_node(x).outputs[0]
f = theano.function([x], z, mode=theano.Mode(linker='py'))
for inp, out in zip([-1, 0, 1], [0, 0, -1]):
self.assertTrue(f(inp) == out)
class test_logical(unittest.TestCase):
def test_gt(self):
......
......@@ -52,6 +52,7 @@ python_all = all
complex_dtypes = list(map(str, scal.complex_types))
continuous_dtypes = list(map(str, scal.continuous_types))
float_dtypes = list(map(str, scal.float_types))
integer_dtypes = list(map(str, scal.integer_types))
discrete_dtypes = list(map(str, scal.discrete_types))
all_dtypes = list(map(str, scal.all_types))
int_dtypes = list(map(str, scal.int_types))
......@@ -302,7 +303,7 @@ class NumpyAutocaster(object):
# returns either an exact x_==x, or the last cast x_
return x_
autocast_int = NumpyAutocaster(('int16', 'int32', 'int64'))
autocast_int = NumpyAutocaster(('int8', 'int16', 'int32', 'int64'))
autocast_float = NumpyAutocaster(('float16', 'float32', 'float64'))
......@@ -379,16 +380,10 @@ def constant_or_value(x, rtype, name=None, ndim=None, dtype=None):
x_ = autocast_float(x)
elif isinstance(x, numpy.ndarray):
x_ = x
# Currently we do not have a bool dtype in Theano.
# So we upcast it to uint8 to avoid breaking our interface for
# constant.
if x.dtype == 'bool':
x_ = numpy.asarray(x_, dtype='uint8')
else:
# Here x is probably a list or a tuple. If it contains a long,
# we will behave like the current NumPy version: 1.7 and below,
# it will only work if the long fits in int64. For NumPy 1.7.1+,
# it will work if the long fits in int64 or uint64.
# Here x is probably a list or a tuple. If it contains a
# long, we will behave like the current NumPy version: it
# will work if the long fits in int64 or uint64.
x_ = numpy.asarray(x)
if x_.size == 0 and not hasattr(x, 'dtype'):
x_ = numpy.asarray(x, dtype=config.floatX)
......@@ -521,11 +516,6 @@ def _allclose(a, b, rtol=None, atol=None):
if atol is not None:
atol_ = atol
# Work around bug in Numpy, see
# http://projects.scipy.org/numpy/ticket/1684
if str(b.dtype) in int_dtypes and (numpy.absolute(b) < 0).any():
b = theano._asarray(b, dtype='float64')
return numpy.allclose(a, b, atol=atol_, rtol=rtol_)
......@@ -1247,6 +1237,10 @@ def _conversion(real_value, name):
# what types you are casting to what. That logic is implemented by the
# `cast()` function below.
_convert_to_bool = _conversion(
elemwise.Elemwise(scal.convert_to_bool), 'bool')
"""Cast to boolean"""
_convert_to_int8 = _conversion(
elemwise.Elemwise(scal.convert_to_int8), 'int8')
"""Cast to 8-bit integer"""
......@@ -1300,6 +1294,7 @@ _convert_to_complex128 = _conversion(
"""Cast to double-precision complex"""
_cast_mapping = {
'bool': _convert_to_bool,
'int8': _convert_to_int8,
'int16': _convert_to_int16,
'int32': _convert_to_int32,
......@@ -3191,6 +3186,10 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False,
for i in axis:
s = true_div(s, shp[i])
# This can happen when axis is an empty list/tuple
if s.dtype != shp.dtype and s.dtype in discrete_dtypes:
s = cast(s, shp.dtype)
if dtype == 'float16' or (dtype is None and input.dtype == 'float16'):
s = cast(s, 'float16')
s.name = 'mean'
......
......@@ -1189,7 +1189,7 @@ def _gemm_canonicalize(r, scale, rval, maxclients):
def scaled(thing):
if scale == 1:
return thing
if scale == -1:
if scale == -1 and thing.type.dtype != 'bool':
return -thing
else:
return scale * thing
......
......@@ -20,12 +20,6 @@ from theano.tensor import elemwise_cgen as cgen
config = theano.config
# We cannot import discrete_dtypes or float_dtypes from tensor.basic yet,
# so we redefine them here
discrete_dtypes = list(map(str, scalar.discrete_types))
float_dtypes = list(map(str, scalar.float_types))
int_dtypes = list(map(str, scalar.int_types))
# tensor depends on elemwise to provide definitions for several ops
# but elemwise needs to make TensorType instances, so we have these as
......@@ -818,9 +812,9 @@ second dimension
# NumPy 1.10.1 raise an error when giving the signature
# when the input is complex. So add it only when inputs is int.
out_dtype = node.outputs[0].dtype
if (out_dtype in float_dtypes and
if (out_dtype in theano.tensor.float_dtypes and
isinstance(self.nfunc, numpy.ufunc) and
node.inputs[0].dtype in discrete_dtypes):
node.inputs[0].dtype in theano.tensor.discrete_dtypes):
char = numpy.sctype2char(out_dtype)
sig = char * node.nin + '->' + char * node.nout
node.tag.sig = sig
......@@ -1076,7 +1070,7 @@ second dimension
# We loop over the "aliased" outputs, i.e., those that are
# inplace (overwrite the contents of one of the inputs) and
# make the output pointers point to theur corresponding input
# make the output pointers point to their corresponding input
# pointers.
for output, oname in izip(aliased_outputs, aliased_onames):
olv_index = inputs.index(dmap[output][0])
......@@ -1641,10 +1635,10 @@ for(int i=0;i<PyArray_NDIM(%(iname)s);i++){
task1_code = self.scalar_op.c_code(
Apply(self.scalar_op,
[get_scalar_type(dtype=input.type.dtype).make_variable()
for input in (node.inputs * 2)],
[get_scalar_type(dtype=output.type.dtype).make_variable()
for input in node.outputs]),
[get_scalar_type(dtype=iv.type.dtype).make_variable()
for iv in (node.inputs * 2)],
[get_scalar_type(dtype=ov.type.dtype).make_variable()
for ov in node.outputs]),
None,
["%s_i" % aname, "%s_i" % inames[0]],
["%s_i" % aname],
......@@ -1708,18 +1702,16 @@ for(int i=0;i<PyArray_NDIM(%(iname)s);i++){
class All(CAReduce):
""" Applies `bitwise and` to all the values of a tensor along the
""" Applies `logical and` to all the values of a tensor along the
specified axis(es).
Equivalent to `CAReduce(scalar.and\_, axis=axis)`.
"""
def __init__(self, axis=None):
CAReduce.__init__(self, scalar.and_, axis)
def _output_dtype(self, idtype):
return "int8"
return "bool"
def __str__(self):
if self.axis is None:
......@@ -1729,7 +1721,7 @@ class All(CAReduce):
def make_node(self, input):
input = as_tensor_variable(input)
if input.dtype not in ["int8", "uint8"]:
if input.dtype != "bool":
input = theano.tensor.neq(input, 0)
ret = super(All, self).make_node(input)
return ret
......@@ -1743,15 +1735,13 @@ class Any(CAReduce):
""" Applies `bitwise or` to all the values of a tensor along the
specified axis(es).
Equivalent to `CAReduce(scalar.or\_, axis=axis)`.
"""
def __init__(self, axis=None):
CAReduce.__init__(self, scalar.or_, axis)
def _output_dtype(self, idtype):
return "int8"
return "bool"
def __str__(self):
if self.axis is None:
......@@ -1761,7 +1751,7 @@ class Any(CAReduce):
def make_node(self, input):
input = as_tensor_variable(input)
if input.dtype not in ["int8", "uint8"]:
if input.dtype != "bool":
input = theano.tensor.neq(input, 0)
ret = super(Any, self).make_node(input)
return ret
......@@ -1863,6 +1853,7 @@ class CAReduceDtype(CAReduce):
if dtype is None:
# If input has a discrete dtype, upcast it to 64
return dict(
bool='int64',
int8='int64',
int16='int64',
int32='int64',
......@@ -1878,6 +1869,7 @@ class CAReduceDtype(CAReduce):
acc_dtype = self.acc_dtype
if acc_dtype is None:
return dict(
bool='int64',
int8='int64',
int16='int64',
int32='int64',
......@@ -1990,7 +1982,7 @@ class Sum(CAReduceDtype):
out = self(*inp)
if out.dtype.find('int') != -1:
if out.dtype not in theano.tensor.continuous_dtypes:
return [x.zeros_like(dtype=theano.config.floatX)]
gz, = grads
......@@ -2101,8 +2093,8 @@ class Prod(CAReduceDtype):
out = self(*inp)
if (out.dtype in discrete_dtypes or
self.acc_dtype in discrete_dtypes):
if (out.dtype in theano.tensor.discrete_dtypes or
self.acc_dtype in theano.tensor.discrete_dtypes):
# There is an int conversion in the way
return [prod_in.zeros_like(dtype=theano.config.floatX)]
......
from __future__ import absolute_import, print_function, division
import numpy as np
import numpy
import warnings
from six.moves import xrange
import theano
......@@ -561,103 +560,6 @@ def diff(x, n=1, axis=-1):
return DiffOp(n=n, axis=axis)(x)
class BinCountOp(theano.Op):
    """Count the number of occurrences of each value in a vector of ints.

    .. note:: Deprecated
        Use bincount() instead.

    See the bincount() function for the full docstring.
    """
    # Tuple of all dtypes accepted for the `x` parameter of this op.
    compatible_type = ('int8', 'int16', 'int32', 'int64',
                       'uint8', 'uint16', 'uint32', 'uint64')
    __props__ = ("minlength",)

    def __init__(self, minlength=None):
        # minlength: minimum number of bins in the output, as in
        # numpy.bincount; None means max(x) + 1 bins.
        self.minlength = minlength
        if minlength is not None:
            numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
            if not bool(numpy_ver >= [1, 6]):
                raise NotImplementedError(
                    "BinCountOp with minlength attribute"
                    " requires NumPy 1.6 or higher.")

    def make_node(self, x, weights):
        # Fixed: the warning previously referred to the unrelated Tile op
        # (copy-paste error); this op is BinCountOp.
        warnings.warn((
            "BinCountOp is deprecated, use the bincount function instead."),
            stacklevel=3)
        x = basic.as_tensor_variable(x)

        if x.dtype not in BinCountOp.compatible_type:
            raise TypeError("Inputs dtype must be an integer.")

        # Some dtypes are not supported by numpy's implementation of
        # bincount.  Until another one is available, we should fail at
        # graph construction time, not wait for execution.
        int_bitwidth = theano.configdefaults.python_int_bitwidth()
        if int_bitwidth == 64:
            numpy_unsupported_dtypes = ('uint64',)
        if int_bitwidth == 32:
            numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')

        intp_bitwidth = theano.configdefaults.local_bitwidth()
        if intp_bitwidth == 32:
            out_type = basic.ivector()
        elif intp_bitwidth == 64:
            out_type = basic.lvector()

        if x.dtype in numpy_unsupported_dtypes:
            raise TypeError(
                ("Input dtypes %s are not supported by numpy.bincount, "
                 % numpy_unsupported_dtypes), x.dtype)

        if x.ndim != 1:
            raise TypeError("Inputs must be of dimension 1.")

        if weights is None:
            weights = theano.gof.Constant(theano.gof.Generic(), None)
        else:
            weights = basic.as_tensor_variable(weights)
            out_type = basic.dvector()
            if weights.ndim != 1:
                # Fixed: the two string parts previously joined without a
                # space ("number ofdimension").
                raise TypeError("Weights cannot have a number of "
                                "dimension different of 1.")

        return theano.Apply(self, [x, weights], [out_type])

    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        weights = inputs[1]
        z = output_storage[0]

        if weights is not None and weights.shape != x.shape:
            raise TypeError("All inputs must have the same shape.")

        # Needed for numpy 1.4.1 compatibility: only pass minlength
        # when it is actually set.
        if self.minlength:
            out = np.bincount(x, weights=weights, minlength=self.minlength)
        else:
            out = np.bincount(x, weights=weights)

        z[0] = theano._asarray(out, dtype=node.outputs[0].dtype)

    def grad(self, inputs, outputs_gradients):
        output = self(*inputs)

        # Integer-valued output: the gradient is zero everywhere.
        if output.dtype.find('int') != -1:
            return [inp.zeros_like().astype(theano.config.floatX)
                    for inp in inputs]

        raise NotImplementedError()

    def infer_shape(self, node, ins_shapes):
        # Output length is max(x) + 1, or minlength if that is larger.
        x = node.inputs[0]
        m = basic.max(x) + 1
        if self.minlength is not None:
            m = basic.maximum(m, self.minlength)
        return [[m]]
def bincount(x, weights=None, minlength=None, assert_nonneg=False):
"""Count number of occurrences of each value in array of ints.
......@@ -773,7 +675,7 @@ class RepeatOp(theano.Op):
x = basic.as_tensor_variable(x)
repeats = basic.as_tensor_variable(repeats)
if repeats.dtype not in tensor.discrete_dtypes:
if repeats.dtype not in tensor.integer_dtypes:
raise TypeError("repeats.dtype must be an integer.")
# Some dtypes are not supported by numpy's implementation of repeat.
......
......@@ -75,7 +75,7 @@ class ScalarSigmoid(scalar.UnaryScalarOp):
# float16 limits: -11.0, 7.0f
# We use the float32 limits for float16 for now as the
# computation will happend in float32 anyway.
# computation will happen in float32 anyway.
if (node.inputs[0].type == scalar.float32 or
node.inputs[0].type == scalar.float16):
return """%(z)s = %(x)s < -88.0f ? 0.0 : %(x)s > 15.0f ? 1.0f : 1.0f /(1.0f + exp(-%(x)s));""" % locals()
......
......@@ -5906,7 +5906,10 @@ def local_mul_specialize(node):
if new_inputs:
if len(new_inputs) == 1:
if neg:
rval = -new_inputs[0]
if new_inputs[0].dtype in (T.uint_dtypes + ['bool']):
return
else:
rval = -new_inputs[0]
else:
rval = new_inputs[0]
else:
......
......@@ -1206,9 +1206,7 @@ IntDivInplaceTester = makeBroadcastTester(
CeilTester = makeBroadcastTester(op=tensor.ceil,
expected=lambda a: numpy.asarray(
numpy.ceil(a),
a.dtype),
expected=upcast_float16_ufunc(numpy.ceil),
good=_good_broadcast_unary_normal_no_complex,
grad=copymod(_grad_broadcast_unary_normal,
without=['corner_case'],
......@@ -1217,7 +1215,7 @@ CeilTester = makeBroadcastTester(op=tensor.ceil,
dtype=floatX)]))
CeilInplaceTester = makeBroadcastTester(op=inplace.ceil_inplace,
expected=lambda a: numpy.asarray(numpy.ceil(a), a.dtype),
expected=upcast_float16_ufunc(numpy.ceil),
good=_good_broadcast_unary_normal_no_complex,
# corner cases includes a lot of integers: points where Ceil is not
# continuous (not differentiable)
......@@ -1229,7 +1227,7 @@ CeilInplaceTester = makeBroadcastTester(op=inplace.ceil_inplace,
inplace=True)
FloorTester = makeBroadcastTester(op=tensor.floor,
expected=lambda a: numpy.asarray(numpy.floor(a), a.dtype),
expected=upcast_float16_ufunc(numpy.floor),
good=_good_broadcast_unary_normal_no_complex,
# XXX: why does grad of floor not give huge values at
# the integer points in the 'corner_case' in
......@@ -1238,20 +1236,20 @@ FloorTester = makeBroadcastTester(op=tensor.floor,
grad=_grad_broadcast_unary_normal)
FloorInplaceTester = makeBroadcastTester(op=inplace.floor_inplace,
expected=lambda a: numpy.asarray(numpy.floor(a), a.dtype),
expected=upcast_float16_ufunc(numpy.floor),
good=_good_broadcast_unary_normal_no_complex,
grad=_grad_broadcast_unary_normal,
inplace=True)
TruncInplaceTester = makeBroadcastTester(
op=inplace.trunc_inplace,
expected=lambda a: numpy.asarray(numpy.trunc(a), a.dtype),
expected=upcast_float16_ufunc(numpy.trunc),
good=_good_broadcast_unary_normal_no_complex,
inplace=True)
TruncTester = makeBroadcastTester(
op=tensor.trunc,
expected=lambda a: numpy.asarray(numpy.trunc(a), a.dtype),
expected=upcast_float16_ufunc(numpy.trunc),
good=_good_broadcast_unary_normal_no_complex)
RoundHalfToEvenTester = makeBroadcastTester(
......@@ -5005,7 +5003,7 @@ class T_scalarfromtensor(unittest.TestCase):
self.assertTrue(v == 56, v)
if config.cast_policy == 'custom':
self.assertTrue(isinstance(v, numpy.int16))
self.assertTrue(isinstance(v, numpy.int8))
elif config.cast_policy in ('numpy', 'numpy+floatX'):
self.assertTrue(isinstance(
v, getattr(numpy, str(numpy.asarray(56).dtype))))
......@@ -7120,7 +7118,7 @@ class T_as_tensor_variable(unittest.TestCase):
def test_ndarray_bool(self):
ten = as_tensor_variable(numpy.array([True, False, False, True, True]))
assert ten.type.dtype == 'uint8'
assert ten.type.dtype == 'bool'
def test_memmap(self):
inp = numpy.random.rand(4, 3)
......@@ -8192,25 +8190,3 @@ def test_symbolic_slice():
a, b = x.shape[:2]
output = a.eval({x: numpy.zeros((5, 4, 3, 2), dtype=theano.config.floatX)})
assert output == numpy.array(5)
def test_composite_neg_bool():
    # Negating a Boolean intermediate value must work correctly under the
    # Python linker.  It used to be broken because `-numpy.bool_(True)`
    # is False and `-numpy.bool_(False)` is True.
    inp = theano.tensor.vector()
    fn = theano.function([inp], - (inp > 0), mode=theano.Mode(linker='py'))
    utt.assert_allclose(fn([-1, 0, 1]), [0, 0, -1])
"""
if __name__ == '__main__':
if 0:
unittest.main()
else:
testcase = FloorInplaceTester
suite = unittest.TestLoader()
suite = suite.loadTestsFromTestCase(testcase)
unittest.TextTestRunner(verbosity=2).run(suite)
"""
......@@ -8,7 +8,7 @@ from theano.tests import unittest_tools as utt
from theano.tensor.extra_ops import (SearchsortedOp, searchsorted,
CumsumOp, cumsum, CumprodOp, cumprod,
CpuContiguous, cpu_contiguous, BinCountOp,
CpuContiguous, cpu_contiguous,
bincount, DiffOp, diff, squeeze, compress,
RepeatOp, repeat, Bartlett, bartlett,
FillDiagonal, fill_diagonal,
......@@ -18,9 +18,6 @@ from theano import tensor as T
from theano import config, tensor, function
from theano.tests.unittest_tools import attr
numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
numpy_16 = bool(numpy_ver >= [1, 6])
def test_cpu_contiguous():
a = T.fmatrix('a')
......@@ -218,12 +215,7 @@ class TestCumprodOp(utt.InferShapeTester):
utt.verify_grad(self.op_class(axis=axis), [a])
class TestBinCountOp(utt.InferShapeTester):
def setUp(self):
super(TestBinCountOp, self).setUp()
self.op_class = BinCountOp
self.op = BinCountOp()
class TestBinCount(utt.InferShapeTester):
def test_bincountFn(self):
w = T.vector('w')
......@@ -263,84 +255,6 @@ class TestBinCountOp(utt.InferShapeTester):
f5 = theano.function([x], bincount(x, assert_nonneg=True))
self.assertRaises(AssertionError, f5, a)
def test_bincountOp(self):
w = T.vector('w')
for dtype in ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64'):
# uint64 always fails
# int64 and uint32 also fail if python int are 32-bit
int_bitwidth = theano.configdefaults.python_int_bitwidth()
if int_bitwidth == 64:
numpy_unsupported_dtypes = ('uint64',)
if int_bitwidth == 32:
numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
x = T.vector('x', dtype=dtype)
if dtype in numpy_unsupported_dtypes:
self.assertRaises(TypeError, BinCountOp(), x)
else:
a = np.random.randint(1, 51, size=(25)).astype(dtype)
weights = np.random.random((25,)).astype(config.floatX)
f1 = theano.function([x], BinCountOp()(x, weights=None))
f2 = theano.function([x, w], BinCountOp()(x, weights=w))
assert (np.bincount(a) == f1(a)).all()
assert np.allclose(np.bincount(a, weights=weights),
f2(a, weights))
if not numpy_16:
continue
f3 = theano.function([x], BinCountOp(minlength=23)(x, weights=None))
f4 = theano.function([x], BinCountOp(minlength=5)(x, weights=None))
assert (np.bincount(a, minlength=23) == f3(a)).all()
assert (np.bincount(a, minlength=5) == f4(a)).all()
@attr('slow')
def test_infer_shape(self):
for dtype in tensor.discrete_dtypes:
# uint64 always fails
# int64 and uint32 also fail if python int are 32-bit
int_bitwidth = theano.configdefaults.python_int_bitwidth()
if int_bitwidth == 64:
numpy_unsupported_dtypes = ('uint64',)
if int_bitwidth == 32:
numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
x = T.vector('x', dtype=dtype)
if dtype in numpy_unsupported_dtypes:
self.assertRaises(TypeError, BinCountOp(), x)
else:
self._compile_and_check([x],
[BinCountOp()(x, None)],
[np.random.randint(
1, 51, size=(25,)).astype(dtype)],
self.op_class)
weights = np.random.random((25,)).astype(config.floatX)
self._compile_and_check([x],
[BinCountOp()(x, weights=weights)],
[np.random.randint(
1, 51, size=(25,)).astype(dtype)],
self.op_class)
if not numpy_16:
continue
self._compile_and_check([x],
[BinCountOp(minlength=60)(x, weights=weights)],
[np.random.randint(
1, 51, size=(25,)).astype(dtype)],
self.op_class)
self._compile_and_check([x],
[BinCountOp(minlength=5)(x, weights=weights)],
[np.random.randint(
1, 51, size=(25,)).astype(dtype)],
self.op_class)
class TestDiffOp(utt.InferShapeTester):
nb = 10 # Number of time iterating for n
......@@ -510,7 +424,7 @@ class TestRepeatOp(utt.InferShapeTester):
a = np.random.random((10, ) * ndim).astype(config.floatX)
for axis in self._possible_axis(ndim):
for dtype in tensor.discrete_dtypes:
for dtype in tensor.integer_dtypes:
r_var = T.scalar(dtype=dtype)
r = numpy.asarray(3, dtype=dtype)
if (dtype == 'uint64' or
......@@ -569,7 +483,7 @@ class TestRepeatOp(utt.InferShapeTester):
a = np.random.random(shp).astype(config.floatX)
for axis in self._possible_axis(ndim):
for dtype in tensor.discrete_dtypes:
for dtype in tensor.integer_dtypes:
r_var = T.scalar(dtype=dtype)
r = numpy.asarray(3, dtype=dtype)
if dtype in self.numpy_unsupported_dtypes:
......@@ -798,13 +712,11 @@ class test_Unique(utt.InferShapeTester):
self.ops = [Unique(),
Unique(True),
Unique(False, True),
Unique(True, True)]
if bool(numpy_ver >= [1, 9]):
self.ops.extend([
Unique(False, False, True),
Unique(True, False, True),
Unique(False, True, True),
Unique(True, True, True)])
Unique(True, True),
Unique(False, False, True),
Unique(True, False, True),
Unique(False, True, True),
Unique(True, True, True)]
def test_basic_vector(self):
"""
......@@ -816,13 +728,11 @@ class test_Unique(utt.InferShapeTester):
list_outs_expected = [[np.unique(inp)],
np.unique(inp, True),
np.unique(inp, False, True),
np.unique(inp, True, True)]
if bool(numpy_ver >= [1, 9]):
list_outs_expected.extend([
np.unique(inp, False, False, True),
np.unique(inp, True, False, True),
np.unique(inp, False, True, True),
np.unique(inp, True, True, True)])
np.unique(inp, True, True),
np.unique(inp, False, False, True),
np.unique(inp, True, False, True),
np.unique(inp, False, True, True),
np.unique(inp, True, True, True)]
for op, outs_expected in zip(self.ops, list_outs_expected):
f = theano.function(inputs=[x], outputs=op(x, return_list=True))
outs = f(inp)
......@@ -839,13 +749,11 @@ class test_Unique(utt.InferShapeTester):
list_outs_expected = [[np.unique(inp)],
np.unique(inp, True),
np.unique(inp, False, True),
np.unique(inp, True, True)]
if bool(numpy_ver >= [1, 9]):
list_outs_expected.extend([
np.unique(inp, False, False, True),
np.unique(inp, True, False, True),
np.unique(inp, False, True, True),
np.unique(inp, True, True, True)])
np.unique(inp, True, True),
np.unique(inp, False, False, True),
np.unique(inp, True, False, True),
np.unique(inp, False, True, True),
np.unique(inp, True, True, True)]
for op, outs_expected in zip(self.ops, list_outs_expected):
f = theano.function(inputs=[x], outputs=op(x, return_list=True))
outs = f(inp)
......
......@@ -255,6 +255,7 @@ class TensorType(Type):
'float16': (float, 'npy_float16', 'NPY_FLOAT16'),
'float32': (float, 'npy_float32', 'NPY_FLOAT32'),
'float64': (float, 'npy_float64', 'NPY_FLOAT64'),
'bool': (bool, 'npy_bool', 'NPY_BOOL'),
'uint8': (int, 'npy_uint8', 'NPY_UINT8'),
'int8': (int, 'npy_int8', 'NPY_INT8'),
'uint16': (int, 'npy_uint16', 'NPY_UINT16'),
......@@ -340,15 +341,9 @@ class TensorType(Type):
return False
if a.dtype != b.dtype:
return False
if 'int' in str(a.dtype):
if str(a.dtype) not in theano.tensor.continuous_dtypes:
return numpy.all(a == b)
else:
# work around a numpy.allclose bug:
# http://projects.scipy.org/numpy/ticket/1672
if a.ndim == 0 and numpy.isinf(a):
a = a.reshape(1)
b = b.reshape(1)
cmp = theano.tensor.basic._allclose(a, b, rtol=rtol, atol=atol)
if cmp:
# Numpy claims they are close, this is good enough for us.
......
......@@ -470,19 +470,17 @@ class _tensor_py_operators(object):
def check_bool(args_el):
try:
if isinstance(args_el, (numpy.bool_, bool)) or \
args_el.dtype == 'int8' or args_el.dtype == 'uint8':
raise TypeError(('TensorType does not support boolean '
'mask for indexing such as tensor[x==0]. '
'Instead you can use non_zeros() such as '
'tensor[(x == 0).nonzeros()]. '
'If you are indexing on purpose with an '
'int8, please cast it to int16.'))
if (isinstance(args_el, (numpy.bool_, bool)) or
args_el.dtype == 'bool'):
raise TypeError('TensorType does not support boolean '
'mask for indexing such as tensor[x==0]. '
'Instead you can use non_zeros() such as '
'tensor[(x == 0).nonzeros()]. ')
except AttributeError:
pass
if not isinstance(args_el, theano.tensor.Variable) and \
isinstance(args_el, collections.Iterable):
if (not isinstance(args_el, theano.tensor.Variable) and
isinstance(args_el, collections.Iterable)):
for el in args_el:
check_bool(el)
......
......@@ -56,7 +56,6 @@ whitelist_flake8 = [
"tensor/tests/test_opt.py",
"tensor/tests/test_basic.py",
"tensor/tests/test_blas.py",
"tensor/tests/test_elemwise.py",
"tensor/tests/test_merge.py",
"tensor/tests/test_gc.py",
"tensor/tests/test_complex.py",
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论