Commit 3767f231 authored by Brandon T. Willard

Move test_basic utilities to utils.py and SciPy tests to test_basic_scipy

Parent commit: d3d56a6c
...@@ -70,8 +70,8 @@ jobs: ...@@ -70,8 +70,8 @@ jobs:
float32: [0] float32: [0]
part: part:
- "tests --ignore=tests/tensor --ignore=tests/sparse --ignore=tests/tensor/nnet" - "tests --ignore=tests/tensor --ignore=tests/sparse --ignore=tests/tensor/nnet"
- "tests/tensor tests/sparse --ignore=tests/tensor/test_basic.py --ignore=tests/tensor/test_elemwise.py --ignore=tests/tensor/test_opt.py --ignore=tests/tensor/nnet" - "tests/tensor tests/sparse --ignore=tests/tensor/test_basic.py --ignore=tests/tensor/test_basic_scipy.py --ignore=tests/tensor/test_elemwise.py --ignore=tests/tensor/test_opt.py --ignore=tests/tensor/nnet"
- "tests/tensor/test_basic.py" - "tests/tensor/test_basic.py tests/tensor/test_basic_scipy.py"
- "tests/tensor/test_elemwise.py tests/tensor/test_opt.py" - "tests/tensor/test_elemwise.py tests/tensor/test_opt.py"
- "tests/tensor/nnet --ignore-glob='*/test_abstract_conv.py'" - "tests/tensor/nnet --ignore-glob='*/test_abstract_conv.py'"
- "tests/tensor/nnet/test_abstract_conv.py" - "tests/tensor/nnet/test_abstract_conv.py"
......
...@@ -40,3 +40,4 @@ Theano.suo ...@@ -40,3 +40,4 @@ Theano.suo
.ropeproject .ropeproject
core core
.idea .idea
/htmlcov/
...@@ -425,7 +425,7 @@ verify that the op generates the proper output, that the gradient is ...@@ -425,7 +425,7 @@ verify that the op generates the proper output, that the gradient is
valid, that the Op fails in known/expected ways. Because so much of valid, that the Op fails in known/expected ways. Because so much of
this is common, two helper functions exists to make your lives easier: this is common, two helper functions exists to make your lives easier:
``makeTester`` and ``makeBroadcastTester`` (defined in module ``makeTester`` and ``makeBroadcastTester`` (defined in module
``tests.tensor.test_basic``). ``tests.tensor.utils``).
Here is an example of ``makeTester`` generating testcases for the Dot Here is an example of ``makeTester`` generating testcases for the Dot
product op: product op:
...@@ -435,7 +435,7 @@ product op: ...@@ -435,7 +435,7 @@ product op:
from numpy import dot from numpy import dot
from numpy.random import rand from numpy.random import rand
from tests.tensor.test_basic import makeTester from tests.tensor.utils import makeTester
TestDot = makeTester(name = 'DotTester', TestDot = makeTester(name = 'DotTester',
op = dot, op = dot,
......
...@@ -5,6 +5,7 @@ max-line-length = 88 ...@@ -5,6 +5,7 @@ max-line-length = 88
per-file-ignores = per-file-ignores =
**/__init__.py:F401,E402,F403 **/__init__.py:F401,E402,F403
theano/sparse/sandbox/sp2.py:F401 theano/sparse/sandbox/sp2.py:F401
tests/tensor/test_basic_scipy.py:E402
tests/sparse/test_basic.py:E402 tests/sparse/test_basic.py:E402
tests/sparse/test_opt.py:E402 tests/sparse/test_opt.py:E402
tests/sparse/test_sp2.py:E402 tests/sparse/test_sp2.py:E402
......
import pytest
pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray
import numpy as np import numpy as np
import pytest
import theano import theano
import theano.tensor as tt import theano.tensor as tt
# Don't import test classes otherwise they get tested as part of the file
from tests import unittest_tools as utt from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor.test_basic import ( from tests.tensor.test_basic import (
...@@ -14,9 +12,8 @@ from tests.tensor.test_basic import ( ...@@ -14,9 +12,8 @@ from tests.tensor.test_basic import (
TestComparison, TestComparison,
TestJoinAndSplit, TestJoinAndSplit,
TestReshape, TestReshape,
rand,
safe_make_node,
) )
from tests.tensor.utils import rand, safe_make_node
from theano.gpuarray.basic_ops import ( from theano.gpuarray.basic_ops import (
GpuAlloc, GpuAlloc,
GpuAllocEmpty, GpuAllocEmpty,
...@@ -35,13 +32,14 @@ from theano.gpuarray.basic_ops import ( ...@@ -35,13 +32,14 @@ from theano.gpuarray.basic_ops import (
) )
from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise
from theano.gpuarray.subtensor import GpuSubtensor from theano.gpuarray.subtensor import GpuSubtensor
# Don't import test classes otherwise they get tested as part of the file
from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor
from theano.tensor import TensorType from theano.tensor import TensorType
from theano.tensor.basic import alloc from theano.tensor.basic import alloc
pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray
utt.seed_rng() utt.seed_rng()
rng = np.random.RandomState(seed=utt.fetch_seed()) rng = np.random.RandomState(seed=utt.fetch_seed())
......
...@@ -397,7 +397,7 @@ class TestMagma: ...@@ -397,7 +397,7 @@ class TestMagma:
fn = theano.function([A], gpu_matrix_inverse(A), mode=mode_with_gpu) fn = theano.function([A], gpu_matrix_inverse(A), mode=mode_with_gpu)
N = 1000 N = 1000
test_rng = np.random.RandomState(seed=1) test_rng = np.random.RandomState(seed=1)
# Copied from tests.tensor.test_basic.rand. # Copied from tests.tensor.utils.rand.
A_val = test_rng.rand(N, N).astype("float32") * 2 - 1 A_val = test_rng.rand(N, N).astype("float32") * 2 - 1
A_val_inv = fn(A_val) A_val_inv = fn(A_val)
utt.assert_allclose(np.eye(N), np.dot(A_val_inv, A_val), atol=1e-2) utt.assert_allclose(np.eye(N), np.dot(A_val_inv, A_val), atol=1e-2)
......
...@@ -4,10 +4,10 @@ import pytest ...@@ -4,10 +4,10 @@ import pytest
import theano import theano
import theano.gpuarray import theano.gpuarray
import theano.tensor.slinalg as slinalg import theano.tensor.slinalg as slinalg
from tests import test_ifelse
from tests import unittest_tools as utt from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor import test_basic from tests.tensor.test_basic import TestSpecifyShape
from tests.test_ifelse import TestIfelse
from theano import tensor from theano import tensor
from theano.breakpoint import PdbBreakpoint from theano.breakpoint import PdbBreakpoint
from theano.gof.opt import check_stack_trace from theano.gof.opt import check_stack_trace
...@@ -312,12 +312,12 @@ def test_rebroadcast(): ...@@ -312,12 +312,12 @@ def test_rebroadcast():
assert _check_stack_trace(f) assert _check_stack_trace(f)
class TestSpecifyShape(test_basic.TestSpecifyShape): class TestSpecifyShape(TestSpecifyShape):
mode = mode_with_gpu mode = mode_with_gpu
input_type = GpuArrayType input_type = GpuArrayType
class TestGpuIfelse(test_ifelse.TestIfelse): class TestGpuIfelse(TestIfelse):
mode = mode_with_gpu mode = mode_with_gpu
@staticmethod @staticmethod
......
...@@ -4,7 +4,7 @@ import pytest ...@@ -4,7 +4,7 @@ import pytest
import theano import theano
import theano.tensor as tt import theano.tensor as tt
from tests import unittest_tools as utt from tests import unittest_tools as utt
from tests.tensor.test_basic import ( from tests.tensor.utils import (
_good_broadcast_unary_normal_float_no_complex, _good_broadcast_unary_normal_float_no_complex,
check_floatX, check_floatX,
makeBroadcastTester, makeBroadcastTester,
......
...@@ -3,7 +3,7 @@ import numpy as np ...@@ -3,7 +3,7 @@ import numpy as np
import theano import theano
import theano.tensor as tt import theano.tensor as tt
from tests import unittest_tools as utt from tests import unittest_tools as utt
from tests.tensor.test_basic import ( from tests.tensor.utils import (
_good_broadcast_unary_normal_no_complex, _good_broadcast_unary_normal_no_complex,
check_floatX, check_floatX,
copymod, copymod,
......
import builtins import builtins
import itertools import itertools
import operator import operator
import os
import warnings import warnings
from copy import copy, deepcopy from copy import copy, deepcopy
from functools import partial, reduce from functools import partial, reduce
...@@ -14,6 +13,43 @@ from numpy.testing import assert_allclose, assert_almost_equal, assert_array_equ ...@@ -14,6 +13,43 @@ from numpy.testing import assert_allclose, assert_almost_equal, assert_array_equ
import theano import theano
import theano.tensor as tt import theano.tensor as tt
from tests import unittest_tools as utt from tests import unittest_tools as utt
from tests.tensor.utils import (
ALL_DTYPES,
COMPLEX_DTYPES,
REAL_DTYPES,
_eps,
_good_broadcast_unary_normal,
_good_broadcast_unary_normal_float,
_good_broadcast_unary_normal_float_no_complex,
_good_broadcast_unary_normal_no_complex,
_grad_broadcast_unary_normal,
_numpy_true_div,
check_floatX,
copymod,
corner_case_grad,
div_grad_rtol,
eval_outputs,
get_numeric_types,
inplace_func,
makeBroadcastTester,
makeTester,
multi_dtype_cast_checks,
multi_dtype_checks,
rand,
rand_nonzero,
rand_of_dtype,
rand_ranged,
randc128_ranged,
randcomplex,
randcomplex_nonzero,
randint,
randint_nonzero,
randint_ranged,
randuint16,
randuint32,
upcast_float16_ufunc,
upcast_int8_nfunc,
)
from theano import change_flags, compile, config, function, gof, shared from theano import change_flags, compile, config, function, gof, shared
from theano.compat import operator_div from theano.compat import operator_div
from theano.compile import DeepCopyOp from theano.compile import DeepCopyOp
...@@ -150,639 +186,11 @@ from theano.tensor import ( ...@@ -150,639 +186,11 @@ from theano.tensor import (
) )
# Track whether SciPy is available; SciPy-dependent tests check this flag.
imported_scipy_special = False
mode_no_scipy = get_default_mode()
try:
    import scipy.special
    import scipy.stats

    imported_scipy_special = True
except ImportError:
    # NOTE(review): presumably FAST_COMPILE lacks implementations for some
    # ops when SciPy is absent, hence the fallback to FAST_RUN — confirm.
    if config.mode == "FAST_COMPILE":
        mode_no_scipy = "FAST_RUN"

# Shorthand for the configured default float dtype.
floatX = config.floatX
if config.mode == "FAST_COMPILE": if config.mode == "FAST_COMPILE":
mode_opt = "FAST_RUN" mode_opt = "FAST_RUN"
else: else:
mode_opt = get_default_mode() mode_opt = get_default_mode()
# Use a seeded random number generator so that unittests are deterministic.
# All of the rand*/randint* helpers below draw from this generator.
utt.seed_rng()
test_rng = np.random.RandomState(seed=utt.fetch_seed())

# In order to check random values close to the boundaries when designing
# new tests, you can use utt.MockRandomState, for instance:
# test_rng = MockRandomState(0)
# test_rng = MockRandomState(0.99999982)
# test_rng = MockRandomState(1)
def inplace_func(
    inputs,
    outputs,
    mode=None,
    allow_input_downcast=False,
    on_unused_input="raise",
    name=None,
):
    """Compile a Theano function that accepts graphs containing inplace ops.

    Thin wrapper around ``function`` with ``accept_inplace=True``; when
    `mode` is None, the current default compilation mode is used.
    """
    chosen_mode = get_default_mode() if mode is None else mode
    return function(
        inputs,
        outputs,
        mode=chosen_mode,
        allow_input_downcast=allow_input_downcast,
        accept_inplace=True,
        on_unused_input=on_unused_input,
        name=name,
    )
def eval_outputs(outputs, ops=(), mode=None):
    """Compile `outputs` (a graph with no inputs), run it once, and return
    the resulting value(s).

    When `ops` is non-empty, also assert that at least one node of the
    compiled graph uses one of those op types. A singleton result list/tuple
    is unwrapped to its single element.
    """
    fn = inplace_func([], outputs, mode=mode)
    results = fn()
    if ops:
        assert any(isinstance(node.op, ops) for node in fn.maker.fgraph.apply_nodes)
    if isinstance(results, (tuple, list)) and len(results) == 1:
        return results[0]
    return results
def get_numeric_subclasses(cls=np.number, ignore=None):
# Return subclasses of `cls` in the numpy scalar hierarchy.
#
# We only return subclasses that correspond to unique data types.
# The hierarchy can be seen here:
# http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html
if ignore is None:
ignore = []
rval = []
dtype = np.dtype(cls)
dtype_num = dtype.num
if dtype_num not in ignore:
# Safety check: we should be able to represent 0 with this data type.
np.array(0, dtype=dtype)
rval.append(cls)
ignore.append(dtype_num)
for sub_ in cls.__subclasses__():
rval += [c for c in get_numeric_subclasses(sub_, ignore=ignore)]
return rval
def get_numeric_types(
    with_int=True, with_float=True, with_complex=False, only_theano_types=True
):
    """Return numpy numeric data types.

    :param with_int: Whether to include integer types.

    :param with_float: Whether to include floating point types.

    :param with_complex: Whether to include complex types.

    :param only_theano_types: If True, then numpy numeric data types that are
        not supported by Theano are ignored (i.e. those that are not declared
        in scalar/basic.py).

    :returns: A list of unique data type objects. Note that multiple data
        types may share the same string representation, but can be
        differentiated through their `num` attribute.

    Note that when `only_theano_types` is True we could simply return the list
    of types defined in the `scalar` module. However with this function we can
    test more unique dtype objects, and in the future we may use it to
    automatically detect new data types introduced in numpy.
    """
    if only_theano_types:
        theano_types = [d.dtype for d in theano.scalar.all_types]
    rval = []

    def is_within(cls1, cls2):
        # Return True if scalars defined from `cls1` are within the hierarchy
        # starting from `cls2`.
        # The third test below is to catch for instance the fact that
        # one can use ``dtype=numpy.number`` and obtain a float64 scalar, even
        # though `numpy.number` is not under `numpy.floating` in the class
        # hierarchy.
        return (
            cls1 is cls2
            or issubclass(cls1, cls2)
            or isinstance(np.array([0], dtype=cls1)[0], cls2)
        )

    for cls in get_numeric_subclasses():
        dtype = np.dtype(cls)
        if (
            (not with_complex and is_within(cls, np.complexfloating))
            or (not with_int and is_within(cls, np.integer))
            or (not with_float and is_within(cls, np.floating))
            or (only_theano_types and dtype not in theano_types)
        ):
            # Ignore this class.
            continue
        rval.append([str(dtype), dtype, dtype.num])
    # We sort it to be deterministic, then remove the string and num elements.
    return [x[1] for x in sorted(rval, key=str)]
def _numpy_checker(x, y):
# Checks if x.data and y.data have the same contents.
# Used in DualLinker to compare C version with Python version.
x, y = x[0], y[0]
if x.dtype != y.dtype or x.shape != y.shape or np.any(np.abs(x - y) > 1e-10):
raise Exception("Output mismatch.", {"performlinker": x, "clinker": y})
def safe_make_node(op, *inputs):
    """Emulate the behaviour of ``make_node`` when `op` is a function.

    `op` may be an Op instance or a plain callable; it is applied to
    `inputs` and the ``.owner`` of the resulting variable is returned (for a
    list result, the owner of its first element).
    """
    result = op(*inputs)
    if isinstance(result, list):
        result = result[0]
    return result.owner
def upcast_float16_ufunc(fn):
    """Decorator that enforces computation is not done in float16 by NumPy.

    Some ufuncs in NumPy will compute float values on int8 and uint8
    in half-precision (float16), which is not enough, and not compatible
    with the C code.

    :param fn: numpy ufunc
    :returns: function similar to ``fn.__call__``, computing the same
        value with a minimum floating-point precision of float32
    """

    def ret(*args, **kwargs):
        # `np.find_common_type` was removed in NumPy 2.0; `np.result_type`
        # gives the equivalent promotion for the int8/uint8 inputs this
        # wrapper targets (they promote with float16 to float16).
        out_dtype = np.result_type(np.float16, *[a.dtype for a in args])
        if out_dtype == "float16":
            # Force everything to float32
            sig = "f" * fn.nin + "->" + "f" * fn.nout
            kwargs.update(sig=sig)
        return fn(*args, **kwargs)

    return ret
def upcast_int8_nfunc(fn):
    """Decorator that upcasts inputs of dtype int8/uint8 to float32.

    This is so that floating-point computation is not carried out using
    half-precision (float16), as some NumPy functions do.

    :param fn: function computing a floating-point value from inputs
    :returns: function similar to `fn`, but upcasting its uint8 and int8
        inputs before carrying out the computation.
    """

    def wrapper(*args, **kwargs):
        upcast = [
            a.astype("float32")
            if getattr(a, "dtype", None) in ("int8", "uint8")
            else a
            for a in args
        ]
        return fn(*upcast, **kwargs)

    return wrapper
def makeTester(
    name,
    op,
    expected,
    checks=None,
    good=None,
    bad_build=None,
    bad_runtime=None,
    grad=None,
    mode=None,
    grad_rtol=None,
    eps=1e-10,
    skip=False,
    test_memmap=True,
    check_name=False,
    grad_eps=None,
):
    """Build and return a test class (named `name`) exercising `op`.

    The generated class checks that `op` produces the `expected` outputs on
    the `good` inputs, fails to build a node on `bad_build` inputs, fails at
    runtime on `bad_runtime` inputs, and has a valid (non-None) gradient on
    the `grad` inputs. Each of `good`/`bad_build`/`bad_runtime`/`grad` maps a
    test name to a list of input arrays.

    :param check_name:
        Use only for tester that aren't in Theano.
    """
    if checks is None:
        checks = {}
    if good is None:
        good = {}
    if bad_build is None:
        bad_build = {}
    if bad_runtime is None:
        bad_runtime = {}
    if grad is None:
        grad = {}
    if grad is True:
        # `grad=True` means: verify the gradient on every `good` input.
        grad = good

    # Bind the arguments to closure-local names so the class body below can
    # assign them to class attributes without name clashes.
    # NOTE(review): the local `_eps` here captures the `eps` *argument*, so
    # `np.max([eps, _eps])` in test_good reads this closure variable rather
    # than the module-level `_eps` constant — confirm this is intended.
    _op, _expected, _checks, _good = op, expected, checks, good
    _bad_build, _bad_runtime, _grad = bad_build, bad_runtime, grad
    _mode, _grad_rtol, _eps, skip_ = mode, grad_rtol, eps, skip
    _test_memmap = test_memmap
    _check_name = check_name
    _grad_eps = grad_eps

    class Checker:
        # Exposed as class attributes so subclasses (e.g. GPU testers) can
        # override them.
        op = staticmethod(_op)
        expected = staticmethod(_expected)
        checks = _checks
        check_name = _check_name
        good = _good
        bad_build = _bad_build
        bad_runtime = _bad_runtime
        grad = _grad
        mode = _mode
        skip = skip_
        test_memmap = _test_memmap

        def setup_method(self):
            # Verify that the test's name is correctly set.
            # Some tests reuse it outside this module.
            if self.check_name:
                eval(self.__class__.__module__ + "." + self.__class__.__name__)

            # We keep a list of temporary files created in add_memmap_values,
            # to remove them at the end of the test.
            self.tmp_files = []

        def add_memmap_values(self, val_dict):
            # If test_memmap is True, we create a temporary file
            # containing a copy of the data passed in the "val_dict" dict,
            # then open it as a memmapped array, and we can use the result as
            # a new test value.
            if not self.test_memmap:
                return val_dict

            # Copy dict before modifying them
            val_dict = val_dict.copy()

            # Note that we sort items in the dictionary to ensure tests are
            # deterministic (since the loop below will break on the first
            # valid item that can be memmapped).
            for k, v in sorted(val_dict.items()):
                new_k = "_".join((k, "memmap"))
                if new_k in val_dict:
                    # A corresponding key was already provided
                    break

                new_v = []
                for inp in v:
                    if type(inp) is np.ndarray and inp.size > 0:
                        f, fname = mkstemp()
                        self.tmp_files.append((f, fname))
                        new_inp = np.memmap(
                            fname, dtype=inp.dtype, mode="w+", shape=inp.shape
                        )
                        new_inp[...] = inp[...]
                        new_v.append(new_inp)
                    else:
                        new_v.append(inp)

                val_dict[new_k] = new_v

                # We only need one value, no need to copy all of them
                break

            return val_dict

        def teardown_method(self):
            # This is to avoid a problem with deleting memmap files on
            # windows.
            import gc

            gc.collect()
            for f, fname in self.tmp_files:
                os.close(f)
                os.remove(fname)

        @pytest.mark.skipif(skip, reason="Skipped")
        def test_good(self):
            good = self.add_memmap_values(self.good)

            for testname, inputs in good.items():
                inputs = [copy(input) for input in inputs]
                # Build symbolic inputs with broadcast patterns derived from
                # the concrete input shapes (length-1 dims are broadcastable).
                inputrs = [
                    TensorType(
                        dtype=input.dtype,
                        broadcastable=[
                            shape_elem == 1 for shape_elem in input.shape
                        ],
                    )()
                    for input in inputs
                ]
                try:
                    node = safe_make_node(self.op, *inputrs)
                except Exception as exc:
                    err_msg = (
                        "Test %s::%s: Error occurred while"
                        " making a node with inputs %s"
                    ) % (self.op, testname, inputs)
                    exc.args += (err_msg,)
                    raise

                try:
                    # NOTE(review): this uses the closure `mode`, not
                    # `self.mode`, so subclasses that override `mode` do not
                    # affect this compilation — confirm whether intended.
                    f = inplace_func(
                        inputrs, node.outputs, mode=mode, name="test_good"
                    )
                except Exception as exc:
                    err_msg = (
                        "Test %s::%s: Error occurred while"
                        " trying to make a Function"
                    ) % (self.op, testname)
                    exc.args += (err_msg,)
                    raise

                if isinstance(self.expected, dict) and testname in self.expected:
                    expecteds = self.expected[testname]
                    # with numpy version, when we print a number and read it
                    # back, we don't get exactly the same result, so we accept
                    # rounding error in that case.
                    eps = 5e-9
                else:
                    expecteds = self.expected(*inputs)
                    eps = 1e-10

                # Low-precision dtypes need a looser comparison tolerance.
                if any(
                    [
                        i.dtype in ("float32", "int8", "uint8", "uint16")
                        for i in inputs
                    ]
                ):
                    eps = 1e-6
                eps = np.max([eps, _eps])

                try:
                    variables = f(*inputs)
                except Exception as exc:
                    err_msg = (
                        "Test %s::%s: Error occurred while calling"
                        " the Function on the inputs %s"
                    ) % (self.op, testname, inputs)
                    exc.args += (err_msg,)
                    raise

                if not isinstance(expecteds, (list, tuple)):
                    expecteds = (expecteds,)

                for i, (variable, expected) in enumerate(
                    zip(variables, expecteds)
                ):
                    condition = (
                        variable.dtype != expected.dtype
                        or variable.shape != expected.shape
                        or not np.allclose(variable, expected, atol=eps, rtol=eps)
                    )
                    assert not condition, (
                        "Test %s::%s: Output %s gave the wrong"
                        " value. With inputs %s, expected %s (dtype %s),"
                        " got %s (dtype %s). eps=%f"
                        " np.allclose returns %s %s"
                    ) % (
                        self.op,
                        testname,
                        i,
                        inputs,
                        expected,
                        expected.dtype,
                        variable,
                        variable.dtype,
                        eps,
                        np.allclose(variable, expected, atol=eps, rtol=eps),
                        np.allclose(variable, expected),
                    )

                # Run any user-supplied invariant checks on (inputs, outputs).
                for description, check in self.checks.items():
                    assert check(inputs, variables), (
                        "Test %s::%s: Failed check: %s (inputs"
                        " were %s, outputs were %s)"
                    ) % (self.op, testname, description, inputs, variables)

        @pytest.mark.skipif(skip, reason="Skipped")
        def test_bad_build(self):
            for testname, inputs in self.bad_build.items():
                inputs = [copy(input) for input in inputs]
                inputrs = [shared(input) for input in inputs]
                with pytest.raises(Exception):
                    safe_make_node(self.op, *inputrs)
                # The old error string was ("Test %s::%s: %s was successfully
                # instantiated on the following bad inputs: %s"
                # % (self.op, testname, node, inputs))

        @change_flags(compute_test_value="off")
        @pytest.mark.skipif(skip, reason="Skipped")
        def test_bad_runtime(self):
            for testname, inputs in self.bad_runtime.items():
                inputrs = [shared(input) for input in inputs]
                try:
                    node = safe_make_node(self.op, *inputrs)
                except Exception as exc:
                    err_msg = (
                        "Test %s::%s: Error occurred while trying"
                        " to make a node with inputs %s"
                    ) % (self.op, testname, inputs)
                    exc.args += (err_msg,)
                    raise

                try:
                    f = inplace_func(
                        [], node.outputs, mode=mode, name="test_bad_runtime"
                    )
                except Exception as exc:
                    err_msg = (
                        "Test %s::%s: Error occurred while trying"
                        " to make a Function"
                    ) % (self.op, testname)
                    exc.args += (err_msg,)
                    raise

                # Add tester return a ValueError. Should we catch only this
                # one?
                # TODO: test that only this one is raised and catch only this
                # one or the subset that get raised.
                with pytest.raises(Exception):
                    f([])

        @pytest.mark.skipif(skip, reason="Skipped")
        def test_grad(self):
            # Disable old warning that may be triggered by this test.
            backup = config.warn.sum_div_dimshuffle_bug
            config.warn.sum_div_dimshuffle_bug = False
            try:
                for testname, inputs in self.grad.items():
                    inputs = [copy(input) for input in inputs]
                    try:
                        utt.verify_grad(
                            self.op,
                            inputs,
                            mode=self.mode,
                            rel_tol=_grad_rtol,
                            eps=_grad_eps,
                        )
                    except Exception as exc:
                        err_msg = (
                            "Test %s::%s: Error occurred while"
                            " computing the gradient on the following"
                            " inputs: %s"
                        ) % (self.op, testname, inputs)
                        exc.args += (err_msg,)
                        raise
            finally:
                config.warn.sum_div_dimshuffle_bug = backup

        @pytest.mark.skipif(skip, reason="Skipped")
        def test_grad_none(self):
            # Check that None is never returned as input gradient
            # when calling self.op.grad
            # We use all values in self.good because this has to be true
            # whether or not the values work for utt.verify_grad.
            if not hasattr(self.op, "grad"):
                # This is not actually an Op
                return

            for testname, inputs in self.good.items():
                inputs = [copy(input) for input in inputs]
                inputrs = [
                    TensorType(
                        dtype=input.dtype,
                        broadcastable=[
                            shape_elem == 1 for shape_elem in input.shape
                        ],
                    )()
                    for input in inputs
                ]

                if isinstance(self.expected, dict) and testname in self.expected:
                    expecteds = self.expected[testname]
                    # with numpy version, when we print a number and read it
                    # back, we don't get exactly the same result, so we accept
                    # rounding error in that case.
                else:
                    expecteds = self.expected(*inputs)
                if not isinstance(expecteds, (list, tuple)):
                    expecteds = (expecteds,)

                # Build symbolic output gradients matching the expected
                # outputs' dtypes and broadcast patterns (discrete outputs get
                # a floatX gradient).
                out_grad_vars = []
                for out in expecteds:
                    if str(out.dtype) in tt.discrete_dtypes:
                        dtype = floatX
                    else:
                        dtype = str(out.dtype)
                    bcast = [shape_elem == 1 for shape_elem in out.shape]
                    var = TensorType(dtype=dtype, broadcastable=bcast)()
                    out_grad_vars.append(var)

                try:
                    in_grad_vars = self.op.grad(inputrs, out_grad_vars)
                except (gof.utils.MethodNotDefined, NotImplementedError):
                    pass
                else:
                    assert None not in in_grad_vars

    # Rename the generated class so test reports show the requested name.
    Checker.__name__ = name
    if hasattr(Checker, "__qualname__"):
        Checker.__qualname__ = name
    return Checker
def rand(*shape):
    # Uniform samples in [-1, 1), cast to the configured float dtype.
    r = test_rng.rand(*shape) * 2 - 1
    return np.asarray(r, dtype=config.floatX)


def rand_nonzero(shape, eps=3e-4):
    # Like rand, but the absolute value has to be at least eps
    # covers [0, 1)
    r = np.asarray(test_rng.rand(*shape), dtype=config.floatX)
    # covers [0, (1 - eps) / 2) U [(1 + eps) / 2, 1)
    r = r * (1 - eps) + eps * (r >= 0.5)
    # covers [-1, -eps) U [eps, 1)
    r = r * 2 - 1
    return r


def randint(*shape):
    # Random integers in [-5, 5].
    return test_rng.randint(-5, 6, shape)


def randuint32(*shape):
    # Random uint32 values in [0, 5).
    return np.array(test_rng.randint(5, size=shape), dtype=np.uint32)


def randuint16(*shape):
    # Random uint16 values in [0, 5).
    return np.array(test_rng.randint(5, size=shape), dtype=np.uint16)


# XXX: this so-called complex random array has all-zero imaginary parts
def randcomplex(*shape):
    r = np.asarray(test_rng.rand(*shape), dtype=config.floatX)
    return np.complex128(2 * r - 1)


def randcomplex_nonzero(shape, eps=1e-4):
    # Complex variant of rand_nonzero (imaginary parts are zero as well).
    return np.complex128(rand_nonzero(shape, eps))


def randint_nonzero(*shape):
    # Random integers in [-5, 5) with zeros remapped to 5.
    r = test_rng.randint(-5, 5, shape)
    return r + (r == 0) * 5


def rand_ranged(min, max, shape):
    # Uniform floats in [min, max) with dtype config.floatX.
    return np.asarray(test_rng.rand(*shape) * (max - min) + min, dtype=config.floatX)


def randint_ranged(min, max, shape):
    # Random integers in [min, max] (inclusive on both ends).
    return test_rng.randint(min, max + 1, shape)


def randc128_ranged(min, max, shape):
    # complex128 values with real parts in [min, max) and zero imaginary parts.
    return np.asarray(test_rng.rand(*shape) * (max - min) + min, dtype="complex128")


def rand_of_dtype(shape, dtype):
    # Dispatch to the appropriate random generator for `dtype`.
    if dtype in tt.discrete_dtypes:
        return randint(*shape).astype(dtype)
    elif dtype in tt.float_dtypes:
        return rand(*shape).astype(dtype)
    elif dtype in tt.complex_dtypes:
        return randcomplex(*shape).astype(dtype)
    else:
        raise TypeError()


# Used to exclude random numbers too close to certain values
_eps = 1e-2
def makeBroadcastTester(op, expected, checks=None, name=None, **kwargs):
    """Wrapper around `makeTester` that derives a conventional test name and
    adds an inplace-consistency check when ``inplace=True`` is passed.
    """
    if checks is None:
        checks = {}
    if name is None:
        name = str(op)
    # Here we ensure the test name matches the name of the variable defined in
    # this script. This is needed to properly identify the test e.g. with the
    # --with-id option of nosetests, or simply to rerun a specific test that
    # failed.
    capitalize = False
    if name.startswith("Elemwise{") and name.endswith(",no_inplace}"):
        # For instance: Elemwise{add,no_inplace} -> Add
        name = name[9:-12]
        capitalize = True
    elif name.endswith("_inplace"):
        # For instance: sub_inplace -> SubInplace
        capitalize = True
    if capitalize:
        name = "".join([x.capitalize() for x in name.split("_")])
    # Some tests specify a name that already ends with 'Tester', while in
    # other cases we need to add it manually.
    if not name.endswith("Tester"):
        name += "Tester"
    if "inplace" in kwargs:
        if kwargs["inplace"]:
            # Rebind `expected` so the expected output keeps the dtype of the
            # first input, as an inplace op would (the original callable is
            # kept in `_expected` for the closure).
            _expected = expected
            if not isinstance(_expected, dict):

                def expected(*inputs):
                    return np.array(_expected(*inputs), dtype=inputs[0].dtype)

            def inplace_check(inputs, outputs):
                # this used to be inputs[0] is output[0]
                # I changed it so that it was easier to satisfy by the
                # DebugMode
                return np.all(inputs[0] == outputs[0])

            checks = dict(checks, inplace_check=inplace_check)
        del kwargs["inplace"]
    return makeTester(name, op, expected, checks, **kwargs)
_good_broadcast_binary_normal = dict( _good_broadcast_binary_normal = dict(
same_shapes=(rand(2, 3), rand(2, 3)), same_shapes=(rand(2, 3), rand(2, 3)),
not_same_dimensions=(rand(2, 2), rand(2)), not_same_dimensions=(rand(2, 2), rand(2)),
...@@ -823,29 +231,6 @@ _grad_broadcast_binary_normal = dict( ...@@ -823,29 +231,6 @@ _grad_broadcast_binary_normal = dict(
) )
def check_floatX(inputs, rval):
    """Possibly downcast a float64 result to float32.

    :param inputs: Inputs to a function that returned `rval` with these
        inputs.

    :param rval: Value returned by a function with inputs set to `inputs`.

    :returns: Either `rval` unchanged, or `rval` cast to float32. The idea is
        that when a numpy function would have returned a float64, Theano may
        prefer to return a float32 instead when `config.cast_policy` is set to
        'numpy+floatX' and config.floatX to 'float32', and there was no
        float64 input.
    """
    must_downcast = (
        isinstance(rval, np.ndarray)
        and rval.dtype == "float64"
        and config.cast_policy == "numpy+floatX"
        and config.floatX == "float32"
        and all(x.dtype != "float64" for x in inputs)
    )
    return rval.astype("float32") if must_downcast else rval
TestAddBroadcast = makeBroadcastTester( TestAddBroadcast = makeBroadcastTester(
op=add, op=add,
expected=lambda *inputs: check_floatX(inputs, reduce(lambda x, y: x + y, inputs)), expected=lambda *inputs: check_floatX(inputs, reduce(lambda x, y: x + y, inputs)),
...@@ -992,20 +377,6 @@ TestMulInplaceBroadcast = makeBroadcastTester( ...@@ -992,20 +377,6 @@ TestMulInplaceBroadcast = makeBroadcastTester(
) )
def copymod(dct, without=None, **kwargs):
    """Return a shallow copy of `dct` with the keys in `without` removed and
    the `kwargs` entries added/overridden; `dct` itself is left untouched.
    """
    result = copy(dct)
    for key in without or []:
        # Only remove keys that are actually present.
        result.pop(key, None)
    result.update(kwargs)
    return result
_good_broadcast_div_mod_normal_float_no_complex = dict( _good_broadcast_div_mod_normal_float_no_complex = dict(
same_shapes=(rand(2, 3), rand_nonzero((2, 3))), same_shapes=(rand(2, 3), rand_nonzero((2, 3))),
scalar=(rand(2, 3), rand_nonzero((1, 1))), scalar=(rand(2, 3), rand_nonzero((1, 1))),
...@@ -1052,26 +423,6 @@ _grad_broadcast_div_mod_normal = dict( ...@@ -1052,26 +423,6 @@ _grad_broadcast_div_mod_normal = dict(
# empty2=(np.asarray([0]), np.asarray([])), # empty2=(np.asarray([0]), np.asarray([])),
) )
# Relative tolerance used by division testers when verifying gradients.
div_grad_rtol = None
if config.floatX == "float32":
    # We raise the relative tolerance for the grad as there can be errors in
    # float32.
    # This is probably caused by our way of computing the gradient error.
    div_grad_rtol = 0.025
def _numpy_true_div(x, y):
    """Perform true division and cast the result to the expected dtype.

    Used as the `expected` callable of `TrueDivTester`: calling
    `np.true_divide` directly could cause a dtype mismatch, since
    int / int yields float64 in NumPy while Theano returns floatX.
    """
    out = np.true_divide(x, y)
    # Use floatX as the result of int / int
    if x.dtype in tt.discrete_dtypes and y.dtype in tt.discrete_dtypes:
        out = theano._asarray(out, dtype=config.floatX)
    return out
TestTrueDivBroadcast = makeBroadcastTester( TestTrueDivBroadcast = makeBroadcastTester(
op=tt.true_div, op=tt.true_div,
expected=_numpy_true_div, expected=_numpy_true_div,
...@@ -1203,8 +554,8 @@ _good_broadcast_pow_normal_float_pow = copy(_good_broadcast_pow_normal_float) ...@@ -1203,8 +554,8 @@ _good_broadcast_pow_normal_float_pow = copy(_good_broadcast_pow_normal_float)
del _good_broadcast_pow_normal_float_pow["empty2"] del _good_broadcast_pow_normal_float_pow["empty2"]
# Disable NAN checking for pow operator per issue #1780 # Disable NAN checking for pow operator per issue #1780
m = copy(theano.compile.get_default_mode()) ignore_isfinite_mode = copy(theano.compile.get_default_mode())
m.check_isfinite = False ignore_isfinite_mode.check_isfinite = False
TestPowBroadcast = makeBroadcastTester( TestPowBroadcast = makeBroadcastTester(
op=pow, op=pow,
...@@ -1212,7 +563,7 @@ TestPowBroadcast = makeBroadcastTester( ...@@ -1212,7 +563,7 @@ TestPowBroadcast = makeBroadcastTester(
good=_good_broadcast_pow_normal_float, good=_good_broadcast_pow_normal_float,
grad=_grad_broadcast_pow_normal, grad=_grad_broadcast_pow_normal,
name="Pow", name="Pow",
mode=m, mode=ignore_isfinite_mode,
) )
TestPowInplaceBroadcast = makeBroadcastTester( TestPowInplaceBroadcast = makeBroadcastTester(
...@@ -1220,27 +571,7 @@ TestPowInplaceBroadcast = makeBroadcastTester( ...@@ -1220,27 +571,7 @@ TestPowInplaceBroadcast = makeBroadcastTester(
expected=lambda x, y: x ** y, expected=lambda x, y: x ** y,
good=_good_broadcast_pow_normal_float_pow, good=_good_broadcast_pow_normal_float_pow,
inplace=True, inplace=True,
mode=m, mode=ignore_isfinite_mode,
)
# These are corner cases when rounding.  There are many rounding algorithms;
# the C round() function and numpy's rounding do not agree!
corner_case = np.asarray(
    [-2.5, -2.0, -1.5, -1.0, -0.5, -0.51, -0.49, 0, 0.49, 0.5, 0.9, 1, 1.5, 2, 2.5],
    dtype=floatX,
)

# Same values without 0, as the gradient at 0 is not always computable
# numerically.
corner_case_grad = np.asarray(
    [-2.5, -2.0, -1.5, -1.0, -0.5, -0.51, -0.49, 0.49, 0.5, 0.9, 1, 1.5, 2, 2.5],
    dtype=floatX,
)

# "Good" unary float inputs: a random matrix, the rounding corner cases,
# a complex matrix, and an empty array.
_good_broadcast_unary_normal_float = dict(
    normal=[rand_ranged(-5, 5, (2, 3))],
    corner_case=[corner_case],
    complex=[randcomplex(2, 3)],
    empty=[np.asarray([], dtype=config.floatX)],
)
_good_broadcast_unary_normal_float_no_empty = copymod( _good_broadcast_unary_normal_float_no_empty = copymod(
...@@ -1251,73 +582,20 @@ _good_broadcast_unary_normal_float_no_empty_no_complex = copymod( ...@@ -1251,73 +582,20 @@ _good_broadcast_unary_normal_float_no_empty_no_complex = copymod(
_good_broadcast_unary_normal_float_no_empty, without=["complex"] _good_broadcast_unary_normal_float_no_empty, without=["complex"]
) )
_good_broadcast_unary_normal_float_no_complex = copymod(
    _good_broadcast_unary_normal_float, without=["complex"]
)

# Same as above, but with a smaller negative range (for ops whose inputs
# must not be too negative).
_good_broadcast_unary_normal_float_no_complex_small_neg_range = dict(
    normal=[rand_ranged(-2, 5, (2, 3))],
    corner_case=[corner_case],
    empty=[np.asarray([], dtype=config.floatX)],
)

# "Good" unary inputs covering floats, signed/unsigned integers, the
# rounding corner cases, complex values and the empty array.
_good_broadcast_unary_normal = dict(
    normal=[np.asarray(rand_ranged(-5, 5, (2, 3)), dtype=config.floatX)],
    integers=[randint_ranged(-5, 5, (2, 3))],
    # not using -128 because np.allclose would return False
    int8=[np.arange(-127, 128, dtype="int8")],
    uint8=[np.arange(0, 255, dtype="uint8")],
    uint16=[np.arange(0, 65535, dtype="uint16")],
    corner_case=[corner_case],
    complex=[randcomplex(2, 3)],
    empty=[np.asarray([], dtype=config.floatX)],
)

# Variant without complex inputs; note the unsigned ranges stop at 89
# here (smaller than above) and a `big_scalar` case is added.
_good_broadcast_unary_normal_no_complex = dict(
    normal=[np.asarray(rand_ranged(-5, 5, (2, 3)), dtype=floatX)],
    integers=[randint_ranged(-5, 5, (2, 3))],
    int8=[np.arange(-127, 128, dtype="int8")],
    uint8=[np.arange(0, 89, dtype="uint8")],
    uint16=[np.arange(0, 89, dtype="uint16")],
    corner_case=[corner_case],
    empty=[np.asarray([], dtype=config.floatX)],
    big_scalar=[np.arange(17.0, 29.0, 0.5, dtype=floatX)],
)
_grad_broadcast_unary_normal_no_complex = dict( _grad_broadcast_unary_normal_no_complex = dict(
normal=[np.asarray(rand_ranged(-5, 5, (2, 3)), dtype=floatX)], normal=[np.asarray(rand_ranged(-5, 5, (2, 3)), dtype=config.floatX)],
corner_case=[corner_case_grad],
)
_grad_broadcast_unary_normal = dict(
normal=[np.asarray(rand_ranged(-5, 5, (2, 3)), dtype=floatX)],
corner_case=[corner_case_grad], corner_case=[corner_case_grad],
# empty = [np.asarray([])] # XXX: should this be included?
) )
# Avoid epsilon around integer values # Avoid epsilon around integer values
_grad_broadcast_unary_normal_noint = dict( _grad_broadcast_unary_normal_noint = dict(
normal=[(rand_ranged(_eps, 1 - _eps, (2, 3)) + randint(2, 3)).astype(floatX)] normal=[(rand_ranged(_eps, 1 - _eps, (2, 3)) + randint(2, 3)).astype(config.floatX)]
)
_grad_broadcast_unary_normal_small_neg_range = dict(
normal=[np.asarray(rand_ranged(-2, 5, (2, 3)), dtype=floatX)],
corner_case=[corner_case_grad],
) )
_grad_broadcast_unary_normal_no_complex_no_corner_case = copymod( _grad_broadcast_unary_normal_no_complex_no_corner_case = copymod(
_grad_broadcast_unary_normal_no_complex, without=["corner_case"] _grad_broadcast_unary_normal_no_complex, without=["corner_case"]
) )
_grad_broadcast_unary_abs1_no_complex = dict(
normal=[np.asarray(rand_ranged(-1 + _eps, 1 - _eps, (2, 3)), dtype=floatX)],
)
_grad_broadcast_unary_0_2_no_complex = dict(
# Don't go too close to 0 or 2 for tests in float32
normal=[np.asarray(rand_ranged(_eps, 1 - _eps, (2, 3)), dtype=floatX)],
)
# inplace ops when the input is integer and the output is float* # inplace ops when the input is integer and the output is float*
# don't have a well defined behavior. We don't test that case. # don't have a well defined behavior. We don't test that case.
...@@ -1389,7 +667,7 @@ TestCeilBroadcast = makeBroadcastTester( ...@@ -1389,7 +667,7 @@ TestCeilBroadcast = makeBroadcastTester(
good=_good_broadcast_unary_normal_no_complex, good=_good_broadcast_unary_normal_no_complex,
grad=copymod( grad=copymod(
_grad_broadcast_unary_normal_noint, _grad_broadcast_unary_normal_noint,
extra=[np.asarray([-2.5, -1.5, -1.51, 0.49, 0.98, 1.02], dtype=floatX)], extra=[np.asarray([-2.5, -1.5, -1.51, 0.49, 0.98, 1.02], dtype=config.floatX)],
), ),
) )
...@@ -1949,442 +1227,6 @@ TestArctanhInplaceBroadcast = makeBroadcastTester( ...@@ -1949,442 +1227,6 @@ TestArctanhInplaceBroadcast = makeBroadcastTester(
inplace=True, inplace=True,
) )
# If SciPy is not installed, we cannot compare against its reference
# implementations.  We do not precompute the expected results because doing
# so is brittle (it has been broken before): any change to the random number
# generation here changes the inputs, and therefore the outputs.
if imported_scipy_special:
    expected_erf = scipy.special.erf
    expected_erfc = scipy.special.erfc
    expected_erfinv = scipy.special.erfinv
    expected_erfcinv = scipy.special.erfcinv
    expected_gamma = scipy.special.gamma
    expected_gammaln = scipy.special.gammaln
    expected_psi = scipy.special.psi
    expected_tri_gamma = partial(scipy.special.polygamma, 1)
    expected_chi2sf = scipy.stats.chi2.sf
    expected_j0 = scipy.special.j0
    expected_j1 = scipy.special.j1
    expected_jv = scipy.special.jv
    expected_i0 = scipy.special.i0
    expected_i1 = scipy.special.i1
    expected_iv = scipy.special.iv
    skip_scipy = False
    expected_erfcx = scipy.special.erfcx
else:
    # Placeholders only: when `skip_scipy` is a truthy string, the testers
    # below are skipped, so these expected values are never actually used.
    expected_erf = []
    expected_erfc = []
    expected_erfcx = []
    expected_erfinv = []
    expected_erfcinv = []
    expected_gamma = []
    expected_gammaln = []
    expected_psi = []
    expected_tri_gamma = []
    expected_chi2sf = []
    expected_j0 = []
    expected_j1 = []
    expected_jv = []
    expected_i0 = []
    expected_i1 = []
    expected_iv = []
    skip_scipy = "scipy is not present"
TestErfBroadcast = makeBroadcastTester(
op=tt.erf,
expected=expected_erf,
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestErfInplaceBroadcast = makeBroadcastTester(
op=inplace.erf_inplace,
expected=expected_erf,
good=_good_broadcast_unary_normal_float,
mode=mode_no_scipy,
eps=2e-10,
inplace=True,
skip=skip_scipy,
)
TestErfcBroadcast = makeBroadcastTester(
op=tt.erfc,
expected=expected_erfc,
good=_good_broadcast_unary_normal_float_no_complex,
grad=_grad_broadcast_unary_normal,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestErfcInplaceBroadcast = makeBroadcastTester(
op=inplace.erfc_inplace,
expected=expected_erfc,
good=_good_broadcast_unary_normal_float_no_complex,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestErfcxBroadcast = makeBroadcastTester(
op=tt.erfcx,
expected=expected_erfcx,
good=_good_broadcast_unary_normal_float_no_complex_small_neg_range,
grad=_grad_broadcast_unary_normal_small_neg_range,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestErfcxInplaceBroadcast = makeBroadcastTester(
op=inplace.erfcx_inplace,
expected=expected_erfcx,
good=_good_broadcast_unary_normal_float_no_complex_small_neg_range,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestErfinvBroadcast = makeBroadcastTester(
op=tt.erfinv,
expected=expected_erfinv,
good={
"normal": [rand_ranged(-0.9, 0.9, (2, 3))],
"empty": [np.asarray([], dtype=config.floatX)],
},
grad=_grad_broadcast_unary_abs1_no_complex,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestErfcinvBroadcast = makeBroadcastTester(
op=tt.erfcinv,
expected=expected_erfcinv,
good={
"normal": [rand_ranged(0.001, 1.9, (2, 3))],
"empty": [np.asarray([], dtype=config.floatX)],
},
grad=_grad_broadcast_unary_0_2_no_complex,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
_good_broadcast_unary_gammaln = dict(
normal=(rand_ranged(-1 + 1e-2, 10, (2, 3)),),
empty=(np.asarray([], dtype=config.floatX),),
int=(randint_ranged(1, 10, (2, 3)),),
uint8=(randint_ranged(1, 6, (2, 3)).astype("uint8"),),
uint16=(randint_ranged(1, 10, (2, 3)).astype("uint16"),),
uint64=(randint_ranged(1, 10, (2, 3)).astype("uint64"),),
)
_grad_broadcast_unary_gammaln = dict(
# smaller range as our grad method does not estimate it well enough.
normal=(rand_ranged(1e-1, 8, (2, 3)),),
)
TestGammaBroadcast = makeBroadcastTester(
op=tt.gamma,
expected=expected_gamma,
good=_good_broadcast_unary_gammaln,
grad=_grad_broadcast_unary_gammaln,
mode=mode_no_scipy,
eps=1e-5,
skip=skip_scipy,
)
TestGammaInplaceBroadcast = makeBroadcastTester(
op=inplace.gamma_inplace,
expected=expected_gamma,
good=_good_broadcast_unary_gammaln,
mode=mode_no_scipy,
eps=1e-5,
inplace=True,
skip=skip_scipy,
)
TestGammalnBroadcast = makeBroadcastTester(
op=tt.gammaln,
expected=expected_gammaln,
good=_good_broadcast_unary_gammaln,
grad=_grad_broadcast_unary_gammaln,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestGammalnInplaceBroadcast = makeBroadcastTester(
op=inplace.gammaln_inplace,
expected=expected_gammaln,
good=_good_broadcast_unary_gammaln,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
_good_broadcast_unary_psi = dict(
normal=(rand_ranged(1, 10, (2, 3)),),
empty=(np.asarray([], dtype=config.floatX),),
int=(randint_ranged(1, 10, (2, 3)),),
uint8=(randint_ranged(1, 10, (2, 3)).astype("uint8"),),
uint16=(randint_ranged(1, 10, (2, 3)).astype("uint16"),),
)
TestPsiBroadcast = makeBroadcastTester(
op=tt.psi,
expected=expected_psi,
good=_good_broadcast_unary_psi,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestPsiInplaceBroadcast = makeBroadcastTester(
op=inplace.psi_inplace,
expected=expected_psi,
good=_good_broadcast_unary_psi,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
_good_broadcast_unary_tri_gamma = _good_broadcast_unary_psi
TestTriGammaBroadcast = makeBroadcastTester(
op=tt.tri_gamma,
expected=expected_tri_gamma,
good=_good_broadcast_unary_psi,
eps=2e-8,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestTriGammaInplaceBroadcast = makeBroadcastTester(
op=inplace.tri_gamma_inplace,
expected=expected_tri_gamma,
good=_good_broadcast_unary_tri_gamma,
eps=2e-8,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
# chi2sf takes two inputs: a value (x) and the degrees of freedom (k).
# Here k is fixed to the scalar 1 in every case, so the op can be exercised
# through the (otherwise unary-style) broadcast tester.
_good_broadcast_unary_chi2sf = dict(
    normal=(rand_ranged(1, 10, (2, 3)), np.asarray(1, dtype=config.floatX)),
    empty=(np.asarray([], dtype=config.floatX), np.asarray(1, dtype=config.floatX)),
    integers=(randint_ranged(1, 10, (2, 3)), np.asarray(1, dtype=config.floatX)),
    uint8=(
        randint_ranged(1, 10, (2, 3)).astype("uint8"),
        np.asarray(1, dtype=config.floatX),
    ),
    uint16=(
        randint_ranged(1, 10, (2, 3)).astype("uint16"),
        np.asarray(1, dtype=config.floatX),
    ),
)
TestChi2SFBroadcast = makeBroadcastTester(
op=tt.chi2sf,
expected=expected_chi2sf,
good=_good_broadcast_unary_chi2sf,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
name="Chi2SF",
)
TestChi2SFInplaceBroadcast = makeBroadcastTester(
op=inplace.chi2sf_inplace,
expected=expected_chi2sf,
good=_good_broadcast_unary_chi2sf,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
name="Chi2SF",
)
# "Good" inputs for the unary Bessel functions (j0, j1, i0, i1).
_good_broadcast_unary_bessel = dict(
    normal=(rand_ranged(-10, 10, (2, 3)),),
    empty=(np.asarray([], dtype=config.floatX),),
    int=(randint_ranged(-10, 10, (2, 3)),),
    uint8=(randint_ranged(0, 10, (2, 3)).astype("uint8"),),
    uint16=(randint_ranged(0, 10, (2, 3)).astype("uint16"),),
)

_grad_broadcast_unary_bessel = dict(
    normal=(rand_ranged(-10.0, 10.0, (2, 3)),),
)

# "Good" inputs for the binary Bessel functions (jv, iv): (order v, value x).
_good_broadcast_binary_bessel = dict(
    normal=(rand_ranged(-5, 5, (2, 3)), rand_ranged(0, 10, (2, 3))),
    empty=(np.asarray([], dtype=config.floatX), np.asarray([], dtype=config.floatX)),
    integers=(randint_ranged(-5, 5, (2, 3)), randint_ranged(-10, 10, (2, 3))),
    uint8=(
        randint_ranged(0, 5, (2, 3)).astype("uint8"),
        randint_ranged(0, 10, (2, 3)).astype("uint8"),
    ),
    uint16=(
        randint_ranged(0, 5, (2, 3)).astype("uint16"),
        randint_ranged(0, 10, (2, 3)).astype("uint16"),
    ),
)

_grad_broadcast_binary_bessel = dict(
    normal=(rand_ranged(1, 5, (2, 3)), rand_ranged(0, 10, (2, 3)))
)
TestJ0Broadcast = makeBroadcastTester(
op=tt.j0,
expected=expected_j0,
good=_good_broadcast_unary_bessel,
grad=_grad_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestJ0InplaceBroadcast = makeBroadcastTester(
op=inplace.j0_inplace,
expected=expected_j0,
good=_good_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestJ1Broadcast = makeBroadcastTester(
op=tt.j1,
expected=expected_j1,
good=_good_broadcast_unary_bessel,
grad=_grad_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestJ1InplaceBroadcast = makeBroadcastTester(
op=inplace.j1_inplace,
expected=expected_j1,
good=_good_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestJvBroadcast = makeBroadcastTester(
op=tt.jv,
expected=expected_jv,
good=_good_broadcast_binary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestJvInplaceBroadcast = makeBroadcastTester(
op=inplace.jv_inplace,
expected=expected_jv,
good=_good_broadcast_binary_bessel,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
@pytest.mark.skipif(skip_scipy, reason="SciPy needed")
def test_verify_jv_grad():
    # Verify the `Jv` gradient.  Implemented separately (rather than through
    # the broadcast tester) because the gradient is not defined with respect
    # to the first input `v`, which must therefore be held fixed.
    v_val, x_val = _grad_broadcast_binary_bessel["normal"]

    def fixed_first_input_jv(x):
        return tt.jv(v_val, x)

    utt.verify_grad(fixed_first_input_jv, [x_val])
TestI0Broadcast = makeBroadcastTester(
op=tt.i0,
expected=expected_i0,
good=_good_broadcast_unary_bessel,
grad=_grad_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestI0InplaceBroadcast = makeBroadcastTester(
op=inplace.i0_inplace,
expected=expected_i0,
good=_good_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestI1Broadcast = makeBroadcastTester(
op=tt.i1,
expected=expected_i1,
good=_good_broadcast_unary_bessel,
grad=_grad_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestI1InplaceBroadcast = makeBroadcastTester(
op=inplace.i1_inplace,
expected=expected_i1,
good=_good_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestIvBroadcast = makeBroadcastTester(
op=tt.iv,
expected=expected_iv,
good=_good_broadcast_binary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestIvInplaceBroadcast = makeBroadcastTester(
op=inplace.iv_inplace,
expected=expected_iv,
good=_good_broadcast_binary_bessel,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
@pytest.mark.skipif(skip_scipy, reason="SciPy needed")
def test_verify_iv_grad():
    # Verify the `Iv` gradient.  Implemented separately (rather than through
    # the broadcast tester) because the gradient is not defined with respect
    # to the first input `v`, which must therefore be held fixed.
    v_val, x_val = _grad_broadcast_binary_bessel["normal"]

    def fixed_first_input_iv(x):
        return tt.iv(v_val, x)

    utt.verify_grad(fixed_first_input_iv, [x_val])
TestZerosLikeBroadcast = makeBroadcastTester( TestZerosLikeBroadcast = makeBroadcastTester(
op=tt.zeros_like, op=tt.zeros_like,
expected=np.zeros_like, expected=np.zeros_like,
...@@ -2527,43 +1369,6 @@ def _numpy_second(x, y): ...@@ -2527,43 +1369,6 @@ def _numpy_second(x, y):
return np.broadcast_arrays(x, y)[1] return np.broadcast_arrays(x, y)[1]
# Don't forget to modify the two slice expressions below when editing
# this tuple!
ALL_DTYPES = (
    "int8",
    "int16",
    "int32",
    "int64",
    "float32",
    "float64",
    "uint8",
    "uint16",
    "complex64",
    "complex128",
)
# The first six entries (signed ints and floats; note the unsigned ints
# are deliberately not included), and the last two (complex).
REAL_DTYPES = ALL_DTYPES[:6]
COMPLEX_DTYPES = ALL_DTYPES[-2:]
def multi_dtype_checks(shape1, shape2, dtypes=ALL_DTYPES, nameprefix=""):
    """Yield named test cases pairing random arrays of two distinct dtypes.

    For each unordered dtype pair, two cases are produced — one per
    argument order — named ``<prefix>_<dtypeA>_<dtypeB>`` and
    ``<prefix>_<dtypeB>_<dtypeA>``.
    """
    for dt_a, dt_b in itertools.combinations(dtypes, 2):
        arr_a = rand_of_dtype(shape1, dt_a)
        arr_b = rand_of_dtype(shape2, dt_b)
        yield ("_".join((nameprefix, dt_a, dt_b)), (arr_a, arr_b))
        yield ("_".join((nameprefix, dt_b, dt_a)), (arr_b, arr_a))
def multi_dtype_cast_checks(shape, dtypes=ALL_DTYPES, nameprefix=""):
    """Yield named cast test cases: a random array plus a target dtype.

    For each unordered dtype pair, two cases are produced — an array of
    one dtype paired with the other dtype as the cast target.
    """
    for dt_a, dt_b in itertools.combinations(dtypes, 2):
        arr_a = rand_of_dtype(shape, dt_a)
        arr_b = rand_of_dtype(shape, dt_b)
        yield ("_".join((nameprefix, dt_a, dt_b)), (arr_a, dt_b))
        yield ("_".join((nameprefix, dt_b, dt_a)), (arr_b, dt_a))
TestSecondBroadcast = makeTester( TestSecondBroadcast = makeTester(
name="SecondBroadcastTester", name="SecondBroadcastTester",
op=second, op=second,
...@@ -3363,7 +2168,7 @@ def test_batched_dot_not_contiguous(): ...@@ -3363,7 +2168,7 @@ def test_batched_dot_not_contiguous():
size = 1 size = 1
for dimsize in _shape: for dimsize in _shape:
size *= dimsize size *= dimsize
return np.arange(size, dtype=floatX).reshape(_shape) return np.arange(size, dtype=config.floatX).reshape(_shape)
X = tensor3() X = tensor3()
W = tensor3() W = tensor3()
...@@ -3378,7 +2183,7 @@ def test_batched_dot_not_contiguous(): ...@@ -3378,7 +2183,7 @@ def test_batched_dot_not_contiguous():
direction = -1 if inverted else 1 direction = -1 if inverted else 1
x = x_container[::direction, ::2, ::2] x = x_container[::direction, ::2, ::2]
assert x.shape == (30, 20, 10) assert x.shape == (30, 20, 10)
assert x.strides[0] == direction * np.dtype(floatX).itemsize assert x.strides[0] == direction * np.dtype(config.floatX).itemsize
assert not (x.flags["C_CONTIGUOUS"] or x.flags["F_CONTIGUOUS"]) assert not (x.flags["C_CONTIGUOUS"] or x.flags["F_CONTIGUOUS"])
result = f(x, w) result = f(x, w)
ref_result = np.asarray(list(np.dot(u, v) for u, v in zip(x, w))) ref_result = np.asarray(list(np.dot(u, v) for u, v in zip(x, w)))
...@@ -3725,7 +2530,7 @@ class TestMaxAndArgmax: ...@@ -3725,7 +2530,7 @@ class TestMaxAndArgmax:
x = tt.matrix() x = tt.matrix()
m, i = max_and_argmax(x, axis=1) m, i = max_and_argmax(x, axis=1)
f = theano.function([x], [m, i]) f = theano.function([x], [m, i])
xv = np.zeros((0, 4), dtype=floatX) xv = np.zeros((0, 4), dtype=config.floatX)
mv, iv = f(xv) mv, iv = f(xv)
assert mv.shape == (0,) assert mv.shape == (0,)
assert iv.shape == (0,) assert iv.shape == (0,)
...@@ -4108,8 +2913,8 @@ class TestOuter: ...@@ -4108,8 +2913,8 @@ class TestOuter:
y = tt.tensor(dtype="floatX", broadcastable=(False,) * n) y = tt.tensor(dtype="floatX", broadcastable=(False,) * n)
s1 = np.random.randint(1, 10, m) s1 = np.random.randint(1, 10, m)
s2 = np.random.randint(1, 10, n) s2 = np.random.randint(1, 10, n)
v1 = np.asarray(np.random.rand(*s1)).astype(floatX) v1 = np.asarray(np.random.rand(*s1)).astype(config.floatX)
v2 = np.asarray(np.random.rand(*s2)).astype(floatX) v2 = np.asarray(np.random.rand(*s2)).astype(config.floatX)
o = tt.outer(x, y).eval({x: v1, y: v2}) o = tt.outer(x, y).eval({x: v1, y: v2})
assert_allclose(o, np.outer(v1, v2)) assert_allclose(o, np.outer(v1, v2))
...@@ -4130,8 +2935,8 @@ class TestOuter: ...@@ -4130,8 +2935,8 @@ class TestOuter:
((1, 1), (4, 5)), ((1, 1), (4, 5)),
((1, 1), (1, 1)), ((1, 1), (1, 1)),
]: ]:
data0 = np.random.rand(*shp0).astype(floatX) data0 = np.random.rand(*shp0).astype(config.floatX)
data1 = np.random.rand(*shp1).astype(floatX) data1 = np.random.rand(*shp1).astype(config.floatX)
utt.verify_grad(tt.outer, [data0, data1]) utt.verify_grad(tt.outer, [data0, data1])
...@@ -7064,7 +5869,7 @@ class TestTensordot: ...@@ -7064,7 +5869,7 @@ class TestTensordot:
utt.verify_grad(self.TensorDot(axes), [aval, bval]) utt.verify_grad(self.TensorDot(axes), [aval, bval])
def test_broadcastable1(self): def test_broadcastable1(self):
x = TensorType(dtype=floatX, broadcastable=(True, False, False))("x") x = TensorType(dtype=config.floatX, broadcastable=(True, False, False))("x")
y = tensor3("y") y = tensor3("y")
z = tensordot(x, y) z = tensordot(x, y)
assert z.broadcastable == (True, False) assert z.broadcastable == (True, False)
...@@ -7075,7 +5880,7 @@ class TestTensordot: ...@@ -7075,7 +5880,7 @@ class TestTensordot:
assert np.allclose(np.tensordot(xv, yv), zv) assert np.allclose(np.tensordot(xv, yv), zv)
def test_broadcastable2(self): def test_broadcastable2(self):
x = TensorType(dtype=floatX, broadcastable=(True, False, False))("x") x = TensorType(dtype=config.floatX, broadcastable=(True, False, False))("x")
y = tensor3("y") y = tensor3("y")
axes = [[2, 1], [0, 1]] axes = [[2, 1], [0, 1]]
z = tensordot(x, y, axes=axes) z = tensordot(x, y, axes=axes)
...@@ -7668,30 +6473,6 @@ def test_len(): ...@@ -7668,30 +6473,6 @@ def test_len():
len(x) len(x)
def test_mod():
    # We add this test because not every language / C implementation gives
    # the same sign for the result of `%`.  This checks that the C code of
    # `Mod` behaves like Python's `%` operator, which is what we want.
    # The DualLinker runs both the Python and C implementations and
    # compares them.
    x, y = fscalars("xy")
    fn = gof.DualLinker().accept(gof.FunctionGraph([x, y], [x % y])).make_function()
    # Cover every sign combination of numerator and denominator.
    for a, b in (
        (0, 1),
        (1, 1),
        (0, -1),
        (1, -1),
        (-1, -1),
        (1, 2),
        (-1, 2),
        (1, -2),
        (-1, -2),
        (5, 3),
        (-5, 3),
        (5, -3),
        (-5, -3),
    ):
        assert fn(a, b) == a % b, (a,)
def test_divmod(): def test_divmod():
# Confirm that divmod is equivalent to the python version. # Confirm that divmod is equivalent to the python version.
x, y = fscalars("xy") x, y = fscalars("xy")
...@@ -8204,10 +6985,10 @@ class TestSpecifyShape: ...@@ -8204,10 +6985,10 @@ class TestSpecifyShape:
specify_shape = SpecifyShape() specify_shape = SpecifyShape()
x = vector() x = vector()
xval = np.random.rand(2).astype(floatX) xval = np.random.rand(2).astype(config.floatX)
f = theano.function([x], specify_shape(x, [2]), mode=self.mode) f = theano.function([x], specify_shape(x, [2]), mode=self.mode)
f(xval) f(xval)
xval = np.random.rand(3).astype(floatX) xval = np.random.rand(3).astype(config.floatX)
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
f(xval) f(xval)
...@@ -8219,7 +7000,7 @@ class TestSpecifyShape: ...@@ -8219,7 +7000,7 @@ class TestSpecifyShape:
) )
x = matrix() x = matrix()
xval = np.random.rand(2, 3).astype(floatX) xval = np.random.rand(2, 3).astype(config.floatX)
f = theano.function([x], specify_shape(x, [2, 3]), mode=self.mode) f = theano.function([x], specify_shape(x, [2, 3]), mode=self.mode)
assert isinstance( assert isinstance(
[n for n in f.maker.fgraph.toposort() if isinstance(n.op, SpecifyShape)][0] [n for n in f.maker.fgraph.toposort() if isinstance(n.op, SpecifyShape)][0]
...@@ -8229,7 +7010,7 @@ class TestSpecifyShape: ...@@ -8229,7 +7010,7 @@ class TestSpecifyShape:
) )
f(xval) f(xval)
for shape_ in [(1, 3), (2, 2), (5, 5)]: for shape_ in [(1, 3), (2, 2), (5, 5)]:
xval = np.random.rand(*shape_).astype(floatX) xval = np.random.rand(*shape_).astype(config.floatX)
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
f(xval) f(xval)
...@@ -8239,7 +7020,7 @@ class TestSpecifyShape: ...@@ -8239,7 +7020,7 @@ class TestSpecifyShape:
x = vector() x = vector()
shape_vec = ivector() shape_vec = ivector()
xval = np.random.rand(2).astype(floatX) xval = np.random.rand(2).astype(config.floatX)
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
specify_shape(x, []) specify_shape(x, [])
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
...@@ -8258,7 +7039,7 @@ class TestSpecifyShape: ...@@ -8258,7 +7039,7 @@ class TestSpecifyShape:
f(xval, [2, 2]) f(xval, [2, 2])
x = matrix() x = matrix()
xval = np.random.rand(2, 3).astype(floatX) xval = np.random.rand(2, 3).astype(config.floatX)
for shape_ in [(), (1,), (2, 3, 4)]: for shape_ in [(), (1,), (2, 3, 4)]:
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
specify_shape(x, shape_) specify_shape(x, shape_)
...@@ -8745,7 +7526,7 @@ class TestInferShape(utt.InferShapeTester): ...@@ -8745,7 +7526,7 @@ class TestInferShape(utt.InferShapeTester):
class TestTensorInstanceMethods: class TestTensorInstanceMethods:
def setup_method(self): def setup_method(self):
self.vars = matrices("X", "Y") self.vars = matrices("X", "Y")
self.vals = [m.astype(floatX) for m in [rand(2, 2), rand(2, 2)]] self.vals = [m.astype(config.floatX) for m in [rand(2, 2), rand(2, 2)]]
def test_argmin(self): def test_argmin(self):
X, _ = self.vars X, _ = self.vars
......
import numpy as np
import pytest
scipy = pytest.importorskip("scipy")
from functools import partial
from tests import unittest_tools as utt
from tests.tensor.utils import (
_good_broadcast_unary_chi2sf,
_good_broadcast_unary_normal,
_good_broadcast_unary_normal_float,
_good_broadcast_unary_normal_float_no_complex,
_good_broadcast_unary_normal_float_no_complex_small_neg_range,
_grad_broadcast_unary_0_2_no_complex,
_grad_broadcast_unary_abs1_no_complex,
_grad_broadcast_unary_normal,
_grad_broadcast_unary_normal_small_neg_range,
makeBroadcastTester,
rand_ranged,
randint_ranged,
)
from theano import config, tensor
from theano.compile.mode import get_default_mode
from theano.tensor import inplace
imported_scipy_special = False
mode_no_scipy = get_default_mode()
try:
import scipy.special
import scipy.stats
imported_scipy_special = True
except ImportError:
if config.mode == "FAST_COMPILE":
mode_no_scipy = "FAST_RUN"
# If SciPy is not installed, we cannot compare against its reference
# implementations.  We do not precompute the expected results because doing
# so is brittle (it has been broken before): any change to the random number
# generation here changes the inputs, and therefore the outputs.
if imported_scipy_special:
    expected_erf = scipy.special.erf
    expected_erfc = scipy.special.erfc
    expected_erfinv = scipy.special.erfinv
    expected_erfcinv = scipy.special.erfcinv
    expected_gamma = scipy.special.gamma
    expected_gammaln = scipy.special.gammaln
    expected_psi = scipy.special.psi
    expected_tri_gamma = partial(scipy.special.polygamma, 1)
    expected_chi2sf = scipy.stats.chi2.sf
    expected_j0 = scipy.special.j0
    expected_j1 = scipy.special.j1
    expected_jv = scipy.special.jv
    expected_i0 = scipy.special.i0
    expected_i1 = scipy.special.i1
    expected_iv = scipy.special.iv
    skip_scipy = False
    expected_erfcx = scipy.special.erfcx
else:
    # Placeholders only: when `skip_scipy` is a truthy string, the testers
    # below are skipped, so these expected values are never actually used.
    expected_erf = []
    expected_erfc = []
    expected_erfcx = []
    expected_erfinv = []
    expected_erfcinv = []
    expected_gamma = []
    expected_gammaln = []
    expected_psi = []
    expected_tri_gamma = []
    expected_chi2sf = []
    expected_j0 = []
    expected_j1 = []
    expected_jv = []
    expected_i0 = []
    expected_i1 = []
    expected_iv = []
    skip_scipy = "scipy is not present"
TestErfBroadcast = makeBroadcastTester(
op=tensor.erf,
expected=expected_erf,
good=_good_broadcast_unary_normal,
grad=_grad_broadcast_unary_normal,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestErfInplaceBroadcast = makeBroadcastTester(
op=inplace.erf_inplace,
expected=expected_erf,
good=_good_broadcast_unary_normal_float,
mode=mode_no_scipy,
eps=2e-10,
inplace=True,
skip=skip_scipy,
)
TestErfcBroadcast = makeBroadcastTester(
op=tensor.erfc,
expected=expected_erfc,
good=_good_broadcast_unary_normal_float_no_complex,
grad=_grad_broadcast_unary_normal,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestErfcInplaceBroadcast = makeBroadcastTester(
op=inplace.erfc_inplace,
expected=expected_erfc,
good=_good_broadcast_unary_normal_float_no_complex,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestErfcxBroadcast = makeBroadcastTester(
op=tensor.erfcx,
expected=expected_erfcx,
good=_good_broadcast_unary_normal_float_no_complex_small_neg_range,
grad=_grad_broadcast_unary_normal_small_neg_range,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestErfcxInplaceBroadcast = makeBroadcastTester(
op=inplace.erfcx_inplace,
expected=expected_erfcx,
good=_good_broadcast_unary_normal_float_no_complex_small_neg_range,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestErfinvBroadcast = makeBroadcastTester(
op=tensor.erfinv,
expected=expected_erfinv,
good={
"normal": [rand_ranged(-0.9, 0.9, (2, 3))],
"empty": [np.asarray([], dtype=config.floatX)],
},
grad=_grad_broadcast_unary_abs1_no_complex,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestErfcinvBroadcast = makeBroadcastTester(
op=tensor.erfcinv,
expected=expected_erfcinv,
good={
"normal": [rand_ranged(0.001, 1.9, (2, 3))],
"empty": [np.asarray([], dtype=config.floatX)],
},
grad=_grad_broadcast_unary_0_2_no_complex,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
# "Good" inputs for gamma/gammaln: strictly positive integer cases, plus
# floats just above -1 where gammaln is still defined.
_good_broadcast_unary_gammaln = dict(
    normal=(rand_ranged(-1 + 1e-2, 10, (2, 3)),),
    empty=(np.asarray([], dtype=config.floatX),),
    int=(randint_ranged(1, 10, (2, 3)),),
    uint8=(randint_ranged(1, 6, (2, 3)).astype("uint8"),),
    uint16=(randint_ranged(1, 10, (2, 3)).astype("uint16"),),
    uint64=(randint_ranged(1, 10, (2, 3)).astype("uint64"),),
)

_grad_broadcast_unary_gammaln = dict(
    # smaller range as our grad method does not estimate it well enough.
    normal=(rand_ranged(1e-1, 8, (2, 3)),),
)
TestGammaBroadcast = makeBroadcastTester(
op=tensor.gamma,
expected=expected_gamma,
good=_good_broadcast_unary_gammaln,
grad=_grad_broadcast_unary_gammaln,
mode=mode_no_scipy,
eps=1e-5,
skip=skip_scipy,
)
TestGammaInplaceBroadcast = makeBroadcastTester(
op=inplace.gamma_inplace,
expected=expected_gamma,
good=_good_broadcast_unary_gammaln,
mode=mode_no_scipy,
eps=1e-5,
inplace=True,
skip=skip_scipy,
)
TestGammalnBroadcast = makeBroadcastTester(
op=tensor.gammaln,
expected=expected_gammaln,
good=_good_broadcast_unary_gammaln,
grad=_grad_broadcast_unary_gammaln,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestGammalnInplaceBroadcast = makeBroadcastTester(
op=inplace.gammaln_inplace,
expected=expected_gammaln,
good=_good_broadcast_unary_gammaln,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
_good_broadcast_unary_psi = dict(
normal=(rand_ranged(1, 10, (2, 3)),),
empty=(np.asarray([], dtype=config.floatX),),
int=(randint_ranged(1, 10, (2, 3)),),
uint8=(randint_ranged(1, 10, (2, 3)).astype("uint8"),),
uint16=(randint_ranged(1, 10, (2, 3)).astype("uint16"),),
)
TestPsiBroadcast = makeBroadcastTester(
op=tensor.psi,
expected=expected_psi,
good=_good_broadcast_unary_psi,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestPsiInplaceBroadcast = makeBroadcastTester(
op=inplace.psi_inplace,
expected=expected_psi,
good=_good_broadcast_unary_psi,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
_good_broadcast_unary_tri_gamma = _good_broadcast_unary_psi
TestTriGammaBroadcast = makeBroadcastTester(
op=tensor.tri_gamma,
expected=expected_tri_gamma,
good=_good_broadcast_unary_psi,
eps=2e-8,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestTriGammaInplaceBroadcast = makeBroadcastTester(
op=inplace.tri_gamma_inplace,
expected=expected_tri_gamma,
good=_good_broadcast_unary_tri_gamma,
eps=2e-8,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestChi2SFBroadcast = makeBroadcastTester(
op=tensor.chi2sf,
expected=expected_chi2sf,
good=_good_broadcast_unary_chi2sf,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
name="Chi2SF",
)
TestChi2SFInplaceBroadcast = makeBroadcastTester(
op=inplace.chi2sf_inplace,
expected=expected_chi2sf,
good=_good_broadcast_unary_chi2sf,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
name="Chi2SF",
)
# "Good" inputs for the unary Bessel functions (j0, j1, i0, i1).
_good_broadcast_unary_bessel = dict(
    normal=(rand_ranged(-10, 10, (2, 3)),),
    empty=(np.asarray([], dtype=config.floatX),),
    int=(randint_ranged(-10, 10, (2, 3)),),
    uint8=(randint_ranged(0, 10, (2, 3)).astype("uint8"),),
    uint16=(randint_ranged(0, 10, (2, 3)).astype("uint16"),),
)

_grad_broadcast_unary_bessel = dict(
    normal=(rand_ranged(-10.0, 10.0, (2, 3)),),
)

# "Good" inputs for the binary Bessel functions (jv, iv): (order v, value x).
_good_broadcast_binary_bessel = dict(
    normal=(rand_ranged(-5, 5, (2, 3)), rand_ranged(0, 10, (2, 3))),
    empty=(np.asarray([], dtype=config.floatX), np.asarray([], dtype=config.floatX)),
    integers=(randint_ranged(-5, 5, (2, 3)), randint_ranged(-10, 10, (2, 3))),
    uint8=(
        randint_ranged(0, 5, (2, 3)).astype("uint8"),
        randint_ranged(0, 10, (2, 3)).astype("uint8"),
    ),
    uint16=(
        randint_ranged(0, 5, (2, 3)).astype("uint16"),
        randint_ranged(0, 10, (2, 3)).astype("uint16"),
    ),
)

_grad_broadcast_binary_bessel = dict(
    normal=(rand_ranged(1, 5, (2, 3)), rand_ranged(0, 10, (2, 3)))
)
TestJ0Broadcast = makeBroadcastTester(
op=tensor.j0,
expected=expected_j0,
good=_good_broadcast_unary_bessel,
grad=_grad_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestJ0InplaceBroadcast = makeBroadcastTester(
op=inplace.j0_inplace,
expected=expected_j0,
good=_good_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestJ1Broadcast = makeBroadcastTester(
op=tensor.j1,
expected=expected_j1,
good=_good_broadcast_unary_bessel,
grad=_grad_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestJ1InplaceBroadcast = makeBroadcastTester(
op=inplace.j1_inplace,
expected=expected_j1,
good=_good_broadcast_unary_bessel,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
TestJvBroadcast = makeBroadcastTester(
op=tensor.jv,
expected=expected_jv,
good=_good_broadcast_binary_bessel,
eps=2e-10,
mode=mode_no_scipy,
skip=skip_scipy,
)
TestJvInplaceBroadcast = makeBroadcastTester(
op=inplace.jv_inplace,
expected=expected_jv,
good=_good_broadcast_binary_bessel,
eps=2e-10,
mode=mode_no_scipy,
inplace=True,
skip=skip_scipy,
)
def test_verify_jv_grad():
    """Numerically verify the gradient of `jv` w.r.t. its second input.

    Done outside `makeBroadcastTester` because the gradient w.r.t. the
    first input (the order) is not defined, so the order is frozen to a
    constant and only the argument is perturbed.
    """
    order, arg = _grad_broadcast_binary_bessel["normal"]

    def jv_fixed_order(x):
        return tensor.jv(order, x)

    utt.verify_grad(jv_fixed_order, [arg])
# Broadcast testers for the `i0`, `i1` and `iv` ops and their in-place
# variants.  The in-place testers omit the `grad` cases.
TestI0Broadcast = makeBroadcastTester(
    op=tensor.i0,
    expected=expected_i0,
    good=_good_broadcast_unary_bessel,
    grad=_grad_broadcast_unary_bessel,
    eps=2e-10,
    mode=mode_no_scipy,
    skip=skip_scipy,
)
TestI0InplaceBroadcast = makeBroadcastTester(
    op=inplace.i0_inplace,
    expected=expected_i0,
    good=_good_broadcast_unary_bessel,
    eps=2e-10,
    mode=mode_no_scipy,
    inplace=True,
    skip=skip_scipy,
)
TestI1Broadcast = makeBroadcastTester(
    op=tensor.i1,
    expected=expected_i1,
    good=_good_broadcast_unary_bessel,
    grad=_grad_broadcast_unary_bessel,
    eps=2e-10,
    mode=mode_no_scipy,
    skip=skip_scipy,
)
TestI1InplaceBroadcast = makeBroadcastTester(
    op=inplace.i1_inplace,
    expected=expected_i1,
    good=_good_broadcast_unary_bessel,
    eps=2e-10,
    mode=mode_no_scipy,
    inplace=True,
    skip=skip_scipy,
)
# `iv` has no `grad` entry here: its gradient is checked separately in
# `test_verify_iv_grad`, where the first input (the order) is held fixed.
TestIvBroadcast = makeBroadcastTester(
    op=tensor.iv,
    expected=expected_iv,
    good=_good_broadcast_binary_bessel,
    eps=2e-10,
    mode=mode_no_scipy,
    skip=skip_scipy,
)
TestIvInplaceBroadcast = makeBroadcastTester(
    op=inplace.iv_inplace,
    expected=expected_iv,
    good=_good_broadcast_binary_bessel,
    eps=2e-10,
    mode=mode_no_scipy,
    inplace=True,
    skip=skip_scipy,
)
def test_verify_iv_grad():
    """Numerically verify the gradient of `iv` w.r.t. its second input.

    Done outside `makeBroadcastTester` because the gradient w.r.t. the
    first input (the order) is not defined, so the order is frozen to a
    constant and only the argument is perturbed.
    """
    order, arg = _grad_broadcast_binary_bessel["normal"]

    def iv_fixed_order(x):
        return tensor.iv(order, x)

    utt.verify_grad(iv_fixed_order, [arg])
...@@ -22,8 +22,9 @@ import theano ...@@ -22,8 +22,9 @@ import theano
import theano.tensor as tt import theano.tensor as tt
import theano.tensor.blas_scipy import theano.tensor.blas_scipy
from tests import unittest_tools from tests import unittest_tools
from tests.tensor.test_basic import as_tensor_variable, compile, inplace, inplace_func from tests.tensor.utils import inplace_func
from theano import In, config, shared from theano import In, config, shared
from theano.tensor import as_tensor_variable, inplace
from theano.tensor.blas import ( from theano.tensor.blas import (
Dot22, Dot22,
Dot22Scalar, Dot22Scalar,
...@@ -109,7 +110,7 @@ class TestGemm: ...@@ -109,7 +110,7 @@ class TestGemm:
f = inplace_func( f = inplace_func(
[tz, ta, tx, ty, tb], [tz, ta, tx, ty, tb],
gemm_inplace(tz, ta, tx, ty, tb), gemm_inplace(tz, ta, tx, ty, tb),
mode=compile.Mode(optimizer=None, linker=l), mode=theano.compile.Mode(optimizer=None, linker=l),
) )
f(z, a, x, y, b) f(z, a, x, y, b)
z_after = self._gemm(z_orig, a, x, y, b) z_after = self._gemm(z_orig, a, x, y, b)
...@@ -274,12 +275,12 @@ class TestGemm: ...@@ -274,12 +275,12 @@ class TestGemm:
tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)] tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)]
# f = inplace_func([tz,ta,tx,ty,tb], gemm_inplace(tz,ta,tx,ty,tb), # f = inplace_func([tz,ta,tx,ty,tb], gemm_inplace(tz,ta,tx,ty,tb),
# mode = compile.Mode(optimizer = None, linker=l)) # mode = theano.compile.Mode(optimizer = None, linker=l))
# f(z, a, x, y, b) # f(z, a, x, y, b)
f = inplace_func( f = inplace_func(
[], [],
gemm_inplace(tz, ta, tx, ty, tb), gemm_inplace(tz, ta, tx, ty, tb),
mode=compile.Mode(optimizer=None, linker=l), mode=theano.compile.Mode(optimizer=None, linker=l),
) )
f() f()
unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True)) unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True))
...@@ -336,7 +337,7 @@ class TestGemm: ...@@ -336,7 +337,7 @@ class TestGemm:
f_i = inplace_func( f_i = inplace_func(
[], [],
gemm_inplace(tz[:, :, i], ta, tx[:, :, i], ty[:, :, i], tb), gemm_inplace(tz[:, :, i], ta, tx[:, :, i], ty[:, :, i], tb),
mode=compile.Mode(optimizer=None, linker=l), mode=theano.compile.Mode(optimizer=None, linker=l),
) )
for j in range(3): for j in range(3):
# tz will not _always_ be overwritten, # tz will not _always_ be overwritten,
...@@ -355,7 +356,7 @@ class TestGemm: ...@@ -355,7 +356,7 @@ class TestGemm:
[], [],
tz_i, tz_i,
updates=[(tz, tt.set_subtensor(tz[:, :, i], tz_i))], updates=[(tz, tt.set_subtensor(tz[:, :, i], tz_i))],
mode=compile.Mode(optimizer=None, linker=l), mode=theano.compile.Mode(optimizer=None, linker=l),
) )
for j in range(3): for j in range(3):
g_i() g_i()
...@@ -612,7 +613,7 @@ def just_gemm(i, o, ishapes=None, max_graphlen=0, expected_nb_gemm=1): ...@@ -612,7 +613,7 @@ def just_gemm(i, o, ishapes=None, max_graphlen=0, expected_nb_gemm=1):
g = inplace_func( g = inplace_func(
i, i,
o, o,
mode=compile.Mode(linker="py", optimizer=None), mode=theano.compile.Mode(linker="py", optimizer=None),
allow_input_downcast=True, allow_input_downcast=True,
on_unused_input="ignore", on_unused_input="ignore",
) )
...@@ -701,7 +702,7 @@ def test_gemm_opt_double_gemm(): ...@@ -701,7 +702,7 @@ def test_gemm_opt_double_gemm():
g = inplace_func( g = inplace_func(
i, i,
o, o,
mode=compile.Mode(linker="py", optimizer=None), mode=theano.compile.Mode(linker="py", optimizer=None),
on_unused_input="ignore", on_unused_input="ignore",
) )
......
...@@ -10,7 +10,7 @@ import theano ...@@ -10,7 +10,7 @@ import theano
import theano.scalar as scal import theano.scalar as scal
import theano.tensor as tt import theano.tensor as tt
from tests import unittest_tools as utt from tests import unittest_tools as utt
from tests.tensor.test_basic import inplace_func, rand, randint_ranged from tests.tensor.utils import inplace_func, rand, randint_ranged
from theano import change_flags, config from theano import change_flags, config
from theano.compile import DeepCopyOp from theano.compile import DeepCopyOp
from theano.gof.op import get_test_value from theano.gof.op import get_test_value
......
import os
from copy import copy
from itertools import combinations
from tempfile import mkstemp
import numpy as np
import pytest
import theano
from tests import unittest_tools as utt
from theano import change_flags, config, function, gof, shared, tensor
from theano.compile.mode import get_default_mode
from theano.tensor.type import TensorType
# Used to exclude random numbers too close to certain values
_eps = 1e-2

# Relative tolerance for division-gradient checks; only loosened in float32.
div_grad_rtol = None
if config.floatX == "float32":
    # We raise the relative tolerance for the grad as there can be errors in
    # float32.
    # This is probably caused by our way of computing the gradient error.
    div_grad_rtol = 0.025

# Use a seeded random number generator so that unittests are deterministic
utt.seed_rng()
test_rng = np.random.RandomState(seed=utt.fetch_seed())
# In order to check random values close to the boundaries when designing
# new tests, you can use utt.MockRandomState, for instance:
# test_rng = MockRandomState(0)
# test_rng = MockRandomState(0.99999982)
# test_rng = MockRandomState(1)

# If you update this tuple, don't forget to update the REAL_DTYPES and
# COMPLEX_DTYPES slices below, which index into it!
ALL_DTYPES = (
    "int8",
    "int16",
    "int32",
    "int64",
    "float32",
    "float64",
    "uint8",
    "uint16",
    "complex64",
    "complex128",
)
# Integer and float dtypes (the first six entries).
REAL_DTYPES = ALL_DTYPES[:6]
# Complex dtypes (the last two entries).
COMPLEX_DTYPES = ALL_DTYPES[-2:]
def multi_dtype_checks(shape1, shape2, dtypes=ALL_DTYPES, nameprefix=""):
    """Yield named test cases pairing random values of every two dtypes.

    For each unordered pair of dtypes, two cases are produced (one per
    argument order), each shaped as ``(name, (value_a, value_b))``.
    """
    for dt_a, dt_b in combinations(dtypes, 2):
        val_a = rand_of_dtype(shape1, dt_a)
        val_b = rand_of_dtype(shape2, dt_b)
        yield ("%s_%s_%s" % (nameprefix, dt_a, dt_b), (val_a, val_b))
        yield ("%s_%s_%s" % (nameprefix, dt_b, dt_a), (val_b, val_a))
def multi_dtype_cast_checks(shape, dtypes=ALL_DTYPES, nameprefix=""):
    """Yield named cast cases: a random value plus a target dtype.

    For each unordered pair of dtypes, two cases are produced, each shaped
    as ``(name, (value, target_dtype))``.
    """
    for dt_a, dt_b in combinations(dtypes, 2):
        val_a = rand_of_dtype(shape, dt_a)
        val_b = rand_of_dtype(shape, dt_b)
        yield ("%s_%s_%s" % (nameprefix, dt_a, dt_b), (val_a, dt_b))
        yield ("%s_%s_%s" % (nameprefix, dt_b, dt_a), (val_b, dt_a))
def inplace_func(
    inputs,
    outputs,
    mode=None,
    allow_input_downcast=False,
    on_unused_input="raise",
    name=None,
):
    """Compile a Theano function with ``accept_inplace=True``.

    Thin wrapper around ``theano.function`` that falls back to the current
    default mode when `mode` is None.
    """
    return function(
        inputs,
        outputs,
        mode=get_default_mode() if mode is None else mode,
        allow_input_downcast=allow_input_downcast,
        accept_inplace=True,
        on_unused_input=on_unused_input,
        name=name,
    )
def eval_outputs(outputs, ops=(), mode=None):
    """Compile `outputs` with no inputs, run once, and return the result.

    When `ops` is non-empty, assert that at least one node of the compiled
    graph uses one of those op types.  A one-element list/tuple result is
    unwrapped to its single value.
    """
    fn = inplace_func([], outputs, mode=mode)
    results = fn()
    if ops:
        graph_nodes = fn.maker.fgraph.apply_nodes
        assert any(isinstance(node.op, ops) for node in graph_nodes)
    if isinstance(results, (tuple, list)) and len(results) == 1:
        return results[0]
    return results
def get_numeric_subclasses(cls=np.number, ignore=None):
    """Return subclasses of `cls` in the numpy scalar hierarchy.

    Only subclasses that correspond to unique data types are returned;
    `ignore` accumulates the dtype `num` codes already seen (the same list
    object is shared by the recursive calls).  The hierarchy can be seen
    here: http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html
    """
    if ignore is None:
        ignore = []
    found = []
    this_dtype = np.dtype(cls)
    if this_dtype.num not in ignore:
        # Safety check: we should be able to represent 0 with this data type.
        np.array(0, dtype=this_dtype)
        found.append(cls)
        ignore.append(this_dtype.num)
    for subclass in cls.__subclasses__():
        found.extend(get_numeric_subclasses(subclass, ignore=ignore))
    return found
def get_numeric_types(
    with_int=True, with_float=True, with_complex=False, only_theano_types=True
):
    """Return numpy numeric data types.

    :param with_int: Whether to include integer types.
    :param with_float: Whether to include floating point types.
    :param with_complex: Whether to include complex types.
    :param only_theano_types: If True, then numpy numeric data types that
        are not supported by Theano are ignored (i.e. those that are not
        declared in scalar/basic.py).

    :returns: A list of unique data type objects.  Note that multiple data
        types may share the same string representation, but can be
        differentiated through their `num` attribute.

    Note that when `only_theano_types` is True we could simply return the
    list of types defined in the `scalar` module.  However with this
    function we can test more unique dtype objects, and in the future we
    may use it to automatically detect new data types introduced in numpy.
    """
    if only_theano_types:
        theano_types = [d.dtype for d in theano.scalar.all_types]

    def hierarchy_contains(candidate, ancestor):
        # True if scalars defined from `candidate` are within the hierarchy
        # starting from `ancestor`.  The instance check below catches e.g.
        # the fact that ``dtype=numpy.number`` yields a float64 scalar even
        # though `numpy.number` is not under `numpy.floating` in the class
        # hierarchy.
        return (
            candidate is ancestor
            or issubclass(candidate, ancestor)
            or isinstance(np.array([0], dtype=candidate)[0], ancestor)
        )

    entries = []
    for scalar_cls in get_numeric_subclasses():
        dtype = np.dtype(scalar_cls)
        # Guard clauses replicate the original exclusion order.
        if not with_complex and hierarchy_contains(scalar_cls, np.complexfloating):
            continue
        if not with_int and hierarchy_contains(scalar_cls, np.integer):
            continue
        if not with_float and hierarchy_contains(scalar_cls, np.floating):
            continue
        if only_theano_types and dtype not in theano_types:
            continue
        entries.append([str(dtype), dtype, dtype.num])
    # Sort for determinism, then keep only the dtype objects.
    return [entry[1] for entry in sorted(entries, key=str)]
def _numpy_checker(x, y):
# Checks if x.data and y.data have the same contents.
# Used in DualLinker to compare C version with Python version.
x, y = x[0], y[0]
if x.dtype != y.dtype or x.shape != y.shape or np.any(np.abs(x - y) > 1e-10):
raise Exception("Output mismatch.", {"performlinker": x, "clinker": y})
def safe_make_node(op, *inputs):
    """Return the apply node produced by applying `op` to `inputs`.

    Emulates the behaviour of ``make_node`` when `op` is a callable
    building variables rather than an Op instance: the call may return a
    single variable or a list of variables, and the owner of the first
    one is returned.
    """
    result = op(*inputs)
    first = result[0] if isinstance(result, list) else result
    return first.owner
def upcast_float16_ufunc(fn):
    """Decorate a NumPy ufunc so computation is never done in float16.

    Some ufuncs in NumPy will compute float values on int8 and uint8
    in half-precision (float16), which is not enough, and not compatible
    with the C code.

    :param fn: numpy ufunc
    :returns: function similar to fn.__call__, computing the same
        value with a minimum floating-point precision of float32
    """

    def ret(*args, **kwargs):
        # `np.find_common_type(dtypes, [np.float16])` was removed in
        # NumPy 2.0; promoting the input dtypes together with float16 via
        # `np.result_type` yields the same dtype for the cases used here
        # (it is float16 exactly when every input is bool/int8/uint8 or
        # already float16).
        out_dtype = np.result_type(np.float16, *[a.dtype for a in args])
        if out_dtype == "float16":
            # Force the computation to float32 instead.
            sig = "f" * fn.nin + "->" + "f" * fn.nout
            kwargs.update(sig=sig)
        return fn(*args, **kwargs)

    return ret
def upcast_int8_nfunc(fn):
    """Decorator that upcasts int8/uint8 inputs to float32 before `fn`.

    This keeps floating-point computation out of half-precision (float16),
    as some NumPy functions use it for 8-bit inputs.

    :param fn: function computing a floating-point value from inputs
    :returns: function similar to fn, but upcasting its uint8 and int8
        inputs before carrying out the computation.
    """

    def ret(*args, **kwargs):
        upcast_args = [
            a.astype("float32")
            if getattr(a, "dtype", None) in ("int8", "uint8")
            else a
            for a in args
        ]
        return fn(*upcast_args, **kwargs)

    return ret
def rand(*shape):
    """Uniform floatX samples over [-1, 1)."""
    draws = test_rng.rand(*shape) * 2 - 1
    return np.asarray(draws, dtype=config.floatX)
def rand_nonzero(shape, eps=3e-4):
    """Like `rand`, but every sample has absolute value at least `eps`."""
    # uniform over [0, 1)
    samples = np.asarray(test_rng.rand(*shape), dtype=config.floatX)
    # squeeze out the middle: [0, (1 - eps) / 2) U [(1 + eps) / 2, 1)
    samples = samples * (1 - eps) + eps * (samples >= 0.5)
    # rescale to [-1, -eps) U [eps, 1)
    return samples * 2 - 1
def randint(*shape):
    """Random integers over the inclusive range [-5, 5]."""
    return test_rng.randint(-5, 6, shape)
def randuint32(*shape):
    """Random uint32 values in [0, 5)."""
    draws = test_rng.randint(5, size=shape)
    return np.array(draws, dtype=np.uint32)
def randuint16(*shape):
    """Random uint16 values in [0, 5)."""
    draws = test_rng.randint(5, size=shape)
    return np.array(draws, dtype=np.uint16)
# XXX: despite the name, the imaginary parts of these "complex" samples
# are all zero.
def randcomplex(*shape):
    """Random complex128 values over [-1, 1) with zero imaginary parts."""
    real = np.asarray(test_rng.rand(*shape), dtype=config.floatX)
    return np.complex128(2 * real - 1)
def randcomplex_nonzero(shape, eps=1e-4):
    """Like `randcomplex`, but magnitudes are bounded away from zero."""
    return np.complex128(rand_nonzero(shape, eps))
def randint_nonzero(*shape):
    """Random integers in [-5, 5) with any zero replaced by 5."""
    draws = test_rng.randint(-5, 5, shape)
    # map 0 -> 5 so that no entry is zero
    return draws + (draws == 0) * 5
def rand_ranged(min, max, shape):
    """Uniform floatX samples over [min, max)."""
    span = test_rng.rand(*shape) * (max - min) + min
    return np.asarray(span, dtype=config.floatX)
def randint_ranged(min, max, shape):
    """Random integers over the inclusive range [min, max]."""
    return test_rng.randint(min, max + 1, shape)
def randc128_ranged(min, max, shape):
    """Uniform complex128 samples over [min, max) (zero imaginary parts)."""
    span = test_rng.rand(*shape) * (max - min) + min
    return np.asarray(span, dtype="complex128")
def rand_of_dtype(shape, dtype):
    """Return a random array of the given shape and dtype.

    Discrete dtypes draw from `randint`, float dtypes from `rand`, and
    complex dtypes from `randcomplex`.

    :raises TypeError: if `dtype` is not one of Theano's discrete, float
        or complex dtypes.
    """
    if dtype in tensor.discrete_dtypes:
        return randint(*shape).astype(dtype)
    elif dtype in tensor.float_dtypes:
        return rand(*shape).astype(dtype)
    elif dtype in tensor.complex_dtypes:
        return randcomplex(*shape).astype(dtype)
    else:
        # Name the offending dtype instead of raising a bare TypeError().
        raise TypeError("Unsupported dtype: %s" % (dtype,))
def check_floatX(inputs, rval):
    """Cast `rval` to float32 where Theano itself would have.

    :param inputs: Inputs to a function that returned `rval` with these
        inputs.
    :param rval: Value returned by a function with inputs set to `inputs`.
    :returns: Either `rval` unchanged, or `rval` cast to float32.  The idea
        is that when a numpy function would have returned a float64, Theano
        may prefer to return a float32 instead when `config.cast_policy` is
        set to 'numpy+floatX' and config.floatX to 'float32', and there was
        no float64 input.
    """
    must_downcast = (
        isinstance(rval, np.ndarray)
        and rval.dtype == "float64"
        and config.cast_policy == "numpy+floatX"
        and config.floatX == "float32"
        and all(x.dtype != "float64" for x in inputs)
    )
    return rval.astype("float32") if must_downcast else rval
def _numpy_true_div(x, y):
    """True-divide `x` by `y`, casting int/int results to floatX.

    Defined so it can be used as ``TrueDivTester.expected``: simply calling
    np.true_divide could cause a dtype mismatch.
    """
    quotient = np.true_divide(x, y)
    if x.dtype in tensor.discrete_dtypes and y.dtype in tensor.discrete_dtypes:
        # int / int yields floatX, matching Theano's behaviour
        quotient = theano._asarray(quotient, dtype=config.floatX)
    return quotient
def copymod(dct, without=None, **kwargs):
    """Shallow-copy `dct`, dropping the keys in `without`, adding `kwargs`."""
    out = copy(dct)
    for key in without or []:
        if key in out:
            del out[key]
    out.update(kwargs)
    return out
def makeTester(
    name,
    op,
    expected,
    checks=None,
    good=None,
    bad_build=None,
    bad_runtime=None,
    grad=None,
    mode=None,
    grad_rtol=None,
    eps=1e-10,
    skip=False,
    test_memmap=True,
    check_name=False,
    grad_eps=None,
):
    """Build and return a pytest test class exercising `op`.

    :param name: name assigned to the generated class.
    :param op: the op (or callable building its output variables) under
        test.
    :param expected: callable computing the expected outputs from the
        inputs, or a dict mapping test names to precomputed outputs.
    :param checks: dict mapping a description to a predicate called as
        ``check(inputs, outputs)`` after each good run.
    :param good: dict of inputs that must evaluate successfully.
    :param bad_build: dict of inputs for which node construction must fail.
    :param bad_runtime: dict of inputs for which execution must fail.
    :param grad: dict of inputs for gradient verification; ``True`` reuses
        `good`.
    :param mode: compilation mode passed to `inplace_func`.
    :param grad_rtol: forwarded to ``utt.verify_grad`` as ``rel_tol``.
    :param eps: base tolerance when comparing outputs to expected values.
    :param skip: when true, every generated test is skipped.
    :param test_memmap: if True, also run one `good` case through a
        memmapped copy of its data.
    :param check_name: verify the class is reachable under its own name in
        its module.  Use only for testers that aren't in Theano.
    :param grad_eps: forwarded to ``utt.verify_grad`` as ``eps``.
    """
    if checks is None:
        checks = {}
    if good is None:
        good = {}
    if bad_build is None:
        bad_build = {}
    if bad_runtime is None:
        bad_runtime = {}
    if grad is None:
        grad = {}
    if grad is True:
        grad = good
    # Stash the parameters under private names so the class body below can
    # bind them as class attributes without shadowing issues.
    _op, _expected, _checks, _good = op, expected, checks, good
    _bad_build, _bad_runtime, _grad = bad_build, bad_runtime, grad
    _mode, _grad_rtol, _eps, skip_ = mode, grad_rtol, eps, skip
    _test_memmap = test_memmap
    _check_name = check_name
    _grad_eps = grad_eps

    class Checker:
        op = staticmethod(_op)
        expected = staticmethod(_expected)
        checks = _checks
        check_name = _check_name
        good = _good
        bad_build = _bad_build
        bad_runtime = _bad_runtime
        grad = _grad
        mode = _mode
        skip = skip_
        test_memmap = _test_memmap

        def setup_method(self):
            # Verify that the test's name is correctly set.
            # Some tests reuse it outside this module.
            if self.check_name:
                eval(self.__class__.__module__ + "." + self.__class__.__name__)
            # We keep a list of temporary files created in add_memmap_values,
            # to remove them at the end of the test.
            self.tmp_files = []

        def add_memmap_values(self, val_dict):
            # If test_memmap is True, we create a temporary file
            # containing a copy of the data passed in the "val_dict" dict,
            # then open it as a memmapped array, and we can use the result as a
            # new test value.
            if not self.test_memmap:
                return val_dict
            # Copy dict before modifying them
            val_dict = val_dict.copy()
            # Note that we sort items in the dictionary to ensure tests are
            # deterministic (since the loop below will break on the first valid
            # item that can be memmapped).
            for k, v in sorted(val_dict.items()):
                new_k = "_".join((k, "memmap"))
                if new_k in val_dict:
                    # A corresponding key was already provided
                    break
                new_v = []
                for inp in v:
                    if type(inp) is np.ndarray and inp.size > 0:
                        f, fname = mkstemp()
                        self.tmp_files.append((f, fname))
                        new_inp = np.memmap(
                            fname, dtype=inp.dtype, mode="w+", shape=inp.shape
                        )
                        new_inp[...] = inp[...]
                        new_v.append(new_inp)
                    else:
                        new_v.append(inp)
                val_dict[new_k] = new_v
                # We only need one value, no need to copy all of them
                break
            return val_dict

        def teardown_method(self):
            # This is to avoid a problem with deleting memmap files on windows.
            import gc

            gc.collect()
            for f, fname in self.tmp_files:
                os.close(f)
                os.remove(fname)

        @pytest.mark.skipif(skip, reason="Skipped")
        def test_good(self):
            # Run the op on every `good` input and compare to `expected`.
            good = self.add_memmap_values(self.good)
            for testname, inputs in good.items():
                inputs = [copy(input) for input in inputs]
                inputrs = [
                    TensorType(
                        dtype=input.dtype,
                        broadcastable=[shape_elem == 1 for shape_elem in input.shape],
                    )()
                    for input in inputs
                ]
                try:
                    node = safe_make_node(self.op, *inputrs)
                except Exception as exc:
                    err_msg = (
                        "Test %s::%s: Error occurred while"
                        " making a node with inputs %s"
                    ) % (self.op, testname, inputs)
                    exc.args += (err_msg,)
                    raise
                try:
                    f = inplace_func(inputrs, node.outputs, mode=mode, name="test_good")
                except Exception as exc:
                    err_msg = (
                        "Test %s::%s: Error occurred while" " trying to make a Function"
                    ) % (self.op, testname)
                    exc.args += (err_msg,)
                    raise
                if isinstance(self.expected, dict) and testname in self.expected:
                    expecteds = self.expected[testname]
                    # with numpy version, when we print a number and read it
                    # back, we don't get exactly the same result, so we accept
                    # rounding error in that case.
                    eps = 5e-9
                else:
                    expecteds = self.expected(*inputs)
                    eps = 1e-10
                # Low-precision inputs get a looser tolerance.
                if any(
                    [i.dtype in ("float32", "int8", "uint8", "uint16") for i in inputs]
                ):
                    eps = 1e-6
                eps = np.max([eps, _eps])
                try:
                    variables = f(*inputs)
                except Exception as exc:
                    err_msg = (
                        "Test %s::%s: Error occurred while calling"
                        " the Function on the inputs %s"
                    ) % (self.op, testname, inputs)
                    exc.args += (err_msg,)
                    raise
                if not isinstance(expecteds, (list, tuple)):
                    expecteds = (expecteds,)
                for i, (variable, expected) in enumerate(zip(variables, expecteds)):
                    condition = (
                        variable.dtype != expected.dtype
                        or variable.shape != expected.shape
                        or not np.allclose(variable, expected, atol=eps, rtol=eps)
                    )
                    assert not condition, (
                        "Test %s::%s: Output %s gave the wrong"
                        " value. With inputs %s, expected %s (dtype %s),"
                        " got %s (dtype %s). eps=%f"
                        " np.allclose returns %s %s"
                    ) % (
                        self.op,
                        testname,
                        i,
                        inputs,
                        expected,
                        expected.dtype,
                        variable,
                        variable.dtype,
                        eps,
                        np.allclose(variable, expected, atol=eps, rtol=eps),
                        np.allclose(variable, expected),
                    )
                # Run the user-supplied predicates, if any.
                for description, check in self.checks.items():
                    assert check(inputs, variables), (
                        "Test %s::%s: Failed check: %s (inputs"
                        " were %s, outputs were %s)"
                    ) % (self.op, testname, description, inputs, variables)

        @pytest.mark.skipif(skip, reason="Skipped")
        def test_bad_build(self):
            # Node construction must fail on every `bad_build` input.
            for testname, inputs in self.bad_build.items():
                inputs = [copy(input) for input in inputs]
                inputrs = [shared(input) for input in inputs]
                with pytest.raises(Exception):
                    safe_make_node(self.op, *inputrs)
                # The old error string was ("Test %s::%s: %s was successfully
                # instantiated on the following bad inputs: %s"
                # % (self.op, testname, node, inputs))

        @change_flags(compute_test_value="off")
        @pytest.mark.skipif(skip, reason="Skipped")
        def test_bad_runtime(self):
            # Execution must fail on every `bad_runtime` input.
            for testname, inputs in self.bad_runtime.items():
                inputrs = [shared(input) for input in inputs]
                try:
                    node = safe_make_node(self.op, *inputrs)
                except Exception as exc:
                    err_msg = (
                        "Test %s::%s: Error occurred while trying"
                        " to make a node with inputs %s"
                    ) % (self.op, testname, inputs)
                    exc.args += (err_msg,)
                    raise
                try:
                    f = inplace_func(
                        [], node.outputs, mode=mode, name="test_bad_runtime"
                    )
                except Exception as exc:
                    err_msg = (
                        "Test %s::%s: Error occurred while trying" " to make a Function"
                    ) % (self.op, testname)
                    exc.args += (err_msg,)
                    raise
                # Add tester return a ValueError. Should we catch only this
                # one?
                # TODO: test that only this one is raised and catch only this
                # one or the subset that get raised.
                with pytest.raises(Exception):
                    f([])

        @pytest.mark.skipif(skip, reason="Skipped")
        def test_grad(self):
            # Numerically verify the gradient on every `grad` input.
            # Disable old warning that may be triggered by this test.
            backup = config.warn.sum_div_dimshuffle_bug
            config.warn.sum_div_dimshuffle_bug = False
            try:
                for testname, inputs in self.grad.items():
                    inputs = [copy(input) for input in inputs]
                    try:
                        utt.verify_grad(
                            self.op,
                            inputs,
                            mode=self.mode,
                            rel_tol=_grad_rtol,
                            eps=_grad_eps,
                        )
                    except Exception as exc:
                        err_msg = (
                            "Test %s::%s: Error occurred while"
                            " computing the gradient on the following"
                            " inputs: %s"
                        ) % (self.op, testname, inputs)
                        exc.args += (err_msg,)
                        raise
            finally:
                config.warn.sum_div_dimshuffle_bug = backup

        @pytest.mark.skipif(skip, reason="Skipped")
        def test_grad_none(self):
            # Check that None is never returned as input gradient
            # when calling self.op.grad
            # We use all values in self.good because this has to be true
            # whether or not the values work for utt.verify_grad.
            if not hasattr(self.op, "grad"):
                # This is not actually an Op
                return
            for testname, inputs in self.good.items():
                inputs = [copy(input) for input in inputs]
                inputrs = [
                    TensorType(
                        dtype=input.dtype,
                        broadcastable=[shape_elem == 1 for shape_elem in input.shape],
                    )()
                    for input in inputs
                ]
                if isinstance(self.expected, dict) and testname in self.expected:
                    expecteds = self.expected[testname]
                    # with numpy version, when we print a number and read it
                    # back, we don't get exactly the same result, so we accept
                    # rounding error in that case.
                else:
                    expecteds = self.expected(*inputs)
                if not isinstance(expecteds, (list, tuple)):
                    expecteds = (expecteds,)
                # Build symbolic output gradients matching each expected
                # output's shape; discrete outputs use floatX gradients.
                out_grad_vars = []
                for out in expecteds:
                    if str(out.dtype) in tensor.discrete_dtypes:
                        dtype = config.floatX
                    else:
                        dtype = str(out.dtype)
                    bcast = [shape_elem == 1 for shape_elem in out.shape]
                    var = TensorType(dtype=dtype, broadcastable=bcast)()
                    out_grad_vars.append(var)
                try:
                    in_grad_vars = self.op.grad(inputrs, out_grad_vars)
                except (gof.utils.MethodNotDefined, NotImplementedError):
                    pass
                else:
                    assert None not in in_grad_vars

    Checker.__name__ = name
    if hasattr(Checker, "__qualname__"):
        Checker.__qualname__ = name
    return Checker
def makeBroadcastTester(op, expected, checks=None, name=None, **kwargs):
    """Build a `makeTester` class for an elementwise/broadcast op.

    Normalizes the generated class name (so the test can be identified and
    rerun by name) and, when ``inplace=True`` is passed, adds an
    `inplace_check` and wraps `expected` so its output dtype matches the
    first input's dtype.
    """
    if checks is None:
        checks = {}
    if name is None:
        name = str(op)
    # Make the test name match the name of the variable defined in this
    # script; needed to properly identify the test, e.g. with the
    # --with-id option of nosetests, or to rerun a specific failed test.
    needs_camel_case = False
    if name.startswith("Elemwise{") and name.endswith(",no_inplace}"):
        # For instance: Elemwise{add,no_inplace} -> Add
        name = name[len("Elemwise{") : -len(",no_inplace}")]
        needs_camel_case = True
    elif name.endswith("_inplace"):
        # For instance: sub_inplace -> SubInplace
        needs_camel_case = True
    if needs_camel_case:
        name = "".join(part.capitalize() for part in name.split("_"))
    # Some tests specify a name that already ends with 'Tester', while in
    # other cases we need to add it manually.
    if not name.endswith("Tester"):
        name = name + "Tester"
    if kwargs.pop("inplace", False):
        orig_expected = expected
        if not isinstance(orig_expected, dict):

            def expected(*inputs):
                return np.array(orig_expected(*inputs), dtype=inputs[0].dtype)

        def inplace_check(inputs, outputs):
            # This used to be ``inputs[0] is outputs[0]``; the equality
            # version is easier to satisfy for the DebugMode.
            return np.all(inputs[0] == outputs[0])

        checks = dict(checks, inplace_check=inplace_check)
    return makeTester(name, op, expected, checks, **kwargs)
# Corner cases for rounding.  There are many rounding algorithms, and C's
# round() function and numpy's round do not agree!
corner_case = np.asarray(
    [-2.5, -2.0, -1.5, -1.0, -0.5, -0.51, -0.49, 0, 0.49, 0.5, 0.9, 1, 1.5, 2, 2.5],
    dtype=config.floatX,
)
# Same values without 0, as the grad is not always computable numerically
# at 0.
corner_case_grad = np.asarray(
    [-2.5, -2.0, -1.5, -1.0, -0.5, -0.51, -0.49, 0.49, 0.5, 0.9, 1, 1.5, 2, 2.5],
    dtype=config.floatX,
)
_good_broadcast_unary_normal = dict(
    normal=[np.asarray(rand_ranged(-5, 5, (2, 3)), dtype=config.floatX)],
    integers=[randint_ranged(-5, 5, (2, 3))],
    # not using -128 because np.allclose would return False
    int8=[np.arange(-127, 128, dtype="int8")],
    uint8=[np.arange(0, 255, dtype="uint8")],
    uint16=[np.arange(0, 65535, dtype="uint16")],
    corner_case=[corner_case],
    complex=[randcomplex(2, 3)],
    empty=[np.asarray([], dtype=config.floatX)],
)
_grad_broadcast_unary_normal = dict(
    normal=[np.asarray(rand_ranged(-5, 5, (2, 3)), dtype=config.floatX)],
    corner_case=[corner_case_grad],
    # empty = [np.asarray([])] # XXX: should this be included?
)
_good_broadcast_unary_normal_float = dict(
    normal=[rand_ranged(-5, 5, (2, 3))],
    corner_case=[corner_case],
    complex=[randcomplex(2, 3)],
    empty=[np.asarray([], dtype=config.floatX)],
)
_good_broadcast_unary_normal_float_no_complex = copymod(
    _good_broadcast_unary_normal_float, without=["complex"]
)
_good_broadcast_unary_normal_float_no_complex_small_neg_range = dict(
    normal=[rand_ranged(-2, 5, (2, 3))],
    corner_case=[corner_case],
    empty=[np.asarray([], dtype=config.floatX)],
)
_grad_broadcast_unary_normal_small_neg_range = dict(
    normal=[np.asarray(rand_ranged(-2, 5, (2, 3)), dtype=config.floatX)],
    corner_case=[corner_case_grad],
)
# Values strictly inside (-1, 1), offset by _eps from the boundaries.
_grad_broadcast_unary_abs1_no_complex = dict(
    normal=[np.asarray(rand_ranged(-1 + _eps, 1 - _eps, (2, 3)), dtype=config.floatX)],
)
_grad_broadcast_unary_0_2_no_complex = dict(
    # Don't go too close to 0 or 2 for tests in float32
    normal=[np.asarray(rand_ranged(_eps, 1 - _eps, (2, 3)), dtype=config.floatX)],
)
# chi2sf takes two inputs, a value (x) and a degrees of freedom (k).
# not sure how to deal with that here...
_good_broadcast_unary_chi2sf = dict(
    normal=(rand_ranged(1, 10, (2, 3)), np.asarray(1, dtype=config.floatX)),
    empty=(np.asarray([], dtype=config.floatX), np.asarray(1, dtype=config.floatX)),
    integers=(randint_ranged(1, 10, (2, 3)), np.asarray(1, dtype=config.floatX)),
    uint8=(
        randint_ranged(1, 10, (2, 3)).astype("uint8"),
        np.asarray(1, dtype=config.floatX),
    ),
    uint16=(
        randint_ranged(1, 10, (2, 3)).astype("uint16"),
        np.asarray(1, dtype=config.floatX),
    ),
)
_good_broadcast_unary_normal_no_complex = dict(
    normal=[np.asarray(rand_ranged(-5, 5, (2, 3)), dtype=config.floatX)],
    integers=[randint_ranged(-5, 5, (2, 3))],
    int8=[np.arange(-127, 128, dtype="int8")],
    uint8=[np.arange(0, 89, dtype="uint8")],
    uint16=[np.arange(0, 89, dtype="uint16")],
    corner_case=[corner_case],
    empty=[np.asarray([], dtype=config.floatX)],
    big_scalar=[np.arange(17.0, 29.0, 0.5, dtype=config.floatX)],
)
...@@ -3,19 +3,12 @@ import numpy as np ...@@ -3,19 +3,12 @@ import numpy as np
import theano import theano
import theano.tensor as tt import theano.tensor as tt
import theano.typed_list import theano.typed_list
from tests.tensor.utils import rand_ranged
from theano import In from theano import In
from theano.typed_list.basic import Append, Extend, Insert, Remove, Reverse from theano.typed_list.basic import Append, Extend, Insert, Remove, Reverse
from theano.typed_list.type import TypedListType from theano.typed_list.type import TypedListType
# took from tensors/tests/test_basic.py
def rand_ranged_matrix(minimum, maximum, shape):
return np.asarray(
np.random.rand(*shape) * (maximum - minimum) + minimum,
dtype=theano.config.floatX,
)
class TestInplace: class TestInplace:
def test_reverse_inplace(self): def test_reverse_inplace(self):
mySymbolicMatricesList = TypedListType( mySymbolicMatricesList = TypedListType(
...@@ -32,9 +25,9 @@ class TestInplace: ...@@ -32,9 +25,9 @@ class TestInplace:
) )
assert f.maker.fgraph.toposort()[0].op.inplace assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101]) x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101]) y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x, y]), [y, x]) assert np.array_equal(f([x, y]), [y, x])
...@@ -56,9 +49,9 @@ class TestInplace: ...@@ -56,9 +49,9 @@ class TestInplace:
) )
assert f.maker.fgraph.toposort()[0].op.inplace assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101]) x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101]) y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], y), [x, y]) assert np.array_equal(f([x], y), [x, y])
...@@ -83,9 +76,9 @@ class TestInplace: ...@@ -83,9 +76,9 @@ class TestInplace:
) )
assert f.maker.fgraph.toposort()[0].op.inplace assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101]) x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101]) y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], [y]), [x, y]) assert np.array_equal(f([x], [y]), [x, y])
...@@ -111,9 +104,9 @@ class TestInplace: ...@@ -111,9 +104,9 @@ class TestInplace:
) )
assert f.maker.fgraph.toposort()[0].op.inplace assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101]) x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101]) y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], np.asarray(1, dtype="int64"), y), [x, y]) assert np.array_equal(f([x], np.asarray(1, dtype="int64"), y), [x, y])
...@@ -135,9 +128,9 @@ class TestInplace: ...@@ -135,9 +128,9 @@ class TestInplace:
) )
assert f.maker.fgraph.toposort()[0].op.inplace assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101]) x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101]) y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x, y], y), [x]) assert np.array_equal(f([x, y], y), [x])
......
...@@ -3,19 +3,12 @@ import pytest ...@@ -3,19 +3,12 @@ import pytest
import theano import theano
import theano.tensor as tt import theano.tensor as tt
import theano.typed_list
from tests import unittest_tools as utt from tests import unittest_tools as utt
from tests.tensor.utils import rand_ranged
from theano.typed_list.basic import TypedListVariable
from theano.typed_list.type import TypedListType from theano.typed_list.type import TypedListType
# Taken from tensors/tests/test_basic.py
def rand_ranged_matrix(minimum, maximum, shape):
return np.asarray(
np.random.rand(*shape) * (maximum - minimum) + minimum,
dtype=theano.config.floatX,
)
class TestTypedListType: class TestTypedListType:
def setup_method(self): def setup_method(self):
utt.seed_rng() utt.seed_rng()
...@@ -70,7 +63,7 @@ class TestTypedListType: ...@@ -70,7 +63,7 @@ class TestTypedListType:
myType = TypedListType(tt.TensorType(theano.config.floatX, (False, False))) myType = TypedListType(tt.TensorType(theano.config.floatX, (False, False)))
x = rand_ranged_matrix(-1000, 1000, [100, 100]) x = rand_ranged(-1000, 1000, [100, 100])
assert np.array_equal(myType.filter([x]), [x]) assert np.array_equal(myType.filter([x]), [x])
...@@ -88,7 +81,7 @@ class TestTypedListType: ...@@ -88,7 +81,7 @@ class TestTypedListType:
def test_load_alot(self): def test_load_alot(self):
myType = TypedListType(tt.TensorType(theano.config.floatX, (False, False))) myType = TypedListType(tt.TensorType(theano.config.floatX, (False, False)))
x = rand_ranged_matrix(-1000, 1000, [10, 10]) x = rand_ranged(-1000, 1000, [10, 10])
testList = [] testList = []
for i in range(10000): for i in range(10000):
testList.append(x) testList.append(x)
...@@ -104,7 +97,7 @@ class TestTypedListType: ...@@ -104,7 +97,7 @@ class TestTypedListType:
myType = TypedListType(myNestedType) myType = TypedListType(myNestedType)
x = rand_ranged_matrix(-1000, 1000, [100, 100]) x = rand_ranged(-1000, 1000, [100, 100])
assert np.array_equal(myType.filter([[x]]), [[x]]) assert np.array_equal(myType.filter([[x]]), [[x]])
...@@ -160,4 +153,4 @@ class TestTypedListType: ...@@ -160,4 +153,4 @@ class TestTypedListType:
tt.TensorType(theano.config.floatX, (False, False)) tt.TensorType(theano.config.floatX, (False, False))
)() )()
assert isinstance(mySymbolicVariable, theano.typed_list.TypedListVariable) assert isinstance(mySymbolicVariable, TypedListVariable)
...@@ -1947,8 +1947,13 @@ class GradientError(Exception): ...@@ -1947,8 +1947,13 @@ class GradientError(Exception):
self.rel_tol = rel_tol self.rel_tol = rel_tol
def __str__(self): def __str__(self):
# args may have been inserted by e.g. makeTester if hasattr(self, "args"):
args_msg = ", ".join(str(a) for a in self.args) # `self.args` may have been inserted by
# `tests.tensor.utils.makeTester`
args_msg = ", ".join(str(a) for a in self.args)
else:
args_msg = ""
return """\ return """\
GradientError: numeric gradient and analytic gradient exceed tolerance: GradientError: numeric gradient and analytic gradient exceed tolerance:
At position %i of argument %i with shape %s, At position %i of argument %i with shape %s,
......
# Definitions of theano.scalar ops that have their python implementation taken """
# from SciPy. As SciPy is not always available, we treat them separately. `Op`s that have their python implementations taken from SciPy.
As SciPy is not always available, we treat them separately.
"""
import os import os
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论