提交 3767f231 authored 作者: Brandon T. Willard's avatar Brandon T. Willard

Move test_basic utilities to utils.py and SciPy tests to test_basic_scipy

上级 d3d56a6c
......@@ -70,8 +70,8 @@ jobs:
float32: [0]
part:
- "tests --ignore=tests/tensor --ignore=tests/sparse --ignore=tests/tensor/nnet"
- "tests/tensor tests/sparse --ignore=tests/tensor/test_basic.py --ignore=tests/tensor/test_elemwise.py --ignore=tests/tensor/test_opt.py --ignore=tests/tensor/nnet"
- "tests/tensor/test_basic.py"
- "tests/tensor tests/sparse --ignore=tests/tensor/test_basic.py --ignore=tests/tensor/test_basic_scipy.py --ignore=tests/tensor/test_elemwise.py --ignore=tests/tensor/test_opt.py --ignore=tests/tensor/nnet"
- "tests/tensor/test_basic.py tests/tensor/test_basic_scipy.py"
- "tests/tensor/test_elemwise.py tests/tensor/test_opt.py"
- "tests/tensor/nnet --ignore-glob='*/test_abstract_conv.py'"
- "tests/tensor/nnet/test_abstract_conv.py"
......
......@@ -40,3 +40,4 @@ Theano.suo
.ropeproject
core
.idea
/htmlcov/
......@@ -425,7 +425,7 @@ verify that the op generates the proper output, that the gradient is
valid, that the Op fails in known/expected ways. Because so much of
this is common, two helper functions exist to make your lives easier:
``makeTester`` and ``makeBroadcastTester`` (defined in module
``tests.tensor.test_basic``).
``tests.tensor.utils``).
Here is an example of ``makeTester`` generating testcases for the Dot
product op:
......@@ -435,7 +435,7 @@ product op:
from numpy import dot
from numpy.random import rand
from tests.tensor.test_basic import makeTester
from tests.tensor.utils import makeTester
TestDot = makeTester(name = 'DotTester',
op = dot,
......
......@@ -5,6 +5,7 @@ max-line-length = 88
per-file-ignores =
**/__init__.py:F401,E402,F403
theano/sparse/sandbox/sp2.py:F401
tests/tensor/test_basic_scipy.py:E402
tests/sparse/test_basic.py:E402
tests/sparse/test_opt.py:E402
tests/sparse/test_sp2.py:E402
......
import pytest
pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray
import numpy as np
import pytest
import theano
import theano.tensor as tt
# Don't import test classes otherwise they get tested as part of the file
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor.test_basic import (
......@@ -14,9 +12,8 @@ from tests.tensor.test_basic import (
TestComparison,
TestJoinAndSplit,
TestReshape,
rand,
safe_make_node,
)
from tests.tensor.utils import rand, safe_make_node
from theano.gpuarray.basic_ops import (
GpuAlloc,
GpuAllocEmpty,
......@@ -35,13 +32,14 @@ from theano.gpuarray.basic_ops import (
)
from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise
from theano.gpuarray.subtensor import GpuSubtensor
# Don't import test classes otherwise they get tested as part of the file
from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor
from theano.tensor import TensorType
from theano.tensor.basic import alloc
pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray
utt.seed_rng()
rng = np.random.RandomState(seed=utt.fetch_seed())
......
......@@ -397,7 +397,7 @@ class TestMagma:
fn = theano.function([A], gpu_matrix_inverse(A), mode=mode_with_gpu)
N = 1000
test_rng = np.random.RandomState(seed=1)
# Copied from tests.tensor.test_basic.rand.
# Copied from tests.tensor.utils.rand.
A_val = test_rng.rand(N, N).astype("float32") * 2 - 1
A_val_inv = fn(A_val)
utt.assert_allclose(np.eye(N), np.dot(A_val_inv, A_val), atol=1e-2)
......
......@@ -4,10 +4,10 @@ import pytest
import theano
import theano.gpuarray
import theano.tensor.slinalg as slinalg
from tests import test_ifelse
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor import test_basic
from tests.tensor.test_basic import TestSpecifyShape
from tests.test_ifelse import TestIfelse
from theano import tensor
from theano.breakpoint import PdbBreakpoint
from theano.gof.opt import check_stack_trace
......@@ -312,12 +312,12 @@ def test_rebroadcast():
assert _check_stack_trace(f)
class TestSpecifyShape(test_basic.TestSpecifyShape):
class TestSpecifyShape(TestSpecifyShape):
mode = mode_with_gpu
input_type = GpuArrayType
class TestGpuIfelse(test_ifelse.TestIfelse):
class TestGpuIfelse(TestIfelse):
mode = mode_with_gpu
@staticmethod
......
......@@ -4,7 +4,7 @@ import pytest
import theano
import theano.tensor as tt
from tests import unittest_tools as utt
from tests.tensor.test_basic import (
from tests.tensor.utils import (
_good_broadcast_unary_normal_float_no_complex,
check_floatX,
makeBroadcastTester,
......
......@@ -3,7 +3,7 @@ import numpy as np
import theano
import theano.tensor as tt
from tests import unittest_tools as utt
from tests.tensor.test_basic import (
from tests.tensor.utils import (
_good_broadcast_unary_normal_no_complex,
check_floatX,
copymod,
......
差异被折叠。
差异被折叠。
......@@ -22,8 +22,9 @@ import theano
import theano.tensor as tt
import theano.tensor.blas_scipy
from tests import unittest_tools
from tests.tensor.test_basic import as_tensor_variable, compile, inplace, inplace_func
from tests.tensor.utils import inplace_func
from theano import In, config, shared
from theano.tensor import as_tensor_variable, inplace
from theano.tensor.blas import (
Dot22,
Dot22Scalar,
......@@ -109,7 +110,7 @@ class TestGemm:
f = inplace_func(
[tz, ta, tx, ty, tb],
gemm_inplace(tz, ta, tx, ty, tb),
mode=compile.Mode(optimizer=None, linker=l),
mode=theano.compile.Mode(optimizer=None, linker=l),
)
f(z, a, x, y, b)
z_after = self._gemm(z_orig, a, x, y, b)
......@@ -274,12 +275,12 @@ class TestGemm:
tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)]
# f = inplace_func([tz,ta,tx,ty,tb], gemm_inplace(tz,ta,tx,ty,tb),
# mode = compile.Mode(optimizer = None, linker=l))
# mode = theano.compile.Mode(optimizer = None, linker=l))
# f(z, a, x, y, b)
f = inplace_func(
[],
gemm_inplace(tz, ta, tx, ty, tb),
mode=compile.Mode(optimizer=None, linker=l),
mode=theano.compile.Mode(optimizer=None, linker=l),
)
f()
unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True))
......@@ -336,7 +337,7 @@ class TestGemm:
f_i = inplace_func(
[],
gemm_inplace(tz[:, :, i], ta, tx[:, :, i], ty[:, :, i], tb),
mode=compile.Mode(optimizer=None, linker=l),
mode=theano.compile.Mode(optimizer=None, linker=l),
)
for j in range(3):
# tz will not _always_ be overwritten,
......@@ -355,7 +356,7 @@ class TestGemm:
[],
tz_i,
updates=[(tz, tt.set_subtensor(tz[:, :, i], tz_i))],
mode=compile.Mode(optimizer=None, linker=l),
mode=theano.compile.Mode(optimizer=None, linker=l),
)
for j in range(3):
g_i()
......@@ -612,7 +613,7 @@ def just_gemm(i, o, ishapes=None, max_graphlen=0, expected_nb_gemm=1):
g = inplace_func(
i,
o,
mode=compile.Mode(linker="py", optimizer=None),
mode=theano.compile.Mode(linker="py", optimizer=None),
allow_input_downcast=True,
on_unused_input="ignore",
)
......@@ -701,7 +702,7 @@ def test_gemm_opt_double_gemm():
g = inplace_func(
i,
o,
mode=compile.Mode(linker="py", optimizer=None),
mode=theano.compile.Mode(linker="py", optimizer=None),
on_unused_input="ignore",
)
......
......@@ -10,7 +10,7 @@ import theano
import theano.scalar as scal
import theano.tensor as tt
from tests import unittest_tools as utt
from tests.tensor.test_basic import inplace_func, rand, randint_ranged
from tests.tensor.utils import inplace_func, rand, randint_ranged
from theano import change_flags, config
from theano.compile import DeepCopyOp
from theano.gof.op import get_test_value
......
差异被折叠。
......@@ -3,19 +3,12 @@ import numpy as np
import theano
import theano.tensor as tt
import theano.typed_list
from tests.tensor.utils import rand_ranged
from theano import In
from theano.typed_list.basic import Append, Extend, Insert, Remove, Reverse
from theano.typed_list.type import TypedListType
# took from tensors/tests/test_basic.py
def rand_ranged_matrix(minimum, maximum, shape):
    """Return a uniform random array in [minimum, maximum) with Theano's floatX dtype."""
    span = maximum - minimum
    values = np.random.rand(*shape) * span + minimum
    return np.asarray(values, dtype=theano.config.floatX)
class TestInplace:
def test_reverse_inplace(self):
mySymbolicMatricesList = TypedListType(
......@@ -32,9 +25,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101])
x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x, y]), [y, x])
......@@ -56,9 +49,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101])
x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], y), [x, y])
......@@ -83,9 +76,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101])
x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], [y]), [x, y])
......@@ -111,9 +104,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101])
x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], np.asarray(1, dtype="int64"), y), [x, y])
......@@ -135,9 +128,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101])
x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x, y], y), [x])
......
......@@ -3,19 +3,12 @@ import pytest
import theano
import theano.tensor as tt
import theano.typed_list
from tests import unittest_tools as utt
from tests.tensor.utils import rand_ranged
from theano.typed_list.basic import TypedListVariable
from theano.typed_list.type import TypedListType
# Taken from tensors/tests/test_basic.py
def rand_ranged_matrix(minimum, maximum, shape):
    """Draw a matrix of uniform samples scaled into [minimum, maximum), cast to floatX."""
    raw = np.random.rand(*shape)
    scaled = raw * (maximum - minimum) + minimum
    return np.asarray(scaled, dtype=theano.config.floatX)
class TestTypedListType:
def setup_method(self):
utt.seed_rng()
......@@ -70,7 +63,7 @@ class TestTypedListType:
myType = TypedListType(tt.TensorType(theano.config.floatX, (False, False)))
x = rand_ranged_matrix(-1000, 1000, [100, 100])
x = rand_ranged(-1000, 1000, [100, 100])
assert np.array_equal(myType.filter([x]), [x])
......@@ -88,7 +81,7 @@ class TestTypedListType:
def test_load_alot(self):
myType = TypedListType(tt.TensorType(theano.config.floatX, (False, False)))
x = rand_ranged_matrix(-1000, 1000, [10, 10])
x = rand_ranged(-1000, 1000, [10, 10])
testList = []
for i in range(10000):
testList.append(x)
......@@ -104,7 +97,7 @@ class TestTypedListType:
myType = TypedListType(myNestedType)
x = rand_ranged_matrix(-1000, 1000, [100, 100])
x = rand_ranged(-1000, 1000, [100, 100])
assert np.array_equal(myType.filter([[x]]), [[x]])
......@@ -160,4 +153,4 @@ class TestTypedListType:
tt.TensorType(theano.config.floatX, (False, False))
)()
assert isinstance(mySymbolicVariable, theano.typed_list.TypedListVariable)
assert isinstance(mySymbolicVariable, TypedListVariable)
......@@ -1947,8 +1947,13 @@ class GradientError(Exception):
self.rel_tol = rel_tol
def __str__(self):
# args may have been inserted by e.g. makeTester
args_msg = ", ".join(str(a) for a in self.args)
if hasattr(self, "args"):
# `self.args` may have been inserted by
# `tests.tensor.utils.makeTester`
args_msg = ", ".join(str(a) for a in self.args)
else:
args_msg = ""
return """\
GradientError: numeric gradient and analytic gradient exceed tolerance:
At position %i of argument %i with shape %s,
......
# Definitions of theano.scalar ops that have their python implementation taken
# from SciPy. As SciPy is not always available, we treat them separately.
"""
`Op`s that have their python implementations taken from SciPy.
As SciPy is not always available, we treat them separately.
"""
import os
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论