Unverified commit 7f1537c7, authored by Brandon T. Willard, committed by GitHub

Merge pull request #101 from brandonwillard/refactor-tests

Refactor tests.tensor.test_basic
......@@ -70,8 +70,8 @@ jobs:
float32: [0]
part:
- "tests --ignore=tests/tensor --ignore=tests/sparse --ignore=tests/tensor/nnet"
- "tests/tensor tests/sparse --ignore=tests/tensor/test_basic.py --ignore=tests/tensor/test_elemwise.py --ignore=tests/tensor/test_opt.py --ignore=tests/tensor/nnet"
- "tests/tensor/test_basic.py"
- "tests/tensor tests/sparse --ignore=tests/tensor/test_basic.py --ignore=tests/tensor/test_basic_scipy.py --ignore=tests/tensor/test_inplace.py --ignore=tests/tensor/test_elemwise.py --ignore=tests/tensor/test_opt.py --ignore=tests/tensor/nnet"
- "tests/tensor/test_basic.py tests/tensor/test_basic_scipy.py tests/tensor/test_inplace.py"
- "tests/tensor/test_elemwise.py tests/tensor/test_opt.py"
- "tests/tensor/nnet --ignore-glob='*/test_abstract_conv.py'"
- "tests/tensor/nnet/test_abstract_conv.py"
......
......@@ -40,3 +40,4 @@ Theano.suo
.ropeproject
core
.idea
/htmlcov/
......@@ -425,7 +425,7 @@ verify that the op generates the proper output, that the gradient is
valid, that the Op fails in known/expected ways. Because so much of
this is common, two helper functions exist to make your lives easier:
``makeTester`` and ``makeBroadcastTester`` (defined in module
``tests.tensor.test_basic``).
``tests.tensor.utils``).
Here is an example of ``makeTester`` generating testcases for the Dot
product op:
......@@ -435,7 +435,7 @@ product op:
from numpy import dot
from numpy.random import rand
from tests.tensor.test_basic import makeTester
from tests.tensor.utils import makeTester
TestDot = makeTester(name = 'DotTester',
op = dot,
......
......@@ -5,6 +5,7 @@ max-line-length = 88
per-file-ignores =
**/__init__.py:F401,E402,F403
theano/sparse/sandbox/sp2.py:F401
tests/tensor/test_basic_scipy.py:E402
tests/sparse/test_basic.py:E402
tests/sparse/test_opt.py:E402
tests/sparse/test_sp2.py:E402
......
import pytest
pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray
import numpy as np
import pytest
import theano
import theano.tensor as tt
# Don't import test classes otherwise they get tested as part of the file
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor.test_basic import (
......@@ -14,9 +12,8 @@ from tests.tensor.test_basic import (
TestComparison,
TestJoinAndSplit,
TestReshape,
rand,
safe_make_node,
)
from tests.tensor.utils import rand, safe_make_node
from theano.gpuarray.basic_ops import (
GpuAlloc,
GpuAllocEmpty,
......@@ -35,13 +32,14 @@ from theano.gpuarray.basic_ops import (
)
from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise
from theano.gpuarray.subtensor import GpuSubtensor
# Don't import test classes otherwise they get tested as part of the file
from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor
from theano.tensor import TensorType
from theano.tensor.basic import alloc
pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray
utt.seed_rng()
rng = np.random.RandomState(seed=utt.fetch_seed())
......
......@@ -397,7 +397,7 @@ class TestMagma:
fn = theano.function([A], gpu_matrix_inverse(A), mode=mode_with_gpu)
N = 1000
test_rng = np.random.RandomState(seed=1)
# Copied from tests.tensor.test_basic.rand.
# Copied from tests.tensor.utils.rand.
A_val = test_rng.rand(N, N).astype("float32") * 2 - 1
A_val_inv = fn(A_val)
utt.assert_allclose(np.eye(N), np.dot(A_val_inv, A_val), atol=1e-2)
......
......@@ -4,10 +4,10 @@ import pytest
import theano
import theano.gpuarray
import theano.tensor.slinalg as slinalg
from tests import test_ifelse
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor import test_basic
from tests.tensor.test_basic import TestSpecifyShape
from tests.test_ifelse import TestIfelse
from theano import tensor
from theano.breakpoint import PdbBreakpoint
from theano.gof.opt import check_stack_trace
......@@ -312,12 +312,12 @@ def test_rebroadcast():
assert _check_stack_trace(f)
class TestSpecifyShape(test_basic.TestSpecifyShape):
class TestSpecifyShape(TestSpecifyShape):
mode = mode_with_gpu
input_type = GpuArrayType
class TestGpuIfelse(test_ifelse.TestIfelse):
class TestGpuIfelse(TestIfelse):
mode = mode_with_gpu
@staticmethod
......
......@@ -69,58 +69,21 @@ from theano.scalar.basic import (
)
def inputs():
    """Build the trio of float scalar variables ("x", "y", "z") shared by these tests."""
    names = "xyz"
    return floats(names)
class TestScalarOps:
def test_straightforward(self):
x, y, z = inputs()
e = mul(add(x, y), div_proxy(x, y))
g = FunctionGraph([x, y], [e])
fn = gof.DualLinker().accept(g).make_function()
assert fn(1.0, 2.0) == 1.5
# This test is moved to tests.tensor.test_basic.py:test_mod
# We move it their as under ubuntu the c_extract call of theano.scalar
# call PyInt_check and it fail under some os. If work in other case.
# As we use theano.scalar normally, but we use theano.tensor.scalar
# that is not important. Also this make the theano fct fail at call time
# so this is not a silent bug.
# --> This is why it is purposely named 'tes_mod' instead of 'test_mod'.
def tes_mod(self):
# We add this test as not all language and C implementation give the same
# sign to the result. This check that the c_code of `Mod` is implemented
# as Python. That is what we want.
x, y = ints("xy")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x % y])).make_function()
for a, b in (
(0, 1),
(1, 1),
(0, -1),
(1, -1),
(-1, -1),
(1, 2),
(-1, 2),
(1, -2),
(-1, -2),
(5, 3),
(-5, 3),
(5, -3),
(-5, -3),
):
assert fn(a, b) == a % b, (a,)
def has_f16(comp):
    """Return True when any variable in ``comp``'s function graph is typed float16."""
    return any(var.type == float16 for var in comp.fgraph.variables)
def test_mul_add_div_proxy():
    """(x + y) * (x / y) evaluated through the DualLinker equals 1.5 for inputs (1, 2)."""
    a, b, c = floats("xyz")
    expression = mul(add(a, b), div_proxy(a, b))
    graph = FunctionGraph([a, b], [expression])
    compute = gof.DualLinker().accept(graph).make_function()
    assert compute(1.0, 2.0) == 1.5
class TestComposite:
def test_composite_clone_float32(self):
def has_f16(comp):
if any(v.type == float16 for v in comp.fgraph.variables):
return True
return False
w = int8()
x = float16()
y = float32()
......@@ -153,7 +116,7 @@ class TestComposite:
assert not has_f16(nc)
def test_straightforward(self):
x, y, z = inputs()
x, y, z = floats("xyz")
e = mul(add(x, y), div_proxy(x, y))
C = Composite([x, y], [e])
c = C.make_node(x, y)
......@@ -164,7 +127,7 @@ class TestComposite:
def test_flatten(self):
# Test that we flatten multiple Composite.
x, y, z = inputs()
x, y, z = floats("xyz")
C = Composite([x, y], [x + y])
CC = Composite([x, y], [C(x * y, y)])
assert not isinstance(CC.outputs[0].owner.op, Composite)
......@@ -175,7 +138,7 @@ class TestComposite:
assert isinstance(CC.outputs[0].owner.op, Composite)
def test_with_constants(self):
x, y, z = inputs()
x, y, z = floats("xyz")
e = mul(add(70.0, y), div_proxy(x, y))
C = Composite([x, y], [e])
c = C.make_node(x, y)
......@@ -186,7 +149,7 @@ class TestComposite:
assert fn(1.0, 2.0) == 36.0
def test_many_outputs(self):
x, y, z = inputs()
x, y, z = floats("xyz")
e0 = x + y + z
e1 = x + y * z
e2 = x / y
......@@ -243,37 +206,37 @@ class TestComposite:
class TestLogical:
def test_gt(self):
x, y, z = inputs()
x, y, z = floats("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x > y])).make_function()
for a, b in ((3.0, 9), (3, 0.9), (3, 3)):
assert fn(a, b) == (a > b)
def test_lt(self):
x, y, z = inputs()
x, y, z = floats("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x < y])).make_function()
for a, b in ((3.0, 9), (3, 0.9), (3, 3)):
assert fn(a, b) == (a < b)
def test_le(self):
x, y, z = inputs()
x, y, z = floats("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x <= y])).make_function()
for a, b in ((3.0, 9), (3, 0.9), (3, 3)):
assert fn(a, b) == (a <= b)
def test_ge(self):
x, y, z = inputs()
x, y, z = floats("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x >= y])).make_function()
for a, b in ((3.0, 9), (3, 0.9), (3, 3)):
assert fn(a, b) == (a >= b)
def test_eq(self):
x, y, z = inputs()
x, y, z = floats("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [eq(x, y)])).make_function()
for a, b in ((3.0, 9), (3, 0.9), (3, 3)):
assert fn(a, b) == (a == b)
def test_neq(self):
x, y, z = inputs()
x, y, z = floats("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [neq(x, y)])).make_function()
for a, b in ((3.0, 9), (3, 0.9), (3, 3)):
assert fn(a, b) == (a != b)
......@@ -424,36 +387,33 @@ class TestUpgradeToFloat:
self._test_binary(binary_op, x_range, y_range)
class TestComplexMod:
def test_mod_complex_fail():
# Make sure % fails on complex numbers.
def test_fail(self):
x = complex64()
y = int32()
with pytest.raises(ComplexError):
x % y
class TestDiv:
    def test_0(self):
        """`//` on two integer scalars builds an IntDiv node; `/` builds a TrueDiv
        node whenever at least one operand is a float or complex scalar."""
        i8 = int8()
        i32 = int32()
        c64 = complex64()
        f64 = float64()
        f32 = float32()

        # Integer // integer stays an integer division, regardless of operand order.
        for int_div_expr in (i8 // i32, i32 // i8):
            assert isinstance(int_div_expr.owner.op, IntDiv)

        # Any float/complex operand promotes `/` to true division.
        for true_div_expr in (
            i32 / f64,
            i32 / f32,
            f32 / i8,
            f64 / i32,
            f64 / f32,
            f32 / c64,
            i8 / c64,
        ):
            assert isinstance(true_div_expr.owner.op, TrueDiv)
def TestGradGt():
x = complex64()
y = int32()
with pytest.raises(ComplexError):
x % y
def test_div_types():
    """`//` on two integer scalars builds an IntDiv node; `/` builds a TrueDiv
    node whenever at least one operand is a float or complex scalar."""
    i8 = int8()
    i32 = int32()
    c64 = complex64()
    f64 = float64()
    f32 = float32()

    # Integer // integer stays an integer division, regardless of operand order.
    for int_div_expr in (i8 // i32, i32 // i8):
        assert isinstance(int_div_expr.owner.op, IntDiv)

    # Any float/complex operand promotes `/` to true division.
    for true_div_expr in (
        i32 / f64,
        i32 / f32,
        f32 / i8,
        f64 / i32,
        f64 / f32,
        f32 / c64,
        i8 / c64,
    ):
        assert isinstance(true_div_expr.owner.op, TrueDiv)
def test_grad_gt():
x = float32(name="x")
y = float32(name="y")
z = x > y
......@@ -461,7 +421,7 @@ def TestGradGt():
assert g.eval({y: 1.0}) == 0.0
def TestGradSwitch():
def test_grad_switch():
# This is a code snippet from the mailing list
# It caused an assert to be raised due to the
......@@ -477,7 +437,7 @@ def TestGradSwitch():
theano.gradient.grad(l, x)
def TestGradIdentity():
def test_grad_identity():
# Check that the grad method of Identity correctly handles int dytpes
x = theano.tensor.imatrix("x")
# tensor_copy is Elemwise{Identity}
......@@ -486,7 +446,7 @@ def TestGradIdentity():
theano.gradient.grad(l, x)
def TestGradInrange():
def test_grad_inrange():
for bound_definition in [(True, True), (False, False)]:
# Instantiate op, and then take the gradient
op = InRange(*bound_definition)
......@@ -512,7 +472,7 @@ def TestGradInrange():
utt.assert_allclose(f(7, 1, 5), [0, 0, 0])
def TestGradAbs():
def test_grad_abs():
a = theano.tensor.fscalar("a")
b = theano.tensor.nnet.relu(a)
c = theano.grad(b, a)
......@@ -523,11 +483,7 @@ def TestGradAbs():
assert ret == 0.5, ret
# Testing of Composite is done in tensor/tests/test_opt.py
# in test_fusion, TestCompositeCodegen
def TestConstant():
def test_constant():
c = constant(2, name="a")
assert c.name == "a"
assert c.dtype == "int8"
......
......@@ -4,7 +4,7 @@ import pytest
import theano
import theano.tensor as tt
from tests import unittest_tools as utt
from tests.tensor.test_basic import (
from tests.tensor.utils import (
_good_broadcast_unary_normal_float_no_complex,
check_floatX,
makeBroadcastTester,
......
......@@ -3,7 +3,7 @@ import numpy as np
import theano
import theano.tensor as tt
from tests import unittest_tools as utt
from tests.tensor.test_basic import (
from tests.tensor.utils import (
_good_broadcast_unary_normal_no_complex,
check_floatX,
copymod,
......
Diff collapsed.
Diff collapsed.
......@@ -22,8 +22,9 @@ import theano
import theano.tensor as tt
import theano.tensor.blas_scipy
from tests import unittest_tools
from tests.tensor.test_basic import as_tensor_variable, compile, inplace, inplace_func
from tests.tensor.utils import inplace_func
from theano import In, config, shared
from theano.tensor import as_tensor_variable, inplace
from theano.tensor.blas import (
Dot22,
Dot22Scalar,
......@@ -109,7 +110,7 @@ class TestGemm:
f = inplace_func(
[tz, ta, tx, ty, tb],
gemm_inplace(tz, ta, tx, ty, tb),
mode=compile.Mode(optimizer=None, linker=l),
mode=theano.compile.Mode(optimizer=None, linker=l),
)
f(z, a, x, y, b)
z_after = self._gemm(z_orig, a, x, y, b)
......@@ -274,12 +275,12 @@ class TestGemm:
tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)]
# f = inplace_func([tz,ta,tx,ty,tb], gemm_inplace(tz,ta,tx,ty,tb),
# mode = compile.Mode(optimizer = None, linker=l))
# mode = theano.compile.Mode(optimizer = None, linker=l))
# f(z, a, x, y, b)
f = inplace_func(
[],
gemm_inplace(tz, ta, tx, ty, tb),
mode=compile.Mode(optimizer=None, linker=l),
mode=theano.compile.Mode(optimizer=None, linker=l),
)
f()
unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True))
......@@ -336,7 +337,7 @@ class TestGemm:
f_i = inplace_func(
[],
gemm_inplace(tz[:, :, i], ta, tx[:, :, i], ty[:, :, i], tb),
mode=compile.Mode(optimizer=None, linker=l),
mode=theano.compile.Mode(optimizer=None, linker=l),
)
for j in range(3):
# tz will not _always_ be overwritten,
......@@ -355,7 +356,7 @@ class TestGemm:
[],
tz_i,
updates=[(tz, tt.set_subtensor(tz[:, :, i], tz_i))],
mode=compile.Mode(optimizer=None, linker=l),
mode=theano.compile.Mode(optimizer=None, linker=l),
)
for j in range(3):
g_i()
......@@ -612,7 +613,7 @@ def just_gemm(i, o, ishapes=None, max_graphlen=0, expected_nb_gemm=1):
g = inplace_func(
i,
o,
mode=compile.Mode(linker="py", optimizer=None),
mode=theano.compile.Mode(linker="py", optimizer=None),
allow_input_downcast=True,
on_unused_input="ignore",
)
......@@ -701,7 +702,7 @@ def test_gemm_opt_double_gemm():
g = inplace_func(
i,
o,
mode=compile.Mode(linker="py", optimizer=None),
mode=theano.compile.Mode(linker="py", optimizer=None),
on_unused_input="ignore",
)
......
Diff collapsed.
......@@ -10,7 +10,7 @@ import theano
import theano.scalar as scal
import theano.tensor as tt
from tests import unittest_tools as utt
from tests.tensor.test_basic import inplace_func, rand, randint_ranged
from tests.tensor.utils import inplace_func, rand, randint_ranged
from theano import change_flags, config
from theano.compile import DeepCopyOp
from theano.gof.op import get_test_value
......
Diff collapsed.
......@@ -3,19 +3,12 @@ import numpy as np
import theano
import theano.tensor as tt
import theano.typed_list
from tests.tensor.utils import rand_ranged
from theano import In
from theano.typed_list.basic import Append, Extend, Insert, Remove, Reverse
from theano.typed_list.type import TypedListType
# took from tensors/tests/test_basic.py
def rand_ranged_matrix(minimum, maximum, shape):
    """Draw a uniform random array in [minimum, maximum) of the given shape,
    cast to Theano's configured float dtype."""
    span = maximum - minimum
    values = np.random.rand(*shape) * span + minimum
    return np.asarray(values, dtype=theano.config.floatX)
class TestInplace:
def test_reverse_inplace(self):
mySymbolicMatricesList = TypedListType(
......@@ -32,9 +25,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101])
x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x, y]), [y, x])
......@@ -56,9 +49,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101])
x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], y), [x, y])
......@@ -83,9 +76,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101])
x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], [y]), [x, y])
......@@ -111,9 +104,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101])
x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x], np.asarray(1, dtype="int64"), y), [x, y])
......@@ -135,9 +128,9 @@ class TestInplace:
)
assert f.maker.fgraph.toposort()[0].op.inplace
x = rand_ranged_matrix(-1000, 1000, [100, 101])
x = rand_ranged(-1000, 1000, [100, 101])
y = rand_ranged_matrix(-1000, 1000, [100, 101])
y = rand_ranged(-1000, 1000, [100, 101])
assert np.array_equal(f([x, y], y), [x])
......
Diff collapsed.
Markdown format
0%
You added 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment