Unverified 提交 7855cc09 authored 作者: Brandon T. Willard's avatar Brandon T. Willard 提交者: GitHub

Merge pull request #14 from ColCarroll/how-does-git-work

Move from unittest/nose to pytest
......@@ -23,12 +23,11 @@ env:
global:
- NUMPY_VERSION=1.18.1
jobs:
# - DOC=1 PART="theano/tests/test_flake8.py"
- PART="theano/compat theano/compile theano/d3viz theano/gof theano/misc theano/sandbox theano/scalar theano/scan_module theano/tests -e test_flake8.py theano/typed_list"
- PART="theano/sparse theano/tensor --exclude-test=theano.tensor.tests.test_basic --exclude-test=theano.tensor.tests.test_elemwise --exclude-test=theano.tensor.tests.test_opt --exclude-dir=theano/tensor/nnet"
- PART="theano/compat theano/compile theano/d3viz theano/gof theano/misc theano/sandbox theano/scalar theano/scan_module theano/tests theano/typed_list"
- PART="theano/sparse theano/tensor --ignore=theano.tensor.tests.test_basic.py --ignore=theano.tensor.tests.test_elemwise.py --ignore=theano.tensor.tests.test_opt.py --ignore=theano/tensor/nnet"
- PART="theano/tensor/tests/test_basic.py"
- PART="theano/tensor/tests/test_elemwise.py theano/tensor/tests/test_opt.py"
- PART="theano/tensor/nnet -e test_abstract_conv.py"
- PART="theano/tensor/nnet --ignore test_abstract_conv.py"
- PART="theano/tensor/nnet/tests/test_abstract_conv.py"
addons:
......@@ -46,14 +45,11 @@ install:
jobs:
include:
- &doctest
stage: doc
env: DOC=1 PART="theano/tests/test_flake8.py"
- &normaltest
stage: test
env: FAST_COMPILE=1 FLOAT32=1 PART="theano -e test_flake8.py --exclude-dir=theano/tensor/nnet --exclude-dir=theano/tensor/signal"
env: FAST_COMPILE=1 FLOAT32=1 PART="theano --ignore=theano/tensor/nnet --ignore=theano/tensor/signal"
- <<: *normaltest
env: FAST_COMPILE=1 PART="theano -e test_flake8.py --exclude-dir=theano/tensor/nnet --exclude-dir=theano/tensor/signal"
env: FAST_COMPILE=1 PART="theano --ignore=theano/tensor/nnet --ignore=theano/tensor/signal"
- <<: *normaltest
env: FAST_COMPILE=1 FLOAT32=1 PART="theano/tensor/nnet"
- <<: *normaltest
......@@ -84,7 +80,7 @@ script:
- python -c 'import theano; print(theano.config.__str__(print_doc=False))'
- python -c 'import theano; assert(theano.config.blas.ldflags != "")'
# Run tests for the given part
- theano-nose -v --with-timer --timer-top-n 10 $PART
- pytest -v $PART
after_failure:
- cat $HOME/.pip/pip.log
......@@ -7,13 +7,13 @@ if [[ $DOC == "1" ]]; then
# for old versions
conda create --yes -q -n pyenv python=3.6 numpy=1.13.1
conda activate pyenv
conda install --yes -q mkl numpy=1.13.1 scipy=0.19.1 nose=1.3.7 pip flake8=3.5 six=1.11.0 pep8=1.7.1 pyflakes=1.6.0 mkl-service graphviz
python -m pip install pydot-ng flake8-future-import parameterized nose-exclude nose-timer
conda install --yes -q mkl numpy=1.13.1 scipy=0.19.1 pip flake8=3.5 six=1.11.0 pep8=1.7.1 pyflakes=1.6.0 mkl-service graphviz pytest
python -m pip install pydot-ng flake8-future-import
else
conda create --yes -q -n pyenv python=$TRAVIS_PYTHON_VERSION
conda activate pyenv
conda install --yes -q mkl numpy scipy nose pip flake8 six pep8 pyflakes sphinx mkl-service graphviz # libgfortran
python -m pip install -q pydot-ng flake8-future-import parameterized sphinx_rtd_theme nose-exclude nose-timer
conda install --yes -q mkl numpy scipy pip flake8 six pep8 pyflakes sphinx mkl-service graphviz pytest # libgfortran
python -m pip install -q pydot-ng flake8-future-import sphinx_rtd_theme
fi
python -m pip install --no-deps --upgrade -e .
#!/usr/bin/env python
import theano_nose
theano_nose.main()
......@@ -26,7 +26,6 @@ requirements:
test:
requires:
- nose >=1.3.0
- nose-parameterized >=0.5.0
imports:
- theano
......
import pytest
def pytest_addoption(parser):
    """Register the ``--runslow`` command-line flag.

    When absent (the default), tests marked ``slow`` are skipped by
    ``pytest_collection_modifyitems``.
    """
    parser.addoption(
        "--runslow",
        action="store_true",
        default=False,
        help="run slow tests",
    )
def pytest_configure(config):
    """Declare the custom ``slow`` marker so pytest does not warn on it."""
    marker_spec = "slow: mark test as slow to run"
    config.addinivalue_line("markers", marker_spec)
def pytest_collection_modifyitems(config, items):
    """Attach a skip marker to every ``slow`` test unless --runslow was given."""
    if config.getoption("--runslow"):
        # User explicitly asked for the slow tests: leave items untouched.
        return
    skip_marker = pytest.mark.skip(reason="need --runslow option to run")
    for test_item in items:
        if "slow" in test_item.keywords:
            test_item.add_marker(skip_marker)
......@@ -95,8 +95,7 @@ class TestProdOp(utt.InferShapeTester):
rng = np.random.RandomState(43)
def setUp(self):
super(TestProdOp, self).setUp()
def setup_method(self):
self.op_class = ProdOp # case 1
def test_perform(self):
......@@ -127,8 +126,7 @@ class TestSumDiffOp(utt.InferShapeTester):
rng = np.random.RandomState(43)
def setUp(self):
super(TestSumDiffOp, self).setUp()
def setup_method(self):
self.op_class = SumDiffOp
def test_perform(self):
......
......@@ -32,114 +32,15 @@ module, for this we refer the reader to the `official documentation
<http://docs.python.org/library/unittest.html>`_. We will however
address certain specifics about how unit tests relate to Theano.
Unittest Primer
PyTest Primer
===============
A unittest is a subclass of ``unittest.TestCase``, with member
functions with names that start with the string ``test``. For
example:
.. testcode::
import unittest
class MyTestCase(unittest.TestCase):
def test0(self):
pass
# test passes cleanly
def test1(self):
self.assertTrue(2+2 == 5)
# raises an exception, causes test to fail
def test2(self):
assert 2+2 == 5
# causes error in test (basically a failure, but counted separately)
def test2(self):
assert 2+2 == 4
# this test has the same name as a previous one,
# so this is the one that runs.
We use pytest now! New tests should mostly be plain functions that use ``assert`` statements.
How to Run Unit Tests ?
-----------------------
Two options are available:
theano-nose
~~~~~~~~~~~
The easiest by far is to use ``theano-nose`` which is a command line
utility that recurses through a given directory, finds all unittests
matching a specific criteria and executes them. By default, it will
find & execute tests case in test*.py files whose method name starts
with 'test'.
``theano-nose`` is a wrapper around `nosetests
<http://nose.readthedocs.org/en/latest/>`_. You should be
able to execute it if you installed Theano using pip, or if you ran
"python setup.py develop" after the installation. If ``theano-nose`` is
not found by your shell, you will need to add ``Theano/bin`` to your
``PATH`` environment variable.
.. note::
In Theano versions <= 0.5, ``theano-nose`` was not included. If you
are working with such a version, you can call ``nosetests`` instead
of ``theano-nose`` in all the examples below.
Running all unit tests ::
cd Theano/
theano-nose
Running unit tests with standard out ::
theano-nose -s
Running unit tests contained in a specific .py file ::
theano-nose <filename>.py
Running a specific unit test ::
theano-nose <filename>.py:<classname>.<method_name>
Using unittest module
~~~~~~~~~~~~~~~~~~~~~
To launch tests cases from within python, you can also use the
functionality offered by the ``unittest`` module. The simplest thing
is to run all the tests in a file using ``unittest.main()``. Python's
built-in unittest module uses metaclasses to know about all the
``unittest.TestCase`` classes you have created. This call will run
them all, printing '.' for passed tests, and a stack trace for
exceptions. The standard footer code in theano's test files is:
.. testcode::
if __name__ == '__main__':
unittest.main()
You can also choose to run a subset of the full test suite.
To run all the tests in one or more ``TestCase`` subclasses:
.. code-block:: python
suite = unittest.TestLoader()
suite = suite.loadTestsFromTestCase(MyTestCase0)
suite = suite.loadTestsFromTestCase(MyTestCase1)
...
unittest.TextTestRunner(verbosity=2).run(suite)
To run just a single ``MyTestCase`` member test function called ``test0``:
.. testcode::
MyTestCase('test0').debug()
Mostly, just run ``pytest theano/`` to execute the test suite.
Folder Layout
-------------
......
......@@ -32,12 +32,6 @@ Install requirements and optional packages
* ``m2w64-toolchain`` package provides a fully-compatible version of GCC and is then highly recommended.
* ``git`` package installs git source control through conda, which is required for the development versions of Theano and libgpuarray
Package ``parameterized`` is also optional but may be required for unit testing. It is available via ``pip``.
.. code-block:: bash
pip install parameterized
.. _gpu_windows:
Install and configure the GPU drivers (recommended)
......
......@@ -87,12 +87,6 @@ Install requirements and optional packages
* Arguments between <...> are optional.
Package ``parameterized`` is also optional but may be required for unit testing. It is available via ``pip``.
.. code-block:: bash
pip install parameterized
Install and configure the GPU drivers (recommended)
---------------------------------------------------
......@@ -115,5 +109,5 @@ Install and configure the GPU drivers (recommended)
2. Fix 'lib' path
* Add the CUDA 'lib' subdirectory (and/or 'lib64' subdirectory if you have a
64-bit OS) to your ``$LD_LIBRARY_PATH`` environment
64-bit OS) to your ``$LD_LIBRARY_PATH`` environment
variable. Example: ``/usr/local/cuda/lib64``
......@@ -6,7 +6,6 @@ gnumpy
pydot
pydot2
Cython
parameterized
scipy==0.13
......@@ -98,7 +98,7 @@ def do_setup():
install_requires=['numpy>=1.9.1', 'scipy>=0.14', 'six>=1.9.0'],
# pygments is a dependency for Sphinx code highlight
extras_require={
'test': ['nose>=1.3.0', 'parameterized', 'flake8'],
'test': ['nose>=1.3.0', 'flake8'],
'doc': ['Sphinx>=0.5.1', 'pygments']
},
package_data={
......
from __future__ import absolute_import, print_function, division
from functools import partial
import numpy as np
import pytest
import theano
from theano import config, shared
......@@ -16,13 +17,10 @@ from theano.compile.builders import OpFromGraph
from theano.tests import unittest_tools
test_params = unittest_tools.parameterized.expand(
[(OpFromGraph,), (partial(OpFromGraph, inline=True),)])
class Test_OpFromGraph(unittest_tools.InferShapeTester):
class T_OpFromGraph(unittest_tools.InferShapeTester):
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_straightforward(self, cls_ofg):
x, y, z = T.matrices('xyz')
e = x + y * z
......@@ -40,7 +38,7 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
assert np.all(8.0 == fn(xv, yv, zv))
assert np.all(8.0 == fn(xv, yv, zv))
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_size_changes(self, cls_ofg):
x, y, z = T.matrices('xyz')
e = T.dot(x, y)
......@@ -57,7 +55,7 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
assert res.shape == (2, 5)
assert np.all(180.0 == res)
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_grad(self, cls_ofg):
x, y, z = T.matrices('xyz')
e = x + y * z
......@@ -70,7 +68,7 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
zv = np.ones((2, 2), dtype=config.floatX) * 5
assert np.all(11.0 == fn(xv, yv, zv))
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_grad_grad(self, cls_ofg):
x, y, z = T.matrices('xyz')
e = x + y * z
......@@ -84,7 +82,7 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
zv = np.ones((2, 2), dtype=config.floatX) * 5
assert np.allclose(6.0, fn(xv, yv, zv))
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_shared(self, cls_ofg):
x, y, z = T.matrices('xyz')
s = shared(np.random.rand(2, 2).astype(config.floatX))
......@@ -102,7 +100,7 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
assert np.allclose(8.0, fn(xv, yv, zv))
assert np.allclose(8.0, fn(xv, yv, zv))
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_shared_grad(self, cls_ofg):
x, y, z = T.matrices('xyz')
s = shared(np.random.rand(2, 2).astype(config.floatX))
......@@ -123,7 +121,7 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
assert np.allclose(15.0 + s.get_value(),
fn(xv, yv, zv))
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_grad_override(self, cls_ofg):
x, y = T.vectors('xy')
......@@ -192,7 +190,7 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
assert isinstance(dw2.type, NullType)
assert isinstance(db2.type, DisconnectedType)
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_lop_override(self, cls_ofg):
x = T.vector()
y = 1. / (1. + T.exp(-x))
......@@ -219,7 +217,7 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
y1val, y2val = fn(xval)
assert np.allclose(y1val, y2val)
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_rop(self, cls_ofg):
a = T.vector()
M = T.matrix()
......@@ -238,7 +236,7 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
dvval2 = fn(xval, Wval, duval)
assert np.allclose(dvval2, dvval)
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_rop_override(self, cls_ofg):
x, y = T.vectors('xy')
......@@ -266,7 +264,7 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
# TODO list override case
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_connection_pattern_override(self, cls_ofg):
x, y = T.vectors('xy')
......@@ -297,7 +295,7 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
y: np.ones((5,), dtype=np.float32)})
assert np.allclose(out, [1.] * 5)
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_nested(self, cls_ofg):
x, y = T.vectors('xy')
u, v = x + y, x - y
......@@ -314,7 +312,7 @@ class T_OpFromGraph(unittest_tools.InferShapeTester):
assert np.allclose(xv, xv2)
assert np.allclose(yv, yv2)
@test_params
@pytest.mark.parametrize("cls_ofg", [OpFromGraph, partial(OpFromGraph, inline=True)])
def test_connection_pattern(self, cls_ofg):
# Basic case
x, y, z = T.matrices('xyz')
......
from __future__ import absolute_import, print_function, division
import sys
import unittest
from nose.plugins.skip import SkipTest
import numpy as np
import pytest
from six import reraise
from theano import config
......@@ -205,7 +204,7 @@ def test_badthunkoutput():
# this should evaluate with no error
f_good([1.0, 2.0, 3.0], [2, 3, 4])
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
try:
f_inconsistent([1.0, 2.0, 3.0], [2, 3, 4])
......@@ -343,7 +342,7 @@ def test_stochasticoptimization():
def test_just_c_code():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
x = theano.tensor.dvector()
f = theano.function([x], wb2(x),
mode=debugmode.DebugMode(check_py_code=False))
......@@ -375,7 +374,7 @@ def test_baddestroymap():
def test_baddestroymap_c():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
x = theano.tensor.dvector()
f = theano.function([x], wb2i(x),
mode=debugmode.DebugMode(check_py_code=False))
......@@ -386,7 +385,7 @@ def test_baddestroymap_c():
pass
class Test_ViewMap(unittest.TestCase):
class Test_ViewMap():
class BadAddRef(gof.Op):
def make_node(self, a, b):
......@@ -443,7 +442,7 @@ class Test_ViewMap(unittest.TestCase):
def test_badviewmap_c(self):
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
x = theano.tensor.dvector()
f = theano.function([x], wb1i(x),
mode=debugmode.DebugMode(check_py_code=False))
......@@ -569,13 +568,13 @@ class Test_ViewMap(unittest.TestCase):
# f([1,2,3,4],[5,6,7,8])
class Test_check_isfinite(unittest.TestCase):
def setUp(self):
class Test_check_isfinite():
def setup_method(self):
self.old_ts = theano.tensor.TensorType.filter_checks_isfinite
self.old_dm = theano.compile.mode.predefined_modes[
'DEBUG_MODE'].check_isfinite
def tearDown(self):
def teardown_method(self):
theano.tensor.TensorType.filter_checks_isfinite = self.old_ts
theano.compile.mode.predefined_modes[
'DEBUG_MODE'].check_isfinite = self.old_dm
......@@ -592,17 +591,17 @@ class Test_check_isfinite(unittest.TestCase):
# ValueError
# if not, DebugMode will check internally, and raise InvalidValueError
# passing an invalid value as an input should trigger ValueError
self.assertRaises(debugmode.InvalidValueError, f,
np.log([3, -4, 5]).astype(config.floatX))
self.assertRaises(debugmode.InvalidValueError, f,
(np.asarray([0, 1.0, 0]) / 0).astype(config.floatX))
self.assertRaises(debugmode.InvalidValueError, f,
(np.asarray([1.0, 1.0, 1.0]) / 0).astype(config.floatX))
with pytest.raises(debugmode.InvalidValueError):
f(np.log([3, -4, 5]).astype(config.floatX))
with pytest.raises(debugmode.InvalidValueError):
f((np.asarray([0, 1.0, 0]) / 0).astype(config.floatX))
with pytest.raises(debugmode.InvalidValueError):
f((np.asarray([1.0, 1.0, 1.0]) / 0).astype(config.floatX))
# generating an invalid value internally should trigger
# InvalidValueError
self.assertRaises(debugmode.InvalidValueError, g,
np.asarray([3, -4, 5], dtype=config.floatX))
with pytest.raises(debugmode.InvalidValueError):
g(np.asarray([3, -4, 5], dtype=config.floatX))
# this should disable the exception
theano.tensor.TensorType.filter_checks_isfinite = False
......@@ -749,8 +748,8 @@ class VecAsRowAndCol(gof.Op):
c[0][i, 0] = v[i]
class Test_preallocated_output(unittest.TestCase):
def setUp(self):
class Test_preallocated_output():
def setup_method(self):
self.rng = np.random.RandomState(seed=utt.fetch_seed())
def test_f_contiguous(self):
......@@ -780,7 +779,8 @@ class Test_preallocated_output(unittest.TestCase):
f = theano.function([a, b], out, mode=mode)
if theano.config.cxx:
self.assertRaises(debugmode.BadThunkOutput, f, a_val, b_val)
with pytest.raises(debugmode.BadThunkOutput):
f(a_val, b_val)
else:
# The python code of this op is good.
f(a_val, b_val)
......@@ -812,7 +812,8 @@ class Test_preallocated_output(unittest.TestCase):
f = theano.function([a, b], out, mode=mode)
if theano.config.cxx:
self.assertRaises(debugmode.BadThunkOutput, f, a_val, b_val)
with pytest.raises(debugmode.BadThunkOutput):
f(a_val, b_val)
else:
# The python code of this op is good.
f(a_val, b_val)
......
......@@ -3,9 +3,9 @@ import six.moves.cPickle as pickle
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
import theano
from theano.compile.io import In
......@@ -30,7 +30,7 @@ def test_function_dump():
assert np.allclose(fct1(x), fct2(x))
class TestFunctionIn(unittest.TestCase):
class TestFunctionIn():
def test_in_strict(self):
......@@ -55,14 +55,16 @@ class TestFunctionIn(unittest.TestCase):
# This is not a test of the In class per se, but the In class relies
# on the fact that shared variables cannot be explicit inputs
a = theano.shared(1.0)
self.assertRaises(TypeError, theano.function, [a], a + 1)
with pytest.raises(TypeError):
theano.function([a], a + 1)
def test_in_shared_variable(self):
# Ensure that an error is raised if the In wrapped is used to wrap
# a shared variable
a = theano.shared(1.0)
a_wrapped = In(a, update=a + 1)
self.assertRaises(TypeError, theano.function, [a_wrapped])
with pytest.raises(TypeError):
theano.function([a_wrapped])
def test_in_mutable(self):
a = theano.tensor.dvector()
......@@ -98,7 +100,8 @@ class TestFunctionIn(unittest.TestCase):
# an update of a different type
a = theano.tensor.dscalar('a')
b = theano.tensor.dvector('b')
self.assertRaises(TypeError, In, a, update=b)
with pytest.raises(TypeError):
In(a, update=b)
def test_in_update_shared(self):
# Test that using both In() with updates and shared variables with
......@@ -133,17 +136,19 @@ class TestFunctionIn(unittest.TestCase):
# Values are in range, but a dtype too large has explicitly been given
# For performance reasons, no check of the data is explicitly performed
# (It might be OK to change this in the future.)
self.assertRaises(TypeError, f, [3], np.array([6], dtype='int16'),
1)
with pytest.raises(TypeError):
f([3], np.array([6], dtype='int16'), 1)
# Value too big for a, silently ignored
assert np.all(f([2 ** 20], np.ones(1, dtype='int8'), 1) == 2)
# Value too big for b, raises TypeError
self.assertRaises(TypeError, f, [3], [312], 1)
with pytest.raises(TypeError):
f([3], [312], 1)
# Value too big for c, raises TypeError
self.assertRaises(TypeError, f, [3], [6], 806)
with pytest.raises(TypeError):
f([3], [6], 806)
def test_in_allow_downcast_floatX(self):
a = theano.tensor.fscalar('a')
......@@ -162,13 +167,15 @@ class TestFunctionIn(unittest.TestCase):
assert np.allclose(f(0.1, 0, 0), 0.1)
# If allow_downcast is False, nope
self.assertRaises(TypeError, f, 0, 0.1, 0)
with pytest.raises(TypeError):
f(0, 0.1, 0)
# If allow_downcast is None, it should work iff floatX=float32
if theano.config.floatX == 'float32':
assert np.allclose(f(0, 0, 0.1), 0.1)
else:
self.assertRaises(TypeError, f, 0, 0, 0.1)
with pytest.raises(TypeError):
f(0, 0, 0.1)
def test_in_allow_downcast_vector_floatX(self):
a = theano.tensor.fvector('a')
......@@ -188,7 +195,9 @@ class TestFunctionIn(unittest.TestCase):
assert np.allclose(f([0.1], z, z), 0.1)
# If allow_downcast is False, nope
self.assertRaises(TypeError, f, z, [0.1], z)
with pytest.raises(TypeError):
f(z, [0.1], z)
# If allow_downcast is None, like False
self.assertRaises(TypeError, f, z, z, [0.1])
with pytest.raises(TypeError):
f(z, z, [0.1])
from __future__ import absolute_import, print_function, division
import unittest
import os
import re
......@@ -8,10 +7,10 @@ import theano
from theano import tensor
class FunctionName(unittest.TestCase):
class FunctionName():
def test_function_name(self):
x = tensor.vector('x')
func = theano.function([x], x + 1.)
regex = re.compile(os.path.basename('.*test_function_name.pyc?:14'))
assert(regex.match(func.name) is not None)
assert regex.match(func.name) is not None
from __future__ import absolute_import, print_function, division
import numpy as np
import unittest
from theano.compile.pfunc import pfunc
from theano.compile.sharedvalue import shared
......@@ -42,7 +41,7 @@ class NNet(object):
self.output_from_hidden = pfunc([self.hidden], self.output)
class TestNnet(unittest.TestCase):
class TestNnet():
def test_nnet(self):
rng = np.random.RandomState(1827)
......@@ -57,7 +56,7 @@ class TestNnet(unittest.TestCase):
mean_cost += cost
mean_cost /= float(len(data))
# print 'Mean cost at epoch %s: %s' % (epoch, mean_cost)
self.assertTrue(abs(mean_cost - 0.20588975452) < 1e-6)
assert abs(mean_cost - 0.20588975452) < 1e-6
# Just call functions to make sure they do not crash.
nnet.compute_output(input)
nnet.output_from_hidden(np.ones(10))
from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
import pytest
import theano
from theano.compile.mode import Mode, AddFeatureOptimizer
......@@ -22,7 +21,7 @@ def test_no_output_from_implace():
assert (hasattr(op, 'destroy_map') and 0 in op.destroy_map)
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
pytest.skip("Need cxx for this test")
# Ensure that the elemwise op that produces the output is not inplace when
# using a mode that includes the optimization
opt = AddFeatureOptimizer(NoOutputFromInplace())
......
......@@ -3,14 +3,13 @@ Test compilation modes
"""
from __future__ import absolute_import, print_function, division
import copy
import unittest
import theano
import theano.tensor as T
from theano.compile import Mode
class T_bunch_of_modes(unittest.TestCase):
class Test_bunch_of_modes():
def test1(self):
# this is a quick test after the LazyLinker branch merge
......@@ -45,7 +44,7 @@ class T_bunch_of_modes(unittest.TestCase):
assert 4 == len(set(linker_classes_involved))
class T_old_problem(unittest.TestCase):
class Test_old_problem():
def test_1(self):
# Then, build a mode with the same linker, and a modified optimizer
default_mode = theano.compile.mode.get_default_mode()
......@@ -59,6 +58,3 @@ class T_old_problem(unittest.TestCase):
linker = theano.compile.mode.get_default_mode().linker
assert not hasattr(linker, "fgraph") or linker.fgraph is None
if __name__ == '__main__':
unittest.main()
......@@ -4,10 +4,9 @@ This test is for testing the NanGuardMode.
from __future__ import absolute_import, print_function, division
import logging
from nose.tools import assert_raises
import pytest
import numpy as np
from theano.compile.nanguardmode import NanGuardMode
import theano
import theano.tensor as T
......@@ -39,9 +38,12 @@ def test_NanGuardMode():
_logger = logging.getLogger("theano.compile.nanguardmode")
try:
_logger.propagate = False
assert_raises(AssertionError, fun, infa) # INFs
assert_raises(AssertionError, fun, nana) # NANs
assert_raises(AssertionError, fun, biga) # big values
with pytest.raises(AssertionError):
fun(infa) # INFs
with pytest.raises(AssertionError):
fun(nana) # NANs
with pytest.raises(AssertionError):
fun(biga)  # big values
finally:
_logger.propagate = True
......@@ -64,8 +66,11 @@ def test_NanGuardMode():
fun(a) # normal values
try:
_logger.propagate = False
assert_raises(AssertionError, fun, infa) # INFs
assert_raises(AssertionError, fun, nana) # NANs
assert_raises(AssertionError, fun, biga) # big values
with pytest.raises(AssertionError):
fun(infa) # INFs
with pytest.raises(AssertionError):
fun(nana) # NANs
with pytest.raises(AssertionError):
fun(biga)  # big values
finally:
_logger.propagate = True
......@@ -20,7 +20,7 @@ def mul(a, b):
return a * b
class OpDecoratorTests(utt.InferShapeTester):
class TestOpDecorator(utt.InferShapeTester):
def test_1arg(self):
x = dmatrix('x')
......
# Test of memory profiling
from __future__ import absolute_import, print_function, division
import unittest
import numpy as np
import theano
......@@ -11,7 +9,7 @@ import theano.tensor as T
from theano.ifelse import ifelse
class Test_profiling(unittest.TestCase):
class Test_profiling():
# Test of Theano profiling with min_peak_memory=True
def test_profiling(self):
......@@ -103,6 +101,3 @@ class Test_profiling(unittest.TestCase):
theano.config.profile = config1
theano.config.profile_memory = config2
if __name__ == '__main__':
unittest.main()
from __future__ import absolute_import, print_function, division
import numpy as np
import unittest
import pytest
import theano
from theano.tensor import Tensor, TensorType
......@@ -9,7 +9,7 @@ from theano.compile.sharedvalue import SharedVariable
from theano.compile.sharedvalue import generic
class Test_SharedVariable(unittest.TestCase):
class Test_SharedVariable():
def test_ctors(self):
......@@ -32,7 +32,8 @@ class Test_SharedVariable(unittest.TestCase):
def badfunc():
shared(7, bad_kw=False)
self.assertRaises(TypeError, badfunc)
with pytest.raises(TypeError):
badfunc()
def test_strict_generic(self):
......@@ -117,38 +118,47 @@ class Test_SharedVariable(unittest.TestCase):
b = shared(np.int64(7), strict=True)
assert b.type == theano.tensor.lscalar
self.assertRaises(TypeError, f, b, 8.23)
with pytest.raises(TypeError):
f(b, 8.23)
b = shared(np.int32(7), strict=True)
assert b.type == theano.tensor.iscalar
self.assertRaises(TypeError, f, b, 8.23)
with pytest.raises(TypeError):
f(b, 8.23)
b = shared(np.int16(7), strict=True)
assert b.type == theano.tensor.wscalar
self.assertRaises(TypeError, f, b, 8.23)
with pytest.raises(TypeError):
f(b, 8.23)
b = shared(np.int8(7), strict=True)
assert b.type == theano.tensor.bscalar
self.assertRaises(TypeError, f, b, 8.23)
with pytest.raises(TypeError):
f(b, 8.23)
b = shared(np.float64(7.234), strict=True)
assert b.type == theano.tensor.dscalar
self.assertRaises(TypeError, f, b, 8)
with pytest.raises(TypeError):
f(b, 8)
b = shared(np.float32(7.234), strict=True)
assert b.type == theano.tensor.fscalar
self.assertRaises(TypeError, f, b, 8)
with pytest.raises(TypeError):
f(b, 8)
b = shared(np.float(7.234), strict=True)
assert b.type == theano.tensor.dscalar
self.assertRaises(TypeError, f, b, 8)
with pytest.raises(TypeError):
f(b, 8)
b = shared(7.234, strict=True)
assert b.type == theano.tensor.dscalar
self.assertRaises(TypeError, f, b, 8)
with pytest.raises(TypeError):
f(b, 8)
b = shared(np.zeros((5, 5), dtype='float32'))
self.assertRaises(TypeError, f, b, np.random.rand(5, 5))
with pytest.raises(TypeError):
f(b, np.random.rand(5, 5))
def test_tensor_strict(self):
def f(var, val):
......@@ -156,40 +166,49 @@ class Test_SharedVariable(unittest.TestCase):
b = shared(np.int64([7]), strict=True)
assert b.type == theano.tensor.lvector
self.assertRaises(TypeError, f, b, 8.23)
with pytest.raises(TypeError):
f(b, 8.23)
b = shared(np.int32([7]), strict=True)
assert b.type == theano.tensor.ivector
self.assertRaises(TypeError, f, b, 8.23)
with pytest.raises(TypeError):
f(b, 8.23)
b = shared(np.int16([7]), strict=True)
assert b.type == theano.tensor.wvector
self.assertRaises(TypeError, f, b, 8.23)
with pytest.raises(TypeError):
f(b, 8.23)
b = shared(np.int8([7]), strict=True)
assert b.type == theano.tensor.bvector
self.assertRaises(TypeError, f, b, 8.23)
with pytest.raises(TypeError):
f(b, 8.23)
b = shared(np.float64([7.234]), strict=True)
assert b.type == theano.tensor.dvector
self.assertRaises(TypeError, f, b, 8)
with pytest.raises(TypeError):
f(b, 8)
b = shared(np.float32([7.234]), strict=True)
assert b.type == theano.tensor.fvector
self.assertRaises(TypeError, f, b, 8)
with pytest.raises(TypeError):
f(b, 8)
# np.float([7.234]) don't work
# b = shared(np.float([7.234]), strict=True)
# assert b.type == theano.tensor.dvector
# self.assertRaises(TypeError, f, b, 8)
# with pytest.raises(TypeError):
# f(b, 8)
# This generate a generic type. Should we cast? I don't think.
# b = shared([7.234], strict=True)
# assert b.type == theano.tensor.dvector
# self.assertRaises(TypeError, f, b, 8)
# with pytest.raises(TypeError):
# f(b, 8)
b = shared(np.zeros((5, 5), dtype='float32'))
self.assertRaises(TypeError, f, b, np.random.rand(5, 5))
with pytest.raises(TypeError):
f(b, np.random.rand(5, 5))
def test_scalar_floatX(self):
......@@ -245,7 +264,8 @@ class Test_SharedVariable(unittest.TestCase):
assert b.get_value() == 8
b = shared(np.zeros((5, 5), dtype='float32'))
self.assertRaises(TypeError, f, b, np.random.rand(5, 5))
with pytest.raises(TypeError):
f(b, np.random.rand(5, 5))
def test_tensor_floatX(self):
def f(var, val):
......@@ -298,8 +318,10 @@ class Test_SharedVariable(unittest.TestCase):
assert b.get_value() == 8
b = shared(np.zeros((5, 5), dtype='float32'))
self.assertRaises(TypeError, f, b, np.random.rand(5, 5))
with pytest.raises(TypeError):
f(b, np.random.rand(5, 5))
def test_err_symbolic_variable(self):
self.assertRaises(TypeError, shared, theano.tensor.ones((2, 3)))
with pytest.raises(TypeError):
shared(theano.tensor.ones((2, 3)))
shared(np.ones((2, 4)))
......@@ -3,22 +3,21 @@ from __future__ import absolute_import, print_function, division
import numpy as np
import os.path as pt
import tempfile
import unittest
import filecmp
import theano as th
import theano.d3viz as d3v
from theano.d3viz.tests import models
from nose.plugins.skip import SkipTest
import pytest
from theano.d3viz.formatting import pydot_imported, pydot_imported_msg
if not pydot_imported:
raise SkipTest('pydot not available: ' + pydot_imported_msg)
pytest.skip('pydot not available: ' + pydot_imported_msg, allow_module_level=True)
class TestD3Viz(unittest.TestCase):
class TestD3Viz():
def setUp(self):
def setup_method(self):
self.rng = np.random.RandomState(0)
self.data_dir = pt.join('data', 'test_d3viz')
......@@ -39,7 +38,7 @@ class TestD3Viz(unittest.TestCase):
def test_mlp_profiled(self):
if th.config.mode in ("DebugMode", "DEBUG_MODE"):
raise SkipTest("Can't profile in DebugMode")
pytest.skip("Can't profile in DebugMode")
m = models.Mlp()
profile = th.compile.profiling.ProfileStats(False)
f = th.function(m.inputs, m.outputs, profile=profile)
......
from __future__ import absolute_import, print_function, division
import numpy as np
import unittest
import theano as th
from theano.d3viz.formatting import PyDotFormatter
from theano.d3viz.tests import models
from nose.plugins.skip import SkipTest
import pytest
from theano.d3viz.formatting import pydot_imported, pydot_imported_msg
if not pydot_imported:
raise SkipTest('pydot not available: ' + pydot_imported_msg)
pytest.skip('pydot not available: ' + pydot_imported_msg, allow_module_level=True)
class TestPyDotFormatter(unittest.TestCase):
class TestPyDotFormatter():
def setUp(self):
def setup_method(self):
self.rng = np.random.RandomState(0)
def node_counts(self, graph):
......@@ -33,7 +32,7 @@ class TestPyDotFormatter(unittest.TestCase):
expected = 11
if th.config.mode == "FAST_COMPILE":
expected = 12
self.assertEqual(len(graph.get_nodes()), expected)
assert len(graph.get_nodes()) == expected
nc = self.node_counts(graph)
if th.config.mode == "FAST_COMPILE":
......
from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
import pytest
import numpy as np
......@@ -178,7 +178,7 @@ def Env(inputs, outputs):
def test_clinker_straightforward():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
x, y, z = inputs()
e = add(mul(add(x, y), div(x, y)), bad_sub(bad_sub(x, y), z))
lnk = CLinker().accept(Env([x, y, z], [e]))
......@@ -188,7 +188,7 @@ def test_clinker_straightforward():
def test_clinker_literal_inlining():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
x, y, z = inputs()
z = Constant(tdouble, 4.12345678)
e = add(mul(add(x, y), div(x, y)), bad_sub(bad_sub(x, y), z))
......@@ -204,7 +204,7 @@ def test_clinker_literal_inlining():
def test_clinker_literal_cache():
# This caused bugs in the past related to the cache.
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
mode = theano.Mode(linker='c')
......@@ -232,7 +232,7 @@ def test_clinker_literal_cache():
def test_clinker_single_node():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
x, y, z = inputs()
node = add.make_node(x, y)
lnk = CLinker().accept(Env(node.inputs, node.outputs))
......@@ -242,7 +242,7 @@ def test_clinker_single_node():
def test_clinker_dups():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
# Testing that duplicate inputs are allowed.
x, y, z = inputs()
e = add(x, x)
......@@ -254,7 +254,7 @@ def test_clinker_dups():
def test_clinker_not_used_inputs():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
# Testing that unused inputs are allowed.
x, y, z = inputs()
e = add(x, y)
......@@ -265,7 +265,7 @@ def test_clinker_not_used_inputs():
def test_clinker_dups_inner():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
# Testing that duplicates are allowed inside the graph
x, y, z = inputs()
e = add(mul(y, y), add(x, z))
......@@ -326,7 +326,7 @@ def test_duallinker_straightforward():
def test_duallinker_mismatch():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
x, y, z = inputs()
# bad_sub is correct in C but erroneous in Python
e = bad_sub(mul(x, y), mul(y, z))
......@@ -372,7 +372,7 @@ add_fail = AddFail()
def test_c_fail_error():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
x, y, z = inputs()
x = Constant(tdouble, 7.2, name='x')
e = add_fail(mul(x, y), mul(y, z))
......@@ -391,7 +391,7 @@ def test_shared_input_output():
# https://groups.google.com/d/topic/theano-users/6dLaEqc2R6g/discussion
# The shared variable is both an input and an output of the function.
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
pytest.skip("Need cxx for this test")
inc = theano.tensor.iscalar('inc')
state = theano.shared(0)
......
......@@ -5,8 +5,7 @@ import traceback
import warnings
import numpy as np
from nose.plugins.skip import SkipTest
import unittest
import pytest
import theano
from theano import config
......@@ -38,7 +37,7 @@ class IncOneC(Op):
return "%(z)s = %(x)s + 1;" % locals()
class TestComputeTestValue(unittest.TestCase):
class TestComputeTestValue():
def test_variable_only(self):
orig_compute_test_value = theano.config.compute_test_value
......@@ -59,7 +58,8 @@ class TestComputeTestValue(unittest.TestCase):
# this test should fail
y.tag.test_value = np.random.rand(6, 5).astype(config.floatX)
self.assertRaises(ValueError, T.dot, x, y)
with pytest.raises(ValueError):
T.dot(x, y)
finally:
theano.config.compute_test_value = orig_compute_test_value
......@@ -77,13 +77,15 @@ class TestComputeTestValue(unittest.TestCase):
# should fail when asked by user
theano.config.compute_test_value = 'raise'
self.assertRaises(ValueError, T.dot, x, y)
with pytest.raises(ValueError):
T.dot(x, y)
# test that a warning is raised if required
theano.config.compute_test_value = 'warn'
warnings.simplefilter('error', UserWarning)
try:
self.assertRaises(UserWarning, T.dot, x, y)
with pytest.raises(UserWarning):
T.dot(x, y)
finally:
# Restore the default behavior.
# TODO There is a cleaner way to do this in Python 2.6, once
......@@ -117,7 +119,8 @@ class TestComputeTestValue(unittest.TestCase):
# this test should fail
z.set_value(np.random.rand(7, 6).astype(config.floatX))
self.assertRaises(ValueError, f, x, y, z)
with pytest.raises(ValueError):
f(x, y, z)
finally:
theano.config.compute_test_value = orig_compute_test_value
......@@ -139,7 +142,8 @@ class TestComputeTestValue(unittest.TestCase):
# this test should fail
y.set_value(np.random.rand(5, 6).astype(config.floatX))
self.assertRaises(ValueError, T.dot, x, y)
with pytest.raises(ValueError):
T.dot(x, y)
finally:
theano.config.compute_test_value = orig_compute_test_value
......@@ -160,7 +164,8 @@ class TestComputeTestValue(unittest.TestCase):
# this test should fail
x = np.random.rand(2, 4).astype(config.floatX)
self.assertRaises(ValueError, T.dot, x, y)
with pytest.raises(ValueError):
T.dot(x, y)
finally:
theano.config.compute_test_value = orig_compute_test_value
......@@ -198,7 +203,8 @@ class TestComputeTestValue(unittest.TestCase):
# this test should fail
x = T.constant(np.random.rand(2, 4), dtype=config.floatX)
self.assertRaises(ValueError, T.dot, x, y)
with pytest.raises(ValueError):
T.dot(x, y)
finally:
theano.config.compute_test_value = orig_compute_test_value
......@@ -213,7 +219,8 @@ class TestComputeTestValue(unittest.TestCase):
y = T.dmatrix('y')
y.tag.test_value = np.random.rand(4, 5)
self.assertRaises(TypeError, T.dot, x, y)
with pytest.raises(TypeError):
T.dot(x, y)
finally:
theano.config.compute_test_value = orig_compute_test_value
......@@ -227,7 +234,8 @@ class TestComputeTestValue(unittest.TestCase):
x.tag.test_value = np.zeros((2, 3), dtype=config.floatX)
y = T.matrix()
y.tag.test_value = np.zeros((2, 2), dtype=config.floatX)
self.assertRaises(ValueError, x.__mul__, y)
with pytest.raises(ValueError):
x.__mul__(y)
finally:
theano.config.compute_test_value = orig_compute_test_value
......@@ -286,6 +294,7 @@ class TestComputeTestValue(unittest.TestCase):
# Get traceback
tb = sys.exc_info()[2]
frame_infos = traceback.extract_tb(tb)
# We should be in the "fx" function defined above
expected = 'test_compute_test_value.py'
assert any((os.path.split(
......@@ -311,12 +320,11 @@ class TestComputeTestValue(unittest.TestCase):
def fx(prior_result, A):
return T.dot(prior_result, A)
self.assertRaises(ValueError,
theano.scan,
fn=fx,
outputs_info=T.ones_like(A.T),
non_sequences=A,
n_steps=k)
with pytest.raises(ValueError):
theano.scan(fn=fx,
outputs_info=T.ones_like(A.T),
non_sequences=A,
n_steps=k)
# Since we have to inspect the traceback,
# we cannot simply use self.assertRaises()
......@@ -360,10 +368,8 @@ class TestComputeTestValue(unittest.TestCase):
o = IncOnePython()(i)
# Check that the c_code function is not implemented
self.assertRaises(
(NotImplementedError, utils.MethodNotDefined),
o.owner.op.c_code,
o.owner, 'o', ['x'], 'z', {'fail': ''})
with pytest.raises((NotImplementedError, utils.MethodNotDefined)):
o.owner.op.c_code(o.owner, 'o', ['x'], 'z', {'fail': ''})
assert hasattr(o.tag, 'test_value')
assert o.tag.test_value == 4
......@@ -373,7 +379,7 @@ class TestComputeTestValue(unittest.TestCase):
def test_no_perform(self):
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
orig_compute_test_value = theano.config.compute_test_value
try:
......@@ -387,9 +393,8 @@ class TestComputeTestValue(unittest.TestCase):
o = IncOneC()(i)
# Check that the perform function is not implemented
self.assertRaises((NotImplementedError, utils.MethodNotDefined),
o.owner.op.perform,
o.owner, 0, [None])
with pytest.raises((NotImplementedError, utils.MethodNotDefined)):
o.owner.op.perform(o.owner, 0, [None])
assert hasattr(o.tag, 'test_value')
assert o.tag.test_value == 4
......
from __future__ import absolute_import, print_function, division
import os
import pickle
import unittest
from nose.plugins.skip import SkipTest
import pytest
import theano
from theano.compat import PY3
......@@ -11,12 +10,12 @@ from theano.gof import CachedConstantError, FunctionGraph
from theano import tensor as tt
class TFunctionGraph(unittest.TestCase):
class TFunctionGraph():
def test_constant_cache_error(self):
v = theano.tensor.constant(1)
assert v.cached
self.assertRaises(CachedConstantError, FunctionGraph, [], [v + 1],
clone=False)
with pytest.raises(CachedConstantError):
FunctionGraph([], [v + 1], clone=False)
def test_clone(self):
v = theano.tensor.constant(1)
......@@ -35,7 +34,7 @@ class TFunctionGraph(unittest.TestCase):
# fgraph.variables event if the apply had other output used in
# the graph. This caused a crash.
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
pytest.skip("Need cxx for this test")
# This test run the pickle that reproduce this case.
with open(os.path.join(os.path.dirname(__file__),
......
from __future__ import absolute_import, print_function, division
from itertools import count
import pickle
import unittest
from nose.plugins.skip import SkipTest
import pytest
import numpy as np
from theano import (
......@@ -253,7 +252,7 @@ class TestToposort:
# is_same_graph #
#################
class TestIsSameGraph(unittest.TestCase):
class TestIsSameGraph():
def check(self, expected, debug=True):
"""
......@@ -325,20 +324,18 @@ class TestIsSameGraph(unittest.TestCase):
# eval #
################
class TestEval(unittest.TestCase):
class TestEval():
def setUp(self):
def setup_method(self):
self.x, self.y = tensor.scalars('x', 'y')
self.z = self.x + self.y
self.w = 2 * self.z
def test_eval(self):
self.assertEqual(self.w.eval({self.x: 1., self.y: 2.}), 6.)
self.assertEqual(self.w.eval({self.z: 3}), 6.)
self.assertTrue(hasattr(self.w, "_fn_cache"),
"variable must have cache after eval")
self.assertFalse(hasattr(pickle.loads(pickle.dumps(self.w)), '_fn_cache'),
"temporary functions must not be serialized")
assert self.w.eval({self.x: 1., self.y: 2.}) == 6.
assert self.w.eval({self.z: 3}) == 6.
assert hasattr(self.w, "_fn_cache"), "variable must have cache after eval"
assert not hasattr(pickle.loads(pickle.dumps(self.w)), '_fn_cache'), "temporary functions must not be serialized"
################
......@@ -391,7 +388,7 @@ class TestAutoName:
def test_sparsevariable(self):
# Get counter value
if not sparse.enable_sparse:
raise SkipTest('Optional package SciPy not installed')
pytest.skip('Optional package SciPy not installed')
autoname_id = next(Variable.__count__)
Variable.__count__ = count(autoname_id)
r1 = sparse.csc_matrix(name='x', dtype='float32')
......
from __future__ import absolute_import, print_function, division
from copy import deepcopy
import unittest
import numpy as np
......@@ -87,7 +86,7 @@ def FunctionGraph(inputs, outputs):
return e
class TestPerformLinker(unittest.TestCase):
class TestPerformLinker():
def test_thunk(self):
x, y, z = inputs()
e = mul(add(x, y), div(x, y))
......@@ -138,7 +137,7 @@ def wrap_linker(fgraph, linkers, wrapper):
return lnk
class TestWrapLinker(unittest.TestCase):
class TestWrapLinker():
def test_0(self):
nodes = []
......
from __future__ import absolute_import, print_function, division
import unittest
from nose.plugins.skip import SkipTest
import numpy as np
import pytest
import theano
import theano.gof.op as op
......@@ -137,7 +136,7 @@ class TestOp:
def test_op_struct(self):
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
sop = StructOp()
c = sop(theano.tensor.constant(0))
mode = None
......@@ -155,7 +154,7 @@ class TestOp:
assert rval == [0, 0]
class TestMakeThunk(unittest.TestCase):
class TestMakeThunk():
def test_no_c_code(self):
class IncOnePython(Op):
......@@ -176,9 +175,8 @@ class TestMakeThunk(unittest.TestCase):
o = IncOnePython()(i)
# Check that the c_code function is not implemented
self.assertRaises((NotImplementedError, utils.MethodNotDefined),
o.owner.op.c_code,
o.owner, 'o', ['x'], 'z', {'fail': ''})
with pytest.raises((NotImplementedError, utils.MethodNotDefined)):
o.owner.op.c_code(o.owner, 'o', ['x'], 'z', {'fail': ''})
storage_map = {i: [np.int32(3)],
o: [None]}
......@@ -213,9 +211,8 @@ class TestMakeThunk(unittest.TestCase):
o = IncOneC()(i)
# Check that the perform function is not implemented
self.assertRaises((NotImplementedError, utils.MethodNotDefined),
o.owner.op.perform,
o.owner, 0, [None])
with pytest.raises((NotImplementedError, utils.MethodNotDefined)):
o.owner.op.perform(o.owner, 0, [None])
storage_map = {i: [np.int32(3)],
o: [None]}
......@@ -231,8 +228,8 @@ class TestMakeThunk(unittest.TestCase):
assert compute_map[o][0]
assert storage_map[o][0] == 4
else:
self.assertRaises((NotImplementedError, utils.MethodNotDefined),
thunk)
with pytest.raises((NotImplementedError, utils.MethodNotDefined)):
thunk()
def test_no_make_node(self):
class DoubleOp(Op):
......@@ -400,6 +397,3 @@ def test_debug_error_message():
assert raised
finally:
config.compute_test_value = prev_value
if __name__ == '__main__':
unittest.main()
from __future__ import absolute_import, print_function, division
from unittest import TestCase
from theano.compat import exc_message
from theano.gof.optdb import opt, DB
class Test_DB(TestCase):
class Test_DB():
def test_0(self):
......@@ -19,9 +18,9 @@ class Test_DB(TestCase):
db.register('c', Opt(), 'z', 'asdf')
self.assertTrue('a' in db)
self.assertTrue('b' in db)
self.assertTrue('c' in db)
assert 'a' in db
assert 'b' in db
assert 'c' in db
try:
db.register('c', Opt()) # name taken
......
from __future__ import absolute_import, print_function, division
import theano
import numpy as np
from unittest import TestCase
import pytest
from theano.gof import Op, COp, Apply
from theano import Generic
from theano.scalar import Scalar
......@@ -114,7 +114,7 @@ class QuadraticCOpFunc(COp):
y[0] = coefficients.a * (x**2) + coefficients.b * x + coefficients.c
class TestParamsType(TestCase):
class TestParamsType():
def test_hash_and_eq_params(self):
wp1 = ParamsType(a=Generic(), array=TensorType('int64', (False,)), floatting=Scalar('float64'),
......@@ -180,9 +180,11 @@ class TestParamsType(TestCase):
a2=random_tensor.astype('float32'),
a3=2000)
# should fail (o.a1 is not int32, o.a2 is not float64)
self.assertRaises(TypeError, w.filter, o, True)
with pytest.raises(TypeError):
w.filter(o, True)
# should fail (o.a1 is not int32, o.a2 is not float64, and downcast is disallowed)
self.assertRaises(TypeError, w.filter, o, False, False)
with pytest.raises(TypeError):
w.filter(o, False, False)
# Should pass.
w.filter(o, strict=False, allow_downcast=True)
......
from __future__ import absolute_import, print_function, division
import os
import numpy as np
import pytest
import theano
from theano import Op, Apply, scalar
from theano.tensor import TensorType
from theano.gof.type import CDataType, EnumType, EnumList, CEnumType
from unittest import TestCase
from nose.plugins.skip import SkipTest
# todo: test generic
......@@ -63,7 +62,7 @@ Py_INCREF(%(out)s);
def test_cdata():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
i = TensorType('float32', (False,))()
c = ProdOp()(i)
i2 = GetOp()(c)
......@@ -181,7 +180,7 @@ class MyOpCEnumType(Op):
val=sub['params'])
class TestEnumTypes(TestCase):
class TestEnumTypes():
def test_enum_class(self):
# Check that invalid enum name raises exception.
......@@ -250,7 +249,7 @@ class TestEnumTypes(TestCase):
def test_op_with_cenumtype(self):
if theano.config.cxx == '':
raise SkipTest('need c++')
pytest.skip('need c++')
million = MyOpCEnumType('million')()
billion = MyOpCEnumType('billion')()
two_billions = MyOpCEnumType('two_billions')()
......
......@@ -2,9 +2,8 @@ from __future__ import absolute_import, print_function, division
import gc
import sys
import time
import unittest
from nose.plugins.skip import SkipTest
import pytest
import numpy as np
from six import itervalues
......@@ -19,10 +18,10 @@ from theano.ifelse import ifelse
import theano
class TestCallbacks(unittest.TestCase):
class TestCallbacks():
# Test the VM_Linker's callback argument, which can be useful for debugging.
def setUp(self):
def setup_method(self):
self.n_callbacks = {}
def callback(self, node, thunk, storage_map, compute_map):
......@@ -67,14 +66,14 @@ def test_c_thunks():
linker=vm.VM_Linker(c_thunks=c_thunks,
use_cloop=False)))
f(1, [2], [3, 2])
from nose.tools import assert_raises
assert_raises(ValueError, f, 0, [2], [3, 4])
with pytest.raises(ValueError):
f(0, [2], [3, 4])
assert any([hasattr(t, 'cthunk') for t in f.fn.thunks]) == c_thunks
def test_speed():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
pytest.skip("G++ not available, so we need to skip this test.")
def build_graph(x, depth=5):
z = x
......@@ -208,7 +207,7 @@ def test_partial_function():
check_partial_function(vm.VM_Linker(allow_partial_eval=True, use_cloop=False))
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
pytest.skip("Need cxx for this test")
check_partial_function('cvm')
......@@ -224,7 +223,7 @@ def test_partial_function_with_output_keys():
check_partial_function_output_keys(vm.VM_Linker(allow_partial_eval=True, use_cloop=False))
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
pytest.skip("Need cxx for this test")
check_partial_function_output_keys('cvm')
......@@ -246,7 +245,7 @@ def test_partial_function_with_updates():
check_updates(vm.VM_Linker(allow_partial_eval=True, use_cloop=False))
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
pytest.skip("Need cxx for this test")
check_updates('cvm')
......@@ -440,7 +439,7 @@ def test_reallocation():
def test_no_recycling():
if theano.config.cxx == '':
raise SkipTest('need c++')
pytest.skip('need c++')
x = theano.tensor.vector()
for lnk in [vm.VM_Linker(use_cloop=True),
vm.VM_Linker(use_cloop=False, lazy=True),
......
......@@ -19,9 +19,8 @@ import math
import sys
from itertools import product, chain
import nose
import numpy as np
from nose.plugins.skip import SkipTest
import pytest
import theano
import theano.tests.unittest_tools as utt
......@@ -973,7 +972,7 @@ def test_true_half_config_support():
# For cuDNN V5.1 and V6.0:
# "TRUE_HALF_CONFIG is only supported on architectures with true fp16 support (compute capability 5.3 and 6.0)"
if not check_dtype_config_support('float16', 'float16'):
raise SkipTest('FWD: TRUE_HALF_CONFIG not supported on this GPU.')
pytest.skip('FWD: TRUE_HALF_CONFIG not supported on this GPU.')
class CheckDnn:
......@@ -1040,24 +1039,3 @@ class CheckDnn:
for tcase in test.test_gradweight_runtime_algorithms():
print(tcase[0].__name__, *tcase[1:])
print(test_true_half_config_support.__name__)
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) == 1 and args[0] in ('infos', 'list'):
if args[0] == 'infos':
CheckDnn.print_infos()
if args[0] == 'list':
CheckDnn.print_tests()
else:
# We run all tests with nosetests.
module_name = sys.modules[__name__].__file__
if len(args) == 0:
# No args given: run nosetests -vs
args = ['--verbose', '--nocapture']
# Else, use given args.
argv = [sys.argv[0], module_name] + args
CheckDnn.print_infos()
nose.main(argv=argv)
from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
import pytest
import theano.tensor
import theano.gpuarray
if theano.gpuarray.pygpu is None:
raise SkipTest("pygpu not installed")
pytest.skip("pygpu not installed", allow_module_level=True)
init_error = None
......@@ -18,9 +18,9 @@ if (not theano.gpuarray.pygpu_activated and
if not theano.gpuarray.pygpu_activated:
if init_error:
raise SkipTest(init_error)
pytest.skip(init_error)
else:
raise SkipTest("pygpu disabled")
pytest.skip("pygpu disabled")
test_ctx_name = None
......
from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
import pytest
import numpy as np
......@@ -28,11 +27,11 @@ class TestDnnConv2d(test_abstract_conv.BaseTestConv2d):
def tcase(self, i, f, s, b, flip, provide_shape, fd=(1, 1)):
if not dnn_available(test_ctx_name):
raise SkipTest(dnn_available.msg)
pytest.skip(dnn_available.msg)
mode = mode_with_gpu
if fd != (1, 1):
raise SkipTest("Doesn't have CUDNN implementation")
pytest.skip("Doesn't have CUDNN implementation")
o = self.get_output_shape(i, f, s, b, fd)
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
......@@ -52,9 +51,9 @@ class TestDnnConv2d(test_abstract_conv.BaseTestConv2d):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1), expect_error=False):
if not dnn_available(test_ctx_name):
raise SkipTest(dnn_available.msg)
pytest.skip(dnn_available.msg)
if fd != (1, 1):
raise SkipTest("Doesn't have CUDNN implementation")
pytest.skip("Doesn't have CUDNN implementation")
mode = mode_with_gpu
if not expect_error:
......@@ -65,9 +64,8 @@ class TestDnnConv2d(test_abstract_conv.BaseTestConv2d):
filter_flip=flip, target_op=GpuDnnConvGradI,
filter_dilation=fd)
else:
assert_raises((RuntimeError, ValueError),
self.run_gradinput,
inputs_shape=i, filters_shape=f,
with pytest.raises((RuntimeError, ValueError)):
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
......@@ -86,11 +84,11 @@ class TestDnnConv3d(test_abstract_conv.BaseTestConv3d):
def tcase(self, i, f, s, b, flip, provide_shape, fd=(1, 1, 1)):
if not dnn_available(test_ctx_name):
raise SkipTest(dnn_available.msg)
pytest.skip(dnn_available.msg)
mode = mode_with_gpu
if fd != (1, 1, 1):
raise SkipTest("Doesn't have CUDNN implementation")
pytest.skip("Doesn't have CUDNN implementation")
o = self.get_output_shape(i, f, s, b, fd)
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
......@@ -110,9 +108,9 @@ class TestDnnConv3d(test_abstract_conv.BaseTestConv3d):
def tcase_gi(self, i, f, o, s, b, flip, provide_shape, fd=(1, 1, 1), expect_error=False):
if not dnn_available(test_ctx_name):
raise SkipTest(dnn_available.msg)
pytest.skip(dnn_available.msg)
if fd != (1, 1, 1):
raise SkipTest("Doesn't have CUDNN implementation")
pytest.skip("Doesn't have CUDNN implementation")
mode = mode_with_gpu
if not expect_error:
......@@ -123,9 +121,8 @@ class TestDnnConv3d(test_abstract_conv.BaseTestConv3d):
filter_flip=flip, target_op=GpuDnnConvGradI,
filter_dilation=fd)
else:
assert_raises((RuntimeError, ValueError),
self.run_gradinput,
inputs_shape=i, filters_shape=f,
with pytest.raises((RuntimeError, ValueError)):
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
......@@ -177,16 +174,14 @@ class TestCorrMMConv2d(test_abstract_conv.BaseTestConv2d):
target_op=GpuCorrMM_gradInputs,
filter_dilation=fd)
else:
assert_raises(ValueError,
self.run_gradinput,
inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorrMM_gradInputs,
ref=None,
filter_dilation=fd)
with pytest.raises(ValueError):
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
filter_flip=flip,
target_op=GpuCorrMM_gradInputs,
ref=None, filter_dilation=fd)
class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d):
......@@ -232,9 +227,8 @@ class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d):
target_op=GpuCorr3dMM_gradInputs,
filter_dilation=fd)
else:
assert_raises(ValueError,
self.run_gradinput,
inputs_shape=i, filters_shape=f,
with pytest.raises(ValueError):
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode,
provide_shape=provide_shape, border_mode=b,
......@@ -245,7 +239,7 @@ class TestCorrMMConv3d(test_abstract_conv.BaseTestConv3d):
class TestDnnConvTypes(test_abstract_conv.TestConvTypes):
def setUp(self):
def setup_method(self):
self.input = gpu_ftensor4()
self.filters = gpu_ftensor4()
self.topgrad = gpu_ftensor4()
......
from __future__ import absolute_import, print_function, division
import unittest
from theano.compat import izip
import pytest
from six import iteritems
......@@ -75,20 +75,19 @@ def makeTester(name, op, gpu_op, cases, checks=None, mode_gpu=mode_with_gpu,
_skip = skip
_checks = checks
class Checker(unittest.TestCase, utt.TestOptimizationMixin):
class Checker(utt.TestOptimizationMixin):
op = staticmethod(_op)
gpu_op = staticmethod(_gpu_op)
cases = _cases
skip = _skip
checks = _checks
def setUp(self):
def setup_method(self):
eval(self.__class__.__module__ + '.' + self.__class__.__name__)
def test_all(self):
if skip:
from nose.plugins.skip import SkipTest
raise SkipTest(skip)
pytest.skip(skip)
for testname, inputs in iteritems(cases):
for _ in range(len(inputs)):
......@@ -334,7 +333,7 @@ class G_reshape(test_basic.T_reshape):
class G_comparison(test_basic.test_comparison):
def setUp(self):
def setup_method(self):
utt.seed_rng()
self.mode = mode_with_gpu
self.shared = gpuarray_shared_constructor
......@@ -342,8 +341,7 @@ class G_comparison(test_basic.test_comparison):
class G_Join_and_Split(test_basic.T_Join_and_Split):
def setUp(self):
super(G_Join_and_Split, self).setUp()
def setup_method(self):
self.mode = mode_with_gpu.excluding('constant_folding')
self.join_op = GpuJoin()
self.split_op_class = GpuSplit
......
from __future__ import absolute_import, print_function, division
from unittest import TestCase
from nose.plugins.skip import SkipTest
import itertools
import numpy as np
import pytest
import theano
from theano import config
......@@ -85,7 +84,7 @@ def test_float16():
utt.assert_allclose(np.asarray(out), np.dot(x, y))
class TestGpuSgemv(TestCase, BaseGemv, utt.TestOptimizationMixin):
class TestGpuSgemv(BaseGemv, utt.TestOptimizationMixin):
mode = mode_with_gpu
dtype = 'float32'
......@@ -156,7 +155,7 @@ GpuGemmBatchTester = makeTester(
)
class TestGpuGemmBatchStrided(TestCase):
class TestGpuGemmBatchStrided():
def test0(self):
# Reported in https://github.com/Theano/Theano/issues/5730
x = tensor.tensor3()
......@@ -170,7 +169,7 @@ class TestGpuGemmBatchStrided(TestCase):
class TestGpuSger(TestGer):
def setUp(self):
def setup_method(self):
self.mode = mode_with_gpu
dtype = self.dtype = 'float32' # optimization isn't dtype-dependent
self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
......@@ -184,21 +183,21 @@ class TestGpuSger(TestGer):
self.gemm = gpugemm_inplace
def test_f32_0_0(self):
raise SkipTest('0-sized objects not supported')
pytest.skip('0-sized objects not supported')
def test_f32_1_0(self):
raise SkipTest('0-sized objects not supported')
pytest.skip('0-sized objects not supported')
def test_f32_0_1(self):
raise SkipTest('0-sized objects not supported')
pytest.skip('0-sized objects not supported')
class TestGpuSgerNoTransfer(TestGpuSger):
shared = staticmethod(gpuarray_shared_constructor)
class TestGpuGer_OpContract(TestCase, utt.T_OpContractMixin):
def setUp(self):
class TestGpuGer_OpContract(utt.Test_OpContractMixin):
def setup_method(self):
self.ops = [gpuger_no_inplace, gpuger_inplace]
def clone(self, op):
......
......@@ -16,7 +16,7 @@ from ..blocksparse import (GpuSparseBlockGemv,
class BlockSparse_Gemv_and_Outer(test_blocksparse.BlockSparse_Gemv_and_Outer):
def setUp(self):
def setup_method(self):
utt.seed_rng()
self.mode = mode_with_gpu.excluding('constant_folding')
self.gemv_op = gpu_sparse_block_gemv
......
from __future__ import (division, absolute_import, print_function)
import unittest
import numpy as np
import pytest
import theano
import theano.tensor as T
......@@ -13,10 +13,10 @@ from theano.tensor.nnet.tests.test_ctc import (setup_torch_case, setup_ctc_case,
from .config import (mode_with_gpu, mode_without_gpu)
class TestCTC(unittest.TestCase):
def setUp(self):
class TestCTC():
def setup_method(self):
if not ctc_available():
self.skipTest('Optional library warp-ctc not available')
pytest.skip('Optional library warp-ctc not available')
def check_ctc(self, activations, labels, input_length, expected_costs, expected_grads):
# Create symbolic variables
......
from __future__ import absolute_import, print_function, division
from copy import copy
from unittest import TestCase
import numpy as np
import theano
from theano import scalar, gof, tensor
from theano.compile import DebugMode, Mode
from theano.tests.unittest_tools import SkipTest, assert_allclose
from theano.tests.unittest_tools import assert_allclose
from theano.tensor.tests import test_elemwise
......@@ -70,16 +69,16 @@ def test_elemwise_pow():
assert_allclose(out, expected_out)
class TestMathErrorFunctions(TestCase):
class TestMathErrorFunctions():
dtypes = ["float64", "float32", "float16"]
default_arrays = {}
expected_erfinv_outputs = {}
expected_erfcinv_outputs = {}
@classmethod
def setUpClass(cls):
def setup_class(cls):
if not imported_scipy_special:
raise SkipTest("scipy.special needed")
pytest.skip("scipy.special needed")
# NB: erfinv is defined in ]-1;1[, and erfcinv is defined in ]0;2[,
# so we just take some values in an interval that covers both domains
# (this will also allow to test some values outside the domains).
......@@ -358,13 +357,13 @@ class test_GpuCAReduceCuda(test_GpuCAReduceCPY):
def test_perform_nan(self):
return
def setUp(self):
super(test_GpuCAReduceCuda, self).setUp()
def setup_method(self):
super(test_GpuCAReduceCuda, self).setup_method()
if get_context(test_ctx_name).kind != b'cuda':
raise SkipTest("Cuda specific tests")
pytest.skip("Cuda specific tests")
class T_gpureduce_dtype(test_elemwise.T_reduce_dtype):
class Test_gpureduce_dtype(test_elemwise.T_reduce_dtype):
mode = mode_with_gpu.excluding('local_cut_useless_reduce')
# GpuDnnReduction doesn't cover all cases, but should cover some
......@@ -376,9 +375,9 @@ class T_gpureduce_dtype(test_elemwise.T_reduce_dtype):
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64']
def setUp(self):
def setup_method(self):
if get_context(test_ctx_name).kind != b'cuda':
raise SkipTest("Cuda specific tests")
pytest.skip("Cuda specific tests")
def speed_reduce10():
......
......@@ -3,6 +3,7 @@ from functools import partial
from itertools import product
import numpy as np
import pytest
from six.moves import xrange
from theano import tensor as T
......@@ -10,24 +11,21 @@ import theano
import theano.tensor.tests.test_extra_ops
from theano.tensor.extra_ops import CumOp
from theano.tests.unittest_tools import SkipTest
from theano.tests import unittest_tools as utt
from .config import mode_with_gpu, test_ctx_name
from ..extra_ops import GpuCumOp
from ..type import get_context
cum_modes = utt.parameterized.expand([('mul',), ('add',)])
class TestGpuCumOp(theano.tensor.tests.test_extra_ops.TestCumOp):
mode = mode_with_gpu
def setUp(self):
super(TestGpuCumOp, self).setUp()
def setup_method(self):
super(TestGpuCumOp, self).setup_method()
test_ctx = get_context(test_ctx_name)
if test_ctx.kind != b'cuda':
raise SkipTest("Cuda specific tests")
pytest.skip("Cuda specific tests")
self.max_threads_dim0 = test_ctx.maxlsize0
self.max_grid_size1 = test_ctx.maxgsize2
self.op_class = CumOp
......@@ -38,19 +36,19 @@ class TestGpuCumOp(theano.tensor.tests.test_extra_ops.TestCumOp):
self.old_rtol = theano.tensor.float32_rtol
theano.tensor.basic.float32_rtol *= 2
def tearDown(self):
super(TestGpuCumOp, self).tearDown()
def teardown_method(self):
super(TestGpuCumOp, self).teardown_method()
# Restore rtol
theano.tensor.basic.float32_rtol = self.old_rtol
@cum_modes
@pytest.mark.parametrized("mode", ["mul", "add"])
def test_infer_shape(self, mode):
# GpuCumOp is only defined for float32 for now, so we skip it
# in the unsupported cases
op_class = partial(self.op_class, mode=mode)
gpucumop_supported_dtypes = ('float32',)
if theano.config.floatX not in gpucumop_supported_dtypes:
raise SkipTest('Gpucumop not implemented for dtype %s'
pytest.skip('Gpucumop not implemented for dtype %s'
% theano.config.floatX)
x = T.tensor3('x')
a = np.random.random((3, 5, 2)).astype(theano.config.floatX)
......@@ -61,12 +59,12 @@ class TestGpuCumOp(theano.tensor.tests.test_extra_ops.TestCumOp):
[a],
GpuCumOp)
@cum_modes
@pytest.mark.parametrized("mode", ["mul", "add"])
def test_grad(self, mode):
# no grad for GpuCumOp
pass
@cum_modes
@pytest.mark.parametrized("mode", ["mul", "add"])
def test_Strides1D(self, mode):
op_class = partial(self.op_class, mode=mode)
np_func = dict(add=np.cumsum, mul=np.cumprod)[mode]
......@@ -92,7 +90,7 @@ class TestGpuCumOp(theano.tensor.tests.test_extra_ops.TestCumOp):
utt.assert_allclose(np_func(a[slicing], axis=axis),
cumop_function(a[slicing]))
@cum_modes
@pytest.mark.parametrized("mode", ["mul", "add"])
def test_Strides2D(self, mode):
np_func = dict(add=np.cumsum, mul=np.cumprod)[mode]
op_class = partial(self.op_class, mode=mode)
......@@ -118,7 +116,7 @@ class TestGpuCumOp(theano.tensor.tests.test_extra_ops.TestCumOp):
utt.assert_allclose(np_func(a[slicing], axis=axis),
cumop_function(a[slicing]))
@cum_modes
@pytest.mark.parametrized("mode", ["mul", "add"])
def test_Strides3D(self, mode):
np_func = dict(add=np.cumsum, mul=np.cumprod)[mode]
op_class = partial(self.op_class, mode=mode)
......@@ -144,7 +142,7 @@ class TestGpuCumOp(theano.tensor.tests.test_extra_ops.TestCumOp):
utt.assert_allclose(np_func(a[slicing], axis=axis),
cumop_function(a[slicing]))
@cum_modes
@pytest.mark.parametrized("mode", ["mul", "add"])
def test_GpuCumOp1D(self, mode):
np_func = dict(add=np.cumsum, mul=np.cumprod)[mode]
op_class = partial(self.op_class, mode=mode)
......@@ -169,7 +167,7 @@ class TestGpuCumOp(theano.tensor.tests.test_extra_ops.TestCumOp):
dtype="float32")
utt.assert_allclose(np_func(a), f(a))
@cum_modes
@pytest.mark.parametrized("mode", ["mul", "add"])
def test_GpuCumOp2D(self, mode):
np_func = dict(add=np.cumsum, mul=np.cumprod)[mode]
op_class = partial(self.op_class, mode=mode)
......@@ -211,7 +209,7 @@ class TestGpuCumOp(theano.tensor.tests.test_extra_ops.TestCumOp):
a = np.sign(a - 0.5).astype("float32") # Avoid floating point error
utt.assert_allclose(np_func(a, axis=axis), f(a))
@cum_modes
@pytest.mark.parametrized("mode", ["mul", "add"])
def test_GpuCumOp3D(self, mode):
np_func = dict(add=np.cumsum, mul=np.cumprod)[mode]
op_class = partial(self.op_class, mode=mode)
......@@ -264,7 +262,7 @@ class TestGpuCumOp(theano.tensor.tests.test_extra_ops.TestCumOp):
a = np.sign(a - 0.5).astype("float32") # Avoid floating point error
utt.assert_allclose(np_func(a, axis=axis), f(a))
@cum_modes
@pytest.mark.parametrized("mode", ["mul", "add"])
def test_GpuCumOp4D(self, mode):
op_class = partial(self.op_class, mode=mode)
# Should not use the GPU version.
......
from __future__ import absolute_import, print_function, division
import unittest
import numpy as np
import pytest
import theano
import theano.tensor as T
......@@ -11,20 +11,20 @@ import theano.gpuarray.fft
from .config import mode_with_gpu
# Skip tests if pygpu is not available.
from nose.plugins.skip import SkipTest
import pytest
from theano.gpuarray.fft import pygpu_available, skcuda_available, pycuda_available
if not pygpu_available: # noqa
raise SkipTest('Optional package pygpu not available')
pytest.skip('Optional package pygpu not available', allow_module_level=True)
if not skcuda_available: # noqa
raise SkipTest('Optional package scikit-cuda not available')
pytest.skip('Optional package scikit-cuda not available', allow_module_level=True)
if not pycuda_available: # noqa
raise SkipTest('Optional package pycuda not available')
pytest.skip('Optional package pycuda not available', allow_module_level=True)
# Transform sizes
N = 32
class TestFFT(unittest.TestCase):
class TestFFT():
def test_1Dfft(self):
inputs_val = np.random.random((1, N)).astype('float32')
......@@ -106,9 +106,9 @@ class TestFFT(unittest.TestCase):
inputs_val = np.random.random((1, N)).astype('float64')
inputs = theano.shared(inputs_val)
with self.assertRaises(AssertionError):
with pytest.raises(AssertionError):
theano.gpuarray.fft.curfft(inputs)
with self.assertRaises(AssertionError):
with pytest.raises(AssertionError):
theano.gpuarray.fft.cuirfft(inputs)
def test_norm(self):
......@@ -245,10 +245,13 @@ class TestFFT(unittest.TestCase):
inputs_val = np.random.random((1, N)).astype('float32')
inputs = theano.shared(inputs_val)
self.assertRaises(ValueError, theano.gpuarray.fft.curfft, inputs, norm=123)
with pytest.raises(ValueError):
theano.gpuarray.fft.curfft(inputs, norm=123)
inputs_val = np.random.random((1, N // 2 + 1, 2)).astype('float32')
inputs = theano.shared(inputs_val)
self.assertRaises(ValueError, theano.gpuarray.fft.cuirfft, inputs, norm=123)
self.assertRaises(ValueError, theano.gpuarray.fft.cuirfft, inputs, is_odd=123)
with pytest.raises(ValueError):
theano.gpuarray.fft.cuirfft(inputs, norm=123)
with pytest.raises(ValueError):
theano.gpuarray.fft.cuirfft(inputs, is_odd=123)
from __future__ import absolute_import, print_function, division
import unittest
import numpy as np
import theano
......@@ -15,7 +14,7 @@ from theano.tensor.nnet.tests.test_abstract_conv import Grouped_conv_noOptim, Te
from theano.tensor.nnet.tests.test_abstract_conv import TestAsymmetricPadding, TestCausalConv
class TestCorrMM(unittest.TestCase):
class TestCorrMM():
def run_conv_valid(self, inputs_shape, filters_shape,
border_mode='valid',
......
from __future__ import absolute_import, print_function, division
import unittest
import numpy as np
import theano
......@@ -14,7 +13,7 @@ from .config import mode_with_gpu, mode_without_gpu, ref_cast
from theano.tensor.nnet.tests.test_abstract_conv import Grouped_conv3d_noOptim
class TestCorr3dMM(unittest.TestCase):
class TestCorr3dMM():
def run_conv_valid(self, inputs_shape, filters_shape,
border_mode='valid',
......
from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from numpy.linalg.linalg import LinAlgError
import pytest
import theano
from theano import config
......@@ -23,12 +22,11 @@ from theano.tests import unittest_tools as utt
from .. import gpuarray_shared_constructor
from .config import mode_with_gpu, mode_without_gpu
from .test_basic_ops import rand
from nose.tools import assert_raises
class TestCusolver(unittest.TestCase):
class TestCusolver():
def setUp(self):
def setup_method(self):
if not cusolver_available:
self.skipTest('Optional package scikits.cuda.cusolver not available')
......@@ -109,7 +107,8 @@ class TestCusolver(unittest.TestCase):
solver = gpu_solve(A, b, 'symmetric')
fn = theano.function([A, b], [solver], mode=mode_with_gpu)
self.assertRaises(LinAlgError, fn, A_val, x_val)
with pytest.raises(LinAlgError):
fn(A_val, x_val)
def test_linalgerr_solve(self):
np.random.seed(1)
......@@ -124,7 +123,8 @@ class TestCusolver(unittest.TestCase):
solver = gpu_solve(A, b, trans='T')
fn = theano.function([A, b], [solver], mode=mode_with_gpu)
self.assertRaises(LinAlgError, fn, A_val, x_val)
with pytest.raises(LinAlgError):
fn(A_val, x_val)
def verify_solve_grad(self, m, n, A_structure, lower, rng):
# ensure diagonal elements of A relatively large to avoid numerical
......@@ -162,9 +162,9 @@ class TestCusolver(unittest.TestCase):
self.verify_solve_grad(4, 3, 'general', lower=True, rng=rng)
class TestGpuCholesky(unittest.TestCase):
class TestGpuCholesky():
def setUp(self):
def setup_method(self):
if not cusolver_available:
self.skipTest('Optional package scikits.cuda.cusolver not available')
utt.seed_rng()
......@@ -199,21 +199,24 @@ class TestGpuCholesky(unittest.TestCase):
# Invalid Cholesky input test with non-square matrix as input.
A_val = np.random.normal(size=(3, 2)).astype("float32")
fn = self.get_gpu_cholesky_func(True, False)
self.assertRaises(ValueError, fn, A_val)
with pytest.raises(ValueError):
fn(A_val)
def test_invalid_input_fail_vector(self):
# Invalid Cholesky input test with vector as input.
def invalid_input_func():
A = theano.tensor.vector("A", dtype="float32")
GpuCholesky(lower=True, inplace=False)(A)
self.assertRaises(AssertionError, invalid_input_func)
with pytest.raises(AssertionError):
invalid_input_func()
def test_invalid_input_fail_tensor3(self):
# Invalid Cholesky input test with 3D tensor as input.
def invalid_input_func():
A = theano.tensor.tensor3("A", dtype="float32")
GpuCholesky(lower=True, inplace=False)(A)
self.assertRaises(AssertionError, invalid_input_func)
with pytest.raises(AssertionError):
invalid_input_func()
@utt.assertFailure_fast
def test_diag_chol(self):
......@@ -243,7 +246,8 @@ class TestGpuCholesky(unittest.TestCase):
if not np.allclose(A_val, A_val.T):
break
fn = self.get_gpu_cholesky_func(True, False)
self.assertRaises(LinAlgError, fn, A_val)
with pytest.raises(LinAlgError):
fn(A_val)
def test_invalid_input_fail_negative_definite(self):
# Invalid Cholesky input test with negative-definite input.
......@@ -251,12 +255,13 @@ class TestGpuCholesky(unittest.TestCase):
# A = -M.dot(M) will be negative definite for all non-singular M
A_val = -M_val.dot(M_val.T)
fn = self.get_gpu_cholesky_func(True, False)
self.assertRaises(LinAlgError, fn, A_val)
with pytest.raises(LinAlgError):
fn(A_val)
class TestGpuCholesky64(unittest.TestCase):
class TestGpuCholesky64():
def setUp(self):
def setup_method(self):
if not cusolver_available:
self.skipTest('Optional package scikits.cuda.cusolver not available')
utt.seed_rng()
......@@ -291,21 +296,24 @@ class TestGpuCholesky64(unittest.TestCase):
# Invalid Cholesky input test with non-square matrix as input.
A_val = np.random.normal(size=(3, 2)).astype("float64")
fn = self.get_gpu_cholesky_func(True, False)
self.assertRaises(ValueError, fn, A_val)
with pytest.raises(ValueError):
fn(A_val)
def test_invalid_input_fail_vector(self):
# Invalid Cholesky input test with vector as input.
def invalid_input_func():
A = theano.tensor.vector("A", dtype="float64")
GpuCholesky(lower=True, inplace=False)(A)
self.assertRaises(AssertionError, invalid_input_func)
with pytest.raises(AssertionError):
invalid_input_func()
def test_invalid_input_fail_tensor3(self):
# Invalid Cholesky input test with 3D tensor as input.
def invalid_input_func():
A = theano.tensor.tensor3("A", dtype="float64")
GpuCholesky(lower=True, inplace=False)(A)
self.assertRaises(AssertionError, invalid_input_func)
with pytest.raises(AssertionError):
invalid_input_func()
@utt.assertFailure_fast
def test_diag_chol(self):
......@@ -335,7 +343,8 @@ class TestGpuCholesky64(unittest.TestCase):
if not np.allclose(A_val, A_val.T):
break
fn = self.get_gpu_cholesky_func(True, False)
self.assertRaises(LinAlgError, fn, A_val)
with pytest.raises(LinAlgError):
fn(A_val)
def test_invalid_input_fail_negative_definite(self):
# Invalid Cholesky input test with negative-definite input.
......@@ -343,12 +352,13 @@ class TestGpuCholesky64(unittest.TestCase):
# A = -M.dot(M) will be negative definite for all non-singular M
A_val = -M_val.dot(M_val.T)
fn = self.get_gpu_cholesky_func(True, False)
self.assertRaises(LinAlgError, fn, A_val)
with pytest.raises(LinAlgError):
fn(A_val)
class TestMagma(unittest.TestCase):
class TestMagma():
def setUp(self):
def setup_method(self):
if not config.magma.enabled:
self.skipTest('Magma is not enabled, skipping test')
......@@ -429,9 +439,9 @@ class TestMagma(unittest.TestCase):
self.check_svd(A, U, S, VT)
U, S, VT = self.run_gpu_svd(A, full_matrices=False)
self.assertEqual(U.shape[1], min(M, N))
assert U.shape[1], min(M, N)
self.assert_column_orthonormal(U)
self.assertEqual(VT.shape[0], min(M, N))
assert VT.shape[0], min(M, N)
self.assert_column_orthonormal(VT.T)
def test_gpu_svd_tall(self):
......@@ -444,9 +454,9 @@ class TestMagma(unittest.TestCase):
self.check_svd(A, U, S, VT)
U, S, VT = self.run_gpu_svd(A, full_matrices=False)
self.assertEqual(U.shape[1], min(M, N))
assert U.shape[1], min(M, N)
self.assert_column_orthonormal(U)
self.assertEqual(VT.shape[0], min(M, N))
assert VT.shape[0], min(M, N)
self.assert_column_orthonormal(VT.T)
def test_gpu_singular_values(self):
......@@ -623,7 +633,7 @@ def test_cholesky_grad_indef():
matrix = np.array([[1, 0.2], [0.2, -2]]).astype(config.floatX)
cholesky = GpuCholesky(lower=True)
chol_f = theano.function([x], theano.tensor.grad(cholesky(x).sum(), [x]))
with assert_raises(LinAlgError):
with pytest.raises(LinAlgError):
chol_f(matrix)
# cholesky = GpuCholesky(lower=True, on_error='nan')
# chol_f = function([x], grad(gpu_cholesky(x).sum(), [x]))
......
......@@ -3,8 +3,6 @@ from __future__ import absolute_import, print_function, division
import os
import numpy as np
import unittest
import theano
from theano import config, function, tensor
from theano.compat import PY3
......@@ -171,7 +169,7 @@ def test_gpu_opt():
f(pval, uval)
class test_OP_wor(unittest.TestCase):
class test_OP_wor():
def test_select_distinct(self):
# Tests that ChoiceFromUniform always selects distinct elements
......@@ -212,7 +210,8 @@ class test_OP_wor(unittest.TestCase):
uni = np.random.rand(n_selected).astype(config.floatX)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
self.assertRaises(ValueError, f, pvals, uni, n_selected)
with pytest.raises(ValueError):
f(pvals, uni, n_selected)
def test_select_proportional_to_weight(self):
# Tests that ChoiceFromUniform selects elements, on average,
......@@ -243,7 +242,7 @@ class test_OP_wor(unittest.TestCase):
assert avg_diff < mean_rtol, avg_diff
class test_function_wor(unittest.TestCase):
class test_function_wor():
def test_select_distinct(self):
# Tests that multinomial_wo_replacement always selects distinct elements
......@@ -284,7 +283,8 @@ class test_function_wor(unittest.TestCase):
np.random.seed(12345)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
self.assertRaises(ValueError, f, pvals, n_selected)
with pytest.raises(ValueError):
f(pvals, n_selected)
def test_select_proportional_to_weight(self):
# Tests that multinomial_wo_replacement selects elements, on average,
......
......@@ -6,7 +6,7 @@ from .config import mode_with_gpu
from ..neighbours import GpuImages2Neibs
class T_GpuImages2Neibs(test_neighbours.T_Images2Neibs):
class Test_GpuImages2Neibs(test_neighbours.T_Images2Neibs):
mode = mode_with_gpu
op = GpuImages2Neibs
dtypes = ['int64', 'float32', 'float64']
from __future__ import absolute_import, print_function, division
import numpy as np
import unittest
import theano
import theano.tensor as T
......@@ -240,7 +239,7 @@ def softmax_unittest_template(dtypeInput):
cmp(128, 64 * 1024)
class test_SoftMax(unittest.TestCase):
class test_SoftMax():
gpu_op = GpuSoftmax
mode = mode_wo_cudnn
......
from __future__ import absolute_import, print_function, division
from nose.tools import assert_raises
import numpy as np
import pytest
import theano
from theano import tensor
......@@ -22,8 +22,7 @@ from ..dnn import GpuDnnReduction
from ..subtensor import GpuSubtensor
from ..linalg import GpuCusolverSolve, cusolver_available, GpuCholesky
from .config import mode_with_gpu, mode_without_gpu, test_ctx_name, SkipTest
import unittest
from .config import mode_with_gpu, mode_without_gpu, test_ctx_name
from theano.tensor.nnet import abstract_conv
from theano.gpuarray import dnn, blas, opt
......@@ -623,8 +622,8 @@ def test_local_assert_no_cpu_op():
theano.config.assert_no_cpu_op = 'raise'
theano.config.on_opt_error = 'ignore'
assert_raises(AssertionError, theano.function,
[], out, mode=mode_local_assert)
with pytest.raises(AssertionError):
theano.function([], out, mode=mode_local_assert)
finally:
theano.config.assert_no_cpu_op = old
theano.config.on_opt_error = old2
......@@ -651,7 +650,7 @@ def test_no_complex():
@utt.assertFailure_fast
def test_local_lift_solve():
if not cusolver_available or not slinalg.imported_scipy:
raise SkipTest('No cuSolver or SciPy')
pytest.skip('No cuSolver or SciPy')
A = tensor.fmatrix()
b = tensor.fmatrix()
o = slinalg.solve(A, b)
......@@ -669,7 +668,7 @@ def test_local_lift_solve():
def test_gpu_solve_not_inplace():
if not cusolver_available or not slinalg.imported_scipy:
raise SkipTest('No cuSolver or Scipy')
pytest.skip('No cuSolver or Scipy')
A = tensor.fmatrix()
b = tensor.fmatrix()
s = slinalg.solve(A, b)
......@@ -687,7 +686,7 @@ def test_gpu_solve_not_inplace():
@utt.assertFailure_fast
def test_local_lift_cholesky():
if not cusolver_available or not slinalg.imported_scipy:
raise SkipTest('No cuSolver or Scipy')
pytest.skip('No cuSolver or Scipy')
A = tensor.fmatrix()
o = slinalg.cholesky(A)
f_cpu = theano.function([A], o, mode=mode_without_gpu)
......@@ -705,7 +704,7 @@ def test_local_lift_cholesky():
def test_gpu_cholesky_not_inplace():
if not cusolver_available or not slinalg.imported_scipy:
raise SkipTest('No cuSolver or SciPy')
pytest.skip('No cuSolver or SciPy')
A = tensor.fmatrix()
A_squared = A**2
B = slinalg.cholesky(A_squared)
......@@ -772,7 +771,7 @@ def test_crossentropycategorical1hot_lifter():
rng.randint(5, size=(13,)))
class Conv_opt_test(unittest.TestCase):
class TestConv_opt():
def optimizer_2d(self, input_shapes, direction, include_tags, exclude_tags,
op, border_mode='valid', subsample=(1, 1),
......@@ -896,7 +895,7 @@ class Conv_opt_test(unittest.TestCase):
def test_optimizers_2d(self):
if theano.config.cxx == "":
raise SkipTest("Need a c compiler.")
pytest.skip("Need a c compiler.")
imshp2d = [(2, 3, 5, 5), (2, 2, 5, 7), (2, 1, 3, 3)]
kshp2d = [(4, 3, 3, 3), (3, 2, 3, 5), (4, 1, 1, 1)]
......@@ -957,7 +956,7 @@ class Conv_opt_test(unittest.TestCase):
def test_optimizers_3d(self):
if theano.config.cxx == "":
raise SkipTest("Need a c compiler.")
pytest.skip("Need a c compiler.")
imshp3d = [(2, 3, 5, 5, 5), (2, 2, 5, 7, 5), (2, 1, 3, 3, 3)]
kshp3d = [(4, 3, 3, 3, 3), (3, 2, 3, 5, 3), (4, 1, 1, 1, 1)]
......@@ -1023,7 +1022,7 @@ class Conv_opt_test(unittest.TestCase):
def test_optimizers_non_default(self):
if theano.config.cxx == "":
raise SkipTest("Need a c compiler.")
pytest.skip("Need a c compiler.")
# conv2d forward pass with Non-default border_mode and filter_dilation
imshp2d = [(2, 3, 5, 5), (4, 2, 5, 5)]
kshp2d = [(4, 3, 3, 3), (3, 2, 3, 3)]
......@@ -1188,7 +1187,7 @@ class Conv_opt_test(unittest.TestCase):
def test_returns_none_2d(self):
if theano.config.cxx == "":
raise SkipTest("Need a c compiler.")
pytest.skip("Need a c compiler.")
# values given don't matter since it returns None
imshp = (2, 3, 5, 5)
kshp = (4, 3, 3, 3)
......@@ -1260,7 +1259,7 @@ class Conv_opt_test(unittest.TestCase):
def test_returns_none_3d(self):
if theano.config.cxx == "":
raise SkipTest("Need a c compiler.")
pytest.skip("Need a c compiler.")
imshp = (2, 3, 5, 5, 5)
kshp = (4, 3, 3, 3, 3)
tshp = (2, 4, 3, 3, 3)
......
......@@ -8,10 +8,8 @@ regular test file.
from __future__ import absolute_import, print_function, division
import os
import sys
from six import reraise
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
import pytest
import numpy as np
from theano.compat import PY3
......@@ -23,7 +21,7 @@ from ..type import ContextNotDefined
try:
from . import config as _ # noqa
have_pygpu = True
except SkipTest:
except:
have_pygpu = False
......@@ -32,7 +30,7 @@ def test_unpickle_gpuarray_as_numpy_ndarray_flag1():
# available. test_unpickle_gpuarray_as_numpy_ndarray_flag0 in
# test_type.py test it when pygpu is there.
if have_pygpu:
raise SkipTest("pygpu active")
pytest.skip("pygpu active")
oldflag = config.experimental.unpickle_gpu_on_cpu
config.experimental.unpickle_gpu_on_cpu = False
......@@ -45,7 +43,8 @@ def test_unpickle_gpuarray_as_numpy_ndarray_flag1():
u = CompatUnpickler(fp, encoding="latin1")
else:
u = CompatUnpickler(fp)
assert_raises((ImportError, ContextNotDefined), u.load)
with pytest.raises((ImportError, ContextNotDefined)):
u.load()
finally:
config.experimental.unpickle_gpu_on_cpu = oldflag
......@@ -72,7 +71,7 @@ def test_unpickle_gpuarray_as_numpy_ndarray_flag2():
# when "type" and "copy_reg" are builtin modules.
if sys.platform == 'win32':
exc_type, exc_value, exc_trace = sys.exc_info()
reraise(SkipTest, exc_value, exc_trace)
raise
raise
assert isinstance(mat, np.ndarray)
......
from __future__ import absolute_import, print_function, division
import unittest
import copy
import itertools
import numpy as np
import pytest
import theano
from theano import gradient
from theano import tensor
......@@ -18,17 +18,17 @@ from ..pool import (GpuPool, GpuMaxPoolGrad, GpuAveragePoolGrad,
GpuDownsampleFactorMaxGradGrad)
class TestPool(unittest.TestCase):
class TestPool():
def test_pool_py_interface(self):
shp = (2, 2, 2, 2)
inp = theano.shared(rand(*shp), 'a')
inp = tensor.as_tensor_variable(inp)
with self.assertRaises(ValueError):
with pytest.raises(ValueError):
# test when pad >= ws
ds_op = GpuPool(ignore_border=True, ndim=2)
ds_op(inp, [2, 2], pad=[3, 3])
with self.assertRaises(ValueError):
with pytest.raises(ValueError):
# test when ignore_border and pad >= 0
ds_op = GpuPool(ignore_border=False, ndim=2)
ds_op(inp, [2, 2], pad=[1, 1])
......@@ -40,7 +40,7 @@ class TestPool(unittest.TestCase):
shp = (2, 2, 2, 2)
inp = theano.shared(rand(*shp), 'a')
inp = tensor.as_tensor_variable(inp)
with self.assertRaises(ValueError):
with pytest.raises(ValueError):
# test when ignore_border and pad >= 0
ds_op = GpuPool(ignore_border=False, ndim=2)
pad = tensor.as_tensor_variable([1, 1])
......
from __future__ import print_function, absolute_import, division
from unittest import TestCase
import numpy as np
import pytest
import theano
import theano.tensor as T
from theano.tests import unittest_tools as utt
from theano.tests.unittest_tools import SkipTest
from .config import mode_with_gpu, mode_without_gpu
from .test_basic_ops import rand_gpuarray
......@@ -77,11 +76,11 @@ class BaseTest:
return []
return [int(math.ceil(math.pow(test_size, 1 / self.tensor_size)))] * self.tensor_size
def setUp(self):
def setup_method(self):
if not isinstance(self.tensor_size, int):
raise SkipTest("No tensor ndim defined.")
pytest.skip("No tensor ndim defined.")
if self.tensor_size < 0 or self.tensor_size > 5:
raise SkipTest("We allow from 0 (included) to 5 (inclued) dimensons for these tests.")
pytest.skip("We allow from 0 (included) to 5 (inclued) dimensons for these tests.")
if self.shape is None:
self.shape = self.get_shape()
......@@ -171,29 +170,29 @@ class BaseTest:
self.compute_some_axes(4)
class TestScalar(BaseTest, TestCase):
class TestScalar(BaseTest):
tensor_size = 0
class TestVector(BaseTest, TestCase):
class TestVector(BaseTest):
tensor_size = 1
# Special case
class TestRow(BaseTest, TestCase):
class TestRow(BaseTest):
tensor_size = 2
shape = [1, test_size]
# Special case
class TestColumn(BaseTest, TestCase):
class TestColumn(BaseTest):
tensor_size = 2
shape = [test_size, 1]
class TestMatrix(BaseTest, TestCase):
class TestMatrix(BaseTest):
tensor_size = 2
class TestTensor5(BaseTest, TestCase):
class TestTensor5(BaseTest):
tensor_size = 5
from __future__ import absolute_import, print_function, division
from unittest import TestCase
import numpy as np
from six.moves import xrange
......@@ -13,8 +12,8 @@ from ..elemwise import GpuElemwise
from .config import mode_with_gpu, test_ctx_name
class T_Scan(TestCase):
def setUp(self):
class Test_Scan():
def setup_method(self):
utt.seed_rng()
def test_one_sequence_one_output_weights_gpu1(self):
......
from __future__ import absolute_import, print_function, division
import numpy as np
import unittest
import theano
from theano import tensor
......@@ -279,7 +278,7 @@ def test_adv_subtensor():
assert np.allclose(rval, rep)
class test_gpuextractdiag(unittest.TestCase):
class test_gpuextractdiag():
def test_extractdiag_opt(self):
x = tensor.matrix()
fn = theano.function([x], tensor.ExtractDiag()(x), mode=mode_with_gpu)
......@@ -328,7 +327,7 @@ class TestGpuAllocDiag(test_basic.TestAllocDiag):
)
class test_gpuallocdiag(unittest.TestCase):
class test_gpuallocdiag():
def test_allocdiag_opt(self):
x = tensor.vector()
fn = theano.function([x], tensor.AllocDiag()(x), mode=mode_with_gpu)
......
from __future__ import absolute_import, print_function, division
import os
import nose
import numpy as np
import theano
......@@ -110,8 +109,8 @@ def test_filter_variable():
def test_gpuarray_shared_scalar():
# By default, we don't put scalar as shared variable on the GPU
nose.tools.assert_raises(
TypeError, gpuarray_shared_constructor, np.asarray(1, dtype='float32'))
with pytest.raises(TypeError):
gpuarray_shared_constructor(np.asarray(1, dtype='float32'))
# But we can force that
gpuarray_shared_constructor(np.asarray(1, dtype='float32'),
......
from __future__ import absolute_import, print_function, division
import os
import shutil
import unittest
from tempfile import mkdtemp
import numpy as np
......@@ -12,14 +11,14 @@ from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.misc.pkl_utils import dump, load, StripPickler
class T_dump_load(unittest.TestCase):
def setUp(self):
class Test_dump_load():
def setup_method(self):
# Work in a temporary directory to avoid cluttering the repository
self.origdir = os.getcwd()
self.tmpdir = mkdtemp()
os.chdir(self.tmpdir)
def tearDown(self):
def teardown_method(self):
# Get back to the original dir, and delete the temporary one
os.chdir(self.origdir)
if self.tmpdir is not None:
......@@ -51,14 +50,14 @@ class T_dump_load(unittest.TestCase):
assert array == np.array(3)
class TestStripPickler(unittest.TestCase):
def setUp(self):
class TestStripPickler():
def setup_method(self):
# Work in a temporary directory to avoid cluttering the repository
self.origdir = os.getcwd()
self.tmpdir = mkdtemp()
os.chdir(self.tmpdir)
def tearDown(self):
def teardown_method(self):
# Get back to the original dir, and delete the temporary one
os.chdir(self.origdir)
if self.tmpdir is not None:
......
from nose.plugins.skip import SkipTest
# NB: We raise a SkipTest (instead of another type of exception) because we're in a folder,
# thus nosetests will look for test files into this folder. With a SkipTest raised,
# the folder will be skipped by nosetests without failing.
raise SkipTest(
import pytest
pytest.skip(
"You are importing theano.sandbox.cuda. This is the old GPU back-end and "
"is removed from Theano. Use Theano 0.9 to use it. Even better, "
"transition to the new GPU back-end! See "
"https://github.com/Theano/Theano/wiki/Converting-to-the-new-gpu-back-end%28gpuarray%29")
"https://github.com/Theano/Theano/wiki/Converting-to-the-new-gpu-back-end%28gpuarray%29",
allow_module_level=True)
from __future__ import absolute_import, print_function, division
import numpy as np
import numpy.linalg
import pytest
import theano
from theano import tensor, function
......@@ -22,7 +23,6 @@ from theano.sandbox.linalg.ops import (Cholesky, # op class
inv_as_solve,
)
from nose.plugins.skip import SkipTest
def test_rop_lop():
......@@ -135,7 +135,7 @@ def test_transinv_to_invtrans():
def test_tag_solve_triangular():
if not imported_scipy:
raise SkipTest("Scipy needed for the Cholesky op.")
pytest.skip("Scipy needed for the Cholesky op.")
cholesky_lower = Cholesky(lower=True)
cholesky_upper = Cholesky(lower=False)
A = tensor.matrix('A')
......@@ -158,7 +158,7 @@ def test_tag_solve_triangular():
def test_matrix_inverse_solve():
if not imported_scipy:
raise SkipTest("Scipy needed for the Solve op.")
pytest.skip("Scipy needed for the Solve op.")
A = theano.tensor.dmatrix('A')
b = theano.tensor.dmatrix('b')
node = matrix_inverse(A).dot(b).owner
......
from __future__ import absolute_import, print_function, division
import unittest
import numpy as np
from theano import gof, tensor, function
......@@ -53,8 +51,8 @@ minimal = Minimal()
# TODO: test that each valid type for A and b works correctly
class T_minimal(unittest.TestCase):
def setUp(self):
class Test_minimal():
def setup_method(self):
self.rng = np.random.RandomState(utt.fetch_seed(666))
def test0(self):
......
......@@ -2,8 +2,8 @@ from __future__ import absolute_import, print_function, division
import os
import sys
from six import reraise
import pytest
from nose.plugins.skip import SkipTest
import numpy as np
import theano
......@@ -81,7 +81,7 @@ def test_n_samples_compatibility():
# when "type" and "copy_reg" are builtin modules.
if sys.platform == 'win32':
exc_type, exc_value, exc_trace = sys.exc_info()
reraise(SkipTest, exc_value, exc_trace)
raise
raise
f = theano.function([X], samples)
......
from __future__ import absolute_import, print_function, division
import numpy as np
import pytest
import os
from theano import config, function, tensor
from theano.compat import PY3
from theano.misc.pkl_utils import CompatUnpickler
from theano.sandbox import multinomial
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import unittest
class test_OP(unittest.TestCase):
class test_OP():
def test_select_distinct(self):
# Tests that ChoiceFromUniform always selects distinct elements
......@@ -60,7 +60,8 @@ class test_OP(unittest.TestCase):
uni = np.random.rand(n_selected).astype(config.floatX)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
self.assertRaises(ValueError, f, pvals, uni, n_selected)
with pytest.raises(ValueError):
f(pvals, uni, n_selected)
def test_select_proportional_to_weight(self):
# Tests that ChoiceFromUniform selects elements, on average,
......@@ -91,7 +92,7 @@ class test_OP(unittest.TestCase):
assert avg_diff < mean_rtol, avg_diff
class test_function(unittest.TestCase):
class test_function():
def test_select_distinct(self):
# Tests that multinomial_wo_replacement always selects distinct elements
......@@ -132,7 +133,8 @@ class test_function(unittest.TestCase):
np.random.seed(12345)
pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
self.assertRaises(ValueError, f, pvals, n_selected)
with pytest.raises(ValueError):
f(pvals, n_selected)
def test_select_proportional_to_weight(self):
# Tests that multinomial_wo_replacement selects elements, on average,
......
......@@ -2,10 +2,9 @@ from __future__ import absolute_import, print_function, division
import os
import sys
import time
import unittest
from nose.tools import assert_raises
import numpy as np
import pytest
from six.moves import xrange
import theano
......@@ -13,7 +12,6 @@ from theano import change_flags, config, tensor
from theano.sandbox import rng_mrg
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tests import unittest_tools as utt
from theano.tests.unittest_tools import attr
# TODO: test MRG_RandomStreams
# Partly done in test_consistency_randomstreams
......@@ -320,7 +318,7 @@ def test_broadcastable():
assert uu.broadcastable == (False, True)
@attr('slow')
@pytest.mark.slow
def test_binomial():
# TODO: test size=None, ndim=X
# TODO: test size=X, ndim!=X.ndim
......@@ -377,7 +375,7 @@ def t_binomial(mean, size, const_size, var_input, input, steps, rtol):
inputs=input, target_avg=mean, mean_rtol=rtol)
@attr('slow')
@pytest.mark.slow
def test_normal0():
steps = 50
std = 2.
......@@ -439,7 +437,7 @@ def test_normal0():
prefix='numpy ', allow_01=True, inputs=input, mean_rtol=rtol)
@attr('slow')
@pytest.mark.slow
def test_normal_truncation():
# just a copy of test_normal0 with extra bound check
steps = 50
......@@ -504,7 +502,7 @@ def test_normal_truncation():
sys.stdout.flush()
@attr('slow')
@pytest.mark.slow
def test_truncated_normal():
# just a copy of test_normal0 for truncated normal
steps = 50
......@@ -625,7 +623,7 @@ def test_multinomial_n_samples():
sys.stdout.flush()
class T_MRG(unittest.TestCase):
class Test_MRG():
def test_bad_size(self):
R = MRG_RandomStreams(234)
......@@ -636,11 +634,16 @@ class T_MRG(unittest.TestCase):
(1, 0),
]:
self.assertRaises(ValueError, R.uniform, size)
self.assertRaises(ValueError, R.binomial, size)
self.assertRaises(ValueError, R.multinomial, size, 1, [])
self.assertRaises(ValueError, R.normal, size)
self.assertRaises(ValueError, R.truncated_normal, size)
with pytest.raises(ValueError):
R.uniform(size)
with pytest.raises(ValueError):
R.binomial(size)
with pytest.raises(ValueError):
R.multinomial(size, 1, [])
with pytest.raises(ValueError):
R.normal(size)
with pytest.raises(ValueError):
R.truncated_normal(size)
def test_multiple_rng_aliasing():
......@@ -771,7 +774,8 @@ def rng_mrg_overflow(sizes, fct, mode, should_raise_error):
y = fct(size=size)
f = theano.function([], y, mode=mode)
if should_raise_error:
assert_raises(ValueError, f)
with pytest.raises(ValueError):
f()
else:
f()
......@@ -799,75 +803,82 @@ def test_undefined_grad():
# checking uniform distribution
low = tensor.scalar()
out = srng.uniform((), low=low)
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, low)
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out, low)
high = tensor.scalar()
out = srng.uniform((), low=0, high=high)
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, high)
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out, high)
out = srng.uniform((), low=low, high=high)
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out,
(low, high))
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out, (low, high))
# checking binomial distribution
prob = tensor.scalar()
out = srng.binomial((), p=prob)
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, prob)
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out, prob)
# checking multinomial distribution
prob1 = tensor.scalar()
prob2 = tensor.scalar()
p = [theano.tensor.as_tensor_variable([prob1, 0.5, 0.25])]
out = srng.multinomial(size=None, pvals=p, n=4)[0]
assert_raises(theano.gradient.NullTypeGradError, theano.grad,
theano.tensor.sum(out), prob1)
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(theano.tensor.sum(out), prob1)
p = [theano.tensor.as_tensor_variable([prob1, prob2])]
out = srng.multinomial(size=None, pvals=p, n=4)[0]
assert_raises(theano.gradient.NullTypeGradError, theano.grad,
theano.tensor.sum(out), (prob1, prob2))
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(theano.tensor.sum(out), (prob1, prob2))
# checking choice
p = [theano.tensor.as_tensor_variable([prob1, prob2, 0.1, 0.2])]
out = srng.choice(a=None, size=1, p=p, replace=False)[0]
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out[0],
(prob1, prob2))
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out[0], (prob1, prob2))
p = [theano.tensor.as_tensor_variable([prob1, prob2])]
out = srng.choice(a=None, size=1, p=p, replace=False)[0]
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out[0],
(prob1, prob2))
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out[0], (prob1, prob2))
p = [theano.tensor.as_tensor_variable([prob1, 0.2, 0.3])]
out = srng.choice(a=None, size=1, p=p, replace=False)[0]
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out[0],
prob1)
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out[0], prob1)
# checking normal distribution
avg = tensor.scalar()
out = srng.normal((), avg=avg)
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, avg)
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out, avg)
std = tensor.scalar()
out = srng.normal((), avg=0, std=std)
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, std)
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out, std)
out = srng.normal((), avg=avg, std=std)
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out,
(avg, std))
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out, (avg, std))
# checking truncated normal distribution
avg = tensor.scalar()
out = srng.truncated_normal((), avg=avg)
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, avg)
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out, avg)
std = tensor.scalar()
out = srng.truncated_normal((), avg=0, std=std)
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out, std)
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out, std)
out = srng.truncated_normal((), avg=avg, std=std)
assert_raises(theano.gradient.NullTypeGradError, theano.grad, out,
(avg, std))
with pytest.raises(theano.gradient.NullTypeGradError):
theano.grad(out, (avg, std))
def test_f16_nonzero(mode=None, op_to_check=rng_mrg.mrg_uniform):
......
......@@ -10,7 +10,6 @@ If you do want to rewrite these tests, bear in mind:
"""
from __future__ import absolute_import, print_function, division
import unittest
import numpy as np
import theano
......@@ -34,7 +33,7 @@ def inputs():
return floats('xyz')
class test_ScalarOps(unittest.TestCase):
class test_ScalarOps():
def test_straightforward(self):
x, y, z = inputs()
......@@ -61,7 +60,7 @@ class test_ScalarOps(unittest.TestCase):
(1, 2), (-1, 2), (1, -2), (-1, -2),
(5, 3), (-5, 3), (5, -3), (-5, -3)
):
self.assertTrue(fn(a, b) == a % b, (a,))
assert fn(a, b) == a % b, (a,)
def has_f16(comp):
......@@ -70,7 +69,7 @@ def has_f16(comp):
return False
class test_composite(unittest.TestCase):
class test_composite():
def test_composite_clone_float32(self):
w = int8()
x = float16()
......@@ -184,84 +183,79 @@ class test_composite(unittest.TestCase):
sop.make_node(si0 * si3, si1, si2)
class test_logical():
    # Element-wise comparison and bitwise-logical scalar Ops, each compiled
    # through a gof.DualLinker-built function and checked against the
    # corresponding Python operator on small concrete inputs.
    # NOTE(review): reconstructed post-merge (pytest) version — the diff view
    # carried both the old self.assertTrue lines and their plain-assert
    # replacements; only the pytest-style asserts are kept.

    def test_gt(self):
        x, y, z = inputs()
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x > y])).make_function()
        for a, b in ((3., 9), (3, 0.9), (3, 3)):
            assert fn(a, b) == (a > b)

    def test_lt(self):
        x, y, z = inputs()
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x < y])).make_function()
        for a, b in ((3., 9), (3, 0.9), (3, 3)):
            assert fn(a, b) == (a < b)

    def test_le(self):
        x, y, z = inputs()
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x <= y])).make_function()
        for a, b in ((3., 9), (3, 0.9), (3, 3)):
            assert fn(a, b) == (a <= b)

    def test_ge(self):
        x, y, z = inputs()
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x >= y])).make_function()
        for a, b in ((3., 9), (3, 0.9), (3, 3)):
            assert fn(a, b) == (a >= b)

    def test_eq(self):
        x, y, z = inputs()
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [eq(x, y)])).make_function()
        for a, b in ((3., 9), (3, 0.9), (3, 3)):
            assert fn(a, b) == (a == b)

    def test_neq(self):
        x, y, z = inputs()
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [neq(x, y)])).make_function()
        for a, b in ((3., 9), (3, 0.9), (3, 3)):
            assert fn(a, b) == (a != b)

    def test_or(self):
        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x | y])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            assert fn(a, b) == (a | b), (a, b)

    def test_xor(self):
        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x ^ y])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            assert fn(a, b) == (a ^ b), (a, b)

    def test_and(self):
        # Exercise both the named Op form (and_) and the operator form (&).
        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [and_(x, y)])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            assert fn(a, b) == (a & b), (a, b)

        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [x & y])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            assert fn(a, b) == (a & b), (a, b)

    def test_not(self):
        # Exercise both invert(x) and the ~ operator; the second input y is
        # part of the graph inputs but does not affect the result.
        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [invert(x)])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            assert fn(a, b) == ~a, (a,)

        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [~x])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            assert fn(a, b) == ~a, (a,)
# This class does not inherit from unittest.TestCase, because it would
# interfere with the "yield" mechanism that automatically generates test, see
# http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
# Therefore, it needs to be named "test_..." or "Test_...", so nose can pick
# it up by name, otherwise the tests would not be executed.
class test_upgrade_to_float(object):
class test_upgrade_to_float():
# Test for Ops whose output has to be floating point, even when all
# inputs are ints.
# In particular, when the inputs are int8, the output should be
......@@ -380,7 +374,7 @@ class test_upgrade_to_float(object):
yield test
class test_complex_mod(unittest.TestCase):
class test_complex_mod():
# Make sure % fails on complex numbers.
def test_fail(self):
......@@ -393,7 +387,7 @@ class test_complex_mod(unittest.TestCase):
pass
class test_div(unittest.TestCase):
class test_div():
def test_0(self):
a = int8()
b = int32()
......@@ -493,6 +487,3 @@ def test_constant():
assert c.name is None
assert c.dtype == 'float32'
if __name__ == '__main__':
unittest.main()
from __future__ import absolute_import, print_function, division

import pytest

import theano
from theano.scalar.basic_sympy import SymPyCCode
from theano.scalar.basic import floats

# sympy is an optional dependency: skip this whole test module when it is
# missing, via pytest's module-level skip (replaces the old nose idiom of
# raising nose.plugins.skip.SkipTest at import time, which pytest would
# report as a collection error rather than a skip).
try:
    import sympy

    xs = sympy.Symbol('x')
    ys = sympy.Symbol('y')
except ImportError:
    pytest.skip('optional package sympy disabled', allow_module_level=True)

xt, yt = floats('xy')
def test_SymPyCCode():
if not theano.config.cxx:
raise SkipTest("Need cxx for this test")
pytest.skip("Need cxx for this test")
op = SymPyCCode([xs, ys], xs + ys)
e = op(xt, yt)
......
......@@ -2,10 +2,9 @@ from __future__ import absolute_import, print_function, division
import theano
import theano.tensor as T
import unittest
class test_FutureDiv(unittest.TestCase):
class test_FutureDiv():
def test_divide_floats(self):
a = T.dscalar('a')
......
差异被折叠。
差异被折叠。
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论