提交 ad8a1755 authored 作者: lamblin's avatar lamblin

Merge pull request #1258 from nouiz/warn_infer_shape_test

Warn infer shape test
......@@ -228,13 +228,23 @@ output.
``self._compile_and_check`` compiles a Theano function. It takes as
parameters the lists of input and output Theano variables, as would be
provided to ``theano.function``, and a list of real values to pass to the
compiled function (do not use symmetric shapes, e.g. (3, 3),
as they can easily hide errors). It also takes the op class as a parameter
compiled function. It also takes the op class as a parameter
in order to verify that no instance of it appears in the shape-optimized graph.
If there is an error, the function raises an exception. If you want to
see it fail, you can implement an incorrect ``infer_shape``.
When testing with input values whose shapes take the same value
over different dimensions (for instance, a square matrix, or a tensor3
with shape (n, n, n), or (m, n, m)), it is not possible to detect whether
the output shape was computed correctly, or if some shapes with the
same value have been mixed up. For instance, if the infer_shape uses
the width of a matrix instead of its height, then testing with only
square matrices will not detect the problem. This is why the
``self._compile_and_check`` method prints a warning in such a case. If
your op works only in such a case, you can disable the warning with the
warn=False parameter.
.. code-block:: python
from theano.tests import unittest_tools as utt
......
......@@ -463,7 +463,8 @@ class test_Solve(utt.InferShapeTester):
dtype=config.floatX),
numpy.asarray(rng.rand(5, 1),
dtype=config.floatX)],
self.op_class)
self.op_class,
warn=False)
rng = numpy.random.RandomState(utt.fetch_seed())
A = theano.tensor.matrix()
b = theano.tensor.vector()
......@@ -474,7 +475,8 @@ class test_Solve(utt.InferShapeTester):
dtype=config.floatX),
numpy.asarray(rng.rand(5),
dtype=config.floatX)],
self.op_class)
self.op_class,
warn=False)
class test_Eig(utt.InferShapeTester):
......@@ -497,10 +499,10 @@ class test_Eig(utt.InferShapeTester):
self.op(A), # theano.function outputs
# S must be square
[S],
self.op_class)
self.op_class,
warn=False)
def test_eval(self):
    # Smoke-test Eig on the trivial 1x1 matrix [[1]]: eigenvalues are
    # [1.0] and eigenvectors are [[1.0]].
    A = theano.tensor.matrix(dtype=self.dtype)
    # assertEqual, not the deprecated assertEquals alias; the previously
    # imported `math` module was unused and has been removed.
    self.assertEqual([e.eval({A: [[1]]}) for e in self.op(A)],
                     [[1.0], [[1.0]]])
......
......@@ -3434,7 +3434,7 @@ class T_Scan(unittest.TestCase):
else:
d = 0.1
out = theano.clone(y, replace={x:x + d})
theano.printing.debugprint(out)
#theano.printing.debugprint(out)
return theano.function([], out)()
x = theano.shared(numpy.asarray(0., dtype=theano.config.floatX))
......@@ -3503,7 +3503,7 @@ def test_speed():
s_i = theano.shared(numpy.array(1))
s_rinc = tensor.inc_subtensor(shared_r[s_i], shared_r[s_i - 1],
tolerate_inplace_aliasing=True)
theano.printing.debugprint(s_rinc)
#theano.printing.debugprint(s_rinc)
f = theano.function([],
[],
updates=OrderedDict([
......@@ -3518,7 +3518,7 @@ def test_speed():
f() # 999 to update the profiling timers
t3 = time.time()
print 'theano (updates, cvm)', t3 - t2
print shared_r.get_value()
#print shared_r.get_value()
def test_speed_rnn():
......
......@@ -1615,7 +1615,8 @@ class DiagTester(utt.InferShapeTester):
self._compile_and_check(variable,
[self.op(*variable)],
data,
self.op_class)
self.op_class,
warn=False)
def test_grad(self):
for format in sparse.sparse_formats:
......@@ -2591,9 +2592,14 @@ class StructuredAddSVTester(unittest.TestCase):
class SamplingDotTester(utt.InferShapeTester):
x = [tensor.matrix() for t in range(2)]
x.append(sparse.csr_matrix())
a = [numpy.array(numpy.random.random_integers(maximum, size=(3, 3)) - 1,
#unsquare shape
a = [numpy.array(numpy.random.random_integers(5, size=(4, 3)) - 1,
dtype=theano.config.floatX),
numpy.array(numpy.random.random_integers(5, size=(5, 3)) - 1,
dtype=theano.config.floatX),
numpy.array(numpy.random.random_integers(2, size=(4, 5)) - 1,
dtype=theano.config.floatX)
for maximum in [5, 5, 2]]
]
a[2] = sp.csr_matrix(a[2])
def setUp(self):
......
......@@ -135,7 +135,8 @@ class MultinomialTester(utt.InferShapeTester):
self._compile_and_check([self.p],
[multinomial(5, self.p)],
[self._p],
self.op_class)
self.op_class,
warn=False)
if __name__ == '__main__':
......
import sys
import time
import unittest
import numpy
import theano
import theano.tensor as T
from theano import function, Mode
from theano.tests import unittest_tools as utt
from theano.tensor.nnet import conv
......@@ -424,82 +421,62 @@ class TestConv2D(utt.InferShapeTester):
adtens = T.dtensor4()
bdtens = T.dtensor4()
aivec_val = [2, 2, 3, 3]
bivec_val = [2, 2, 2, 2]
aivec_val = [4, 5, 6, 3]
bivec_val = [7, 5, 3, 2]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='valid')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [2, 2, 3, 3]
bivec_val = [2, 2, 2, 2]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='full')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 8, 8]
bivec_val = [4, 2, 5, 5]
aivec_val = [6, 2, 8, 3]
bivec_val = [4, 2, 5, 3]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='valid')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 8, 8]
bivec_val = [4, 2, 5, 5]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='full')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 7, 5]
bivec_val = [5, 2, 3, 2]
aivec_val = [3, 6, 7, 5]
bivec_val = [5, 6, 3, 2]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='valid')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 7, 5]
bivec_val = [5, 2, 3, 2]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='full')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 7, 5]
bivec_val = [5, 2, 2, 3]
aivec_val = [3, 6, 7, 5]
bivec_val = [5, 6, 2, 3]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='valid')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 7, 5]
bivec_val = [5, 2, 2, 3]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='full')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 3, 3]
bivec_val = [4, 2, 3, 3]
aivec_val = [5, 2, 4, 3]
bivec_val = [6, 2, 4, 3]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='valid')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 3, 3]
bivec_val = [4, 2, 3, 3]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='full')], [adtens_val, bdtens_val], conv.ConvOp)
......
import unittest
import sys
import time
import numpy
import theano.tensor as tensor
from theano.tests import unittest_tools as utt
from theano.tensor.signal.downsample import (DownsampleFactorMax, max_pool_2d,
DownsampleFactorMaxGrad)
from theano import function, Mode
from theano import function
class TestDownsampleFactorMax(utt.InferShapeTester):
......@@ -182,9 +180,8 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
[DownsampleFactorMaxGrad(maxpoolshp,
ignore_border=ignore_border)(image, maxout, gz)],
[image_val, maxout_val, gz_val],
DownsampleFactorMaxGrad)
DownsampleFactorMaxGrad,
warn=False)
if __name__ == '__main__':
......
......@@ -6743,7 +6743,7 @@ class TestInferShape(utt.InferShapeTester):
# Join
cdmat = dmatrix()
admat_val = rand(1, 3)
bdmat_val = rand(3, 3)
bdmat_val = rand(2, 3)
cdmat_val = rand(4, 3)
aiscal_val = 0
self._compile_and_check([aiscal, admat, bdmat, cdmat],
......@@ -6821,7 +6821,8 @@ class TestInferShape(utt.InferShapeTester):
adtens4_val = rand(2, 1, 3, 1)
self._compile_and_check([adtens4],
[Rebroadcast(*adict)(adtens4)],
[adtens4_val], Rebroadcast)
[adtens4_val], Rebroadcast,
warn=False)
adtens4_bro = TensorType('float64', (True, True, True, False))()
bdict = [(0, True), (1, False), (2, False), (3, False)]
......@@ -6948,8 +6949,8 @@ class TestInferShape(utt.InferShapeTester):
adtens4_val = rand(3, 4, 2, 5)
self._compile_and_check([adtens4, bdtens4],
[inc_subtensor(adtens4[::, 2:4, ::, ::], bdtens4)],
[adtens4_val, [[[[1, 2, 3, 4, 5]]]]], IncSubtensor)
[adtens4_val, [[[[1, 2, 3, 4, 5]]]]], IncSubtensor,
warn=False)
self._compile_and_check([adtens4, bdmat],
[inc_subtensor(adtens4[2, 2:4, 1, ::], bdmat)],
[adtens4_val, [[1, 2, 3, 4, 5]]], IncSubtensor)
......@@ -6964,7 +6965,8 @@ class TestInferShape(utt.InferShapeTester):
self._compile_and_check([adtens4, bdtens4],
[set_subtensor(adtens4[::, 2:4, ::, ::], bdtens4)],
[adtens4_val, [[[[1, 2, 3, 4, 5]]]]], IncSubtensor)
[adtens4_val, [[[[1, 2, 3, 4, 5]]]]], IncSubtensor,
warn=False)
self._compile_and_check([adtens4, bdmat],
[set_subtensor(adtens4[2, 2:4, 1, ::], bdmat)],
......@@ -7005,7 +7007,8 @@ class TestInferShape(utt.InferShapeTester):
self._compile_and_check([adtens4, bdtens4],
[set_subtensor(adtens4[aivec_val], bdtens4)],
[adtens4_val, [[[[1, 2, 3, 4, 5]]]]],
AdvancedIncSubtensor1)
AdvancedIncSubtensor1,
warn=False)
aivec_val = [1, 3, 2]
self._compile_and_check([adtens4, advec],
......@@ -7042,7 +7045,8 @@ class TestInferShape(utt.InferShapeTester):
[inc_subtensor(adtens4[aivec_val], bdtens4)],
[adtens4_val, [[[[1, 2, 3, 4, 5]]],
[[[6, 7, 8, 9, 10]]]]],
AdvancedIncSubtensor1)
AdvancedIncSubtensor1,
warn=False)
aivec_val = [1, 2, 1]
self._compile_and_check([adtens4, advec],
......
import cPickle
from copy import copy
from itertools import imap
import time
import unittest
import numpy
from nose.plugins.skip import SkipTest
from numpy.testing import dec
import theano
from theano.gof.python25 import all, any
from theano.gof import Variable, Op
from theano import gof, scalar, config
from theano import tensor
......@@ -92,8 +89,9 @@ class test_DimShuffle(unittest_tools.InferShapeTester):
adtens = TensorType('float64', ib)('x')
adtens_val = numpy.ones(xsh)
self._compile_and_check([adtens],
[DimShuffle(ib, shuffle)(adtens)],
[adtens_val], DimShuffle)
[DimShuffle(ib, shuffle)(adtens)],
[adtens_val], DimShuffle,
warn=False)
def test_too_big_rank(self):
x = tensor.dscalar()
......@@ -469,7 +467,6 @@ class test_Prod(unittest.TestCase):
# (and special cases: 1 zero in the row, more than 1 zero in the row)
x_val = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype='float32')
x = theano.tensor.dmatrix()
# now with verify_grad
unittest_tools.verify_grad(Prod(axis=1), [x_val], mode=self.mode)
......@@ -674,8 +671,8 @@ class T_sum_dtype(unittest.TestCase):
if "complex" in input_dtype:
continue
# Check that we can take the gradient
grad_var = tensor.grad(sum_var.sum(), x,
disconnected_inputs='ignore')
tensor.grad(sum_var.sum(), x,
disconnected_inputs='ignore')
idx += 1
def test_sum_custom_acc_dtype(self):
......@@ -709,8 +706,8 @@ class T_sum_dtype(unittest.TestCase):
if "complex" in input_dtype:
continue
# Check that we can take the gradient
grad_var = tensor.grad(sum_var.sum(), x,
disconnected_inputs='ignore')
tensor.grad(sum_var.sum(), x,
disconnected_inputs='ignore')
else:
self.assertRaises(TypeError,
x.sum, acc_dtype=acc_dtype, axis=axis)
......@@ -768,8 +765,8 @@ class T_mean_dtype(unittest.TestCase):
if "complex" in mean_var.dtype:
continue
try:
grad_var = tensor.grad(mean_var.sum(), x,
disconnected_inputs='ignore')
tensor.grad(mean_var.sum(), x,
disconnected_inputs='ignore')
except NotImplementedError:
# TrueDiv does not seem to have a gradient when
# the numerator is complex.
......@@ -845,8 +842,8 @@ class T_prod_dtype(unittest.TestCase):
if "complex" in output_dtype or "complex" in input_dtype:
continue
# Check that we can take the gradient
grad_var = tensor.grad(prod_var.sum(), x,
disconnected_inputs='ignore')
tensor.grad(prod_var.sum(), x,
disconnected_inputs='ignore')
idx += 1
def test_prod_custom_acc_dtype(self):
......@@ -873,8 +870,8 @@ class T_prod_dtype(unittest.TestCase):
if "complex" in acc_dtype:
continue
# Check that we can take the gradient
grad_var = tensor.grad(prod_var.sum(), x,
disconnected_inputs='ignore')
tensor.grad(prod_var.sum(), x,
disconnected_inputs='ignore')
else:
self.assertRaises(TypeError,
x.prod, acc_dtype=acc_dtype, axis=axis)
......
......@@ -182,7 +182,8 @@ class SqueezeTester(utt.InferShapeTester):
self._compile_and_check([variable],
[self.op(variable)],
[data],
tensor.DimShuffle)
tensor.DimShuffle,
warn=False)
def test_grad(self):
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
......@@ -375,9 +376,11 @@ class TestFillDiagonal(utt.InferShapeTester):
numpy.random.rand()],
self.op_class)
self._compile_and_check([z, y], [self.op(z, y)],
#must be square when nd>2
[numpy.random.rand(8, 8, 8),
numpy.random.rand()],
self.op_class)
self.op_class,
warn=False)
if __name__ == "__main__":
utt.unittest.main()
......
from copy import copy, deepcopy
import logging
import sys
import unittest
......@@ -15,6 +16,7 @@ except ImportError:
"""
Skip this test
"""
_logger = logging.getLogger("theano.tests.unittest_tools")
AddConfigVar('unittests.rseed',
......@@ -173,11 +175,45 @@ class InferShapeTester(unittest.TestCase):
self.mode = mode.including("canonicalize")
def _compile_and_check(self, inputs, outputs, numeric_inputs, cls,
excluding=None):
"""This tests the infer_shape method only"""
excluding=None, warn=True):
"""This tests the infer_shape method only
When testing with input values with shapes that take the same
value over different dimensions (for instance, a square
matrix, or a tensor3 with shape (n, n, n), or (m, n, m)), it
is not possible to detect if the output shape was computed
correctly, or if some shapes with the same value have been
mixed up. For instance, if the infer_shape uses the width of a
matrix instead of its height, then testing with only square
matrices will not detect the problem. If warn=True, we emit a
warning when testing with such values.
"""
mode = self.mode
if excluding:
mode = mode.excluding(*excluding)
if warn:
for var, inp in zip(inputs, numeric_inputs):
if isinstance(inp, (int, float, list, tuple)):
inp = var.type.filter(inp)
if not hasattr(inp, "shape"):
continue
# remove broadcasted dims as it is sure they can't be
# changed to prevent the same dim problem.
if hasattr(var.type, "broadcastable"):
shp = [inp.shape[i] for i in range(inp.ndim)
if not var.type.broadcastable[i]]
else:
shp = inp.shape
if len(set(shp)) != len(shp):
_logger.warn(
"While testing the shape inference, we received an"
" input with a shape that has some repeated values: %s"
", like a square matrix. This makes it impossible to"
" check if the values for these dimensions have been"
" correctly used, or if they have been mixed up.",
str(inp.shape))
break
outputs_function = theano.function(inputs, outputs, mode=mode)
shapes_function = theano.function(inputs, [o.shape for o in outputs],
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论