提交 b8e58590 authored 作者: Frederic's avatar Frederic

Add a warning, when testing infer_shape, if the inputs have dimensions of the same size.

This can hide errors.
上级 1617a923
......@@ -463,7 +463,8 @@ class test_Solve(utt.InferShapeTester):
dtype=config.floatX),
numpy.asarray(rng.rand(5, 1),
dtype=config.floatX)],
self.op_class)
self.op_class,
warn=False)
rng = numpy.random.RandomState(utt.fetch_seed())
A = theano.tensor.matrix()
b = theano.tensor.vector()
......@@ -474,7 +475,8 @@ class test_Solve(utt.InferShapeTester):
dtype=config.floatX),
numpy.asarray(rng.rand(5),
dtype=config.floatX)],
self.op_class)
self.op_class,
warn=False)
class test_Eig(utt.InferShapeTester):
......@@ -497,7 +499,8 @@ class test_Eig(utt.InferShapeTester):
self.op(A), # theano.function outputs
# S must be square
[S],
self.op_class)
self.op_class,
warn=False)
def test_eval(self):
import math
......
......@@ -1615,7 +1615,8 @@ class DiagTester(utt.InferShapeTester):
self._compile_and_check(variable,
[self.op(*variable)],
data,
self.op_class)
self.op_class,
warn=False)
def test_grad(self):
for format in sparse.sparse_formats:
......@@ -2591,9 +2592,14 @@ class StructuredAddSVTester(unittest.TestCase):
class SamplingDotTester(utt.InferShapeTester):
x = [tensor.matrix() for t in range(2)]
x.append(sparse.csr_matrix())
a = [numpy.array(numpy.random.random_integers(maximum, size=(3, 3)) - 1,
# non-square shapes
a = [numpy.array(numpy.random.random_integers(5, size=(4, 3)) - 1,
dtype=theano.config.floatX),
numpy.array(numpy.random.random_integers(5, size=(5, 3)) - 1,
dtype=theano.config.floatX),
numpy.array(numpy.random.random_integers(2, size=(4, 5)) - 1,
dtype=theano.config.floatX)
for maximum in [5, 5, 2]]
]
a[2] = sp.csr_matrix(a[2])
def setUp(self):
......
......@@ -135,7 +135,8 @@ class MultinomialTester(utt.InferShapeTester):
self._compile_and_check([self.p],
[multinomial(5, self.p)],
[self._p],
self.op_class)
self.op_class,
warn=False)
if __name__ == '__main__':
......
......@@ -424,82 +424,62 @@ class TestConv2D(utt.InferShapeTester):
adtens = T.dtensor4()
bdtens = T.dtensor4()
aivec_val = [2, 2, 3, 3]
bivec_val = [2, 2, 2, 2]
aivec_val = [4, 5, 6, 3]
bivec_val = [7, 5, 3, 2]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='valid')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [2, 2, 3, 3]
bivec_val = [2, 2, 2, 2]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='full')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 8, 8]
bivec_val = [4, 2, 5, 5]
aivec_val = [6, 2, 8, 3]
bivec_val = [4, 2, 5, 3]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='valid')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 8, 8]
bivec_val = [4, 2, 5, 5]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='full')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 7, 5]
bivec_val = [5, 2, 3, 2]
aivec_val = [3, 6, 7, 5]
bivec_val = [5, 6, 3, 2]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='valid')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 7, 5]
bivec_val = [5, 2, 3, 2]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='full')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 7, 5]
bivec_val = [5, 2, 2, 3]
aivec_val = [3, 6, 7, 5]
bivec_val = [5, 6, 2, 3]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='valid')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 7, 5]
bivec_val = [5, 2, 2, 3]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='full')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 3, 3]
bivec_val = [4, 2, 3, 3]
aivec_val = [5, 2, 4, 3]
bivec_val = [6, 2, 4, 3]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='valid')], [adtens_val, bdtens_val], conv.ConvOp)
aivec_val = [3, 2, 3, 3]
bivec_val = [4, 2, 3, 3]
adtens_val = rand(*aivec_val)
bdtens_val = rand(*bivec_val)
self._compile_and_check([adtens, bdtens],
[conv.conv2d(adtens, bdtens, aivec_val, bivec_val,
border_mode='full')], [adtens_val, bdtens_val], conv.ConvOp)
......
......@@ -182,9 +182,8 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
[DownsampleFactorMaxGrad(maxpoolshp,
ignore_border=ignore_border)(image, maxout, gz)],
[image_val, maxout_val, gz_val],
DownsampleFactorMaxGrad)
DownsampleFactorMaxGrad,
warn=False)
if __name__ == '__main__':
......
......@@ -6743,7 +6743,7 @@ class TestInferShape(utt.InferShapeTester):
# Join
cdmat = dmatrix()
admat_val = rand(1, 3)
bdmat_val = rand(3, 3)
bdmat_val = rand(2, 3)
cdmat_val = rand(4, 3)
aiscal_val = 0
self._compile_and_check([aiscal, admat, bdmat, cdmat],
......@@ -6821,7 +6821,8 @@ class TestInferShape(utt.InferShapeTester):
adtens4_val = rand(2, 1, 3, 1)
self._compile_and_check([adtens4],
[Rebroadcast(*adict)(adtens4)],
[adtens4_val], Rebroadcast)
[adtens4_val], Rebroadcast,
warn=False)
adtens4_bro = TensorType('float64', (True, True, True, False))()
bdict = [(0, True), (1, False), (2, False), (3, False)]
......@@ -6948,8 +6949,8 @@ class TestInferShape(utt.InferShapeTester):
adtens4_val = rand(3, 4, 2, 5)
self._compile_and_check([adtens4, bdtens4],
[inc_subtensor(adtens4[::, 2:4, ::, ::], bdtens4)],
[adtens4_val, [[[[1, 2, 3, 4, 5]]]]], IncSubtensor)
[adtens4_val, [[[[1, 2, 3, 4, 5]]]]], IncSubtensor,
warn=False)
self._compile_and_check([adtens4, bdmat],
[inc_subtensor(adtens4[2, 2:4, 1, ::], bdmat)],
[adtens4_val, [[1, 2, 3, 4, 5]]], IncSubtensor)
......@@ -6964,7 +6965,8 @@ class TestInferShape(utt.InferShapeTester):
self._compile_and_check([adtens4, bdtens4],
[set_subtensor(adtens4[::, 2:4, ::, ::], bdtens4)],
[adtens4_val, [[[[1, 2, 3, 4, 5]]]]], IncSubtensor)
[adtens4_val, [[[[1, 2, 3, 4, 5]]]]], IncSubtensor,
warn=False)
self._compile_and_check([adtens4, bdmat],
[set_subtensor(adtens4[2, 2:4, 1, ::], bdmat)],
......@@ -7005,7 +7007,8 @@ class TestInferShape(utt.InferShapeTester):
self._compile_and_check([adtens4, bdtens4],
[set_subtensor(adtens4[aivec_val], bdtens4)],
[adtens4_val, [[[[1, 2, 3, 4, 5]]]]],
AdvancedIncSubtensor1)
AdvancedIncSubtensor1,
warn=False)
aivec_val = [1, 3, 2]
self._compile_and_check([adtens4, advec],
......@@ -7042,7 +7045,8 @@ class TestInferShape(utt.InferShapeTester):
[inc_subtensor(adtens4[aivec_val], bdtens4)],
[adtens4_val, [[[[1, 2, 3, 4, 5]]],
[[[6, 7, 8, 9, 10]]]]],
AdvancedIncSubtensor1)
AdvancedIncSubtensor1,
warn=False)
aivec_val = [1, 2, 1]
self._compile_and_check([adtens4, advec],
......
......@@ -92,8 +92,9 @@ class test_DimShuffle(unittest_tools.InferShapeTester):
adtens = TensorType('float64', ib)('x')
adtens_val = numpy.ones(xsh)
self._compile_and_check([adtens],
[DimShuffle(ib, shuffle)(adtens)],
[adtens_val], DimShuffle)
[DimShuffle(ib, shuffle)(adtens)],
[adtens_val], DimShuffle,
warn=False)
def test_too_big_rank(self):
x = tensor.dscalar()
......
......@@ -182,7 +182,8 @@ class SqueezeTester(utt.InferShapeTester):
self._compile_and_check([variable],
[self.op(variable)],
[data],
tensor.DimShuffle)
tensor.DimShuffle,
warn=False)
def test_grad(self):
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
......@@ -375,9 +376,11 @@ class TestFillDiagonal(utt.InferShapeTester):
numpy.random.rand()],
self.op_class)
self._compile_and_check([z, y], [self.op(z, y)],
# must be square when ndim > 2
[numpy.random.rand(8, 8, 8),
numpy.random.rand()],
self.op_class)
self.op_class,
warn=False)
if __name__ == "__main__":
utt.unittest.main()
......
from copy import copy, deepcopy
import logging
import sys
import unittest
......@@ -15,6 +16,7 @@ except ImportError:
"""
Skip this test
"""
_logger = logging.getLogger("theano.tests.unittest_tools")
AddConfigVar('unittests.rseed',
......@@ -173,11 +175,30 @@ class InferShapeTester(unittest.TestCase):
self.mode = mode.including("canonicalize")
def _compile_and_check(self, inputs, outputs, numeric_inputs, cls,
excluding=None):
excluding=None, warn=True):
"""This tests the infer_shape method only"""
mode = self.mode
if excluding:
mode = mode.excluding(*excluding)
if warn:
for var, inp in zip(inputs, numeric_inputs):
if isinstance(inp, (int, float, list, tuple)):
inp = var.type.filter(inp)
if not hasattr(inp, "shape"):
continue
# Skip broadcastable dims: they are always of size 1, so
# they cannot contribute to the duplicated-dimension problem.
if hasattr(var.type, "broadcastable"):
shp = [inp.shape[i] for i in range(inp.ndim)
if not var.type.broadcastable[i]]
else:
shp = inp.shape
if len(set(shp)) != len(shp):
_logger.warn("While testing the shape, we received input"
" with dimensions of the same shape %s! This"
" lower the quality of the verification.",
str(inp.shape))
break
outputs_function = theano.function(inputs, outputs, mode=mode)
shapes_function = theano.function(inputs, [o.shape for o in outputs],
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论