Commit 9a203db3 authored by Frédéric Bastien, committed by GitHub

Merge pull request #4866 from lamblin/fix_jenkins

Fix jenkins
......@@ -7,6 +7,7 @@ from theano.compile import DeepCopyOp
from theano.tensor.tests import test_subtensor
from ..basic_ops import HostFromGpu, GpuFromHost
from ..elemwise import GpuDimShuffle
from ..subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedSubtensor1,
GpuAdvancedIncSubtensor1)
......@@ -27,6 +28,7 @@ class G_subtensor(test_subtensor.T_subtensor):
inc_sub=GpuIncSubtensor,
adv_sub1=GpuAdvancedSubtensor1,
adv_incsub1=GpuAdvancedIncSubtensor1,
dimshuffle=GpuDimShuffle,
mode=mode_with_gpu,
# avoid errors with limited devices
dtype='float32',
......
......@@ -1369,8 +1369,10 @@ class numeric_grad(object):
# perfectly accurate.
type_eps = {'float64': 1e-7,
'float32': 3e-4,
'float16': 1e-3,
numpy.dtype('float64'): 1e-7,
numpy.dtype('float32'): 3e-4}
numpy.dtype('float32'): 3e-4,
numpy.dtype('float16'): 1e-3}
def __init__(self, f, pt, eps=None, out_type=None):
"""Return the gradient of f at pt.
......@@ -1606,12 +1608,13 @@ def verify_grad(fun, pt, n_tests=2, rng=None, eps=None,
pt = [numpy.array(p) for p in pt]
for i, p in enumerate(pt):
if p.dtype not in ('float32', 'float64'):
if p.dtype not in ('float16', 'float32', 'float64'):
raise TypeError(
('verify_grad can work only with floating point '
'inputs, but input %i has dtype "%s".') % (i, p.dtype))
_type_tol = dict( # relative error tolerances for different types
float16=5e-2,
float32=1e-2,
float64=1e-4)
......
......@@ -1014,6 +1014,7 @@ class T_subtensor(theano.tensor.tests.test_subtensor.T_subtensor):
inc_sub = cuda.GpuIncSubtensor
adv_sub1 = cuda.GpuAdvancedSubtensor1
adv_incsub1 = cuda.GpuAdvancedIncSubtensor1
dimshuffle = cuda.GpuDimShuffle
mode = mode_with_gpu
dtype = 'float32'
type = tcn.CudaNdarrayType
......@@ -1075,7 +1076,7 @@ class T_subtensor(theano.tensor.tests.test_subtensor.T_subtensor):
# Test with c_contiguous input
t = self.adv_sub1()(n, idx)
t.owner.op.perform_using_take = True # input c_contiguous, so we reshape
val = self.eval_output_and_check(t, list=True)
val = self.eval_output_and_check(t, op_type=self.adv_sub1)
val = numpy.asarray(val)
good = data[idx]
......
......@@ -3,12 +3,14 @@ This file test tensor op that should also operate on CudaNdaray.
"""
from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
from nose_parameterized import parameterized
import numpy
import theano
from theano import tensor
import theano.tensor as T
import theano.tests.unittest_tools as utt
# Skip test if cuda_ndarray is not available.
import theano.sandbox.cuda as cuda
......@@ -139,6 +141,8 @@ def test_get_diagonal_subtensor_view():
test_conv3d2d.test_get_diagonal_subtensor_view(wrap=cuda.CudaNdarray)
def test_conv3d():
test_conv3d2d.test_conv3d(mode=mode_with_gpu,
shared=cuda.shared_constructor)
@parameterized.expand(('valid', 'full'), utt.custom_name_func)
def test_conv3d(border_mode):
test_conv3d2d.check_conv3d(border_mode=border_mode,
mode=mode_with_gpu,
shared=cuda.shared_constructor)
......@@ -92,7 +92,15 @@ def check_diagonal_subtensor_view_traces(fn):
@parameterized.expand(('valid', 'full'), utt.custom_name_func)
def test_conv3d(border_mode, mode=mode_without_gpu, shared=theano.tensor._shared):
def test_conv3d(border_mode):
check_conv3d(border_mode=border_mode,
mode=mode_without_gpu,
shared=theano.tensor._shared)
# This function will also be used in theano/sandbox/cuda/tests/test_tensor_op.py,
# which is not possible if it is decorated by @parameterized.expand
def check_conv3d(border_mode, mode=mode_without_gpu, shared=theano.tensor._shared):
if ndimage is None:
raise SkipTest("conv3d2d tests need SciPy")
......
......@@ -1649,7 +1649,7 @@ TanhInplaceTester = makeBroadcastTester(
grad=_grad_broadcast_unary_normal,
inplace=True)
_eps = 1e-10
_eps = 1e-2
_good_broadcast_unary_arctanh = dict(
normal=(rand_ranged(-1 + _eps, 1 - _eps, (2, 3)),),
integers=(randint_ranged(-1 + _eps, 1 - _eps, (2, 3)),),
......
......@@ -58,12 +58,14 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
mode=None,
dtype=theano.config.floatX,
type=tensor.TensorType,
ignore_topo=DeepCopyOp):
ignore_topo=DeepCopyOp,
dimshuffle=DimShuffle):
self.shared = shared
self.sub = sub
self.inc_sub = inc_sub
self.adv_sub1 = adv_sub1
self.adv_incsub1 = adv_incsub1
self.dimshuffle = dimshuffle
if mode is None:
mode = theano.compile.mode.get_default_mode()
mode = mode.including("local_useless_subtensor")
......@@ -343,28 +345,29 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):
numpy_n = numpy.arange(24, dtype=self.dtype).reshape((2, 3, 4))
n = self.shared(numpy_n)
test_cases = [
(0, self.sub, numpy.index_exp[...]),
(1, self.sub, numpy.index_exp[..., 1]),
(1, self.sub, numpy.index_exp[1, ...]),
(1, self.sub, numpy.index_exp[..., 1, 2, 3]),
(1, self.sub, numpy.index_exp[1, ..., 2, 3]),
(1, self.sub, numpy.index_exp[1, 2, 3, ...]),
(3, DimShuffle, numpy.index_exp[..., [0, 2, 3]]),
(1, DimShuffle,
(0, Subtensor, self.sub, numpy.index_exp[...]),
(1, Subtensor, self.sub, numpy.index_exp[..., 1]),
(1, Subtensor, self.sub, numpy.index_exp[1, ...]),
(1, Subtensor, self.sub, numpy.index_exp[..., 1, 2, 3]),
(1, Subtensor, self.sub, numpy.index_exp[1, ..., 2, 3]),
(1, Subtensor, self.sub, numpy.index_exp[1, 2, 3, ...]),
(3, DimShuffle, self.dimshuffle,
numpy.index_exp[..., [0, 2, 3]]),
(1, DimShuffle, self.dimshuffle,
numpy.index_exp[numpy.newaxis, ...])]
# The following test case is not supported by numpy before 1.9
numpy_version = [int(v) for v in numpy.version.version.split('.')[0:2]]
if numpy_version >= [1, 9]:
test_cases.append(
(1, AdvancedSubtensor,
(1, AdvancedSubtensor, AdvancedSubtensor,
numpy.index_exp[..., numpy.newaxis, [1, 2]]))
for length, op_type, slice_ in test_cases:
for length, op_type, op_type_opt, slice_ in test_cases:
numpy_tval = numpy_n[slice_]
t = n[slice_]
self.assertTrue(isinstance(t.owner.op, op_type))
tval = self.eval_output_and_check(t,
op_type=op_type,
op_type=op_type_opt,
length=length)
assert_equal(tval.shape, numpy_tval.shape)
assert_array_equal(tval, numpy_tval)
......
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment