Commit 7befad61 authored by Frédéric Bastien, committed by GitHub

Merge pull request #6323 from nouiz/tests

Fix tests in the daily buildbot and make sure not to lose the current linker.
...@@ -10,7 +10,6 @@ import copy ...@@ -10,7 +10,6 @@ import copy
import sys import sys
import gc import gc
import logging import logging
import six.moves.copyreg as copyreg
from itertools import chain, product as itertools_product from itertools import chain, product as itertools_product
from theano.compat import izip from theano.compat import izip
...@@ -2414,10 +2413,6 @@ class _Maker(FunctionMaker): # inheritance buys a few helper functions ...@@ -2414,10 +2413,6 @@ class _Maker(FunctionMaker): # inheritance buys a few helper functions
return fn return fn
def _pickle_DebugMode_Maker(maker):
raise NotImplementedError('DebugMode is not picklable (yet)')
copyreg.pickle(_Maker, _pickle_DebugMode_Maker)
######################## ########################
# #
# API symbol: DebugMode # API symbol: DebugMode
......
...@@ -317,7 +317,7 @@ class Mode(object): ...@@ -317,7 +317,7 @@ class Mode(object):
self.provided_optimizer) self.provided_optimizer)
# N.B. opt might be a Query instance, not sure what else it might be... # N.B. opt might be a Query instance, not sure what else it might be...
# string? Optimizer? OptDB? who knows??? # string? Optimizer? OptDB? who knows???
return self.clone(optimizer=opt.including(*tags)) return self.clone(optimizer=opt.including(*tags), linker=link)
def register(self, *optimizations): def register(self, *optimizations):
"""Adds new optimization instances to a mode. """Adds new optimization instances to a mode.
...@@ -347,12 +347,12 @@ class Mode(object): ...@@ -347,12 +347,12 @@ class Mode(object):
def excluding(self, *tags): def excluding(self, *tags):
link, opt = self.get_linker_optimizer(self.provided_linker, link, opt = self.get_linker_optimizer(self.provided_linker,
self.provided_optimizer) self.provided_optimizer)
return self.clone(optimizer=opt.excluding(*tags)) return self.clone(optimizer=opt.excluding(*tags), linker=link)
def requiring(self, *tags): def requiring(self, *tags):
link, opt = self.get_linker_optimizer(self.provided_linker, link, opt = self.get_linker_optimizer(self.provided_linker,
self.provided_optimizer) self.provided_optimizer)
return self.clone(optimizer=opt.requiring(*tags)) return self.clone(optimizer=opt.requiring(*tags), linker=link)
def clone(self, link_kwargs=None, optimizer="", **kwargs): def clone(self, link_kwargs=None, optimizer="", **kwargs):
""" """
......
...@@ -654,9 +654,13 @@ class T_picklefunction(unittest.TestCase): ...@@ -654,9 +654,13 @@ class T_picklefunction(unittest.TestCase):
def test_output_keys(self): def test_output_keys(self):
x = T.vector() x = T.vector()
f = theano.function([x], {'vec': x**2}) f = theano.function([x], {'vec': x**2})
assert isinstance(f([2, 3, 4]), dict) o = f([2, 3, 4])
assert isinstance(o, dict)
assert np.allclose(o['vec'], [4, 9, 16])
g = copy.deepcopy(f) g = copy.deepcopy(f)
assert isinstance(g([2, 3, 4]), dict) o = g([2, 3, 4])
assert isinstance(o, dict)
assert np.allclose(o['vec'], [4, 9, 16])
def test_deepcopy_shared_container(self): def test_deepcopy_shared_container(self):
# Ensure that shared containers remain shared after a deep copy. # Ensure that shared containers remain shared after a deep copy.
......
...@@ -466,12 +466,24 @@ AddConfigVar( ...@@ -466,12 +466,24 @@ AddConfigVar(
# scalable. # scalable.
# Also, please be careful not to modify the first item in the enum when adding # Also, please be careful not to modify the first item in the enum when adding
# new modes, since it is the default mode. # new modes, since it is the default mode.
def filter_mode(val):
    """Validate the ``mode`` config value.

    Accepts one of the known mode names, or an instance of
    ``theano.Mode``; anything else raises ``ValueError``.
    """
    known_mode_names = ('Mode', 'DebugMode', 'FAST_RUN',
                        'NanGuardMode',
                        'FAST_COMPILE', 'DEBUG_MODE')
    if val in known_mode_names:
        return val
    # This filter can run while Theano is still being imported, so
    # theano.Mode may not exist yet; probe before isinstance.
    if hasattr(theano, 'Mode') and isinstance(val, theano.Mode):
        return val
    raise ValueError("Expected one of those string 'Mode', 'DebugMode',"
                     " 'FAST_RUN', 'NanGuardMode', 'FAST_COMPILE',"
                     " 'DEBUG_MODE' or an instance of Mode.")
AddConfigVar( AddConfigVar(
'mode', 'mode',
"Default compilation mode", "Default compilation mode",
EnumStr('Mode', 'DebugMode', 'FAST_RUN', ConfigParam('Mode', filter_mode),
'NanGuardMode',
'FAST_COMPILE', 'DEBUG_MODE'),
in_c_key=False) in_c_key=False)
param = "g++" param = "g++"
......
...@@ -165,10 +165,13 @@ class GpuElemwise(HideC, Elemwise): ...@@ -165,10 +165,13 @@ class GpuElemwise(HideC, Elemwise):
scal_v_out = fake_node.outputs scal_v_out = fake_node.outputs
assert len(scal_v_out) == len(node.outputs) assert len(scal_v_out) == len(node.outputs)
try:
kop = fake_node.op.c_code(fake_node, 'elem_scalar', kop = fake_node.op.c_code(fake_node, 'elem_scalar',
inps, outs, inps, outs,
dict(fail='return;')) dict(fail='return;'))
except MethodNotDefined:
raise AssertionError(
"No c code for this scalar. Can not make a GpuElemwise")
# If the following assert fail, then we need to update the # If the following assert fail, then we need to update the
# code handler above. # code handler above.
assert 'npy_float16' not in kop assert 'npy_float16' not in kop
......
...@@ -748,6 +748,8 @@ def local_gpua_elemwise(op, context_name, inputs, outputs): ...@@ -748,6 +748,8 @@ def local_gpua_elemwise(op, context_name, inputs, outputs):
scal_op) scal_op)
if not have_cuda: if not have_cuda:
return None return None
if not scal_op.supports_c_code(inputs, outputs):
return
res = GpuElemwise(scal_op, name=name, res = GpuElemwise(scal_op, name=name,
inplace_pattern=copy.copy(op.inplace_pattern), inplace_pattern=copy.copy(op.inplace_pattern),
nfunc_spec=op.nfunc_spec) nfunc_spec=op.nfunc_spec)
......
from __future__ import absolute_import, print_function, division from __future__ import absolute_import, print_function, division
import numpy as np
from nose.tools import assert_raises from nose.tools import assert_raises
import numpy as np
import theano import theano
from theano import tensor from theano import tensor
...@@ -745,6 +745,10 @@ class Conv_opt_test(unittest.TestCase): ...@@ -745,6 +745,10 @@ class Conv_opt_test(unittest.TestCase):
mode = mode_with_gpu.including('conv_meta') mode = mode_with_gpu.including('conv_meta')
ref_func = theano.function([], conv_op, mode=mode_with_gpu) ref_func = theano.function([], conv_op, mode=mode_with_gpu)
# All meta optimizer compile a new function. This need to know
# the current linker, but this information is not available,
# so it use the default mode.
with theano.change_flags(mode=mode):
conv_func = theano.function([], conv_op, mode=mode) conv_func = theano.function([], conv_op, mode=mode)
assert any([isinstance(node.op, op) assert any([isinstance(node.op, op)
for node in conv_func.maker.fgraph.toposort()]) for node in conv_func.maker.fgraph.toposort()])
...@@ -787,13 +791,20 @@ class Conv_opt_test(unittest.TestCase): ...@@ -787,13 +791,20 @@ class Conv_opt_test(unittest.TestCase):
mode = mode_with_gpu.including('conv_meta') mode = mode_with_gpu.including('conv_meta')
ref_func = theano.function([], conv_op, mode=mode_with_gpu) ref_func = theano.function([], conv_op, mode=mode_with_gpu)
# All meta optimizer compile a new function. This need to know
# the current linker, but this information is not available,
# so it use the default mode.
with theano.change_flags(mode=mode):
conv_func = theano.function([], conv_op, mode=mode) conv_func = theano.function([], conv_op, mode=mode)
if op is not None: if op is not None:
assert any([isinstance(node.op, op) assert any([isinstance(node.op, op)
for node in conv_func.maker.fgraph.toposort()]) for node in conv_func.maker.fgraph.toposort()])
utt.assert_allclose(conv_func(), ref_func()) utt.assert_allclose(conv_func(), ref_func())
def test_optimizers(self): def test_optimizers_2d(self):
if theano.config.cxx == "":
raise SkipTest("Need a c compiler.")
imshp2d = [(2, 3, 5, 5), (2, 2, 5, 7), (2, 1, 3, 3)] imshp2d = [(2, 3, 5, 5), (2, 2, 5, 7), (2, 1, 3, 3)]
kshp2d = [(4, 3, 3, 3), (3, 2, 3, 5), (4, 1, 1, 1)] kshp2d = [(4, 3, 3, 3), (3, 2, 3, 5), (4, 1, 1, 1)]
tshp2d = [(2, 4, 3, 3), (2, 3, 3, 3), (2, 4, 3, 3)] tshp2d = [(2, 4, 3, 3), (2, 3, 3, 3), (2, 4, 3, 3)]
...@@ -827,6 +838,10 @@ class Conv_opt_test(unittest.TestCase): ...@@ -827,6 +838,10 @@ class Conv_opt_test(unittest.TestCase):
'conv_gemm:default', 'conv_gemm:default',
dnn.GpuDnnConv) dnn.GpuDnnConv)
def test_optimizers_3d(self):
if theano.config.cxx == "":
raise SkipTest("Need a c compiler.")
imshp3d = [(2, 3, 5, 5, 5), (2, 2, 5, 7, 5), (2, 1, 3, 3, 3)] imshp3d = [(2, 3, 5, 5, 5), (2, 2, 5, 7, 5), (2, 1, 3, 3, 3)]
kshp3d = [(4, 3, 3, 3, 3), (3, 2, 3, 5, 3), (4, 1, 1, 1, 1)] kshp3d = [(4, 3, 3, 3, 3), (3, 2, 3, 5, 3), (4, 1, 1, 1, 1)]
tshp3d = [(2, 4, 3, 3, 3), (2, 3, 3, 3, 3), (2, 4, 3, 3, 3)] tshp3d = [(2, 4, 3, 3, 3), (2, 3, 3, 3, 3), (2, 4, 3, 3, 3)]
...@@ -865,6 +880,9 @@ class Conv_opt_test(unittest.TestCase): ...@@ -865,6 +880,9 @@ class Conv_opt_test(unittest.TestCase):
'conv_gemm:default', 'conv_gemm:default',
dnn.GpuDnnConv) dnn.GpuDnnConv)
def test_optimizers_non_default(self):
if theano.config.cxx == "":
raise SkipTest("Need a c compiler.")
# conv2d forward pass with Non-default border_mode and filter_dilation # conv2d forward pass with Non-default border_mode and filter_dilation
imshp2d = [(2, 3, 5, 5), (4, 2, 5, 5)] imshp2d = [(2, 3, 5, 5), (4, 2, 5, 5)]
kshp2d = [(4, 3, 3, 3), (3, 2, 3, 3)] kshp2d = [(4, 3, 3, 3), (3, 2, 3, 3)]
......
(Diff collapsed.)
...@@ -52,7 +52,8 @@ class Erf(UnaryScalarOp): ...@@ -52,7 +52,8 @@ class Erf(UnaryScalarOp):
z, = out z, = out
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = erf(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = erf((%(cast)s)%(x)s);" % locals()
erf = Erf(upgrade_to_float, name='erf') erf = Erf(upgrade_to_float, name='erf')
...@@ -83,7 +84,8 @@ class Erfc(UnaryScalarOp): ...@@ -83,7 +84,8 @@ class Erfc(UnaryScalarOp):
z, = out z, = out
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = erfc(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = erfc((%(cast)s)%(x)s);" % locals()
# scipy.special.erfc don't support complex. Why? # scipy.special.erfc don't support complex. Why?
erfc = Erfc(upgrade_to_float_no_complex, name='erfc') erfc = Erfc(upgrade_to_float_no_complex, name='erfc')
...@@ -275,11 +277,8 @@ class GammaLn(UnaryScalarOp): ...@@ -275,11 +277,8 @@ class GammaLn(UnaryScalarOp):
# For some reason, on the GPU, uint64 inputs don't get casted # For some reason, on the GPU, uint64 inputs don't get casted
# automatically to float64. This make the compilation crash # automatically to float64. This make the compilation crash
dtype = "" dtype = ""
if node.outputs[0].dtype == 'float64': cast = node.outputs[0].type.dtype_specs()[1]
dtype = "(double)" return """%(z)s = lgamma((%(cast)s)%(x)s);""" % locals()
elif node.outputs[0].dtype == 'float32':
dtype = "(float)"
return """%(z)s = lgamma(%(dtype)s%(x)s);""" % locals()
gammaln = GammaLn(upgrade_to_float, name='gammaln') gammaln = GammaLn(upgrade_to_float, name='gammaln')
......
from __future__ import (division, absolute_import, print_function) from __future__ import (division, absolute_import, print_function)
import unittest import unittest
from nose.plugins.skip import SkipTest
import numpy as np import numpy as np
import theano import theano
...@@ -84,6 +85,9 @@ class TestCTC(unittest.TestCase): ...@@ -84,6 +85,9 @@ class TestCTC(unittest.TestCase):
""" """
def setUp(self): def setUp(self):
if theano.config.mode == "FAST_COMPILE" or theano.config.cxx == "":
raise SkipTest("We need a c compiler")
if not ctc_available(): if not ctc_available():
self.skipTest('Optional library warp-ctc not available') self.skipTest('Optional library warp-ctc not available')
......
...@@ -1242,7 +1242,8 @@ CeilTester = makeBroadcastTester( ...@@ -1242,7 +1242,8 @@ CeilTester = makeBroadcastTester(
CeilInplaceTester = makeBroadcastTester( CeilInplaceTester = makeBroadcastTester(
op=inplace.ceil_inplace, op=inplace.ceil_inplace,
expected=upcast_float16_ufunc(np.ceil), expected=upcast_float16_ufunc(np.ceil),
good=_good_broadcast_unary_normal_no_complex, good=copymod(_good_broadcast_unary_normal_no_complex,
without=['integers', 'int8', 'uint8', 'uint16']),
# corner cases includes a lot of integers: points where Ceil is not # corner cases includes a lot of integers: points where Ceil is not
# continuous (not differentiable) # continuous (not differentiable)
inplace=True) inplace=True)
...@@ -1256,7 +1257,8 @@ FloorTester = makeBroadcastTester( ...@@ -1256,7 +1257,8 @@ FloorTester = makeBroadcastTester(
FloorInplaceTester = makeBroadcastTester( FloorInplaceTester = makeBroadcastTester(
op=inplace.floor_inplace, op=inplace.floor_inplace,
expected=upcast_float16_ufunc(np.floor), expected=upcast_float16_ufunc(np.floor),
good=_good_broadcast_unary_normal_no_complex, good=copymod(_good_broadcast_unary_normal_no_complex,
without=["integers", "int8", "uint8", "uint16"]),
inplace=True) inplace=True)
TruncInplaceTester = makeBroadcastTester( TruncInplaceTester = makeBroadcastTester(
...@@ -1603,7 +1605,8 @@ Arctan2InplaceTester = makeBroadcastTester( ...@@ -1603,7 +1605,8 @@ Arctan2InplaceTester = makeBroadcastTester(
op=inplace.arctan2_inplace, op=inplace.arctan2_inplace,
expected=np.arctan2, expected=np.arctan2,
good=copymod(_good_broadcast_binary_arctan2, good=copymod(_good_broadcast_binary_arctan2,
without=['integers', 'int8', 'uint8', 'uint16']), without=['integers', 'int8', 'uint8',
'uint16', 'dtype_mixup_2']),
inplace=True) inplace=True)
CoshTester = makeBroadcastTester( CoshTester = makeBroadcastTester(
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论