提交 7befad61 authored 作者: Frédéric Bastien's avatar Frédéric Bastien 提交者: GitHub

Merge pull request #6323 from nouiz/tests

Fix test in the daily buildbot and make sure not to lose the current linker.
...@@ -10,7 +10,6 @@ import copy ...@@ -10,7 +10,6 @@ import copy
import sys import sys
import gc import gc
import logging import logging
import six.moves.copyreg as copyreg
from itertools import chain, product as itertools_product from itertools import chain, product as itertools_product
from theano.compat import izip from theano.compat import izip
...@@ -2414,10 +2413,6 @@ class _Maker(FunctionMaker): # inheritance buys a few helper functions ...@@ -2414,10 +2413,6 @@ class _Maker(FunctionMaker): # inheritance buys a few helper functions
return fn return fn
def _pickle_DebugMode_Maker(maker):
raise NotImplementedError('DebugMode is not picklable (yet)')
copyreg.pickle(_Maker, _pickle_DebugMode_Maker)
######################## ########################
# #
# API symbol: DebugMode # API symbol: DebugMode
......
...@@ -317,7 +317,7 @@ class Mode(object): ...@@ -317,7 +317,7 @@ class Mode(object):
self.provided_optimizer) self.provided_optimizer)
# N.B. opt might be a Query instance, not sure what else it might be... # N.B. opt might be a Query instance, not sure what else it might be...
# string? Optimizer? OptDB? who knows??? # string? Optimizer? OptDB? who knows???
return self.clone(optimizer=opt.including(*tags)) return self.clone(optimizer=opt.including(*tags), linker=link)
def register(self, *optimizations): def register(self, *optimizations):
"""Adds new optimization instances to a mode. """Adds new optimization instances to a mode.
...@@ -347,12 +347,12 @@ class Mode(object): ...@@ -347,12 +347,12 @@ class Mode(object):
def excluding(self, *tags): def excluding(self, *tags):
link, opt = self.get_linker_optimizer(self.provided_linker, link, opt = self.get_linker_optimizer(self.provided_linker,
self.provided_optimizer) self.provided_optimizer)
return self.clone(optimizer=opt.excluding(*tags)) return self.clone(optimizer=opt.excluding(*tags), linker=link)
def requiring(self, *tags): def requiring(self, *tags):
link, opt = self.get_linker_optimizer(self.provided_linker, link, opt = self.get_linker_optimizer(self.provided_linker,
self.provided_optimizer) self.provided_optimizer)
return self.clone(optimizer=opt.requiring(*tags)) return self.clone(optimizer=opt.requiring(*tags), linker=link)
def clone(self, link_kwargs=None, optimizer="", **kwargs): def clone(self, link_kwargs=None, optimizer="", **kwargs):
""" """
......
...@@ -654,9 +654,13 @@ class T_picklefunction(unittest.TestCase): ...@@ -654,9 +654,13 @@ class T_picklefunction(unittest.TestCase):
def test_output_keys(self): def test_output_keys(self):
x = T.vector() x = T.vector()
f = theano.function([x], {'vec': x**2}) f = theano.function([x], {'vec': x**2})
assert isinstance(f([2, 3, 4]), dict) o = f([2, 3, 4])
assert isinstance(o, dict)
assert np.allclose(o['vec'], [4, 9, 16])
g = copy.deepcopy(f) g = copy.deepcopy(f)
assert isinstance(g([2, 3, 4]), dict) o = g([2, 3, 4])
assert isinstance(o, dict)
assert np.allclose(o['vec'], [4, 9, 16])
def test_deepcopy_shared_container(self): def test_deepcopy_shared_container(self):
# Ensure that shared containers remain shared after a deep copy. # Ensure that shared containers remain shared after a deep copy.
......
...@@ -466,12 +466,24 @@ AddConfigVar( ...@@ -466,12 +466,24 @@ AddConfigVar(
# scalable. # scalable.
# Also, please be careful not to modify the first item in the enum when adding # Also, please be careful not to modify the first item in the enum when adding
# new modes, since it is the default mode. # new modes, since it is the default mode.
def filter_mode(val):
    """Validate the ``mode`` config value.

    Accepts one of the predefined mode names, or an instance of
    ``theano.Mode``; returns the value unchanged, otherwise raises
    ``ValueError``.
    """
    valid_names = ('Mode', 'DebugMode', 'FAST_RUN',
                   'NanGuardMode',
                   'FAST_COMPILE', 'DEBUG_MODE')
    if val in valid_names:
        return val
    # This can be executed before Theano is completely imported, so
    # theano.Mode is not always available.
    if hasattr(theano, 'Mode') and isinstance(val, theano.Mode):
        return val
    raise ValueError("Expected one of those string 'Mode', 'DebugMode',"
                     " 'FAST_RUN', 'NanGuardMode', 'FAST_COMPILE',"
                     " 'DEBUG_MODE' or an instance of Mode.")
AddConfigVar( AddConfigVar(
'mode', 'mode',
"Default compilation mode", "Default compilation mode",
EnumStr('Mode', 'DebugMode', 'FAST_RUN', ConfigParam('Mode', filter_mode),
'NanGuardMode',
'FAST_COMPILE', 'DEBUG_MODE'),
in_c_key=False) in_c_key=False)
param = "g++" param = "g++"
......
...@@ -165,10 +165,13 @@ class GpuElemwise(HideC, Elemwise): ...@@ -165,10 +165,13 @@ class GpuElemwise(HideC, Elemwise):
scal_v_out = fake_node.outputs scal_v_out = fake_node.outputs
assert len(scal_v_out) == len(node.outputs) assert len(scal_v_out) == len(node.outputs)
kop = fake_node.op.c_code(fake_node, 'elem_scalar', try:
inps, outs, kop = fake_node.op.c_code(fake_node, 'elem_scalar',
dict(fail='return;')) inps, outs,
dict(fail='return;'))
except MethodNotDefined:
raise AssertionError(
"No c code for this scalar. Can not make a GpuElemwise")
# If the following assert fail, then we need to update the # If the following assert fail, then we need to update the
# code handler above. # code handler above.
assert 'npy_float16' not in kop assert 'npy_float16' not in kop
......
...@@ -748,6 +748,8 @@ def local_gpua_elemwise(op, context_name, inputs, outputs): ...@@ -748,6 +748,8 @@ def local_gpua_elemwise(op, context_name, inputs, outputs):
scal_op) scal_op)
if not have_cuda: if not have_cuda:
return None return None
if not scal_op.supports_c_code(inputs, outputs):
return
res = GpuElemwise(scal_op, name=name, res = GpuElemwise(scal_op, name=name,
inplace_pattern=copy.copy(op.inplace_pattern), inplace_pattern=copy.copy(op.inplace_pattern),
nfunc_spec=op.nfunc_spec) nfunc_spec=op.nfunc_spec)
......
from __future__ import absolute_import, print_function, division from __future__ import absolute_import, print_function, division
import numpy as np
from nose.tools import assert_raises from nose.tools import assert_raises
import numpy as np
import theano import theano
from theano import tensor from theano import tensor
...@@ -745,7 +745,11 @@ class Conv_opt_test(unittest.TestCase): ...@@ -745,7 +745,11 @@ class Conv_opt_test(unittest.TestCase):
mode = mode_with_gpu.including('conv_meta') mode = mode_with_gpu.including('conv_meta')
ref_func = theano.function([], conv_op, mode=mode_with_gpu) ref_func = theano.function([], conv_op, mode=mode_with_gpu)
conv_func = theano.function([], conv_op, mode=mode) # All meta optimizer compile a new function. This need to know
# the current linker, but this information is not available,
# so it use the default mode.
with theano.change_flags(mode=mode):
conv_func = theano.function([], conv_op, mode=mode)
assert any([isinstance(node.op, op) assert any([isinstance(node.op, op)
for node in conv_func.maker.fgraph.toposort()]) for node in conv_func.maker.fgraph.toposort()])
utt.assert_allclose(conv_func(), ref_func()) utt.assert_allclose(conv_func(), ref_func())
...@@ -787,13 +791,20 @@ class Conv_opt_test(unittest.TestCase): ...@@ -787,13 +791,20 @@ class Conv_opt_test(unittest.TestCase):
mode = mode_with_gpu.including('conv_meta') mode = mode_with_gpu.including('conv_meta')
ref_func = theano.function([], conv_op, mode=mode_with_gpu) ref_func = theano.function([], conv_op, mode=mode_with_gpu)
conv_func = theano.function([], conv_op, mode=mode) # All meta optimizer compile a new function. This need to know
# the current linker, but this information is not available,
# so it use the default mode.
with theano.change_flags(mode=mode):
conv_func = theano.function([], conv_op, mode=mode)
if op is not None: if op is not None:
assert any([isinstance(node.op, op) assert any([isinstance(node.op, op)
for node in conv_func.maker.fgraph.toposort()]) for node in conv_func.maker.fgraph.toposort()])
utt.assert_allclose(conv_func(), ref_func()) utt.assert_allclose(conv_func(), ref_func())
def test_optimizers(self): def test_optimizers_2d(self):
if theano.config.cxx == "":
raise SkipTest("Need a c compiler.")
imshp2d = [(2, 3, 5, 5), (2, 2, 5, 7), (2, 1, 3, 3)] imshp2d = [(2, 3, 5, 5), (2, 2, 5, 7), (2, 1, 3, 3)]
kshp2d = [(4, 3, 3, 3), (3, 2, 3, 5), (4, 1, 1, 1)] kshp2d = [(4, 3, 3, 3), (3, 2, 3, 5), (4, 1, 1, 1)]
tshp2d = [(2, 4, 3, 3), (2, 3, 3, 3), (2, 4, 3, 3)] tshp2d = [(2, 4, 3, 3), (2, 3, 3, 3), (2, 4, 3, 3)]
...@@ -827,6 +838,10 @@ class Conv_opt_test(unittest.TestCase): ...@@ -827,6 +838,10 @@ class Conv_opt_test(unittest.TestCase):
'conv_gemm:default', 'conv_gemm:default',
dnn.GpuDnnConv) dnn.GpuDnnConv)
def test_optimizers_3d(self):
if theano.config.cxx == "":
raise SkipTest("Need a c compiler.")
imshp3d = [(2, 3, 5, 5, 5), (2, 2, 5, 7, 5), (2, 1, 3, 3, 3)] imshp3d = [(2, 3, 5, 5, 5), (2, 2, 5, 7, 5), (2, 1, 3, 3, 3)]
kshp3d = [(4, 3, 3, 3, 3), (3, 2, 3, 5, 3), (4, 1, 1, 1, 1)] kshp3d = [(4, 3, 3, 3, 3), (3, 2, 3, 5, 3), (4, 1, 1, 1, 1)]
tshp3d = [(2, 4, 3, 3, 3), (2, 3, 3, 3, 3), (2, 4, 3, 3, 3)] tshp3d = [(2, 4, 3, 3, 3), (2, 3, 3, 3, 3), (2, 4, 3, 3, 3)]
...@@ -865,6 +880,9 @@ class Conv_opt_test(unittest.TestCase): ...@@ -865,6 +880,9 @@ class Conv_opt_test(unittest.TestCase):
'conv_gemm:default', 'conv_gemm:default',
dnn.GpuDnnConv) dnn.GpuDnnConv)
def test_optimizers_non_default(self):
if theano.config.cxx == "":
raise SkipTest("Need a c compiler.")
# conv2d forward pass with Non-default border_mode and filter_dilation # conv2d forward pass with Non-default border_mode and filter_dilation
imshp2d = [(2, 3, 5, 5), (4, 2, 5, 5)] imshp2d = [(2, 3, 5, 5), (4, 2, 5, 5)]
kshp2d = [(4, 3, 3, 3), (3, 2, 3, 3)] kshp2d = [(4, 3, 3, 3), (3, 2, 3, 3)]
......
...@@ -1124,6 +1124,38 @@ class ScalarOp(Op): ...@@ -1124,6 +1124,38 @@ class ScalarOp(Op):
""" """
raise theano.gof.utils.MethodNotDefined() raise theano.gof.utils.MethodNotDefined()
def supports_c_code(self, inputs, outputs):
    """Return True if this op has working C code for the given
    Elemwise ``inputs`` and ``outputs``, False otherwise.
    """
    try:
        # Rebuild scalar variables mirroring the Elemwise inputs,
        # preserving aliasing: duplicated inputs must map to the
        # same scalar variable.
        seen = dict()
        scalar_inputs = []
        for inp in inputs:
            if inp not in seen:
                seen[inp] = get_scalar_type(inp.dtype).make_variable()
            scalar_inputs.append(seen[inp])
        with theano.change_flags(compute_test_value='ignore'):
            scalar_outs = self(*scalar_inputs, return_list=True)
        # If the scalar_op has no C implementation, c_code raises
        # and we report False, allowing fusion of the other ops to
        # proceed without this one.
        self.c_code(scalar_outs[0].owner,
                    "test_presence_of_c_code",
                    ["x" for x in inputs],
                    ["z" for z in outputs],
                    {"fail": "%(fail)s"})
    except (theano.gof.utils.MethodNotDefined, NotImplementedError):
        return False
    return True
class UnaryScalarOp(ScalarOp): class UnaryScalarOp(ScalarOp):
nin = 1 nin = 1
...@@ -2492,7 +2524,8 @@ class Ceil(UnaryScalarOp): ...@@ -2492,7 +2524,8 @@ class Ceil(UnaryScalarOp):
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
(z,) = outputs (z,) = outputs
return "%(z)s = ceil(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = ceil((%(cast)s)%(x)s);" % locals()
ceil = Ceil(upgrade_to_float_no_complex, name='ceil') ceil = Ceil(upgrade_to_float_no_complex, name='ceil')
...@@ -2515,7 +2548,8 @@ class Floor(UnaryScalarOp): ...@@ -2515,7 +2548,8 @@ class Floor(UnaryScalarOp):
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs (x,) = inputs
(z,) = outputs (z,) = outputs
return "%(z)s = floor(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = floor((%(cast)s)%(x)s);" % locals()
floor = Floor(upgrade_to_float_no_complex, name='floor') floor = Floor(upgrade_to_float_no_complex, name='floor')
...@@ -2755,7 +2789,8 @@ class Log(UnaryScalarOp): ...@@ -2755,7 +2789,8 @@ class Log(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = log(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = log((%(cast)s)%(x)s);" % locals()
log = Log(upgrade_to_float, name='log') log = Log(upgrade_to_float, name='log')
...@@ -2794,7 +2829,8 @@ class Log2(UnaryScalarOp): ...@@ -2794,7 +2829,8 @@ class Log2(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = log2(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = log2((%(cast)s)%(x)s);" % locals()
log2 = Log2(upgrade_to_float, name='log2') log2 = Log2(upgrade_to_float, name='log2')
...@@ -2833,7 +2869,8 @@ class Log10(UnaryScalarOp): ...@@ -2833,7 +2869,8 @@ class Log10(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = log10(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = log10((%(cast)s)%(x)s);" % locals()
log10 = Log10(upgrade_to_float, name='log10') log10 = Log10(upgrade_to_float, name='log10')
...@@ -2870,7 +2907,8 @@ class Log1p(UnaryScalarOp): ...@@ -2870,7 +2907,8 @@ class Log1p(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = log1p(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = log1p((%(cast)s)%(x)s);" % locals()
log1p = Log1p(upgrade_to_float, name='log1p') log1p = Log1p(upgrade_to_float, name='log1p')
...@@ -2905,7 +2943,8 @@ class Exp(UnaryScalarOp): ...@@ -2905,7 +2943,8 @@ class Exp(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = exp(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = exp((%(cast)s)%(x)s);" % locals()
exp = Exp(upgrade_to_float, name='exp') exp = Exp(upgrade_to_float, name='exp')
...@@ -2938,7 +2977,8 @@ class Exp2(UnaryScalarOp): ...@@ -2938,7 +2977,8 @@ class Exp2(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = exp2(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = exp2((%(cast)s)%(x)s);" % locals()
exp2 = Exp2(upgrade_to_float, name='exp2') exp2 = Exp2(upgrade_to_float, name='exp2')
...@@ -2971,7 +3011,8 @@ class Expm1(UnaryScalarOp): ...@@ -2971,7 +3011,8 @@ class Expm1(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = expm1(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = expm1((%(cast)s)%(x)s);" % locals()
def c_code_cache_version(self): def c_code_cache_version(self):
return (5,) return (5,)
...@@ -3033,7 +3074,8 @@ class Sqrt(UnaryScalarOp): ...@@ -3033,7 +3074,8 @@ class Sqrt(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = sqrt(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = sqrt((%(cast)s)%(x)s);" % locals()
sqrt = Sqrt(upgrade_to_float, name='sqrt') sqrt = Sqrt(upgrade_to_float, name='sqrt')
...@@ -3134,7 +3176,8 @@ class Cos(UnaryScalarOp): ...@@ -3134,7 +3176,8 @@ class Cos(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = cos(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = cos((%(cast)s)%(x)s);" % locals()
cos = Cos(upgrade_to_float, name='cos') cos = Cos(upgrade_to_float, name='cos')
...@@ -3167,7 +3210,8 @@ class ArcCos(UnaryScalarOp): ...@@ -3167,7 +3210,8 @@ class ArcCos(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = acos(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = acos((%(cast)s)%(x)s);" % locals()
arccos = ArcCos(upgrade_to_float, name='arccos') arccos = ArcCos(upgrade_to_float, name='arccos')
...@@ -3202,7 +3246,8 @@ class Sin(UnaryScalarOp): ...@@ -3202,7 +3246,8 @@ class Sin(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = sin(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = sin((%(cast)s)%(x)s);" % locals()
sin = Sin(upgrade_to_float, name='sin') sin = Sin(upgrade_to_float, name='sin')
...@@ -3235,7 +3280,8 @@ class ArcSin(UnaryScalarOp): ...@@ -3235,7 +3280,8 @@ class ArcSin(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = asin(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = asin((%(cast)s)%(x)s);" % locals()
arcsin = ArcSin(upgrade_to_float, name='arcsin') arcsin = ArcSin(upgrade_to_float, name='arcsin')
...@@ -3268,7 +3314,8 @@ class Tan(UnaryScalarOp): ...@@ -3268,7 +3314,8 @@ class Tan(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = tan(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = tan((%(cast)s)%(x)s);" % locals()
tan = Tan(upgrade_to_float, name='tan') tan = Tan(upgrade_to_float, name='tan')
...@@ -3301,7 +3348,8 @@ class ArcTan(UnaryScalarOp): ...@@ -3301,7 +3348,8 @@ class ArcTan(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = atan(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = atan((%(cast)s)%(x)s);" % locals()
arctan = ArcTan(upgrade_to_float, name='arctan') arctan = ArcTan(upgrade_to_float, name='arctan')
...@@ -3346,7 +3394,8 @@ class ArcTan2(BinaryScalarOp): ...@@ -3346,7 +3394,8 @@ class ArcTan2(BinaryScalarOp):
if (node.inputs[0].type in complex_types or if (node.inputs[0].type in complex_types or
node.inputs[1].type in complex_types): node.inputs[1].type in complex_types):
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = atan2(%(y)s, %(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = atan2((%(cast)s)%(y)s, (%(cast)s)%(x)s);" % locals()
arctan2 = ArcTan2(upgrade_to_float, name='arctan2') arctan2 = ArcTan2(upgrade_to_float, name='arctan2')
...@@ -3383,7 +3432,8 @@ class Cosh(UnaryScalarOp): ...@@ -3383,7 +3432,8 @@ class Cosh(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = cosh(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = cosh((%(cast)s)%(x)s);" % locals()
cosh = Cosh(upgrade_to_float, name='cosh') cosh = Cosh(upgrade_to_float, name='cosh')
...@@ -3416,7 +3466,8 @@ class ArcCosh(UnaryScalarOp): ...@@ -3416,7 +3466,8 @@ class ArcCosh(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = acosh(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = acosh((%(cast)s)%(x)s);" % locals()
arccosh = ArcCosh(upgrade_to_float, name='arccosh') arccosh = ArcCosh(upgrade_to_float, name='arccosh')
...@@ -3453,7 +3504,8 @@ class Sinh(UnaryScalarOp): ...@@ -3453,7 +3504,8 @@ class Sinh(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = sinh(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = sinh((%(cast)s)%(x)s);" % locals()
sinh = Sinh(upgrade_to_float, name='sinh') sinh = Sinh(upgrade_to_float, name='sinh')
...@@ -3486,7 +3538,8 @@ class ArcSinh(UnaryScalarOp): ...@@ -3486,7 +3538,8 @@ class ArcSinh(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = asinh(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = asinh((%(cast)s)%(x)s);" % locals()
arcsinh = ArcSinh(upgrade_to_float, name='arcsinh') arcsinh = ArcSinh(upgrade_to_float, name='arcsinh')
...@@ -3524,7 +3577,8 @@ class Tanh(UnaryScalarOp): ...@@ -3524,7 +3577,8 @@ class Tanh(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = tanh(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = tanh((%(cast)s)%(x)s);" % locals()
tanh = Tanh(upgrade_to_float, name='tanh') tanh = Tanh(upgrade_to_float, name='tanh')
...@@ -3557,7 +3611,8 @@ class ArcTanh(UnaryScalarOp): ...@@ -3557,7 +3611,8 @@ class ArcTanh(UnaryScalarOp):
(z,) = outputs (z,) = outputs
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = atanh(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = atanh((%(cast)s)%(x)s);" % locals()
arctanh = ArcTanh(upgrade_to_float, name='arctanh') arctanh = ArcTanh(upgrade_to_float, name='arctanh')
......
...@@ -52,7 +52,8 @@ class Erf(UnaryScalarOp): ...@@ -52,7 +52,8 @@ class Erf(UnaryScalarOp):
z, = out z, = out
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = erf(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = erf((%(cast)s)%(x)s);" % locals()
erf = Erf(upgrade_to_float, name='erf') erf = Erf(upgrade_to_float, name='erf')
...@@ -83,7 +84,8 @@ class Erfc(UnaryScalarOp): ...@@ -83,7 +84,8 @@ class Erfc(UnaryScalarOp):
z, = out z, = out
if node.inputs[0].type in complex_types: if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
return "%(z)s = erfc(%(x)s);" % locals() cast = node.outputs[0].type.dtype_specs()[1]
return "%(z)s = erfc((%(cast)s)%(x)s);" % locals()
# scipy.special.erfc don't support complex. Why? # scipy.special.erfc don't support complex. Why?
erfc = Erfc(upgrade_to_float_no_complex, name='erfc') erfc = Erfc(upgrade_to_float_no_complex, name='erfc')
...@@ -275,11 +277,8 @@ class GammaLn(UnaryScalarOp): ...@@ -275,11 +277,8 @@ class GammaLn(UnaryScalarOp):
# For some reason, on the GPU, uint64 inputs don't get casted # For some reason, on the GPU, uint64 inputs don't get casted
# automatically to float64. This make the compilation crash # automatically to float64. This make the compilation crash
dtype = "" dtype = ""
if node.outputs[0].dtype == 'float64': cast = node.outputs[0].type.dtype_specs()[1]
dtype = "(double)" return """%(z)s = lgamma((%(cast)s)%(x)s);""" % locals()
elif node.outputs[0].dtype == 'float32':
dtype = "(float)"
return """%(z)s = lgamma(%(dtype)s%(x)s);""" % locals()
gammaln = GammaLn(upgrade_to_float, name='gammaln') gammaln = GammaLn(upgrade_to_float, name='gammaln')
......
from __future__ import (division, absolute_import, print_function) from __future__ import (division, absolute_import, print_function)
import unittest import unittest
from nose.plugins.skip import SkipTest
import numpy as np import numpy as np
import theano import theano
...@@ -84,6 +85,9 @@ class TestCTC(unittest.TestCase): ...@@ -84,6 +85,9 @@ class TestCTC(unittest.TestCase):
""" """
def setUp(self): def setUp(self):
if theano.config.mode == "FAST_COMPILE" or theano.config.cxx == "":
raise SkipTest("We need a c compiler")
if not ctc_available(): if not ctc_available():
self.skipTest('Optional library warp-ctc not available') self.skipTest('Optional library warp-ctc not available')
......
...@@ -1242,7 +1242,8 @@ CeilTester = makeBroadcastTester( ...@@ -1242,7 +1242,8 @@ CeilTester = makeBroadcastTester(
CeilInplaceTester = makeBroadcastTester( CeilInplaceTester = makeBroadcastTester(
op=inplace.ceil_inplace, op=inplace.ceil_inplace,
expected=upcast_float16_ufunc(np.ceil), expected=upcast_float16_ufunc(np.ceil),
good=_good_broadcast_unary_normal_no_complex, good=copymod(_good_broadcast_unary_normal_no_complex,
without=['integers', 'int8', 'uint8', 'uint16']),
# corner cases includes a lot of integers: points where Ceil is not # corner cases includes a lot of integers: points where Ceil is not
# continuous (not differentiable) # continuous (not differentiable)
inplace=True) inplace=True)
...@@ -1256,7 +1257,8 @@ FloorTester = makeBroadcastTester( ...@@ -1256,7 +1257,8 @@ FloorTester = makeBroadcastTester(
FloorInplaceTester = makeBroadcastTester( FloorInplaceTester = makeBroadcastTester(
op=inplace.floor_inplace, op=inplace.floor_inplace,
expected=upcast_float16_ufunc(np.floor), expected=upcast_float16_ufunc(np.floor),
good=_good_broadcast_unary_normal_no_complex, good=copymod(_good_broadcast_unary_normal_no_complex,
without=["integers", "int8", "uint8", "uint16"]),
inplace=True) inplace=True)
TruncInplaceTester = makeBroadcastTester( TruncInplaceTester = makeBroadcastTester(
...@@ -1603,7 +1605,8 @@ Arctan2InplaceTester = makeBroadcastTester( ...@@ -1603,7 +1605,8 @@ Arctan2InplaceTester = makeBroadcastTester(
op=inplace.arctan2_inplace, op=inplace.arctan2_inplace,
expected=np.arctan2, expected=np.arctan2,
good=copymod(_good_broadcast_binary_arctan2, good=copymod(_good_broadcast_binary_arctan2,
without=['integers', 'int8', 'uint8', 'uint16']), without=['integers', 'int8', 'uint8',
'uint16', 'dtype_mixup_2']),
inplace=True) inplace=True)
CoshTester = makeBroadcastTester( CoshTester = makeBroadcastTester(
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论