提交 8a5929fd authored 作者: Frédéric Bastien's avatar Frédéric Bastien 提交者: GitHub

Merge pull request #4611 from Sentient07/CrashFix#1

Crash Fix
......@@ -75,10 +75,11 @@ class GpuElemwise(HideC, Elemwise):
pass
try:
support_code = self.scalar_op.c_support_code()
if (support_code.strip() != "#define THEANO_MACRO_MOD(x,y) (x % y)" and
support_code.strip() != ""):
if "struct" in support_code:
# The macro is fine, the C++ struct is not.
raise SupportCodeError(support_code)
raise SupportCodeError(
"struct aren't supported in GpuElemwise support_code" +
support_code)
except MethodNotDefined:
pass
......
......@@ -881,6 +881,10 @@ def local_gpua_softmaxwithbias(node, context_name):
@register_opt('fast_compile')
@op_lifter([theano.tensor.opt.Assert])
def local_assert(node, context_name):
    """Lift a host-side ``Assert`` op onto the GPU.

    Moves the asserted value (``node.inputs[0]``) to the GPU context
    ``context_name``, re-applies the same ``Assert`` op with the
    remaining inputs (``node.inputs[1:]``) passed through unchanged,
    and transfers the result back to the host.

    Returns ``None`` (i.e. no replacement) when the first input is
    already a ``GpuArrayType`` variable, so an already-lifted node is
    not wrapped a second time.
    """
    # Check if input nodes are already on the GPU
    # Crash fix (PR #4611): bail out instead of re-wrapping a variable
    # that already lives on the GPU.
    if isinstance(node.inputs[0].type, GpuArrayType):
        return
    # NOTE(review): the extra inputs are forwarded as-is; presumably they
    # are the assert conditions and must stay on the host — confirm
    # against theano.tensor.opt.Assert's make_node.
    return [host_from_gpu(node.op(as_gpuarray_variable(node.inputs[0],
                                                       context_name),
                                  *node.inputs[1:]))]
......@@ -946,7 +950,7 @@ def local_lift_abstractconv2d(node, context_name):
return [node.op(*inps)]
# Register this here so that it goes after the abstract lifting
register_opt()(conv_groupopt)
register_opt('fast_compile')(conv_groupopt)
@register_opt("low_memory")
......
......@@ -1031,6 +1031,8 @@ class ShapeFeature(object):
# don't make the optimizer merge a zillion ones together
# by always returning the same object to represent 1
return self.lscalar_one
if type(s_i) is float and int(s_i) == s_i:
s_i = int(s_i)
if (type(s_i) in integer_types or
isinstance(s_i, numpy.integer) or
(isinstance(s_i, numpy.ndarray) and s_i.ndim == 0)):
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论