提交 a9557171 authored 作者: James Bergstra's avatar James Bergstra

merge

......@@ -7,3 +7,14 @@ from .var import (CudaNdarrayVariable,
import basic_ops
import opt
import theano.compile.sandbox
def handle_shared_float32(tf):
    """Turn on (or, eventually, off) CudaNdarrayType as the default
    handler for shared float32 arrays.

    :param tf: True to register the CUDA handler with
        theano.compile.sandbox; False is not yet supported.
    :raises NotImplementedError: if tf is False — unregistering the
        handler has not been implemented.
    """
    if not tf:
        raise NotImplementedError('removing our handler')
    # NOTE(review): `shared_constructor` is not defined by this module's
    # visible imports — confirm it is the intended constructor to register.
    theano.compile.sandbox.shared_constructor(shared_constructor)
......@@ -44,8 +44,8 @@ def local_gpu_elemwise_0(node):
if isinstance(node.op, tensor.Elemwise):
if any(hasattr(i.owner, 'op') and isinstance(i.owner.op, HostFromGpu) for i in node.inputs):
if any(o.type.dtype == 'float64' for o in node.outputs):
print 'EXITING FROM local_gpu_elemwise_0', node
sys.exit()
print 'WARNING: THERE ARE STILL float64s in your graph local_gpu_elemwise_0', node
else:
# move the add to a GpuAdd
new_op = GpuElemwise(node.op.scalar_op, node.op.inplace_pattern)
return [host_from_gpu(new_op(*(gpu_from_host(i) for i in node.inputs)))]
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论