提交 27ba127c authored 作者: Frédéric Bastien's avatar Frédéric Bastien

Merge pull request #2631 from abergeron/minor_fixes

Minor fixes
......@@ -524,7 +524,7 @@ import theano and print the config variable, as in:
slower otherwise.
This can be any compiler binary (full path or not) but things may
break if the interface if not g++-compatible to some degree.
break if the interface is not g++-compatible to some degree.
.. attribute:: config.nvcc.fastmath
......
......@@ -217,6 +217,8 @@ class Print(Op):
"""
view_map = {0: [0]}
__props__ = ('message', 'attrs', 'global_fn')
def __init__(self, message="", attrs=("__str__",), global_fn=_print_fn):
self.message = message
self.attrs = tuple(attrs) # attrs should be a hashable iterable
......@@ -238,13 +240,6 @@ class Print(Op):
def R_op(self, inputs, eval_points):
return [x for x in eval_points]
def __eq__(self, other):
return (type(self) == type(other) and self.message == other.message
and self.attrs == other.attrs)
def __hash__(self):
return hash(self.message) ^ hash(self.attrs)
def __setstate__(self, dct):
dct.setdefault('global_fn', _print_fn)
self.__dict__.update(dct)
......
......@@ -129,13 +129,13 @@ class InputToGpuOptimizer(Optimizer):
def apply(self, fgraph):
for input in fgraph.inputs:
if isinstance(input.type, CudaNdarrayType):
return
continue
# This happen frequently as we do 2 pass of the gpu optimizations
if (len(input.clients) == 1 and
(input.clients[0][0] == 'output' or
input.clients[0][0].op == gpu_from_host)):
return
continue
try:
new_input = host_from_gpu(gpu_from_host(input))
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论