Commit ff9bc957 authored by sentient07

Removed hash_from_dict and a fix for _c_code

Parent 9a3d3ef1
from __future__ import absolute_import, print_function, division from __future__ import absolute_import, print_function, division
from collections import OrderedDict
import theano import theano
from theano.gof.utils import ( from theano.gof.utils import (
give_variables_names, hash_from_dict, remove, unique) give_variables_names, remove, unique)
def test_give_variables_names(): def test_give_variables_names():
...@@ -49,22 +48,6 @@ def test_remove(): ...@@ -49,22 +48,6 @@ def test_remove():
assert list(remove(even, range(5))) == list(filter(odd, range(5))) assert list(remove(even, range(5))) == list(filter(odd, range(5)))
def test_hash_from_dict():
    """Distinct dicts (plain and ordered) must produce distinct hashes,
    while list and tuple values must hash alike."""
    cases = [{}, {0: 0}, {0: 1}, {1: 0}, {1: 1},
             {0: (0,)}, {0: [1]},
             {0: (0, 1)}, {0: [1, 0]}]
    # Exercise every plain dict a second time as an OrderedDict; the two
    # classes must never collide with each other.
    cases.extend(OrderedDict(c) for c in cases[:])
    seen = set()
    for d in cases:
        h = hash_from_dict(d)
        assert h not in seen
        seen.add(h)
    # Lists are not hashable, so they are normalized to tuples internally:
    # these two are expected to collide on purpose.
    assert hash_from_dict({0: (0,)}) == hash_from_dict({0: [0]})
def test_stack_trace(): def test_stack_trace():
orig = theano.config.traceback.limit orig = theano.config.traceback.limit
try: try:
......
from __future__ import absolute_import, print_function, division from __future__ import absolute_import, print_function, division
from collections import OrderedDict
import linecache import linecache
import sys import sys
import traceback import traceback
...@@ -570,36 +569,3 @@ def hash_from_file(file_path): ...@@ -570,36 +569,3 @@ def hash_from_file(file_path):
""" """
return hash_from_code(open(file_path, 'rb').read()) return hash_from_code(open(file_path, 'rb').read())
def hash_from_dict(d):
    """
    Return a hash for a dict, working around dicts not being hashable.

    Keys must be int/float/str scalars.  For a plain dict the items are
    sorted by key, so keys must have a total order; for an OrderedDict
    the insertion order is used instead, so its keys need not be
    sortable.  Values that are lists are converted to tuples, as lists
    are not hashable.

    Notes
    -----
    The dict's class is folded into the hash, so a dict and an
    OrderedDict with the same content never collide.
    """
    # NOTE(review): the original relied on six's iteritems/string_types/
    # integer_types, which are no longer imported in this module; the
    # stdlib equivalents below preserve the same behavior.
    if isinstance(d, OrderedDict):
        # Special case: keep insertion order, keys need not be sortable.
        items = list(d.items())
    else:
        items = sorted(d.items())
    keys = [k for k, v in items]
    values = []
    for k, v in items:
        # Only scalar keys are supported; anything else is a caller bug.
        assert isinstance(k, (str, int, float)), k
        # Normalize sequences to tuples so {0: [0]} and {0: (0,)} hash alike.
        values.append(tuple(v) if isinstance(v, (tuple, list)) else v)
    return hash(tuple(keys + values + [d.__class__]))
# License : https://github.com/slezica/python-frozendict/blob/master/LICENSE.txt # License : https://github.com/slezica/python-frozendict/blob/master/LICENSE.txt
from __future__ import absolute_import, print_function, division
import collections import collections
import operator import operator
import functools import functools
......
...@@ -28,11 +28,11 @@ import theano ...@@ -28,11 +28,11 @@ import theano
from six.moves import xrange from six.moves import xrange
from theano.compat import izip from theano.compat import izip
from theano.gof import Op, Apply, local_optimizer, EquilibriumDB from theano.gof import Op, Apply, local_optimizer, EquilibriumDB
from theano.gof.utils import hash_from_dict
from theano.sandbox.cuda import GpuElemwise, CudaNdarrayType, GpuOp from theano.sandbox.cuda import GpuElemwise, CudaNdarrayType, GpuOp
from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable, from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,
gpu_contiguous) gpu_contiguous)
from theano.sandbox.cuda.opt import gpu_seqopt from theano.sandbox.cuda.opt import gpu_seqopt
from theano.misc.frozendict import frozendict
import pycuda import pycuda
from pycuda.compiler import SourceModule from pycuda.compiler import SourceModule
...@@ -183,13 +183,14 @@ class PycudaElemwiseKernelOp(GpuOp): ...@@ -183,13 +183,14 @@ class PycudaElemwiseKernelOp(GpuOp):
class PycudaElemwiseSourceModuleOp(GpuOp): class PycudaElemwiseSourceModuleOp(GpuOp):
nin = property(lambda self: self.scalar_op.nin) nin = property(lambda self: self.scalar_op.nin)
nout = property(lambda self: self.scalar_op.nout) nout = property(lambda self: self.scalar_op.nout)
__props__ = ("scalar_op", "inplace_pattern", "name")
def __init__(self, scalar_op, inplace_pattern=None, name=None): def __init__(self, scalar_op, inplace_pattern=None, name=None):
if inplace_pattern is None: if inplace_pattern is None:
inplace_pattern = {} inplace_pattern = frozendict({})
self.name = name self.name = name
self.scalar_op = scalar_op self.scalar_op = scalar_op
self.inplace_pattern = inplace_pattern self.inplace_pattern = frozendict(inplace_pattern)
def __str__(self): def __str__(self):
if self.name is None: if self.name is None:
...@@ -203,15 +204,6 @@ class PycudaElemwiseSourceModuleOp(GpuOp): ...@@ -203,15 +204,6 @@ class PycudaElemwiseSourceModuleOp(GpuOp):
else: else:
return self.name return self.name
def __eq__(self, other):
    """Equal iff same concrete type, same scalar_op, same inplace_pattern."""
    if type(self) != type(other):
        return False
    same_scalar = self.scalar_op == other.scalar_op
    same_pattern = self.inplace_pattern == other.inplace_pattern
    return same_scalar and same_pattern
def __hash__(self):
    """Hash consistent with __eq__: folds type, scalar_op and the
    inplace pattern together."""
    h = hash(type(self))
    h ^= hash(self.scalar_op)
    h ^= hash_from_dict(self.inplace_pattern)
    return h
def make_node(self, *inputs): def make_node(self, *inputs):
_inputs = [gpu_contiguous(as_cuda_ndarray_variable(i)) for i in inputs] _inputs = [gpu_contiguous(as_cuda_ndarray_variable(i)) for i in inputs]
if self.nin > 0 and len(_inputs) != self.nin: if self.nin > 0 and len(_inputs) != self.nin:
...@@ -284,12 +276,7 @@ class PycudaElemwiseSourceModuleMakeThunkOp(Op): ...@@ -284,12 +276,7 @@ class PycudaElemwiseSourceModuleMakeThunkOp(Op):
inplace_pattern = {} inplace_pattern = {}
self.name = name self.name = name
self.scalar_op = scalar_op self.scalar_op = scalar_op
self.inplace_pattern = inplace_pattern self.inplace_pattern = frozendict(inplace_pattern)
# inplace_pattern is a dict and dicts are unhashable, so a custom
# __hash__ is required on top of __props__.
def __hash__(self):
    """Combine the type, the scalar_op hash and the inplace-pattern hash."""
    parts = (type(self), hash(self.scalar_op),
             hash_from_dict(self.inplace_pattern))
    return hash(parts)
def __str__(self): def __str__(self):
if self.name is None: if self.name is None:
......
...@@ -3853,7 +3853,7 @@ class Composite(ScalarOp): ...@@ -3853,7 +3853,7 @@ class Composite(ScalarOp):
self.nin = len(inputs) self.nin = len(inputs)
self.nout = len(outputs) self.nout = len(outputs)
self.init_fgraph() # self.fgraph self.init_fgraph() # self.fgraph
self.init_c_code()
# Postpone the creation in case it isn't needed. # Postpone the creation in case it isn't needed.
# self.init_name() # self.name # self.init_name() # self.name
self.name = None self.name = None
......
...@@ -581,6 +581,17 @@ second dimension ...@@ -581,6 +581,17 @@ second dimension
out_broadcastables)] out_broadcastables)]
return Apply(self, inputs, outputs) return Apply(self, inputs, outputs)
def __str__(self):
    """Render as the explicit name when set, otherwise as
    Elemwise{scalar_op} followed by the sorted inplace items, if any."""
    if self.name is not None:
        return self.name
    if not self.inplace_pattern:
        return "Elemwise{%s}" % (self.scalar_op)
    pattern = sorted(self.inplace_pattern.items())
    return "Elemwise{%s}%s" % (self.scalar_op, str(pattern))
def R_op(self, inputs, eval_points): def R_op(self, inputs, eval_points):
outs = self(*inputs, **dict(return_list=True)) outs = self(*inputs, **dict(return_list=True))
rval = [None for x in outs] rval = [None for x in outs]
......
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment