Commit 443a7ae4, authored by Frédéric Bastien, committed by GitHub

Merge pull request #5239 from Sentient07/new-frozen-dict

New frozen dict
......@@ -9,9 +9,11 @@ All rights reserved.
Contains code from NumPy, Copyright (c) 2005-2016, NumPy Developers.
All rights reserved.
Contain CnMeM under the same license with this copyright:
Contains CnMeM under the same license with this copyright:
Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
Contains frozendict code from slezica’s python-frozendict(https://github.com/slezica/python-frozendict/blob/master/frozendict/__init__.py), Copyright (c) 2012 Santiago Lezica. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
......
from __future__ import absolute_import, print_function, division
from collections import OrderedDict
import theano
from theano.gof.utils import (
give_variables_names, hash_from_dict, remove, unique)
give_variables_names, remove, unique)
def test_give_variables_names():
......@@ -49,22 +48,6 @@ def test_remove():
assert list(remove(even, range(5))) == list(filter(odd, range(5)))
def test_hash_from_dict():
    """hash_from_dict must give distinct hashes to structurally different
    dicts (including their OrderedDict counterparts), while hashing a list
    value the same as the equal tuple value."""
    dicts = [{}, {0: 0}, {0: 1}, {1: 0}, {1: 1},
             {0: (0,)}, {0: [1]},
             {0: (0, 1)}, {0: [1, 0]}]
    # An OrderedDict must hash differently from a plain dict with the
    # same content (hash_from_dict mixes the class into the hash).
    for elem in dicts[:]:
        dicts.append(OrderedDict(elem))
    seen = set()
    for d in dicts:
        h = hash_from_dict(d)
        # All dicts above are pairwise different, so no collision allowed.
        assert h not in seen
        seen.add(h)
    # Lists are not hashable, so hash_from_dict converts them to tuples:
    # a list value must hash the same as the equal tuple value.
    assert hash_from_dict({0: (0,)}) == hash_from_dict({0: [0]})
def test_stack_trace():
orig = theano.config.traceback.limit
try:
......
from __future__ import absolute_import, print_function, division
from collections import OrderedDict
import linecache
import sys
import traceback
......@@ -570,36 +569,3 @@ def hash_from_file(file_path):
"""
return hash_from_code(open(file_path, 'rb').read())
def hash_from_dict(d):
    """
    Hash a dictionary (dicts themselves are not hashable in Python).

    Keys are required to be int/float/string and, for a plain dict, to
    have a total order that depends only on the keys.  Values that are
    lists are converted to tuples, since lists are unhashable.

    Notes
    -----
    An OrderedDict is hashed in its insertion order instead of sorted
    order, so its keys do not need to be sortable.
    """
    if isinstance(d, OrderedDict):
        items = list(iteritems(d))
    else:
        items = sorted(d.items())
    keys = [k for k, _ in items]
    values = []
    for k, v in items:
        assert isinstance(k, (string_types, integer_types, float))
        values.append(tuple(v) if isinstance(v, (tuple, list)) else v)
    # Mixing in the class makes dict and OrderedDict with equal
    # content hash differently.
    return hash(tuple(keys + values + [d.__class__]))
# License : https://github.com/slezica/python-frozendict/blob/master/LICENSE.txt
from __future__ import absolute_import, print_function, division
import collections
import operator
import functools
# ``collections.Mapping`` moved to ``collections.abc`` in Python 3.3 and the
# old alias was removed in Python 3.10; resolve the ABC in a way that works
# on both old and new interpreters.
try:
    _Mapping = collections.abc.Mapping
except AttributeError:  # Python 2: ``collections.abc`` does not exist
    _Mapping = collections.Mapping


class frozendict(_Mapping):
    """
    An immutable wrapper around dictionaries that implements the complete
    :py:class:`collections.abc.Mapping` interface.  It can be used as a
    drop-in replacement for dictionaries where immutability is desired.
    """

    # Subclasses may override this to change the backing storage
    # (e.g. an OrderedDict for an order-preserving variant).
    dict_cls = dict

    def __init__(self, *args, **kwargs):
        # Same constructor signature as ``dict``.
        self._dict = self.dict_cls(*args, **kwargs)
        self._hash = None  # computed lazily and cached by __hash__

    def __getitem__(self, key):
        return self._dict[key]

    def __contains__(self, key):
        return key in self._dict

    def copy(self, **add_or_replace):
        """Return a new instance of the same class with
        ``add_or_replace`` entries merged over the current ones."""
        return self.__class__(self, **add_or_replace)

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self._dict)

    def __hash__(self):
        # XOR of the item hashes: order-independent and cheap; cached
        # because the content can never change.
        if self._hash is None:
            hashes = map(hash, self.items())
            self._hash = functools.reduce(operator.xor, hashes, 0)
        return self._hash
class FrozenOrderedDict(frozendict):
    """
    Immutable mapping that also preserves key insertion order.

    Identical to ``frozendict`` except that the backing storage is a
    ``collections.OrderedDict``, so iteration follows insertion order.
    """
    dict_cls = collections.OrderedDict
......@@ -28,11 +28,11 @@ import theano
from six.moves import xrange
from theano.compat import izip
from theano.gof import Op, Apply, local_optimizer, EquilibriumDB
from theano.gof.utils import hash_from_dict
from theano.sandbox.cuda import GpuElemwise, CudaNdarrayType, GpuOp
from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,
gpu_contiguous)
from theano.sandbox.cuda.opt import gpu_seqopt
from theano.misc.frozendict import frozendict
import pycuda
from pycuda.compiler import SourceModule
......@@ -183,13 +183,14 @@ class PycudaElemwiseKernelOp(GpuOp):
class PycudaElemwiseSourceModuleOp(GpuOp):
nin = property(lambda self: self.scalar_op.nin)
nout = property(lambda self: self.scalar_op.nout)
__props__ = ("scalar_op", "inplace_pattern")
def __init__(self, scalar_op, inplace_pattern=None, name=None):
    """Build a pycuda elemwise op around ``scalar_op``.

    Parameters
    ----------
    scalar_op : the scalar operation applied elementwise.
    inplace_pattern : dict-like or None
        Mapping used for in-place computation; ``None`` means empty.
    name : str or None
        Optional display name for the op.
    """
    # NOTE: the original text contained duplicated (dead) assignments left
    # over from a diff merge; only the final value of each is kept here.
    if inplace_pattern is None:
        inplace_pattern = frozendict({})
    self.name = name
    self.scalar_op = scalar_op
    # Stored as a frozendict so the op stays hashable via __props__.
    self.inplace_pattern = frozendict(inplace_pattern)
def __str__(self):
if self.name is None:
......@@ -203,15 +204,6 @@ class PycudaElemwiseSourceModuleOp(GpuOp):
else:
return self.name
def __eq__(self, other):
    # Two ops are equal iff they have the same class, the same scalar
    # op and the same inplace pattern.
    if type(self) != type(other):
        return False
    return (self.scalar_op == other.scalar_op and
            self.inplace_pattern == other.inplace_pattern)
def __hash__(self):
    # Combine class, scalar op and inplace pattern; hash_from_dict is
    # needed because plain dicts are unhashable.
    h = hash(type(self)) ^ hash(self.scalar_op)
    return h ^ hash_from_dict(self.inplace_pattern)
def make_node(self, *inputs):
_inputs = [gpu_contiguous(as_cuda_ndarray_variable(i)) for i in inputs]
if self.nin > 0 and len(_inputs) != self.nin:
......@@ -284,12 +276,7 @@ class PycudaElemwiseSourceModuleMakeThunkOp(Op):
inplace_pattern = {}
self.name = name
self.scalar_op = scalar_op
self.inplace_pattern = inplace_pattern
# As we have a dict in props, we need to implement __hash__
def __hash__(self):
return hash((type(self), hash(self.scalar_op),
hash_from_dict(self.inplace_pattern)))
self.inplace_pattern = frozendict(inplace_pattern)
def __str__(self):
if self.name is None:
......
......@@ -3853,7 +3853,6 @@ class Composite(ScalarOp):
self.nin = len(inputs)
self.nout = len(outputs)
self.init_fgraph() # self.fgraph
# Postpone the creation in case it isn't needed.
# self.init_name() # self.name
self.name = None
......@@ -3975,6 +3974,7 @@ class Composite(ScalarOp):
# see __hash__ for comment on why there is no mention of fgraph
# or module cache key here.
self.init_c_code() # self._c_code and self.nodenames
other.init_c_code()
return (self._c_code == other._c_code)
def __hash__(self):
......
......@@ -16,9 +16,8 @@ from theano.scalar import get_scalar_type
from theano.printing import pprint
from theano.gradient import DisconnectedType
from theano.gof.null_type import NullType
from theano.gof.utils import hash_from_dict
from theano.tensor import elemwise_cgen as cgen
from theano.misc.frozendict import frozendict
config = theano.config
......@@ -472,14 +471,16 @@ second dimension
"""
__props__ = ("scalar_op", "inplace_pattern")
def __init__(self, scalar_op, inplace_pattern=None, name=None,
nfunc_spec=None, openmp=None):
if inplace_pattern is None:
inplace_pattern = {}
inplace_pattern = frozendict({})
self.name = name
self.scalar_op = scalar_op
self.inplace_pattern = inplace_pattern
self.destroy_map = dict((o, [i]) for o, i in inplace_pattern.items())
self.inplace_pattern = frozendict(inplace_pattern)
self.destroy_map = dict((o, [i]) for o, i in self.inplace_pattern.items())
self.ufunc = None
self.nfunc = None
......@@ -489,8 +490,6 @@ second dimension
if nfunc_spec:
self.nfunc = getattr(numpy, nfunc_spec[0])
# precompute the hash of this node
self._rehash()
super(Elemwise, self).__init__(openmp=openmp)
def __getstate__(self):
......@@ -498,20 +497,19 @@ second dimension
d.pop('ufunc')
d.pop('nfunc')
d.pop('__epydoc_asRoutine', None)
d.pop('_hashval')
return d
def __setstate__(self, d):
    """Restore pickled state and rebuild the callables that are
    dropped from the pickle (see ``__getstate__``)."""
    super(Elemwise, self).__setstate__(d)
    self.ufunc = None
    self.nfunc = None
    # Re-wrap so the pattern is hashable after unpickling.
    self.inplace_pattern = frozendict(self.inplace_pattern)
    scal = self.scalar_op
    if getattr(self, 'nfunc_spec', None):
        self.nfunc = getattr(numpy, self.nfunc_spec[0])
    elif 0 < scal.nin < 32:
        # NOTE(review): the upper bound of 32 presumably matches a numpy
        # ufunc input limit -- confirm before changing.
        self.ufunc = numpy.frompyfunc(scal.impl, scal.nin, scal.nout)
    self._rehash()
def get_output_info(self, dim_shuffle, *inputs):
"""Return the outputs dtype and broadcastable pattern and the
......@@ -584,26 +582,6 @@ second dimension
out_broadcastables)]
return Apply(self, inputs, outputs)
def __eq__(self, other):
    # Equal iff same class, same scalar op and the same inplace
    # pattern; the items are sorted so key order does not matter.
    if type(self) != type(other):
        return False
    return (self.scalar_op == other.scalar_op and
            sorted(self.inplace_pattern.items()) ==
            sorted(other.inplace_pattern.items()))
def _rehash(self):
    # Precompute and cache the hash value used by __hash__.
    h = (hash('Elemwise') ^ hash(self.scalar_op) ^
         hash_from_dict(self.inplace_pattern))
    # A recomputed hash must agree with any previously cached value.
    assert h == getattr(self, '_hashval', h)
    self._hashval = h
def __hash__(self):
    # Return the value precomputed by _rehash() (called from
    # __init__ and __setstate__).
    return self._hashval
def __str__(self):
if self.name is None:
if self.inplace_pattern:
......
......@@ -5024,6 +5024,7 @@ class T_local_erfc(unittest.TestCase):
mode_fusion.check_isfinite = False
f = theano.function([x], T.grad(T.log(T.erfc(x)).sum(), x), mode=mode)
assert len(f.maker.fgraph.apply_nodes) == 23, len(f.maker.fgraph.apply_nodes)
assert all(numpy.isfinite(f(val)))
assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论