提交 f571f4a2 authored 作者: James Bergstra's avatar James Bergstra

merge

...@@ -370,9 +370,9 @@ class Method(Component): ...@@ -370,9 +370,9 @@ class Method(Component):
return memo[r] return memo[r]
except KeyError: except KeyError:
if require: if require:
raise AllocationError('There is no storage associated to %s used by %s.' raise AllocationError('There is no storage associated to %s used by %s = %s.'
' Verify that it is indeed a Member of the' ' Verify that it is indeed a Member of the'
' enclosing module or of one of its submodules.' % (r, self)) ' enclosing module or of one of its submodules.' % (r, self.name, self))
else: else:
return io.In(result = r, value = gof.Container(r, storage = [None]), mutable = False) return io.In(result = r, value = gof.Container(r, storage = [None]), mutable = False)
# Wrap the inputs in In instances. TODO: allow the inputs to _be_ In instances # Wrap the inputs in In instances. TODO: allow the inputs to _be_ In instances
...@@ -639,6 +639,10 @@ class ComponentList(Composite): ...@@ -639,6 +639,10 @@ class ComponentList(Composite):
raise TypeError('ComponentList may only contain Components.', c, type(c)) raise TypeError('ComponentList may only contain Components.', c, type(c))
self._components.append(c) self._components.append(c)
def extend(self, other):
    """Append every element of `other` to this ComponentList.

    Each element is routed through `self.append`, so the same
    wrapping/type-checking that `append` performs applies here too.
    """
    for element in other:
        self.append(element)
def __add__(self, other): def __add__(self, other):
if isinstance(other, (list, tuple)): if isinstance(other, (list, tuple)):
return ComponentList(self._components + map(wrap,other)) return ComponentList(self._components + map(wrap,other))
......
...@@ -581,6 +581,9 @@ class _tensor_py_operators: ...@@ -581,6 +581,9 @@ class _tensor_py_operators:
def __dot__(left, right): return dot(left, right) def __dot__(left, right): return dot(left, right)
def __rdot__(right, left): return dot(left, right) def __rdot__(right, left): return dot(left, right)
def sum(self, axis=None):
    """Return the sum of this tensor's elements along `axis`.

    When `axis` is None the sum is taken over all elements.
    Delegates to the `elemwise.Sum` Op.
    """
    sum_op = elemwise.Sum(axis)
    return sum_op(self)
class TensorResult(Result, _tensor_py_operators): class TensorResult(Result, _tensor_py_operators):
"""Subclass to add the tensor operators to the basic `Result` class.""" """Subclass to add the tensor operators to the basic `Result` class."""
...@@ -809,9 +812,16 @@ class MaxAndArgmax(Op): ...@@ -809,9 +812,16 @@ class MaxAndArgmax(Op):
# gMax * dMax/dx + gArgMax * dArgMax/dx, gMax * dMax/daxis + gArgMax * dArgMax/daxis # gMax * dMax/dx + gArgMax * dArgMax/dx, gMax * dMax/daxis + gArgMax * dArgMax/daxis
# g_max has one less dimension than x, so you need to complete g_max to x's shape # g_max has one less dimension than x, so you need to complete g_max to x's shape
# when axis=0 the broadcasting mechanism does it automatically # when axis=0 the broadcasting mechanism does it automatically
assert axis.data == 0
g_x = eq(max(x, axis), x) * g_max assert axis.data == 0 or axis.data == x.ndim-1
g_max_pad = shape_padleft(g_max) if axis.data==0 else \
shape_padright(g_max)
xmax = max(x, axis)
xmax_pad = shape_padleft(xmax) if axis.data==0 else \
shape_padright(xmax)
g_x = eq(xmax_pad, x) * g_max_pad
return g_x, None return g_x, None
@_redefine_asRoutine(MaxAndArgmax()) @_redefine_asRoutine(MaxAndArgmax())
def max_and_argmax(a): def max_and_argmax(a):
pass pass
...@@ -1624,7 +1634,7 @@ pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Join), ...@@ -1624,7 +1634,7 @@ pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Join),
@constructor @constructor
def shape_padleft(tensor, n_ones): def shape_padleft(tensor, n_ones=1):
"""Reshape `tensor` by left-padding the shape with `n_ones` 1s """Reshape `tensor` by left-padding the shape with `n_ones` 1s
See also: `shape_padright` and `Dimshuffle` See also: `shape_padright` and `Dimshuffle`
...@@ -1639,7 +1649,7 @@ def rightpad_shape(tensor, n_ones): ...@@ -1639,7 +1649,7 @@ def rightpad_shape(tensor, n_ones):
return DimShuffle(tensor.broadcastable, pattern)(tensor) return DimShuffle(tensor.broadcastable, pattern)(tensor)
@constructor @constructor
def shape_padright(tensor, n_ones): def shape_padright(tensor, n_ones=1):
"""Reshape `tensor` by right-padding the shape with `n_ones` 1s """Reshape `tensor` by right-padding the shape with `n_ones` 1s
See also: `shape_padleft` and `Dimshuffle` See also: `shape_padleft` and `Dimshuffle`
......
...@@ -212,7 +212,7 @@ class RandomKit(SymbolicInputKit): ...@@ -212,7 +212,7 @@ class RandomKit(SymbolicInputKit):
return out return out
def distribute(self, value, indices, containers): def distribute(self, value, indices, containers):
rg = partial(numpy.random.RandomState(value).randint, 2**30) rg = partial(numpy.random.RandomState(int(value)).randint, 2**30)
elems = deque(zip(indices, containers)) elems = deque(zip(indices, containers))
i = 0 i = 0
while elems: while elems:
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论