提交 4e873cfe，作者：Pascal Lamblin

Cosmetic changes

上级 67a685e0
...@@ -47,7 +47,7 @@ class DimShuffle(Op): ...@@ -47,7 +47,7 @@ class DimShuffle(Op):
the second of the resulting tensor, etc. If the tensor has the second of the resulting tensor, etc. If the tensor has
shape (20, 30, 40), the resulting tensor will have dimensions shape (20, 30, 40), the resulting tensor will have dimensions
(1, 40, 1, 20, 30). (AxBxC tensor is mapped to 1xCx1xAxB tensor) (1, 40, 1, 20, 30). (AxBxC tensor is mapped to 1xCx1xAxB tensor)
DimShuffle((True, False), [1]) DimShuffle((True, False), [1])
This op will only work on 2d tensors with the first dimension broadcastable. This op will only work on 2d tensors with the first dimension broadcastable.
...@@ -65,7 +65,7 @@ class DimShuffle(Op): ...@@ -65,7 +65,7 @@ class DimShuffle(Op):
DimShuffle((False, False), [0, 'x', 1]) -> AxB to Ax1xB DimShuffle((False, False), [0, 'x', 1]) -> AxB to Ax1xB
DimShuffle((False, False), [1, 'x', 0]) -> AxB to Bx1xA DimShuffle((False, False), [1, 'x', 0]) -> AxB to Bx1xA
""" """
def __init__(self, input_broadcastable, new_order, inplace = False): def __init__(self, input_broadcastable, new_order, inplace = False):
""" """
Usage: DimShuffle(input_broadcastable, new_order, inplace = False) Usage: DimShuffle(input_broadcastable, new_order, inplace = False)
...@@ -128,11 +128,11 @@ class DimShuffle(Op): ...@@ -128,11 +128,11 @@ class DimShuffle(Op):
ob.append(True) ob.append(True)
else: else:
ob.append(ib[value]) ob.append(ib[value])
output = Tensor(dtype = input.type.dtype, output = Tensor(dtype = input.type.dtype,
broadcastable = ob).make_result() broadcastable = ob).make_result()
return Apply(self, [input], [output]) return Apply(self, [input], [output])
def __eq__(self, other): def __eq__(self, other):
# it's probably not necessary to compare input_broadcastable # it's probably not necessary to compare input_broadcastable
return type(self) == type(other) \ return type(self) == type(other) \
...@@ -188,7 +188,7 @@ class DimShuffle(Op): ...@@ -188,7 +188,7 @@ class DimShuffle(Op):
class Elemwise(Op): class Elemwise(Op):
""" """
Generalizes a scalar op to tensors. Generalizes a scalar op to tensors.
All the inputs must have the same number of dimensions. When the All the inputs must have the same number of dimensions. When the
Op is performed, for each dimension, each input's size for that Op is performed, for each dimension, each input's size for that
dimension must be the same. As a special case, it can also be 1 dimension must be the same. As a special case, it can also be 1
...@@ -215,7 +215,7 @@ class Elemwise(Op): ...@@ -215,7 +215,7 @@ class Elemwise(Op):
def __init__(self, scalar_op, inplace_pattern = {}, name = None): def __init__(self, scalar_op, inplace_pattern = {}, name = None):
""" """
Usage: Elemwise(scalar_op, inplace_pattern = {}) Usage: Elemwise(scalar_op, inplace_pattern = {})
* scalar_op: an instance of a subclass of scalar.ScalarOp which works uniquely on * scalar_op: an instance of a subclass of scalar.ScalarOp which works uniquely on
scalars scalars
* inplace_pattern: a dictionary that maps the index of an output to the * inplace_pattern: a dictionary that maps the index of an output to the
...@@ -238,7 +238,7 @@ class Elemwise(Op): ...@@ -238,7 +238,7 @@ class Elemwise(Op):
using DimShuffle. using DimShuffle.
""" """
inputs = map(as_tensor, inputs) inputs = map(as_tensor, inputs)
shadow = self.scalar_op.make_node(*[Scalar(dtype = t.type.dtype)() for t in inputs]) shadow = self.scalar_op.make_node(*[Scalar(dtype = t.type.dtype)() for t in inputs])
target_length = max([input.type.ndim for input in inputs]) target_length = max([input.type.ndim for input in inputs])
...@@ -254,7 +254,7 @@ class Elemwise(Op): ...@@ -254,7 +254,7 @@ class Elemwise(Op):
args.append(DimShuffle(input.type.broadcastable, ['x']*difference + range(length), inplace = True)(input)) args.append(DimShuffle(input.type.broadcastable, ['x']*difference + range(length), inplace = True)(input))
inputs = args inputs = args
# # Following conditions should always be true? # # Following conditions should always be true?
# try: # try:
# assert len(set([len(input.type.broadcastable) for input in inputs])) == 1 # assert len(set([len(input.type.broadcastable) for input in inputs])) == 1
# except (AssertionError, AttributeError): # except (AssertionError, AttributeError):
...@@ -317,7 +317,7 @@ class Elemwise(Op): ...@@ -317,7 +317,7 @@ class Elemwise(Op):
ret.append(None) ret.append(None)
continue continue
r = transform(scalar_igrad) r = transform(scalar_igrad)
# list of all the dimensions that are broadcastable for that input so we # list of all the dimensions that are broadcastable for that input so we
# can sum over them # can sum over them
# todo: only count dimensions that were effectively broadcasted # todo: only count dimensions that were effectively broadcasted
...@@ -382,7 +382,7 @@ class Elemwise(Op): ...@@ -382,7 +382,7 @@ class Elemwise(Op):
inames = gof.utils.uniq(inames) inames = gof.utils.uniq(inames)
inputs = gof.utils.uniq(node.inputs) inputs = gof.utils.uniq(node.inputs)
defines = "" defines = ""
undefs = "" undefs = ""
dmap = dict([(node.outputs[i], [node.inputs[o]]) for i, o in self.inplace_pattern.items()]) dmap = dict([(node.outputs[i], [node.inputs[o]]) for i, o in self.inplace_pattern.items()])
...@@ -402,7 +402,7 @@ class Elemwise(Op): ...@@ -402,7 +402,7 @@ class Elemwise(Op):
aliased_outputs, aliased_onames = aliased aliased_outputs, aliased_onames = aliased
else: else:
aliased_outputs, aliased_onames = [], [] aliased_outputs, aliased_onames = [], []
orders = [[x and 'x' or i for i, x in enumerate(input.type.broadcastable)] for input in inputs] orders = [[x and 'x' or i for i, x in enumerate(input.type.broadcastable)] for input in inputs]
nnested = len(orders[0]) nnested = len(orders[0])
sub = dict(sub) sub = dict(sub)
...@@ -419,7 +419,7 @@ class Elemwise(Op): ...@@ -419,7 +419,7 @@ class Elemwise(Op):
alloc += cgen.make_declare([range(nnested)], [odtype], dict(sub, lv0 = oname)) alloc += cgen.make_declare([range(nnested)], [odtype], dict(sub, lv0 = oname))
alloc += cgen.make_alloc(orders, odtype, sub) alloc += cgen.make_alloc(orders, odtype, sub)
alloc += cgen.make_checks([range(nnested)], [odtype], dict(sub, lv0 = oname)) alloc += cgen.make_checks([range(nnested)], [odtype], dict(sub, lv0 = oname))
for output, oname in zip(aliased_outputs, aliased_onames): for output, oname in zip(aliased_outputs, aliased_onames):
iname = inames[inputs.index(dmap[output][0])] iname = inames[inputs.index(dmap[output][0])]
alloc += """ alloc += """
...@@ -454,7 +454,7 @@ class Elemwise(Op): ...@@ -454,7 +454,7 @@ class Elemwise(Op):
all_code = [code] all_code = [code]
loop = cgen.make_loop(orders + [range(nnested)] * len(real_onames), idtypes + list(real_odtypes), all_code, sub) loop = cgen.make_loop(orders + [range(nnested)] * len(real_onames), idtypes + list(real_odtypes), all_code, sub)
return decl, checks, alloc, loop return decl, checks, alloc, loop
def c_code(self, node, name, inames, onames, sub):
    """Emit the C implementation for this Elemwise node.

    Delegates to self._c_all, which returns the declaration, check,
    allocation and loop sections, and glues them into one C snippet.
    """
    sections = self._c_all(node, name, inames, onames, sub)
    return "\n".join(sections)
...@@ -468,7 +468,7 @@ class Elemwise(Op): ...@@ -468,7 +468,7 @@ class Elemwise(Op):
class CAReduce(Op): class CAReduce(Op):
""" """
Reduces a scalar operation along the specified axis(es). Reduces a scalar operation along the specified axis(es).
The output will have the same shape as the input minus the reduced The output will have the same shape as the input minus the reduced
dimensions. It will contain the result of accumulating all values dimensions. It will contain the result of accumulating all values
over the reduced dimensions using the specified scalar op. over the reduced dimensions using the specified scalar op.
...@@ -506,7 +506,7 @@ class CAReduce(Op): ...@@ -506,7 +506,7 @@ class CAReduce(Op):
else: else:
self.axis = axis self.axis = axis
self.ufunc = numpy.frompyfunc(scalar_op.impl, 2, 1) self.ufunc = numpy.frompyfunc(scalar_op.impl, 2, 1)
def make_node(self, input): def make_node(self, input):
input = as_tensor(input) input = as_tensor(input)
axis = self.axis axis = self.axis
...@@ -524,13 +524,13 @@ class CAReduce(Op): ...@@ -524,13 +524,13 @@ class CAReduce(Op):
return hash(self.scalar_op) return hash(self.scalar_op)
else: else:
return hash(self.scalar_op) ^ hash(tuple(self.axis)) return hash(self.scalar_op) ^ hash(tuple(self.axis))
def __str__(self): def __str__(self):
if self.axis is not None: if self.axis is not None:
return "Reduce{%s}{%s}" % (self.scalar_op, ", ".join(str(x) for x in self.axis)) return "Reduce{%s}{%s}" % (self.scalar_op, ", ".join(str(x) for x in self.axis))
else: else:
return "Reduce{%s}" % self.scalar_op return "Reduce{%s}" % self.scalar_op
def perform(self, node, (input, ), (output, )): def perform(self, node, (input, ), (output, )):
axis = self.axis axis = self.axis
if axis is None: if axis is None:
...@@ -551,7 +551,7 @@ class CAReduce(Op): ...@@ -551,7 +551,7 @@ class CAReduce(Op):
iname = inames[0] iname = inames[0]
oname = onames[0] oname = onames[0]
idtype = input.type.dtype_specs()[1] idtype = input.type.dtype_specs()[1]
odtype = output.type.dtype_specs()[1] odtype = output.type.dtype_specs()[1]
...@@ -565,7 +565,7 @@ class CAReduce(Op): ...@@ -565,7 +565,7 @@ class CAReduce(Op):
order1 = [i for i in xrange(input.type.ndim) if i not in axis] order1 = [i for i in xrange(input.type.ndim) if i not in axis]
order = order1 + list(axis) order = order1 + list(axis)
nnested = len(order1) nnested = len(order1)
sub = dict(sub) sub = dict(sub)
...@@ -607,10 +607,10 @@ class CAReduce(Op): ...@@ -607,10 +607,10 @@ class CAReduce(Op):
all_code = [("", "")] * nnested + [(task0_decl, code1), ""] all_code = [("", "")] * nnested + [(task0_decl, code1), ""]
else: else:
all_code = [("", "")] * nnested + [(task0_decl, "")] + [("", "")] * (len(axis) - 2) + [("", code1), ""] all_code = [("", "")] * nnested + [(task0_decl, "")] + [("", "")] * (len(axis) - 2) + [("", code1), ""]
loop = cgen.make_loop([order, range(nnested) + ['x'] * len(axis)], [idtype, odtype], all_code, sub) loop = cgen.make_loop([order, range(nnested) + ['x'] * len(axis)], [idtype, odtype], all_code, sub)
return decl, checks, alloc, loop return decl, checks, alloc, loop
def c_code(self, node, name, inames, onames, sub):
    """Emit the C implementation for this CAReduce node.

    Joins the decl/checks/alloc/loop fragments produced by self._c_all
    into a single block of C code.
    """
    pieces = self._c_all(node, name, inames, onames, sub)
    return "\n".join(pieces)
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论