提交 e5b29d4e authored 作者: lamblin's avatar lamblin

Merge pull request #564 from nouiz/alloc

Alloc
...@@ -23,6 +23,10 @@ Interface changes ...@@ -23,6 +23,10 @@ Interface changes
instance, function([x, y], [y]). You can use the kwarg instance, function([x, y], [y]). You can use the kwarg
``on_unused_input={'raise', 'warn', 'ignore'}`` to control this. ``on_unused_input={'raise', 'warn', 'ignore'}`` to control this.
(Pascal L.) (Pascal L.)
* tensor.alloc() now raises an error at graph build time
when we try to create fewer dimensions than the number of dimensions
the provided value has. In the past, the error was at run time.
(Frederic B.)
New Features New Features
* debugprint new param ids=["CHAR", "id", "int", ""] * debugprint new param ids=["CHAR", "id", "int", ""]
......
...@@ -743,9 +743,14 @@ class ScalarOp(Op): ...@@ -743,9 +743,14 @@ class ScalarOp(Op):
if hasattr(self, 'name') and self.name: if hasattr(self, 'name') and self.name:
return self.name return self.name
else: else:
return "%s{%s}" % (self.__class__.__name__, param = [(k, v) for k, v in self.__dict__.items()
", ".join("%s=%s" % (k, v) for k, v in if k not in ["name", "_op_use_c_code"]]
self.__dict__.items() if k != "name")) if param:
return "%s{%s}" % (self.__class__.__name__,
", ".join("%s=%s" % (k, v)
for k, v in param))
else:
return self.__class__.__name__
def c_code_cache_version(self): def c_code_cache_version(self):
return (3,) return (3,)
......
...@@ -2801,6 +2801,10 @@ class Alloc(gof.Op): ...@@ -2801,6 +2801,10 @@ class Alloc(gof.Op):
v = as_tensor_variable(value) v = as_tensor_variable(value)
sh = [as_tensor_variable(s) for s in shape] sh = [as_tensor_variable(s) for s in shape]
bcast = [] bcast = []
if v.ndim > len(sh):
raise TypeError("Alloc value to use have more dimensions"
" then the specified dimensions",
v.ndim, len(sh))
for i, s in enumerate(sh): for i, s in enumerate(sh):
if s.type.dtype[:3] not in ('int', 'uin'): if s.type.dtype[:3] not in ('int', 'uin'):
if config.exception_verbosity == 'high': if config.exception_verbosity == 'high':
......
...@@ -1304,7 +1304,7 @@ def local_alloc_elemwise(node): ...@@ -1304,7 +1304,7 @@ def local_alloc_elemwise(node):
-> elemwise(x, y.TensorType(no broadcast flag)) -> elemwise(x, y.TensorType(no broadcast flag))
BROADCAST CONDITION: the condition is that the one input that are BROADCAST CONDITION: the condition is that the one input that are
not to be optimized to have the same braodcast pattern as the not to be optimized to have the same broadcast pattern as the
output output
We can change the alloc by a dimshuffle as the elemwise We can change the alloc by a dimshuffle as the elemwise
......
...@@ -1239,11 +1239,17 @@ AllocTester = makeBroadcastTester( ...@@ -1239,11 +1239,17 @@ AllocTester = makeBroadcastTester(
correct23 = (rand(4,7), numpy.int32(2), numpy.int32(4), numpy.int32(7)), correct23 = (rand(4,7), numpy.int32(2), numpy.int32(4), numpy.int32(7)),
), ),
bad_runtime = dict( bad_runtime = dict(
bad_shape12 = (rand(7), numpy.int32(7), numpy.int32(5)), bad_shape12 = (rand(7), numpy.int32(7), numpy.int32(5)),
too_big32 = (rand(6,2,4), numpy.int32(6), numpy.int32(2)), ),
too_big32b = (rand(6,2,4), numpy.int32(2), numpy.int32(4)), bad_build = dict(
), too_big32 = (rand(6,2,4), numpy.int32(6), numpy.int32(2)),
) too_big32b = (rand(6,2,4), numpy.int32(6), numpy.int32(4)),
too_big32c = (rand(6,2,4), numpy.int32(2), numpy.int32(4)),
too_big32d = (rand(6,2,4), numpy.int32(2), numpy.int32(6)),
too_big32e = (rand(6,2,4), numpy.int32(4), numpy.int32(6)),
too_big32f = (rand(6,2,4), numpy.int32(4), numpy.int32(2)),
),
)
# Since not all inputs of Alloc are differentiable, we need different testers # Since not all inputs of Alloc are differentiable, we need different testers
s1, s2, s3 = randint_ranged(1, 13, (3,)) s1, s2, s3 = randint_ranged(1, 13, (3,))
......
...@@ -3146,23 +3146,27 @@ class T_local_sum(unittest.TestCase): ...@@ -3146,23 +3146,27 @@ class T_local_sum(unittest.TestCase):
f = theano.function([a],t_like(a).sum(d),mode=mode) f = theano.function([a],t_like(a).sum(d),mode=mode)
assert numpy.allclose(f(input),n_like(input).sum(d)) assert numpy.allclose(f(input),n_like(input).sum(d))
assert len(f.maker.env.nodes)==nb_nodes[1] assert len(f.maker.env.nodes)==nb_nodes[1]
assert f.maker.env.toposort()[-1].op==T.alloc topo = f.maker.env.toposort()
assert topo[-1].op == T.alloc
assert not any([isinstance(node.op, T.Sum) for node in topo])
for i in range(3): for i in range(3):
f = theano.function([a],t_like(a).sum(i),mode=mode) f = theano.function([a],t_like(a).sum(i),mode=mode)
assert numpy.allclose(f(input),n_like(input).sum(i)) assert numpy.allclose(f(input),n_like(input).sum(i))
assert len(f.maker.env.nodes)==nb_nodes[2] assert len(f.maker.env.nodes)==nb_nodes[2]
assert f.maker.env.toposort()[-1].op==T.alloc topo = f.maker.env.toposort()
assert topo[-1].op == T.alloc
assert not any([isinstance(node.op, T.Sum) for node in topo])
backup = config.warn.sum_sum_bug backup = config.warn.sum_sum_bug
config.warn.sum_sum_bug = False config.warn.sum_sum_bug = False
try: try:
for d, dd in [(0,0),(1,0),(2,0),(0,1),(1,1),(2,1)]: for d, dd in [(0,0),(1,0),(2,0),(0,1),(1,1),(2,1)]:
f = theano.function([a],t_like(a).sum(d).sum(dd),mode=mode) f = theano.function([a],t_like(a).sum(d).sum(dd),mode=mode)
print f.maker.env.toposort()
assert numpy.allclose(f(input),n_like(input).sum(d).sum(dd)) assert numpy.allclose(f(input),n_like(input).sum(d).sum(dd))
assert len(f.maker.env.nodes)==nb_nodes[3] assert len(f.maker.env.nodes)==nb_nodes[3]
assert f.maker.env.toposort()[-1].op==T.alloc topo = f.maker.env.toposort()
assert topo[-1].op == T.alloc
assert not any([isinstance(node.op, T.Sum) for node in topo])
finally: finally:
config.warn.sum_sum_bug = backup config.warn.sum_sum_bug = backup
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论