提交 e5b29d4e authored 作者: lamblin's avatar lamblin

Merge pull request #564 from nouiz/alloc

Alloc
......@@ -23,6 +23,10 @@ Interface changes
instance, function([x, y], [y]). You can use the kwarg
``on_unused_input={'raise', 'warn', 'ignore'}`` to control this.
(Pascal L.)
* tensor.alloc() now raises an error at graph build time
  when we try to create fewer dimensions than the number of dimensions
  the provided value has. In the past, the error was raised at run time.
  (Frederic B.)
New Features
* debugprint new param ids=["CHAR", "id", "int", ""]
......
......@@ -743,9 +743,14 @@ class ScalarOp(Op):
if hasattr(self, 'name') and self.name:
return self.name
else:
param = [(k, v) for k, v in self.__dict__.items()
if k not in ["name", "_op_use_c_code"]]
if param:
return "%s{%s}" % (self.__class__.__name__,
", ".join("%s=%s" % (k, v) for k, v in
self.__dict__.items() if k != "name"))
", ".join("%s=%s" % (k, v)
for k, v in param))
else:
return self.__class__.__name__
def c_code_cache_version(self):
    # Cache-version tag for this op's generated C code: Theano keys its
    # compiled-module cache on this tuple, so it must be bumped whenever
    # the C implementation changes to force recompilation.
    return (3,)
......
......@@ -2801,6 +2801,10 @@ class Alloc(gof.Op):
v = as_tensor_variable(value)
sh = [as_tensor_variable(s) for s in shape]
bcast = []
if v.ndim > len(sh):
raise TypeError("Alloc value to use have more dimensions"
" then the specified dimensions",
v.ndim, len(sh))
for i, s in enumerate(sh):
if s.type.dtype[:3] not in ('int', 'uin'):
if config.exception_verbosity == 'high':
......
......@@ -1304,7 +1304,7 @@ def local_alloc_elemwise(node):
-> elemwise(x, y.TensorType(no broadcast flag))
BROADCAST CONDITION: the condition is that the one input that are
not to be optimized to have the same braodcast pattern as the
not to be optimized to have the same broadcast pattern as the
output
We can change the alloc by a dimshuffle as the elemwise
......
......@@ -1240,10 +1240,16 @@ AllocTester = makeBroadcastTester(
),
bad_runtime = dict(
bad_shape12 = (rand(7), numpy.int32(7), numpy.int32(5)),
),
bad_build = dict(
too_big32 = (rand(6,2,4), numpy.int32(6), numpy.int32(2)),
too_big32b = (rand(6,2,4), numpy.int32(2), numpy.int32(4)),
too_big32b = (rand(6,2,4), numpy.int32(6), numpy.int32(4)),
too_big32c = (rand(6,2,4), numpy.int32(2), numpy.int32(4)),
too_big32d = (rand(6,2,4), numpy.int32(2), numpy.int32(6)),
too_big32e = (rand(6,2,4), numpy.int32(4), numpy.int32(6)),
too_big32f = (rand(6,2,4), numpy.int32(4), numpy.int32(2)),
),
)
)
# Since not all inputs of Alloc are differentiable, we need different testers
s1, s2, s3 = randint_ranged(1, 13, (3,))
......
......@@ -3146,23 +3146,27 @@ class T_local_sum(unittest.TestCase):
f = theano.function([a],t_like(a).sum(d),mode=mode)
assert numpy.allclose(f(input),n_like(input).sum(d))
assert len(f.maker.env.nodes)==nb_nodes[1]
assert f.maker.env.toposort()[-1].op==T.alloc
topo = f.maker.env.toposort()
assert topo[-1].op == T.alloc
assert not any([isinstance(node.op, T.Sum) for node in topo])
for i in range(3):
f = theano.function([a],t_like(a).sum(i),mode=mode)
assert numpy.allclose(f(input),n_like(input).sum(i))
assert len(f.maker.env.nodes)==nb_nodes[2]
assert f.maker.env.toposort()[-1].op==T.alloc
topo = f.maker.env.toposort()
assert topo[-1].op == T.alloc
assert not any([isinstance(node.op, T.Sum) for node in topo])
backup = config.warn.sum_sum_bug
config.warn.sum_sum_bug = False
try:
for d, dd in [(0,0),(1,0),(2,0),(0,1),(1,1),(2,1)]:
f = theano.function([a],t_like(a).sum(d).sum(dd),mode=mode)
print f.maker.env.toposort()
assert numpy.allclose(f(input),n_like(input).sum(d).sum(dd))
assert len(f.maker.env.nodes)==nb_nodes[3]
assert f.maker.env.toposort()[-1].op==T.alloc
topo = f.maker.env.toposort()
assert topo[-1].op == T.alloc
assert not any([isinstance(node.op, T.Sum) for node in topo])
finally:
config.warn.sum_sum_bug = backup
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论