提交 50f364ee authored 作者: abergeron's avatar abergeron

Merge pull request #2330 from nouiz/join_opt

Join opt. Remove empty constants from inputs and merge MakeVector inputs.
......@@ -2661,6 +2661,77 @@ def local_join_1(node):
return [tensors[0]]
@register_specialize
@register_canonicalize
@gof.local_optimizer([T.Join])
def local_join_empty(node):
    """Join(i, x, y, empty) => Join(i, x, y)

    Remove empty inputs to joins. The empty inputs can appear anywhere
    in the input list.
    """
    if not isinstance(node.op, T.Join):
        return
    # The join axis must be a known constant to decide emptiness.
    try:
        join_idx = get_scalar_constant_value(node.inputs[0])
    except NotScalarConstantError:
        return
    # Keep only the tensors that are non-empty along the join axis.
    # NOTE: we can not test `size == 0` instead, as that could turn e.g.
    # a (3, 0) output into a (2, 0) one and trigger a DebugMode error.
    # This happens with stack(..., []), because stack adds a dimshuffle
    # on [], adding a broadcastable dimension of shape 1.
    kept = []
    for inp in node.inputs[1:]:
        is_empty_const = (isinstance(inp, theano.Constant) and
                          inp.data.shape[join_idx] == 0)
        if not is_empty_const:
            kept.append(inp)
    if len(kept) == len(node.inputs) - 1:
        # Nothing was removed; leave the node alone.
        return
    if not kept:
        # T.join does not work with zero tensors.
        # Constant folding will take care of this case.
        return
    ret = T.join(node.inputs[0], *kept)
    o = node.outputs[0]
    if ret.dtype != o.dtype:
        # Join can upcast some inputs; bail out rather than change dtype.
        return
    if ret.type != o.type:
        assert ret.dtype == o.dtype
        assert ret.ndim == o.ndim
        ret = T.patternbroadcast(ret, o.broadcastable)
    return [ret]
@register_specialize
@register_canonicalize
@gof.local_optimizer([T.Join])
def local_join_make_vector(node):
    """Join(0, make_vector1, make_vector2, ...) => Join(0, make_vector12, ...)

    Merge adjacent MakeVector inputs of a 1d Join. This can make the
    join completely disappear with the local_join_1 opt.
    """
    if not isinstance(node.op, T.Join) or node.outputs[0].ndim != 1:
        return

    def _from_make_vector(var):
        # True when `var` is the output of a MakeVector apply node.
        return var.owner is not None and isinstance(var.owner.op, MakeVector)

    merged = [node.inputs[1]]
    for inp in node.inputs[2:]:
        prev = merged[-1]
        if (_from_make_vector(inp) and _from_make_vector(prev) and
                # MakeVector has a dtype parameter: only merge identical ops.
                inp.owner.op == prev.owner.op):
            # Fuse the two MakeVector nodes into one.
            merged[-1] = inp.owner.op(*(prev.owner.inputs +
                                        inp.owner.inputs))
        else:
            merged.append(inp)
    if len(merged) < len(node.inputs) - 1:
        return [T.join(node.inputs[0], *merged)]
###############
# Switch opts #
###############
......
......@@ -119,6 +119,10 @@ class DownsampleFactorMax(Op):
TODO: why is poolsize an op parameter here?
"""
self.ds = tuple(ds)
if not all([isinstance(d, int) for d in ds]):
raise ValueError(
"DownsampleFactorMax downsample parameters must be ints."
" Got %s" % str(ds))
self.ignore_border = ignore_border
def __eq__(self, other):
......
......@@ -4632,6 +4632,74 @@ def test_local_join_1():
assert f.maker.fgraph.outputs[0].dtype == config.floatX
def test_local_join_empty():
    """Check local_join_empty: empty constant inputs are removed from a
    Join when doing so preserves the output shape and dtype.
    """
    # Case 1: vector, vector, empty -> vector. The empty input is
    # removed, leaving a Join with 3 inputs (axis + 2 tensors).
    empty_vec = numpy.asarray([], dtype=config.floatX)
    a = tensor.vector('a')
    s = tensor.join(0, a, a, empty_vec)
    f = function([a], s, mode=mode_opt)
    val = f([1])
    # Compare against the full expected value: `val == [1]` would
    # silently broadcast and pass vacuously.
    assert numpy.all(val == [1, 1])
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, Join)]) == 1
    assert all([not isinstance(n.op, Join) or len(n.inputs) == 3
                for n in topo if isinstance(n.op, Join)])
    assert f.maker.fgraph.outputs[0].dtype == config.floatX

    # Case 2: matrix join(1, ...). The empty matrix is removed, leaving
    # a Join with 4 inputs (axis + 3 tensors).
    empty_mat = numpy.asarray([[]], dtype=config.floatX)
    m = tensor.matrix('m')
    s = join(1, empty_mat, m, m, m)
    f = function([m], s, mode=mode_opt)
    val = f([[1]])
    assert numpy.all(val == [[1, 1, 1]])
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, Join)]) == 1
    assert all([not isinstance(n.op, Join) or len(n.inputs) == 4
                for n in topo if isinstance(n.op, Join)])
    assert f.maker.fgraph.outputs[0].dtype == config.floatX

    # Case 3: vector, vector, empty -> matrix via stack.
    # We can't optimize this case: removing the empty input would change
    # the output shape from (3, 0) to (2, 0).
    s = tensor.stack(a, a, empty_vec)
    f = function([a], s, mode=mode_opt)
    val = f([])
    # The previous `numpy.all(val == [1])` check was vacuously true on
    # an empty array; assert the shape instead.
    assert val.shape == (3, 0)
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, Join)]) == 1
    assert all([not isinstance(n.op, Join) or len(n.inputs) == 4
                for n in topo if isinstance(n.op, Join)])
    assert f.maker.fgraph.outputs[0].dtype == config.floatX

    # Case 4: matrix join(0, ...) with a non-empty constant.
    # We can't optimize this case: the constant has a non-zero shape
    # along the join axis, so nothing is removed.
    s = join(0, m, numpy.asarray([[2.]], dtype=config.floatX), m)
    f = function([m], s, mode=mode_opt)
    val = f([[1]])
    assert numpy.all(val == [[1], [2], [1]])
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, Join)]) == 1
    assert all([not isinstance(n.op, Join) or len(n.inputs) == 4
                for n in topo if isinstance(n.op, Join)])
    assert f.maker.fgraph.outputs[0].dtype == config.floatX
def test_local_join_make_vector():
    """Check local_join_make_vector: adjacent MakeVector inputs of a 1d
    Join are merged into a single MakeVector.
    """
    a, b, c, d, e = tensor.scalars('abcde')
    v = tensor.vector('v')
    mv = MakeVector(config.floatX)
    s = tensor.join(0, mv(a), v, mv(b, c), mv(d, e))
    f = function([a, b, c, d, e, v], s, mode=mode_opt)
    val = f(1, 2, 3, 4, 6, [7, 8])
    assert numpy.all(val == [1, 7, 8, 2, 3, 4, 6])
    # mv(b, c) and mv(d, e) are adjacent and get merged; mv(a) and v are
    # separated by v, so the Join keeps 4 inputs (axis + 3 tensors).
    # NOTE: use `topo`, not `e`, to avoid shadowing the scalar variable;
    # also removed a leftover theano.printing.debugprint(f) debug call.
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, Join)]) == 1
    assert all([not isinstance(n.op, Join) or len(n.inputs) == 4
                for n in topo if isinstance(n.op, Join)])
    assert f.maker.fgraph.outputs[0].dtype == config.floatX
def test_local_add_specialize():
# test of non-zero dimension
a = tensor.vector()
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论