提交 0041ce67，作者：Arnaud Bergeron

Remove the try .. except construct since it masked some errors in the opt.

上级 994c8d35
def local_logsoftmax(node):
    """
    Rewrite Log(Softmax(x)) into the numerically stabler LogSoftmax(x).

    Note: only forward pass is affected.

    Parameters
    ----------
    node : theano Apply node
        The node being considered by the optimizer.

    Returns
    -------
    list or None
        ``[LogSoftmax()(x)]`` when the pattern matches, otherwise ``None``
        (implicitly) so the optimizer leaves the graph unchanged.
    """
    # Match: Elemwise(Log)(Softmax(x)) with exactly one input.
    # The explicit `owner is not None` guard replaces the former broad
    # try/except AttributeError, which masked real errors in the opt.
    if (isinstance(node.op, tensor.Elemwise) and
            isinstance(node.op.scalar_op, scalar.basic.Log) and
            len(node.inputs) == 1 and
            node.inputs[0].owner is not None and
            isinstance(node.inputs[0].owner.op, Softmax)):
        # Reach through the Softmax to its original input x.
        inVars = node.inputs[0].owner.inputs[0]
        new_op = LogSoftmax()
        return [new_op(inVars)]
@opt.register_specialize('stabilize', 'fast_compile')
@gof.local_optimizer([SoftmaxGrad])
def local_logsoftmax_grad(node):
    """
    Rewrite the gradient of Log(Softmax(x)) into the stabler LogSoftmax grad.

    Note: only grad is affected.

    Parameters
    ----------
    node : theano Apply node
        The node being considered by the optimizer.

    Returns
    -------
    list or None
        ``[grads - sum(grads, axis=1, keepdims=True) * sm]`` when the
        pattern matches, otherwise ``None`` so the graph is left unchanged.
    """
    # Match: SoftmaxGrad(Elemwise(...)(?, softmax(x)), softmax(x)).
    # The explicit `is not None` / `len(...) >= 2` guards replace the former
    # broad try/except AttributeError, which masked real errors in the opt.
    if (isinstance(node.op, SoftmaxGrad) and
        len(node.inputs) == 2 and
        node.inputs[0].owner is not None and
        isinstance(node.inputs[0].owner.op, tensor.Elemwise) and
        len(node.inputs[0].owner.inputs) >= 2 and
        node.inputs[0].owner.inputs[1].owner is not None and
        node.inputs[0].owner.inputs[1].owner.op == softmax_op and
        node.inputs[1] == node.inputs[0].owner.inputs[1] and
        not (
            # skip if it will be optimized by
            # local_advanced_indexing_crossentropy_onehot_grad
            node.inputs[0].owner.op == tensor.true_div and
            node.inputs[0].owner.inputs[0].owner is not None and
            isinstance(node.inputs[0].owner.inputs[0].owner.op,
                       subtensor.AdvancedIncSubtensor))):
        # get parameters from unoptimized op
        sm = node.inputs[0].owner.inputs[1]
        # sm_input = node.inputs[1].owner.inputs[0]
        grads = node.inputs[0].owner.inputs[0]
        # Broadcast grads up to the softmax's shape when its second axis
        # is broadcastable but sm's is not.
        if grads.broadcastable[1] and not sm.broadcastable[1]:
            grads = tensor.alloc(grads, grads.shape[0], sm.shape[1])

        return [grads - tensor.sum(grads, axis=1, keepdims=True) * sm]
def softmax_graph(c): def softmax_graph(c):
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论