Commit d92efdc7 authored by affanv14

added L_op to nnet.py

Parent 04cf990b
@@ -99,15 +99,14 @@ class SoftmaxWithBias(gof.Op):
         # data type matches.
         output_storage[0][0] = e_x.astype(x_dtype, copy=False)
 
-    def grad(self, inp, grads):
+    def L_op(self, inp, outputs, grads):
         x, b = inp
         g_sm, = grads
 
         if isinstance(g_sm.type, DisconnectedType):
             return [DisconnectedType()(), DisconnectedType()()]
 
-        sm = softmax_with_bias(x, b)
-        dx = softmax_grad(g_sm, sm)
+        dx = softmax_grad(g_sm, outputs[0])
         db = tensor.sum(dx, axis=0)
         return dx, db
@@ -439,11 +438,10 @@ class Softmax(gof.Op):
         sm = e_x / e_x.sum(axis=1)[:, None]
         output_storage[0][0] = sm
 
-    def grad(self, inp, grads):
+    def L_op(self, inp, outputs, grads):
         x, = inp
         g_sm, = grads
-        sm = softmax_op(x)
-        return [softmax_grad(g_sm, sm)]
+        return [softmax_grad(g_sm, outputs[0])]
 
     def R_op(self, inputs, eval_points):
         # I think the Jacobian is symmetric so the R_op
...
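Both hunks make the same change: the ops switch from Theano's older `grad` interface to `L_op`, which additionally receives the node's symbolic outputs. A gradient that depends on the forward value can then reuse `outputs[0]` directly instead of rebuilding it (previously `softmax_with_bias(x, b)` and `softmax_op(x)` were reconstructed inside the gradient). Below is a minimal sketch of the same pattern on a toy `Exp` op; the op is hypothetical and exists only to illustrate the `L_op` interface, which is real Theano API.

```python
import numpy as np
import theano
import theano.tensor as tt
from theano import gof


class Exp(gof.Op):
    """Toy op whose gradient reuses the forward output, like Softmax above.

    (Hypothetical example, not part of this commit.)
    """
    __props__ = ()

    def make_node(self, x):
        x = tt.as_tensor_variable(x)
        return gof.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        x, = inputs
        output_storage[0][0] = np.exp(x)

    def L_op(self, inputs, outputs, output_grads):
        # d exp(x) / dx = exp(x), which is exactly outputs[0]; with the old
        # grad() interface the op would have to rebuild exp(x) here.
        g_out, = output_grads
        return [g_out * outputs[0]]


# theano.grad routes through L_op, so no grad() method is needed:
x = tt.vector('x')
y = Exp()(x)
g = theano.grad(y.sum(), x)  # gradient graph reuses y, not a second Exp node
```

The practical benefit in this commit is that the `SoftmaxWithBias` and `Softmax` gradients no longer insert a duplicate softmax apply node into the graph, which the graph optimizer would otherwise have to merge away.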