Commit e550df89, authored by Yang Zhang, committed by Gokul

Fix indent 2

Parent commit: ebfd5698
...@@ -2067,7 +2067,7 @@ def grad_clip(x, lower_bound, upper_bound): ...@@ -2067,7 +2067,7 @@ def grad_clip(x, lower_bound, upper_bound):
class GradScale(ViewOp):
    """Identity op in the forward pass that scales gradients in the backward pass.

    Forward behaviour is inherited from ``ViewOp`` (the input is returned
    unchanged); only backpropagation is affected: every incoming output
    gradient is multiplied by ``multiplier``.

    :param multiplier: scalar factor applied to each gradient. Use a value
        in (0, 1) to shrink gradients, > 1 to amplify them, or a negative
        value to reverse them (as in gradient-reversal layers).
    """

    def __init__(self, multiplier):
        # Stored once at construction time; applied to every gradient
        # flowing through this op during backprop.
        self.multiplier = multiplier

    def grad(self, args, g_outs):
        """Return the output gradients, each scaled by ``self.multiplier``.

        :param args: inputs of the op (unused — scaling does not depend on them).
        :param g_outs: gradients w.r.t. each output of the op.
        :returns: list of scaled gradients, one per entry of ``g_outs``.
        """
        return [self.multiplier * g_out for g_out in g_outs]
...@@ -2075,14 +2075,14 @@ class GradScale(ViewOp): ...@@ -2075,14 +2075,14 @@ class GradScale(ViewOp):
def grad_scale(x,multiplier): def grad_scale(x,multiplier):
""" """
This op scale or inverse the gradient in the backpropagation. This op scale or inverse the gradient in the backpropagation.
:param x: the variable we want its gradient inputs scale :param x: the variable we want its gradient inputs scale
:param multiplier: scale of the gradient :param multiplier: scale of the gradient
:examples: :examples:
x = theano.tensor.fscalar() x = theano.tensor.fscalar()
fx = theano.tensor.sin(x) fx = theano.tensor.sin(x)
fp = theano.tensor.grad(fx, wrt=x) fp = theano.tensor.grad(fx, wrt=x)
fprime = theano.function([x], fp) fprime = theano.function([x], fp)
print(fprime(2))#-0.416 print(fprime(2))#-0.416
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论