Commit 4afefe16 authored by Frédéric Bastien, committed by GitHub

Merge pull request #5335 from lamblin/fix_dnnbatchnorm_testvalues

[WIP] Use __call__ instead of make_node to avoid bypassing test values
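
Context for the change: in Theano, `Op.make_node` only constructs the `Apply` node, while calling the op (`self(x)`, i.e. `Op.__call__`, in theano/gof/op.py) additionally computes test values for the outputs when `theano.config.compute_test_value` is enabled. A `grad` method that rebuilds the forward output via `make_node` therefore returns variables with no test value, and graph construction on them fails in test-value mode. The following is a minimal sketch of that distinction, not Theano's real implementation; `MiniOp`, `Variable`, `Apply`, and `Double` below are hypothetical stand-ins:

compute_test_value = True  # stands in for theano.config.compute_test_value

class Variable(object):
    def __init__(self):
        self.tag = type('Tag', (object,), {})()  # may hold .test_value

class Apply(object):
    def __init__(self, op, inputs, outputs):
        self.op, self.inputs, self.outputs = op, inputs, outputs

class MiniOp(object):
    def make_node(self, *inputs):
        # Builds the graph node only; no test value is attached here.
        return Apply(self, list(inputs), [Variable()])

    def __call__(self, *inputs):
        node = self.make_node(*inputs)
        if compute_test_value:
            # Eagerly evaluate on the inputs' test values so downstream
            # code (such as grad) can keep working in test-value mode.
            vals = [i.tag.test_value for i in inputs]
            node.outputs[0].tag.test_value = self.perform(vals)
        return node.outputs[0] if len(node.outputs) == 1 else node.outputs

class Double(MiniOp):
    def perform(self, values):
        return 2 * values[0]

x = Variable()
x.tag.test_value = 21
via_call = Double()(x)                            # what the PR switches to
via_make_node = Double().make_node(x).outputs[0]  # what it removes
print(hasattr(via_call.tag, 'test_value'))        # True (42)
print(hasattr(via_make_node.tag, 'test_value'))   # False: test value bypassed

The diff below applies exactly this substitution inside the `grad` methods of the cuDNN softmax and batch-normalization ops, which recompute their forward output because the gradient formula needs it.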
@@ -1597,7 +1597,7 @@ class GpuDnnSoftmax(GpuDnnSoftmaxBase):
     def grad(self, inp, grads):
         x, = inp
         g_sm, = grads
-        sm = self.make_node(x).outputs[0]
+        sm = self(x)
         return [GpuDnnSoftmaxGrad(
             self.algo,
             self.mode
@@ -1685,7 +1685,7 @@ class GpuDnnBatchNorm(DnnBase):
     def grad(self, inputs, grads):
         x, scale, bias, epsilon = inputs
         dy = grads[0]
-        _, x_mean, x_invstd = self.make_node(x, scale, bias, epsilon).outputs
+        _, x_mean, x_invstd = self(x, scale, bias, epsilon)
         return GpuDnnBatchNormGrad(self.mode)(x, dy, scale, x_mean,
                                               x_invstd, epsilon) + [DisconnectedType()()]
@@ -2248,7 +2248,7 @@ err%(name)s = cudnnSoftmaxForward(
     def grad(self, inp, grads):
         x, = inp
         g_sm, = grads
-        sm = self.make_node(x).outputs[0]
+        sm = self(x)
         return [GpuDnnSoftmaxGrad(
             self.tensor_format,
             self.algo,
@@ -2603,7 +2603,7 @@ err%(name)s = cudnnBatchNormalizationForwardTraining(
     def grad(self, inputs, grads):
        x, scale, bias = inputs
        dy = grads[0]
-       _, x_mean, x_invstd = self.make_node(x, scale, bias).outputs
+       _, x_mean, x_invstd = self(x, scale, bias)
        return GpuDnnBatchNormGrad(self.mode, self.epsilon)(x, dy, scale,
                                                            x_mean, x_invstd)
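
The same pattern can be reproduced on the CPU with a toy op, without cuDNN. This is a hedged sketch: `DoubleOp` is hypothetical and not part of Theano; only the stock `theano.gof.Op` API is assumed. With `compute_test_value = 'raise'`, the `make_node` variant fails as soon as `grad` tries to build an expression on the recomputed forward output, while the `self(x)` variant works:

import numpy
import theano
import theano.tensor as T
from theano.gof import Op, Apply

theano.config.compute_test_value = 'raise'

class DoubleOp(Op):
    __props__ = ()

    def make_node(self, x):
        x = T.as_tensor_variable(x)
        return Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        output_storage[0][0] = 2 * inputs[0]

    def grad(self, inputs, grads):
        x, = inputs
        g, = grads
        # Recompute the forward output, as GpuDnnSoftmax.grad does above:
        y = self(x)  # fixed form; a test value is computed for y
        # y = self.make_node(x).outputs[0]  # buggy form: y carries no test
        #     value, so building g * y / x below raises in 'raise' mode
        return [g * y / x]  # mathematically 2 * g, written to depend on y

x = T.vector('x')
x.tag.test_value = numpy.ones(3, dtype=theano.config.floatX)
y = DoubleOp()(x)
gx = theano.grad(y.sum(), x)  # succeeds only with the fixed grad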