提交 b03e3514 authored 作者: slefrancois's avatar slefrancois

dnn_batchnorm epsilon to input

上级 a2b1a9e2
......@@ -1422,7 +1422,7 @@ class GpuDnnBatchNorm(DnnBase):
value is 1e-5 (imposed by cuDNN).
"""
__props__ = ('mode', 'epsilon')
__props__ = ('mode',)
def __init__(self, mode='per-activation', epsilon=1e-4):
DnnBase.__init__(self, ['dnn_batchnorm_base.c', 'dnn_batchnorm.c'],
......@@ -1441,7 +1441,6 @@ class GpuDnnBatchNorm(DnnBase):
params.append(('MODE', ("CUDNN_BATCHNORM_SPATIAL"
if self.mode == "spatial"
else "CUDNN_BATCHNORM_PER_ACTIVATION")))
params.append(('EPSILON', str(self.epsilon)))
return params
def infer_shape(self, node, shape):
......@@ -1452,17 +1451,23 @@ class GpuDnnBatchNorm(DnnBase):
x = as_gpuarray_variable(x, ctx_name)
scale = as_gpuarray_variable(scale, ctx_name)
bias = as_gpuarray_variable(bias, ctx_name)
epsilon = as_scalar(self.epsilon).astype('float64')
assert x.ndim == 4
assert scale.ndim == 4
assert bias.ndim == 4
return Apply(self, [x, scale, bias], [x.type(), scale.type(), scale.type()])
return Apply(self, [x, scale, bias, epsilon], [x.type(), scale.type(), scale.type()])
def grad(self, inputs, grads):
x, scale, bias = inputs
x, scale, bias, epsilon = inputs
dy = grads[0]
_, x_mean, x_invstd = self.make_node(x, scale, bias).outputs
return GpuDnnBatchNormGrad(self.mode, self.epsilon)(x, dy, scale,
x_mean, x_invstd)
return GpuDnnBatchNormGrad(self.mode)(x, dy, scale, x_mean,
x_invstd) + [DisconnectedType()()]
def connection_pattern(self, node):
    # Specify which inputs feed which outputs: x, scale and bias each
    # influence all three outputs, while the epsilon input (row 4)
    # is disconnected from every output.
    pattern = [[True] * 3 for _ in range(3)]
    pattern.append([False] * 3)
    return pattern
class GpuDnnBatchNormInference(DnnBase):
......@@ -1481,7 +1486,7 @@ class GpuDnnBatchNormInference(DnnBase):
value is 1e-5 (imposed by cuDNN).
"""
__props__ = ('mode', 'epsilon')
__props__ = ('mode',)
def __init__(self, mode='per-activation', epsilon=1e-4):
DnnBase.__init__(self, ['dnn_batchnorm_base.c', 'dnn_batchnorm_inf.c'],
......@@ -1500,7 +1505,6 @@ class GpuDnnBatchNormInference(DnnBase):
params.append(('MODE', ("CUDNN_BATCHNORM_SPATIAL"
if self.mode == "spatial"
else "CUDNN_BATCHNORM_PER_ACTIVATION")))
params.append(('EPSILON', str(self.epsilon)))
return params
def infer_shape(self, node, shape):
......@@ -1514,15 +1518,16 @@ class GpuDnnBatchNormInference(DnnBase):
bias = as_gpuarray_variable(bias, ctx_name)
estimated_mean = as_gpuarray_variable(estimated_mean, ctx_name)
estimated_variance = as_gpuarray_variable(estimated_variance, ctx_name)
epsilon = as_scalar(self.epsilon).astype('float64')
assert x.ndim == 4
assert scale.ndim == 4
assert bias.ndim == 4
assert estimated_mean.ndim == 4
assert estimated_variance.ndim == 4
return Apply(self, [x, scale, bias, estimated_mean, estimated_variance], [x.type()])
return Apply(self, [x, scale, bias, estimated_mean, estimated_variance, epsilon], [x.type()])
def grad(self, inputs, grads):
x, scale, bias, est_mean, est_var = inputs
x, scale, bias, est_mean, est_var, epsilon = inputs
dy = grads[0]
if self.mode == "per-activation":
......@@ -1533,7 +1538,7 @@ class GpuDnnBatchNormInference(DnnBase):
for t in (scale, bias, est_mean, est_var))
# define helper expressions
est_var_eps = est_var + self.epsilon
est_var_eps = est_var + epsilon
est_std = theano.tensor.sqrt(est_var_eps)
two = theano.tensor.constant(2.)
......@@ -1543,11 +1548,15 @@ class GpuDnnBatchNormInference(DnnBase):
dbias = dy.sum(axes, keepdims=True)
dmean = -dy.sum(axes, keepdims=True) * (scale / est_std)
dvar = -(dy * (x - est_mean)).sum(axes, keepdims=True) * (scale / (two * est_var_eps * est_std))
return [dx, dscale, dbias, dmean, dvar]
return [dx, dscale, dbias, dmean, dvar, DisconnectedType()()]
def connection_pattern(self, node):
    # Specify connectivity to the single output: the first five inputs
    # (x, scale, bias, est_mean, est_var) are connected; the epsilon
    # input is not.
    connected = [True] * 5 + [False]
    return [[flag] for flag in connected]
class GpuDnnBatchNormGrad(DnnBase):
__props__ = ('mode', 'epsilon')
__props__ = ('mode',)
def __init__(self, mode='per-activation', epsilon=1e-4):
DnnBase.__init__(self, ['dnn_batchnorm_base.c', 'dnn_batchnorm_grad.c'],
......@@ -1566,7 +1575,6 @@ class GpuDnnBatchNormGrad(DnnBase):
params.append(('MODE', ("CUDNN_BATCHNORM_SPATIAL"
if self.mode == "spatial"
else "CUDNN_BATCHNORM_PER_ACTIVATION")))
params.append(('EPSILON', str(self.epsilon)))
return params
def make_node(self, x, dy, scale, x_mean, x_invstd):
......@@ -1576,8 +1584,9 @@ class GpuDnnBatchNormGrad(DnnBase):
scale = as_gpuarray_variable(scale, ctx_name)
x_mean = as_gpuarray_variable(x_mean, ctx_name)
x_invstd = as_gpuarray_variable(x_invstd, ctx_name)
epsilon = as_scalar(self.epsilon).astype('float64')
assert x.ndim == 4 and dy.ndim == 4 and scale.ndim == 4 and x_mean.ndim == 4 and x_invstd.ndim == 4
return Apply(self, [x, dy, scale, x_mean, x_invstd], [x.type(), scale.type(), scale.type()])
return Apply(self, [x, dy, scale, x_mean, x_invstd, epsilon], [x.type(), scale.type(), scale.type()])
def infer_shape(self, node, shape):
    # dx has the shape of x (input 0); dscale and dbias both share the
    # shape of the scale/mean parameter tensors (input 2).
    x_shape = shape[0]
    param_shape = shape[2]
    return [x_shape, param_shape, param_shape]
......
#section support_code_struct
int dnn_batchnorm_op(PyGpuArrayObject *inp, PyGpuArrayObject *scale,
PyGpuArrayObject *bias, PyGpuArrayObject **outp,
PyGpuArrayObject **x_mean, PyGpuArrayObject **x_invstd,
PyGpuContextObject *c) {
PyGpuArrayObject *bias, npy_float64 epsilon,
PyGpuArrayObject **outp, PyGpuArrayObject **x_mean,
PyGpuArrayObject **x_invstd, PyGpuContextObject *c) {
if (c_set_tensorNd(inp, bn_input) != 0)
return 1;
if (c_set_tensorNd(scale, bn_params) != 0)
......@@ -48,7 +48,7 @@ int dnn_batchnorm_op(PyGpuArrayObject *inp, PyGpuArrayObject *scale,
0,
NULL, // running mean, deliberately unused
NULL, // running var, deliberately unused
EPSILON,
epsilon,
PyGpuArray_DEV_DATA(*x_mean),
PyGpuArray_DEV_DATA(*x_invstd)
);
......
......@@ -22,9 +22,9 @@ cudnnTensorDescriptor_t bn_doutput;
int dnn_batchnorm_grad(PyGpuArrayObject *inp, PyGpuArrayObject *doutp,
PyGpuArrayObject *scale, PyGpuArrayObject *x_mean,
PyGpuArrayObject *x_invstd, PyGpuArrayObject **dinp,
PyGpuArrayObject **dscale, PyGpuArrayObject **dbias,
PyGpuContextObject *c) {
PyGpuArrayObject *x_invstd, npy_float64 epsilon,
PyGpuArrayObject **dinp, PyGpuArrayObject **dscale,
PyGpuArrayObject **dbias, PyGpuContextObject *c) {
if (c_set_tensorNd(inp, bn_input) != 0)
return 1;
if (c_set_tensorNd(doutp, bn_doutput) != 0)
......@@ -79,7 +79,7 @@ int dnn_batchnorm_grad(PyGpuArrayObject *inp, PyGpuArrayObject *doutp,
PyGpuArray_DEV_DATA(scale),
PyGpuArray_DEV_DATA(*dscale),
PyGpuArray_DEV_DATA(*dbias),
EPSILON,
epsilon,
PyGpuArray_DEV_DATA(x_mean),
PyGpuArray_DEV_DATA(x_invstd)
);
......
......@@ -2,8 +2,8 @@
int dnn_batchnorm_op(PyGpuArrayObject *inp, PyGpuArrayObject *scale,
PyGpuArrayObject *bias, PyGpuArrayObject *est_mean,
PyGpuArrayObject *est_var, PyGpuArrayObject **outp,
PyGpuContextObject *c) {
PyGpuArrayObject *est_var, npy_float64 epsilon,
PyGpuArrayObject **outp, PyGpuContextObject *c) {
if (c_set_tensorNd(inp, bn_input) != 0)
return 1;
if (c_set_tensorNd(scale, bn_params) != 0)
......@@ -43,7 +43,7 @@ int dnn_batchnorm_op(PyGpuArrayObject *inp, PyGpuArrayObject *scale,
PyGpuArray_DEV_DATA(bias),
PyGpuArray_DEV_DATA(est_mean),
PyGpuArray_DEV_DATA(est_var),
EPSILON
epsilon
);
if (err != CUDNN_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError, "Error during batchnorm: %s\n",
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论