提交 35e54bb7 authored 作者: Frédéric Bastien's avatar Frédéric Bastien 提交者: GitHub

Merge pull request #4852 from Faruk-Ahmed/add_ddof_to_var_and_std

add ddof to var and std, apply Fred-fix to opt
...@@ -3164,7 +3164,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False, ...@@ -3164,7 +3164,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False,
@constructor @constructor
def var(input, axis=None, keepdims=False): def var(input, axis=None, ddof=0, keepdims=False):
""" """
Computes the variance along the given axis(es) of a tensor `input`. Computes the variance along the given axis(es) of a tensor `input`.
...@@ -3173,6 +3173,8 @@ def var(input, axis=None, keepdims=False): ...@@ -3173,6 +3173,8 @@ def var(input, axis=None, keepdims=False):
axis: None or int or (list of int) (see `Sum`) axis: None or int or (list of int) (see `Sum`)
Compute the variance along this axis of the tensor. Compute the variance along this axis of the tensor.
None means all axes (like numpy). None means all axes (like numpy).
ddof: Degrees of freedom; 0 would compute the ML estimate, 1 would compute
the unbiased estimate.
keepdims : bool keepdims : bool
If this is set to True, the axes which are reduced are If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option, left in the result as dimensions with size one. With this option,
...@@ -3187,6 +3189,9 @@ def var(input, axis=None, keepdims=False): ...@@ -3187,6 +3189,9 @@ def var(input, axis=None, keepdims=False):
""" """
if isinstance(ddof, (bool)):
raise ValueError('Parameter keepdims is now at index 3: (input, axis=None, ddof=0, keepdims=False)')
input_ndim = input.type.ndim input_ndim = input.type.ndim
if axis is None: if axis is None:
axis = list(range(input_ndim)) axis = list(range(input_ndim))
...@@ -3204,13 +3209,19 @@ def var(input, axis=None, keepdims=False): ...@@ -3204,13 +3209,19 @@ def var(input, axis=None, keepdims=False):
centered_input = input - mean_input centered_input = input - mean_input
# return the mean sqr # return the mean sqr
if ddof == 0:
v = mean((centered_input ** 2), axis, keepdims=keepdims) v = mean((centered_input ** 2), axis, keepdims=keepdims)
else:
shp = shape(input) - ddof
v = sum((centered_input ** 2), axis=axis, keepdims=keepdims)
for i in axis:
v = true_div(v, shp[i])
v.name = 'var' v.name = 'var'
return v return v
@constructor @constructor
def std(input, axis=None, keepdims=False): def std(input, axis=None, ddof=0, keepdims=False):
""" """
Computes the standard deviation along the given axis(es) of a tensor `input`. Computes the standard deviation along the given axis(es) of a tensor `input`.
...@@ -3234,7 +3245,10 @@ def std(input, axis=None, keepdims=False): ...@@ -3234,7 +3245,10 @@ def std(input, axis=None, keepdims=False):
""" """
ret = sqrt(var(input=input, axis=axis, keepdims=keepdims)) if isinstance(ddof, (bool)):
raise ValueError('Parameter keepdims is now at index 3: (input, axis=None, ddof=0, keepdims=False)')
ret = sqrt(var(input=input, axis=axis, ddof=ddof, keepdims=keepdims))
ret.name = 'std' ret.name = 'std'
return ret return ret
......
...@@ -1970,17 +1970,17 @@ def local_subtensor_make_vector(node): ...@@ -1970,17 +1970,17 @@ def local_subtensor_make_vector(node):
ret = [x.owner.inputs[v]] ret = [x.owner.inputs[v]]
except IndexError: except IndexError:
raise NotScalarConstantError("Bad user graph!") raise NotScalarConstantError("Bad user graph!")
return ret return ret
except NotScalarConstantError: except NotScalarConstantError:
pass pass
elif idx.ndim == 1 and isinstance(idx, T.Constant): elif idx.ndim == 1 and isinstance(idx, T.Constant):
values = list(map(int, list(idx.value))) values = list(map(int, list(idx.value)))
ret = [make_vector(*[x.owner.inputs[v] for v in values])] ret = make_vector(*[x.owner.inputs[v] for v in values])
# Copy over stack trace from previous output to new output # Copy over stack trace from previous output to new output
copy_stack_trace(node.outputs[0], ret) copy_stack_trace(node.outputs[0], ret)
return ret ret = T.patternbroadcast(ret, node.outputs[0].broadcastable)
return [ret]
else: else:
raise TypeError('case not expected') raise TypeError('case not expected')
elif isinstance(idx, slice): elif isinstance(idx, slice):
...@@ -1993,6 +1993,7 @@ def local_subtensor_make_vector(node): ...@@ -1993,6 +1993,7 @@ def local_subtensor_make_vector(node):
ret = make_vector(*x.owner.inputs[const_slice]) ret = make_vector(*x.owner.inputs[const_slice])
# Copy over stack trace from previous outputs to new output # Copy over stack trace from previous outputs to new output
copy_stack_trace(node.outputs, ret) copy_stack_trace(node.outputs, ret)
ret = T.patternbroadcast(ret, node.outputs[0].broadcastable)
return [ret] return [ret]
except NotScalarConstantError: except NotScalarConstantError:
pass pass
......
...@@ -6365,6 +6365,15 @@ def test_var(): ...@@ -6365,6 +6365,15 @@ def test_var():
f = function([a], var(a, axis=2)) f = function([a], var(a, axis=2))
assert numpy.allclose(numpy.var(a_val, axis=2), f(a_val)) assert numpy.allclose(numpy.var(a_val, axis=2), f(a_val))
f = function([a], var(a, axis=0, ddof=0))
assert numpy.allclose(numpy.var(a_val, axis=0, ddof=0), f(a_val))
f = function([a], var(a, axis=1,ddof=1))
assert numpy.allclose(numpy.var(a_val, axis=1,ddof=1), f(a_val))
f = function([a], var(a, axis=2, ddof=1))
assert numpy.allclose(numpy.var(a_val, axis=2, ddof=1), f(a_val))
class T_sum(unittest.TestCase): class T_sum(unittest.TestCase):
def test_sum_overflow(self): def test_sum_overflow(self):
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论