提交 3a890101 authored 作者: Faruk Ahmed's avatar Faruk Ahmed

Add a `ddof` (delta degrees of freedom) parameter to `var` and `std`, matching numpy's interface; also apply Fred's fix (restore `patternbroadcast`) to the `local_subtensor_make_vector` optimization.

上级 68290a96
......@@ -3099,7 +3099,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False,
@constructor
def var(input, axis=None, keepdims=False):
def var(input, axis=None, ddof=0, keepdims=False):
"""
Computes the variance along the given axis(es) of a tensor `input`.
......@@ -3108,6 +3108,8 @@ def var(input, axis=None, keepdims=False):
axis: None or int or (list of int) (see `Sum`)
Compute the variance along this axis of the tensor.
None means all axes (like numpy).
ddof: Degrees of freedom; 0 would compute the ML estimate, 1 would compute
the unbiased estimate.
keepdims : bool
If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
......@@ -3122,6 +3124,9 @@ def var(input, axis=None, keepdims=False):
"""
if isinstance(ddof, (bool)):
raise ValueError('Parameter keepdims is now at index 3: (input, axis=None, ddof=0, keepdims=False)')
input_ndim = input.type.ndim
if axis is None:
axis = list(range(input_ndim))
......@@ -3139,13 +3144,19 @@ def var(input, axis=None, keepdims=False):
centered_input = input - mean_input
# return the mean sqr
v = mean((centered_input ** 2), axis, keepdims=keepdims)
if ddof == 0:
v = mean((centered_input ** 2), axis, keepdims=keepdims)
else:
shp = shape(input) - ddof
v = sum((centered_input ** 2), axis=axis, keepdims=keepdims)
for i in axis:
v = true_div(v, shp[i])
v.name = 'var'
return v
@constructor
def std(input, axis=None, keepdims=False):
def std(input, axis=None, ddof=0, keepdims=False):
"""
Computes the standard deviation along the given axis(es) of a tensor `input`.
......@@ -3169,7 +3180,10 @@ def std(input, axis=None, keepdims=False):
"""
ret = sqrt(var(input=input, axis=axis, keepdims=keepdims))
if isinstance(ddof, (bool)):
raise ValueError('Parameter keepdims is now at index 3: (input, axis=None, ddof=0, keepdims=False)')
ret = sqrt(var(input=input, axis=axis, ddof=ddof, keepdims=keepdims))
ret.name = 'std'
return ret
......
......@@ -1902,17 +1902,17 @@ def local_subtensor_make_vector(node):
ret = [x.owner.inputs[v]]
except IndexError:
raise NotScalarConstantError("Bad user graph!")
return ret
except NotScalarConstantError:
pass
elif idx.ndim == 1 and isinstance(idx, T.Constant):
values = list(map(int, list(idx.value)))
ret = [make_vector(*[x.owner.inputs[v] for v in values])]
ret = make_vector(*[x.owner.inputs[v] for v in values])
# Copy over stack trace from previous output to new output
copy_stack_trace(node.outputs[0], ret)
return ret
ret = T.patternbroadcast(ret, node.outputs[0].broadcastable)
return [ret]
else:
raise TypeError('case not expected')
elif isinstance(idx, slice):
......@@ -1925,6 +1925,7 @@ def local_subtensor_make_vector(node):
ret = make_vector(*x.owner.inputs[const_slice])
# Copy over stack trace from previous outputs to new output
copy_stack_trace(node.outputs, ret)
ret = T.patternbroadcast(ret, node.outputs[0].broadcastable)
return [ret]
except NotScalarConstantError:
pass
......
......@@ -6332,6 +6332,15 @@ def test_var():
f = function([a], var(a, axis=2))
assert numpy.allclose(numpy.var(a_val, axis=2), f(a_val))
f = function([a], var(a, axis=0, ddof=0))
assert numpy.allclose(numpy.var(a_val, axis=0, ddof=0), f(a_val))
f = function([a], var(a, axis=1,ddof=1))
assert numpy.allclose(numpy.var(a_val, axis=1,ddof=1), f(a_val))
f = function([a], var(a, axis=2, ddof=1))
assert numpy.allclose(numpy.var(a_val, axis=2, ddof=1), f(a_val))
class T_sum(unittest.TestCase):
def test_sum_overflow(self):
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论