Commit e5361019 authored by Pascal Lamblin

Reorder keyword args

Parent 6eaba038
...@@ -1826,15 +1826,15 @@ class _tensor_py_operators: ...@@ -1826,15 +1826,15 @@ class _tensor_py_operators:
dot = __dot__ dot = __dot__
def sum(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """See `theano.tensor.sum`."""
    # Keyword order (keepdims before acc_dtype) mirrors the signature of
    # theano.tensor.sum so positional callers stay consistent.
    return sum(self, axis=axis, dtype=dtype, keepdims=keepdims,
               acc_dtype=acc_dtype)
def prod(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """See `theano.tensor.prod`."""
    # Keyword order (keepdims before acc_dtype) mirrors the signature of
    # theano.tensor.prod so positional callers stay consistent.
    return prod(self, axis=axis, dtype=dtype, keepdims=keepdims,
                acc_dtype=acc_dtype)
def norm(self, L, axis=None): def norm(self, L, axis=None):
if L == 0: if L == 0:
...@@ -1844,10 +1844,10 @@ class _tensor_py_operators: ...@@ -1844,10 +1844,10 @@ class _tensor_py_operators:
# optimizations will/should catch cases like L=1, L=2 # optimizations will/should catch cases like L=1, L=2
return pow(pow(abs_(self), L).sum(axis=axis), 1.0 / L) return pow(pow(abs_(self), L).sum(axis=axis), 1.0 / L)
def mean(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """See `theano.tensor.mean`."""
    # Keyword order (keepdims before acc_dtype) mirrors the signature of
    # theano.tensor.mean so positional callers stay consistent.
    return mean(self, axis=axis, dtype=dtype, keepdims=keepdims,
                acc_dtype=acc_dtype)
def var(self, axis=None, keepdims=False): def var(self, axis=None, keepdims=False):
"""See `theano.tensor.var`""" """See `theano.tensor.var`"""
...@@ -3780,7 +3780,7 @@ pprint.assign(tensor_copy, printing.IgnorePrinter()) ...@@ -3780,7 +3780,7 @@ pprint.assign(tensor_copy, printing.IgnorePrinter())
@constructor @constructor
def sum(input, axis=None, dtype=None, acc_dtype=None, keepdims=False): def sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):
""" """
Computes the sum along the given axis(es) of a tensor `input` Computes the sum along the given axis(es) of a tensor `input`
...@@ -3806,7 +3806,7 @@ pprint.assign(Sum(), printing.FunctionPrinter('sum')) ...@@ -3806,7 +3806,7 @@ pprint.assign(Sum(), printing.FunctionPrinter('sum'))
@constructor @constructor
def prod(input, axis=None, dtype=None, acc_dtype=None, keepdims=False): def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):
""" """
Computes the product along the given axis(es) of a tensor `input` Computes the product along the given axis(es) of a tensor `input`
...@@ -3871,8 +3871,8 @@ class Mean(elemwise.CAReduce): ...@@ -3871,8 +3871,8 @@ class Mean(elemwise.CAReduce):
@constructor @constructor
def mean(input, axis=None, dtype=None, acc_dtype=None, op=False, def mean(input, axis=None, dtype=None, op=False, keepdims=False,
keepdims=False): acc_dtype=None):
""" """
Computes the mean value along the given axis(es) of a tensor `input` Computes the mean value along the given axis(es) of a tensor `input`
...@@ -3880,13 +3880,6 @@ def mean(input, axis=None, dtype=None, acc_dtype=None, op=False, ...@@ -3880,13 +3880,6 @@ def mean(input, axis=None, dtype=None, acc_dtype=None, op=False,
None means all axes (like numpy). None means all axes (like numpy).
:type axis: None or int or (list of int) (see `Sum`) :type axis: None or int or (list of int) (see `Sum`)
:param acc_dtype: dtype to use for the inner summation. This will not
necessarily be the dtype of the output (in particular
if it is a discrete (int/uint) dtype, the output will
be in a float type).
If None, then we use the same rules as `sum()`.
:type dtype: None or string
:param dtype: dtype to cast the result of the inner summation into. :param dtype: dtype to cast the result of the inner summation into.
For instance, by default, a sum of a float32 tensor will be For instance, by default, a sum of a float32 tensor will be
done in float64 (acc_dtype would be float64 by default), done in float64 (acc_dtype would be float64 by default),
...@@ -3897,6 +3890,13 @@ def mean(input, axis=None, dtype=None, acc_dtype=None, op=False, ...@@ -3897,6 +3890,13 @@ def mean(input, axis=None, dtype=None, acc_dtype=None, op=False,
left in the result as dimensions with size one. With this option, left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original tensor. the result will broadcast correctly against the original tensor.
:param acc_dtype: dtype to use for the inner summation. This will not
necessarily be the dtype of the output (in particular
if it is a discrete (int/uint) dtype, the output will
be in a float type).
If None, then we use the same rules as `sum()`.
:type acc_dtype: None or string
:note: for gpu, if you specify dtype=float32, everything will be done :note: for gpu, if you specify dtype=float32, everything will be done
on the gpu. on the gpu.
""" """
...@@ -3927,8 +3927,8 @@ def mean(input, axis=None, dtype=None, acc_dtype=None, op=False, ...@@ -3927,8 +3927,8 @@ def mean(input, axis=None, dtype=None, acc_dtype=None, op=False,
# Let sum() infer the appropriate dtype. # Let sum() infer the appropriate dtype.
sum_dtype = None sum_dtype = None
s = sum(input, axis=axis, dtype=sum_dtype, acc_dtype=acc_dtype, s = sum(input, axis=axis, dtype=sum_dtype, keepdims=keepdims,
keepdims=keepdims) acc_dtype=acc_dtype)
shp = shape(input) shp = shape(input)
# Cast shp into a float type # Cast shp into a float type
......
...@@ -1609,14 +1609,6 @@ class CAReduceDtype(CAReduce): ...@@ -1609,14 +1609,6 @@ class CAReduceDtype(CAReduce):
- list of dimensions that we want to reduce - list of dimensions that we want to reduce
- if None, all dimensions are reduced - if None, all dimensions are reduced
:param acc_dtype: The dtype of the internal accumulator.
If None (default), we use a minimum precision, or the input dtype
if its precision is higher
- for int dtypes, we use int64;
- for uint dtypes, we use uint64;
- for float dtypes, we use float64;
- for complex dtypes, we use complex128.
:param dtype: The dtype of the returned :param dtype: The dtype of the returned
tensor. If None, then we use the default dtype which is the same tensor. If None, then we use the default dtype which is the same
as the input tensor's dtype except when: as the input tensor's dtype except when:
...@@ -1628,6 +1620,15 @@ class CAReduceDtype(CAReduce): ...@@ -1628,6 +1620,15 @@ class CAReduceDtype(CAReduce):
This behavior is similar in spirit to that of numpy (except numpy This behavior is similar in spirit to that of numpy (except numpy
uses the default machine integer while we always use 64 bit uses the default machine integer while we always use 64 bit
integers to avoid platform-dependent behavior). integers to avoid platform-dependent behavior).
:param acc_dtype: The dtype of the internal accumulator.
If None (default), we use the dtype in the list below,
or the input dtype if its precision is higher:
- for int dtypes, we use at least int64;
- for uint dtypes, we use at least uint64;
- for float dtypes, we use at least float64;
- for complex dtypes, we use at least complex128.
""" """
CAReduce.__init__(self, scalar_op, axis=axis) CAReduce.__init__(self, scalar_op, axis=axis)
self.dtype = dtype self.dtype = dtype
...@@ -1753,14 +1754,6 @@ class Sum(CAReduceDtype): ...@@ -1753,14 +1754,6 @@ class Sum(CAReduceDtype):
(use None to sum over all axes, and a list or tuple to sum along more (use None to sum over all axes, and a list or tuple to sum along more
than one axis). than one axis).
:param acc_dtype: The dtype of the internal accumulator.
If None (default), we use a minimum precision, or the input dtype
if its precision is higher
- for int dtypes, we use int64;
- for uint dtypes, we use uint64;
- for float dtypes, we use float64;
- for complex dtypes, we use complex128.
:param dtype: The dtype of the internal accumulator and returned :param dtype: The dtype of the internal accumulator and returned
tensor. If None, then we use the default dtype which is the same as the tensor. If None, then we use the default dtype which is the same as the
input tensor's dtype except when: input tensor's dtype except when:
...@@ -1769,6 +1762,14 @@ class Sum(CAReduceDtype): ...@@ -1769,6 +1762,14 @@ class Sum(CAReduceDtype):
- the input dtype is an unsigned integer of precision < 64 bit, in - the input dtype is an unsigned integer of precision < 64 bit, in
which case we use uint64 which case we use uint64
This value does not depend on the value of "acc_dtype". This value does not depend on the value of "acc_dtype".
:param acc_dtype: The dtype of the internal accumulator.
If None (default), we use the dtype in the list below,
or the input dtype if its precision is higher:
- for int dtypes, we use at least int64;
- for uint dtypes, we use at least uint64;
- for float dtypes, we use at least float64;
- for complex dtypes, we use at least complex128.
""" """
CAReduceDtype.__init__(self, scalar.add, axis=axis, CAReduceDtype.__init__(self, scalar.add, axis=axis,
dtype=dtype, acc_dtype=acc_dtype) dtype=dtype, acc_dtype=acc_dtype)
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register or sign in to comment