Commit 6d7be386 authored by Frederic

don't accept the DEFAULT value for axis and update the docstring for theano.{max,min,argmax,argmin,max_and_argmax}
Parent 23d425f1
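For context, a minimal usage sketch of the behaviour this commit enforces (an illustrative snippet, not part of the diff; the explicit `axis=-1` and the `flatten()` workaround are taken from the removed warning text, and the variable names are hypothetical):

```python
import numpy
import theano
import theano.tensor as T

x = T.matrix('x')

# axis is now passed explicitly; the 'DEFAULT' sentinel is no longer accepted.
last_dim_max = T.max(x, axis=-1)          # what the old implicit default computed
overall_max = T.max(x.flatten(), axis=0)  # max over all dimensions, via the
                                          # flatten() workaround from the warnings

f = theano.function([x], [last_dim_max, overall_max])
data = numpy.arange(6.).reshape(2, 3).astype(theano.config.floatX)
print(f(data))  # per-row maxima [2., 5.] and overall maximum 5.0
```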
@@ -1874,19 +1874,6 @@ class MaxAndArgmax(Op):
    def make_node(self, x, axis=None):
        x = _as_tensor_variable(x)
        if x.type.ndim <= 1 and axis in ('DEFAULT', None):
            # The old and new behavior are not different.
            axis = 0
        if axis=='DEFAULT':
            axis=x.type.ndim - 1
            warnings.warn(("The default axis of MaxAndArgmax will change! "
                "Now we return the max and the armax over the last dimensions. "
                "It will change to be the same as numpy: the max and argmax over "
                "all dimensions. To hide this warning and be compatible with the "
                "future behavior, set axis to -1 to have the current behavior. "
                "MaxAndArgmax currently support axis over only 1 dimensions, so "
                "you must flatten the tensor to have the futur behavior."),
                stacklevel=3)
        if isinstance(axis,int):
            axis = [axis]
        elif isinstance(axis,(tuple,list)):
@@ -1982,24 +1969,10 @@ def max(x, axis=None):
""" """
Return maximum elements obtained by iterating over given axis Return maximum elements obtained by iterating over given axis
Default axis is the last one. This will change. Default axis is None: sum over all dimensions.
:note: we return an error as numpy when we reduce a dim with a shape of 0 :note: we return an error as numpy when we reduce a dim with a shape of 0
:note2: see MaxAndArgmax note for a difference between numpy and theano when axis==None
""" """
    if x.type.ndim <= 1 and axis in ('DEFAULT', None):
        # The old and new behavior are not different.
        axis = 0
    elif axis=='DEFAULT':
        axis = x.type.ndim - 1
        warnings.warn(("The default axis of max will change! Now we return the "
            "max over the last dimensions. It will change to be the same as numpy: "
            "the max over all dimensions. To hide this warning and be compatible "
            "with the future behavior, set axis to -1 to have the current "
            "behavior. To have the futur behavior set axis to range(nb dim), but "
            "this don't support the grad. To have the grad, you must flatten the "
            "tensor before calling max()."),
            stacklevel=2)
    if isinstance(axis,(list,tuple)) and len(axis)>1:
        return CAReduce(scal.maximum,axis)(x)
    try:
@@ -2013,20 +1986,8 @@ def argmax(x, axis=None):
""" """
Return indexes of maximum elements obtained by iterating over given axis Return indexes of maximum elements obtained by iterating over given axis
Default axis is the last one. This will change. Default axis is None: sum over all dimensions.
""" """
    if x.type.ndim <= 1 and axis in ('DEFAULT', None):
        # The old and new behavior are not different.
        axis = 0
    elif axis=='DEFAULT':
        axis = x.type.ndim - 1
        warnings.warn(("The default axis of argmax will change! Now we return "
            "the argmax over the last dimensions. It will change to be the same as "
            "numpy: the argmax over all dimensions. To hide this warning and be "
            "compatible with the future behavior, set axis to -1 to have the "
            "current behavior. To have the futur behavior, you must flatten the "
            "tensor before calling max()."),
            stacklevel=2)
    # In python (using MaxAndArgmax.perform()) this leads to an wasteful
    # implementation that goes through the data twice instead of once
    # but when Argmax.c_impl() is in place, it should be fine.
@@ -2034,19 +1995,6 @@ def argmax(x, axis=None):
@constructor
def min(x, axis=None):
    if x.type.ndim <= 1 and axis in ('DEFAULT', None):
        # The old and new behavior are not different.
        axis = 0
    elif axis=='DEFAULT':
        axis = x.type.ndim - 1
        warnings.warn(("The default axis of min will change! Now we return the "
            "min over the last dimensions. It will change to be the same as numpy: "
            "the min over all dimensions. To hide this warning and be compatible "
            "with the future behavior, set axis to -1 to have the current "
            "behavior. To have the future behavior, set axis to range(x.ndim), but "
            "this does not support the grad. To be able to get the grad, you must "
            "flatten the tensor before calling min()."),
            stacklevel=2)
    str_x_type = str(x.dtype)
    if str_x_type.startswith('float') or str_x_type in int_dtypes:
        return -max(-x, axis=axis)
@@ -2056,18 +2004,6 @@ def min(x, axis=None):
@constructor
def argmin(x, axis=None):
    if x.type.ndim <= 1 and axis in ('DEFAULT', None):
        # The old and new behavior are not different.
        axis = 0
    elif axis=='DEFAULT':
        axis = x.type.ndim - 1
        warnings.warn(("The default axis of argmin will change! Now we return "
            "the argmin over the last dimensions. It will change to be the same as "
            "numpy: the argmin over all dimensions. To hide this warning and be "
            "compatible with the future behavior, set axis to -1 to have the "
            "current behavior. To have the futur behavior, you must flatten the "
            "axis before calling argmin."),
            stacklevel=2)
    str_x_type = str(x.dtype)
    if str_x_type.startswith('float') or str_x_type in int_dtypes:
        return argmax(-x, axis=axis)
...
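The removed warnings and the `:note2:` above refer to numpy's convention for `axis=None`, which the new behaviour is meant to match. As a point of reference, a plain numpy sketch of that convention (not part of the commit):

```python
import numpy as np

a = np.arange(6).reshape(2, 3)   # [[0 1 2], [3 4 5]]

# With axis=None (numpy's default) the reduction runs over all dimensions,
# and argmax returns an index into the flattened array.
print(np.max(a))              # 5
print(np.argmax(a))           # 5  (flattened index)

# With an explicit axis the reduction is taken along that dimension only.
print(np.max(a, axis=-1))     # [2 5]
print(np.argmax(a, axis=-1))  # [2 2]
```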