Commit 677e7a2e, authored by Frederic

change the default of theano.{max,min,argmax,argmin,max_and_argmax} to the same as numpy.

Parent: f40a8a25
...@@ -1862,13 +1862,6 @@ specify_shape = SpecifyShape() ...@@ -1862,13 +1862,6 @@ specify_shape = SpecifyShape()
class MaxAndArgmax(Op): class MaxAndArgmax(Op):
"""Calculate the max and argmax over a given axis. """Calculate the max and argmax over a given axis.
.. note::
If axis is None it means to calculate the max over the last dimension which is
DIFFERENT FROM NUMPY!!
To have the behavior of numpy do a flatten of the input before passing the data to this op.
If the input to flatten is not ccontiguous, this will make a copy to a contiguous version.
""" """
nin=2 # tensor, axis nin=2 # tensor, axis
nout=2 # max val, max idx nout=2 # max val, max idx
...@@ -1879,7 +1872,7 @@ class MaxAndArgmax(Op): ...@@ -1879,7 +1872,7 @@ class MaxAndArgmax(Op):
def __hash__(self): def __hash__(self):
return hash(type(self)) return hash(type(self))
def make_node(self, x, axis='DEFAULT'): def make_node(self, x, axis=None):
x = _as_tensor_variable(x) x = _as_tensor_variable(x)
if x.type.ndim <= 1 and axis in ('DEFAULT', None): if x.type.ndim <= 1 and axis in ('DEFAULT', None):
# The old and new behavior are not different. # The old and new behavior are not different.
...@@ -1894,16 +1887,6 @@ class MaxAndArgmax(Op): ...@@ -1894,16 +1887,6 @@ class MaxAndArgmax(Op):
"MaxAndArgmax currently support axis over only 1 dimensions, so " "MaxAndArgmax currently support axis over only 1 dimensions, so "
"you must flatten the tensor to have the futur behavior."), "you must flatten the tensor to have the futur behavior."),
stacklevel=3) stacklevel=3)
elif axis is None:
axis = x.type.ndim - 1
warnings.warn(("The behavior of MaxAndArgmax when axis==None will "
"change! Now we return the max and argmax over the last "
"dimensions. It will change to the max and argmax over all "
"dimensions as numpy. To hide this warning and be compatible with "
"the future behavior, set axis to -1 to have the current behavior. "
"MaxAndArgmax currently support axis over only 1 dimensions, so "
"you must flatten the tensor to have the futur behavior."),
stacklevel=3)
if isinstance(axis,int): if isinstance(axis,int):
axis = [axis] axis = [axis]
elif isinstance(axis,(tuple,list)): elif isinstance(axis,(tuple,list)):
...@@ -1995,7 +1978,7 @@ def max_and_argmax(a): ...@@ -1995,7 +1978,7 @@ def max_and_argmax(a):
@constructor @constructor
def max(x, axis='DEFAULT'): def max(x, axis=None):
""" """
Return maximum elements obtained by iterating over given axis Return maximum elements obtained by iterating over given axis
...@@ -2017,16 +2000,6 @@ def max(x, axis='DEFAULT'): ...@@ -2017,16 +2000,6 @@ def max(x, axis='DEFAULT'):
"this don't support the grad. To have the grad, you must flatten the " "this don't support the grad. To have the grad, you must flatten the "
"tensor before calling max()."), "tensor before calling max()."),
stacklevel=2) stacklevel=2)
elif axis is None:
axis = x.type.ndim - 1
warnings.warn(("The behavior of max when axis==None will change! Now "
"we return the max over the last dimensions. It will change to the max "
"over all dimensions as numpy. To hide this warning and be compatible "
"with the future behavior, set axis to -1 to have the current "
"behavior. To have the futur behavior set axis to range(nb dim), but "
"this don't support the grad. To have the grad, you must flatten the "
"tensor before calling max()."),
stacklevel=2)
if isinstance(axis,(list,tuple)) and len(axis)>1: if isinstance(axis,(list,tuple)) and len(axis)>1:
return CAReduce(scal.maximum,axis)(x) return CAReduce(scal.maximum,axis)(x)
try: try:
...@@ -2036,7 +2009,7 @@ def max(x, axis='DEFAULT'): ...@@ -2036,7 +2009,7 @@ def max(x, axis='DEFAULT'):
return max_and_argmax(x,axis)[0] return max_and_argmax(x,axis)[0]
@constructor @constructor
def argmax(x, axis='DEFAULT'): def argmax(x, axis=None):
""" """
Return indexes of maximum elements obtained by iterating over given axis Return indexes of maximum elements obtained by iterating over given axis
...@@ -2054,22 +2027,13 @@ def argmax(x, axis='DEFAULT'): ...@@ -2054,22 +2027,13 @@ def argmax(x, axis='DEFAULT'):
"current behavior. To have the futur behavior, you must flatten the " "current behavior. To have the futur behavior, you must flatten the "
"tensor before calling max()."), "tensor before calling max()."),
stacklevel=2) stacklevel=2)
elif axis is None:
axis = x.type.ndim - 1
warnings.warn(("The behavior of argmax when axis==None will change! "
"Now we return the argmax over the last dimensions. It will change to "
"the argmax over all dimensions as numpy. To hide this warning and be "
"compatible with the future behavior, set axis to -1 to have the "
"current behavior. To have the futur behavior, you must flatten the "
"tensor before calling argmax()."),
stacklevel=2)
# In python (using MaxAndArgmax.perform()) this leads to an wasteful # In python (using MaxAndArgmax.perform()) this leads to an wasteful
# implementation that goes through the data twice instead of once # implementation that goes through the data twice instead of once
# but when Argmax.c_impl() is in place, it should be fine. # but when Argmax.c_impl() is in place, it should be fine.
return max_and_argmax(x,axis)[1] return max_and_argmax(x,axis)[1]
@constructor @constructor
def min(x, axis='DEFAULT'): def min(x, axis=None):
if x.type.ndim <= 1 and axis in ('DEFAULT', None): if x.type.ndim <= 1 and axis in ('DEFAULT', None):
# The old and new behavior are not different. # The old and new behavior are not different.
axis = 0 axis = 0
...@@ -2083,16 +2047,6 @@ def min(x, axis='DEFAULT'): ...@@ -2083,16 +2047,6 @@ def min(x, axis='DEFAULT'):
"this does not support the grad. To be able to get the grad, you must " "this does not support the grad. To be able to get the grad, you must "
"flatten the tensor before calling min()."), "flatten the tensor before calling min()."),
stacklevel=2) stacklevel=2)
elif axis is None:
axis = x.type.ndim - 1
warnings.warn(("The behavior of min when axis is None will change! Now "
"we return the min over the last dimensions. It will change to the min "
"over all dimensions as numpy. To hide this warning and be compatible "
"with the future behavior, set axis to -1 to have the current "
"behavior. To have the future behavior, set axis to range(x.ndim), but "
"this does not support the grad. To be able to get the grad, you must "
"flatten the tensor before calling min()."),
stacklevel=2)
str_x_type = str(x.dtype) str_x_type = str(x.dtype)
if str_x_type.startswith('float') or str_x_type in int_dtypes: if str_x_type.startswith('float') or str_x_type in int_dtypes:
return -max(-x, axis=axis) return -max(-x, axis=axis)
...@@ -2101,7 +2055,7 @@ def min(x, axis='DEFAULT'): ...@@ -2101,7 +2055,7 @@ def min(x, axis='DEFAULT'):
raise NotImplementedError() raise NotImplementedError()
@constructor @constructor
def argmin(x, axis='DEFAULT'): def argmin(x, axis=None):
if x.type.ndim <= 1 and axis in ('DEFAULT', None): if x.type.ndim <= 1 and axis in ('DEFAULT', None):
# The old and new behavior are not different. # The old and new behavior are not different.
axis = 0 axis = 0
...@@ -2114,15 +2068,6 @@ def argmin(x, axis='DEFAULT'): ...@@ -2114,15 +2068,6 @@ def argmin(x, axis='DEFAULT'):
"current behavior. To have the futur behavior, you must flatten the " "current behavior. To have the futur behavior, you must flatten the "
"axis before calling argmin."), "axis before calling argmin."),
stacklevel=2) stacklevel=2)
elif axis is None:
axis = x.type.ndim - 1
warnings.warn(("The behavior of argmin when axis==None will change! "
"Now we return the argmin over the last dimensions. It will change to "
"the argmin over all dimensions as numpy. To hide this warning and be "
"compatible with the future behavior, set axis to -1 to have the "
"current behavior. To have the futur behavior, you must flatten the "
"axis before calling argmin."),
stacklevel=2)
str_x_type = str(x.dtype) str_x_type = str(x.dtype)
if str_x_type.startswith('float') or str_x_type in int_dtypes: if str_x_type.startswith('float') or str_x_type in int_dtypes:
return argmax(-x, axis=axis) return argmax(-x, axis=axis)
......
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment