提交 0f4436f9 authored 作者: notoraptor's avatar notoraptor

Small corrections

(error message, profile output message, and comment).
上级 7cf174c8
......@@ -104,7 +104,7 @@ class BaseTest:
def compute_host(self, test_tensor, axis):
M = self.get_host_tensor()
f = theano.function([M], [T.max(M, axis=axis), T.argmax(M, axis=axis)],
name='HOST/shape:' + str(test_tensor.shape) + '/axis:' + str(axis), mode=mode_without_gpu)
name='shape:' + str(test_tensor.shape) + '/axis:' + str(axis) + '/HOST', mode=mode_without_gpu)
check_if_gpu_maxandargmax_not_in_graph(f)
f(test_tensor)
theano_max, theano_argmax = f(test_tensor)
......@@ -115,7 +115,7 @@ class BaseTest:
def compute_gpu(self, test_gpu_tensor, test_host_tensor, axis):
M = self.get_gpu_tensor()
f = theano.function([M], [T.max(M, axis=axis), T.argmax(M, axis=axis)],
name='GPU/shape:' + str(test_gpu_tensor.shape) + '/axis:' + str(axis), mode=mode_with_gpu)
name='shape:' + str(test_gpu_tensor.shape) + '/axis:' + str(axis) + '/GPU', mode=mode_with_gpu)
check_if_gpu_maxandargmax_in_graph(f)
f(test_gpu_tensor)
theano_max, theano_argmax = f(test_gpu_tensor)
......
......@@ -1312,7 +1312,7 @@ class MaxAndArgmax(Op):
return [None, None]
if len(self.axis) != 1:
raise ValueError(('R_op supported for arg_max only for '
'constant axis!'))
'one axis!'))
if self.axis[0] > 1:
raise ValueError(('R_op supported for arg_max only when '
' axis is 0 or 1'))
......
......@@ -56,10 +56,6 @@ def local_max_and_argmax(node):
if isinstance(node.op, T.MaxAndArgmax):
axis = node.op.get_params(node)
if len(node.outputs[1].clients) == 0:
# MaxAndArgmax supports a variable axis,
# but CAReduce supports only a constant axis.
# Axis is already constant in the new version of MaxAndArgmax.
new = CAReduce(scal.maximum, axis)(node.inputs[0])
return [new, None]
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论