Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
02e78aca
提交
02e78aca
authored
6月 15, 2012
作者:
Frederic
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
ajout parametre keepdims
上级
b8165faa
显示空白字符变更
内嵌
并排
正在显示
1 个修改的文件
包含
190 行增加
和
72 行删除
+190
-72
basic.py
theano/tensor/basic.py
+190
-72
没有找到文件。
theano/tensor/basic.py
浏览文件 @
02e78aca
...
@@ -1464,11 +1464,11 @@ class _tensor_py_operators:
...
@@ -1464,11 +1464,11 @@ class _tensor_py_operators:
size
=
property
(
lambda
self
:
prod
(
self
.
shape
))
size
=
property
(
lambda
self
:
prod
(
self
.
shape
))
# We can't implement __len__ to provide a better error message.
# We can't implement __len__ to provide a better error message.
def any(self, axis=None, keepdims=False):
    """See `theano.tensor.any`.

    :param axis: axis or axes along which to reduce; None (default)
        reduces over all axes.
    :param keepdims: if True, the reduced axes are left in the result as
        dimensions of size one, so the result broadcasts correctly
        against the original tensor.
    """
    # elemwise.Any only takes the axis (see the module-level `any`), so
    # keepdims is implemented by re-expanding the reduced axes afterward
    # rather than being forwarded to the Op constructor.
    out = elemwise.Any(axis)(self)
    if keepdims:
        out = makeKeepDims(self, out, axis)
    return out
def all(self, axis=None, keepdims=False):
    """See `theano.tensor.all`.

    :param axis: axis or axes along which to reduce; None (default)
        reduces over all axes.
    :param keepdims: if True, the reduced axes are left in the result as
        dimensions of size one, so the result broadcasts correctly
        against the original tensor.
    """
    # elemwise.All only takes the axis (see the module-level `all`), so
    # keepdims is implemented by re-expanding the reduced axes afterward
    # rather than being forwarded to the Op constructor.
    out = elemwise.All(axis)(self)
    if keepdims:
        out = makeKeepDims(self, out, axis)
    return out
# Otherwise TensorVariable[:-1] does not work as Python 2.5.1 calls
# Otherwise TensorVariable[:-1] does not work as Python 2.5.1 calls
# __len__ before calling __getitem__. It also does not catch the raised
# __len__ before calling __getitem__. It also does not catch the raised
...
@@ -1618,13 +1618,13 @@ class _tensor_py_operators:
...
@@ -1618,13 +1618,13 @@ class _tensor_py_operators:
def __rdot__(right, left):
    """Reflected dot product: evaluates ``left . right``."""
    return dot(left, right)
def sum(self, axis=None, dtype=None, keepdims=False):
    """See `theano.tensor.sum`"""
    # Delegate to the module-level `sum`, which handles keepdims.
    return sum(self, axis=axis, dtype=dtype, keepdims=keepdims)
def prod(self, axis=None, dtype=None, keepdims=False):
    """See `theano.tensor.prod`"""
    # Fixed: the def line was missing its trailing colon (syntax error).
    # Delegate to the module-level `prod`, which handles keepdims.
    return prod(self, axis=axis, dtype=dtype, keepdims=keepdims)
def
norm
(
self
,
L
,
axis
=
None
):
def
norm
(
self
,
L
,
axis
=
None
):
if
L
==
0
:
if
L
==
0
:
...
@@ -1634,21 +1634,21 @@ class _tensor_py_operators:
...
@@ -1634,21 +1634,21 @@ class _tensor_py_operators:
#optimizations will/should catch cases like L=1, L=2
#optimizations will/should catch cases like L=1, L=2
return
pow
(
pow
(
abs_
(
self
),
L
)
.
sum
(
axis
=
axis
),
1.0
/
L
)
return
pow
(
pow
(
abs_
(
self
),
L
)
.
sum
(
axis
=
axis
),
1.0
/
L
)
def mean(self, axis=None, dtype=None, keepdims=False):
    """See `theano.tensor.mean`"""
    # Delegate to the module-level `mean`, which handles keepdims.
    return mean(self, axis=axis, dtype=dtype, keepdims=keepdims)
def var(self, axis=None, keepdims=False):
    """See `theano.tensor.var`"""
    # Delegate to the module-level `var`, which handles keepdims.
    return var(self, axis, keepdims=keepdims)
def min(self, axis=None, keepdims=False):
    """See `theano.tensor.min`"""
    # Delegate to the module-level `min`, which handles keepdims.
    return min(self, axis, keepdims=keepdims)
def max(self, axis=None, keepdims=False):
    """See `theano.tensor.max`"""
    # Delegate to the module-level `max`, which handles keepdims.
    return max(self, axis, keepdims=keepdims)
#TO TRUMP NUMPY OPERATORS
#TO TRUMP NUMPY OPERATORS
__array_priority__
=
1000
__array_priority__
=
1000
...
@@ -2182,7 +2182,7 @@ specify_shape = SpecifyShape()
...
@@ -2182,7 +2182,7 @@ specify_shape = SpecifyShape()
class
MaxAndArgmax
(
Op
):
class
MaxAndArgmax
(
Op
):
"""Calculate the max and argmax over a given axis.
"""Calculate the max and argmax over a given axis
or over all axes
.
"""
"""
nin
=
2
# tensor, axis
nin
=
2
# tensor, axis
nout
=
2
# max val, max idx
nout
=
2
# max val, max idx
...
@@ -2203,8 +2203,8 @@ class MaxAndArgmax(Op):
...
@@ -2203,8 +2203,8 @@ class MaxAndArgmax(Op):
list
(
axis
)
list
(
axis
)
axis
.
sort
()
axis
.
sort
()
assert
axis
==
range
(
x
.
type
.
ndim
),
(
assert
axis
==
range
(
x
.
type
.
ndim
),
(
"MaxAndArgmax do
n'
t support multiple"
"MaxAndArgmax do
es no
t support multiple"
" ax
is. the max fct support
it."
)
" ax
es. the max fct supports
it."
)
# we make the axis all positive to make the infer_shape work
# we make the axis all positive to make the infer_shape work
# with negative axis
# with negative axis
if
x
.
type
.
ndim
>
0
and
axis
is
not
None
:
if
x
.
type
.
ndim
>
0
and
axis
is
not
None
:
...
@@ -2274,6 +2274,11 @@ class MaxAndArgmax(Op):
...
@@ -2274,6 +2274,11 @@ class MaxAndArgmax(Op):
max_pos
],
None
]
max_pos
],
None
]
def
grad
(
self
,
inp
,
grads
):
def
grad
(
self
,
inp
,
grads
):
# The strict sense mathematical gradient of the maximum function is
# not calculated here for it is not defined at every point where some
# coordinates are identical. However, since the latter set has null
# Lebesgue measure, the result may be interpreted as weak gradient.
# @note: This function should work correctly for L{vector}s.
# @note: This function should work correctly for L{vector}s.
# (x, y), (gz, gw)
# (x, y), (gz, gw)
# gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy
# gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy
...
@@ -2311,58 +2316,133 @@ class MaxAndArgmax(Op):
...
@@ -2311,58 +2316,133 @@ class MaxAndArgmax(Op):
_max_and_argmax
=
MaxAndArgmax
()
_max_and_argmax
=
MaxAndArgmax
()
@_redefine_asRoutine
(
_max_and_argmax
)
def makeKeepDims(x, y, axis):
    """
    Reintroduces in y with length one the axes of x which have been left out
    in a prior reduction of x. With this option, the resulting tensor will
    broadcast correctly against the original tensor x.

    :param x: the tensor that was reduced.
    :param y: the result of reducing x over `axis`.
    :param axis: None, an int, or a collection of ints that were reduced.
    """
    # Normalize axis: None means every axis was reduced; a single int
    # becomes a one-element list so `j in axis` below is well defined.
    if axis is None:
        axis = range(x.type.ndim)
    elif isinstance(axis, int):
        axis = [axis]
    # Fixed: `new_dims` was used without being initialized (NameError).
    new_dims = []
    i = 0
    for j, _ in enumerate(x.type.broadcastable):
        if j in axis:
            # Reduced axis: reinsert it as a broadcastable dimension.
            new_dims.append('x')
        else:
            # Kept axis: map it to the next dimension of y.
            new_dims.append(i)
            i += 1
    return DimShuffle(y.type.broadcastable, new_dims)(y)
@constructor
def max_and_argmax(a, axis=None, keepdims=False):
    """
    Returns maximum elements and their indices obtained by iterating over
    given axis

    When axis is None (the default value), the max is performed
    over the flattened tensor.

    keepdims: If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.
    """
    # Fixed: decorator spelled `@_constructor`, inconsistent with the
    # `@constructor` decorator used by every other constructor here.
    out, argout = _max_and_argmax(a, axis)
    if keepdims:
        out = makeKeepDims(a, out, axis)
        argout = makeKeepDims(a, argout, axis)
    return [out, argout]
@constructor
def max(x, axis=None, keepdims=False):
    """
    Returns maximum elements obtained by iterating over given axis

    When axis is None (the default value), the max is performed
    over the flattened tensor.

    keepdims: If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.

    :note: we return an error as numpy when we reduce a dim with a shape of 0
    """
    if isinstance(axis, (list, tuple)) and len(axis) > 1:
        # MaxAndArgmax does not support multiple axes; reduce with
        # CAReduce directly.
        out = CAReduce(scal.maximum, axis)(x)
    else:
        # Fixed: without this `else`, the multi-axis result above was
        # clobbered by the fall-through once the original `return`s were
        # turned into assignments.
        try:
            # A symbolically-constant axis can also go through CAReduce.
            const = get_constant_value(axis)
            out = CAReduce(scal.maximum, list(const))(x)
        except Exception:
            out = max_and_argmax(x, axis)[0]
    if keepdims:
        out = makeKeepDims(x, out, axis)
    return out
@constructor
def argmax(x, axis=None, keepdims=False):
    """
    Returns indices of maximum elements obtained by iterating over given axis

    When axis is None (the default value), the argmax is performed
    over the flattened tensor.

    keepdims: If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.
    """
    # In python (using MaxAndArgmax.perform()) this leads to a wasteful
    # implementation that goes through the data twice instead of once
    # but when Argmax.c_impl() is in place, it should be fine.
    argout = max_and_argmax(x, axis)[1]
    if keepdims:
        argout = makeKeepDims(x, argout, axis)
    return argout
@constructor
def min(x, axis=None, keepdims=False):
    """
    Returns minimum elements obtained by iterating over given axis

    When axis is None (the default value), the min is performed
    over the flattened tensor.

    keepdims: If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.
    """
    str_x_type = str(x.dtype)
    if str_x_type.startswith('float') or str_x_type in int_dtypes:
        # min is expressed through max on the negated input; this is only
        # valid for signed dtypes.
        # Fixed: the computed value was assigned to a local but never
        # returned, so the function returned None.
        return -max(-x, axis=axis, keepdims=keepdims)
    else:
        # Be careful about unsigned integers, complex
        raise NotImplementedError()
@constructor
def argmin(x, axis=None, keepdims=False):
    """
    Returns indices of minimum elements obtained by iterating over given axis

    When axis is None (the default value), the argmin is performed
    over the flattened tensor.

    keepdims: If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.
    """
    str_x_type = str(x.dtype)
    if str_x_type.startswith('float') or str_x_type in int_dtypes:
        # argmin of x is argmax of -x; only valid for signed dtypes.
        return argmax(-x, axis=axis, keepdims=keepdims)
    else:
        # Be careful about unsigned integers, complex
        raise NotImplementedError()
...
@@ -3029,27 +3109,51 @@ pprint.assign(tensor_copy, printing.IgnorePrinter())
...
@@ -3029,27 +3109,51 @@ pprint.assign(tensor_copy, printing.IgnorePrinter())
@constructor
def sum(input, axis=None, dtype=None, keepdims=False):
    """
    Computes the sum along the given axis(es) of a tensor `input`

    When axis is None (the default value), the sum is performed
    over the flattened tensor.

    keepdims: If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.

    For full documentation see ``tensor.elemwise.Sum``.
    In particular please pay attention to the important warning when using
    a custom dtype.
    """
    out = elemwise.Sum(axis=axis, dtype=dtype)(input)
    if keepdims:
        out = makeKeepDims(input, out, axis)
    return out
pprint
.
assign
(
Sum
(),
printing
.
FunctionPrinter
(
'sum'
))
pprint
.
assign
(
Sum
(),
printing
.
FunctionPrinter
(
'sum'
))
@constructor
def prod(input, axis=None, dtype=None, keepdims=False):
    """
    Computes the product along the given axis(es) of a tensor `input`

    When axis is None (the default value), the product is performed
    over the flattened tensor.

    keepdims: If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.

    For full documentation see ``tensor.elemwise.Prod``.
    """
    out = elemwise.Prod(axis, dtype=dtype)(input)
    if keepdims:
        out = makeKeepDims(input, out, axis)
    return out
class
Mean
(
elemwise
.
CAReduce
):
class
Mean
(
elemwise
.
CAReduce
):
...
@@ -3088,8 +3192,9 @@ class Mean(elemwise.CAReduce):
...
@@ -3088,8 +3192,9 @@ class Mean(elemwise.CAReduce):
@constructor
@constructor
def
mean
(
input
,
axis
=
None
,
dtype
=
None
,
op
=
False
):
def
mean
(
input
,
axis
=
None
,
dtype
=
None
,
op
=
False
,
keepdims
=
False
):
"""Compute the mean value along the given axis of a tensor `input`
"""
Computes the mean value along the given axis(es) of a tensor `input`
:param axis: compute the mean along this axis of the tensor.
:param axis: compute the mean along this axis of the tensor.
None means all axes (like numpy).
None means all axes (like numpy).
...
@@ -3102,9 +3207,14 @@ def mean(input, axis=None, dtype=None, op=False):
...
@@ -3102,9 +3207,14 @@ def mean(input, axis=None, dtype=None, op=False):
If None, then we use the same rules as `sum()`.
If None, then we use the same rules as `sum()`.
:type dtype: None or string
:type dtype: None or string
:param keepdims: If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original tensor.
:note: for gpu, if you specify dtype=float32, everything will be done
:note: for gpu, if you specify dtype=float32, everything will be done
on the gpu.
on the gpu.
"""
"""
if
op
:
if
op
:
if
dtype
not
in
(
None
,
'float64'
):
if
dtype
not
in
(
None
,
'float64'
):
raise
NotImplementedError
(
raise
NotImplementedError
(
...
@@ -3112,7 +3222,10 @@ def mean(input, axis=None, dtype=None, op=False):
...
@@ -3112,7 +3222,10 @@ def mean(input, axis=None, dtype=None, op=False):
'and will always use float64. If you want to specify '
'and will always use float64. If you want to specify '
'the dtype, call tensor.mean(..., op=False).'
,
'the dtype, call tensor.mean(..., op=False).'
,
dtype
)
dtype
)
return
Mean
(
axis
)(
input
)
out
=
Mean
(
axis
)(
input
)
if
keepdims
:
out
=
makeKeepDims
(
input
,
out
,
axis
)
return
out
if
dtype
is
not
None
:
if
dtype
is
not
None
:
# The summation will be done with the specified dtype.
# The summation will be done with the specified dtype.
...
@@ -3122,7 +3235,7 @@ def mean(input, axis=None, dtype=None, op=False):
...
@@ -3122,7 +3235,7 @@ def mean(input, axis=None, dtype=None, op=False):
# Let sum() infer the appropriate dtype.
# Let sum() infer the appropriate dtype.
sum_dtype
=
None
sum_dtype
=
None
s
=
sum
(
input
,
axis
=
axis
,
dtype
=
sum_dtype
)
s
=
sum
(
input
,
axis
=
axis
,
dtype
=
sum_dtype
,
keepdims
=
keepdims
)
shp
=
shape
(
input
)
shp
=
shape
(
input
)
# Cast shp into a float type
# Cast shp into a float type
...
@@ -3138,6 +3251,7 @@ def mean(input, axis=None, dtype=None, op=False):
...
@@ -3138,6 +3251,7 @@ def mean(input, axis=None, dtype=None, op=False):
elif
isinstance
(
axis
,
int
):
elif
isinstance
(
axis
,
int
):
axis
=
[
axis
]
axis
=
[
axis
]
# This sequential division will possibly be optimized by Theano:
for
i
in
axis
:
for
i
in
axis
:
s
=
true_div
(
s
,
shp
[
i
])
s
=
true_div
(
s
,
shp
[
i
])
...
@@ -3145,54 +3259,50 @@ def mean(input, axis=None, dtype=None, op=False):
...
@@ -3145,54 +3259,50 @@ def mean(input, axis=None, dtype=None, op=False):
@constructor
def var(input, axis=None, keepdims=False):
    """
    Computes the variance along the given axis(es) of a tensor `input`.

    :param axis: Compute the variance along this axis of the tensor.
                 None means all axes (like numpy).
    :type axis: None or int or (list of int) (see `Sum`)

    :param keepdims: If this is set to True, the axes which are reduced are
        left in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original tensor.
    """
    input_ndim = input.type.ndim
    # Normalize axis to a list of ints so downstream calls see one form.
    if axis is None:
        axis = range(input_ndim)
    if isinstance(axis, int):
        axis = [axis]

    # compute the axis-wise mean, keeping the reduced axes so it
    # broadcasts against `input` without an explicit DimShuffle
    mean_input = mean(input, axis, keepdims=True)

    # center the input
    centered_input = input - mean_input

    # return the mean sqr
    return mean((centered_input ** 2), axis, keepdims=keepdims)
@constructor
def std(input, axis=None, keepdims=False):
    """
    Computes the standard deviation along the given axis(es) of a tensor
    `input`.

    :param axis: Compute the standard deviation along this axis of the tensor.
                 None means all axes (like numpy).
    :type axis: None or int or (list of int) (see `Sum`)

    :param keepdims: If this is set to True, the axes which are reduced are
        left in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original tensor.
    """
    # std is simply the square root of the variance.
    return sqrt(var(input=input, axis=axis, keepdims=keepdims))
if
0
:
if
0
:
## COMMENTED OUT FEB 17 2010
## COMMENTED OUT FEB 17 2010
...
@@ -6327,9 +6437,17 @@ def outer(x, y):
...
@@ -6327,9 +6437,17 @@ def outer(x, y):
y
.
dimshuffle
(
'x'
,
0
))
y
.
dimshuffle
(
'x'
,
0
))
def any(x, axis=None, keepdims=False):
    """Apply an `any` (logical OR) reduction over the given axis(es) of x.

    keepdims: If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.
    """
    out = elemwise.Any(axis)(x)
    if keepdims:
        out = makeKeepDims(x, out, axis)
    return out
def all(x, axis=None, keepdims=False):
    """Apply an `all` (logical AND) reduction over the given axis(es) of x.

    keepdims: If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.
    """
    out = elemwise.All(axis)(x)
    if keepdims:
        out = makeKeepDims(x, out, axis)
    return out
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论