提交 46d35a55 authored 作者: Pascal Lamblin's avatar Pascal Lamblin 提交者: GitHub

Merge pull request #4810 from xoltar/conf_mat-3637

Confusion matrix
...@@ -17,7 +17,8 @@ from .nnet import ( ...@@ -17,7 +17,8 @@ from .nnet import (
graph_merge_softmax_with_crossentropy_softmax, h_softmax, graph_merge_softmax_with_crossentropy_softmax, h_softmax,
logsoftmax, logsoftmax_op, prepend_0_to_each_row, prepend_1_to_each_row, logsoftmax, logsoftmax_op, prepend_0_to_each_row, prepend_1_to_each_row,
prepend_scalar_to_each_row, relu, softmax, softmax_grad, softmax_graph, prepend_scalar_to_each_row, relu, softmax, softmax_grad, softmax_graph,
softmax_op, softmax_simplifier, softmax_with_bias, elu) softmax_op, softmax_simplifier, softmax_with_bias, elu,
confusion_matrix)
from . import opt from . import opt
from .conv import ConvOp from .conv import ConvOp
from .Conv3D import * from .Conv3D import *
......
...@@ -20,6 +20,7 @@ from six.moves import xrange ...@@ -20,6 +20,7 @@ from six.moves import xrange
import theano import theano
from theano import gof from theano import gof
from theano import scalar from theano import scalar
from theano.tensor import extra_ops
from theano.gof.opt import copy_stack_trace from theano.gof.opt import copy_stack_trace
from theano.tensor import basic as tensor, subtensor, opt, elemwise from theano.tensor import basic as tensor, subtensor, opt, elemwise
from theano.tensor.type import (values_eq_approx_remove_inf, from theano.tensor.type import (values_eq_approx_remove_inf,
...@@ -32,7 +33,6 @@ from theano.gradient import DisconnectedType ...@@ -32,7 +33,6 @@ from theano.gradient import DisconnectedType
from theano.gradient import grad_not_implemented from theano.gradient import grad_not_implemented
from theano.tensor.nnet.blocksparse import sparse_block_dot from theano.tensor.nnet.blocksparse import sparse_block_dot
############ ############
# #
# TENSOR OPS # TENSOR OPS
...@@ -2407,3 +2407,56 @@ class ScalarSoftsign(theano.scalar.UnaryScalarOp): ...@@ -2407,3 +2407,56 @@ class ScalarSoftsign(theano.scalar.UnaryScalarOp):
scalar_softsign = ScalarSoftsign(theano.scalar.upgrade_to_float, scalar_softsign = ScalarSoftsign(theano.scalar.upgrade_to_float,
name='scalar_softsign') name='scalar_softsign')
softsign = elemwise.Elemwise(scalar_softsign, name='softsign') softsign = elemwise.Elemwise(scalar_softsign, name='softsign')
def confusion_matrix(actual, pred):
    """
    Compute the confusion matrix of two 1-d vectors of observations.

    Rows index the actual labels, columns index the predicted labels.

    Parameters
    ----------
    actual : 1-d tensor variable
        Ground-truth observations.
    pred : 1-d tensor variable
        Predicted observations.

    Returns
    -------
    conf_mat : tensor variable
        Square matrix whose entry (i, j) counts the samples with actual
        label ``order[i]`` that were predicted as ``order[j]``.
    order : 1-d tensor variable
        The distinct labels, defining the ordering of rows and columns.

    Examples
    --------
    >>> import theano
    >>> from theano.tensor.nnet import confusion_matrix
    >>> x = theano.tensor.vector()
    >>> y = theano.tensor.vector()
    >>> f = theano.function([x, y], confusion_matrix(x, y))
    >>> y_true = [2, 0, 2, 2, 0, 1]
    >>> y_pred = [0, 0, 2, 2, 0, 2]
    >>> print(f(y_true, y_pred))
    [array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]]), array([ 0.,  1.,  2.])]
    """
    # Both inputs must be vectors; anything else is a caller error.
    if actual.ndim != 1:
        raise ValueError('actual must be 1-d tensor variable')
    if pred.ndim != 1:
        raise ValueError('pred must be 1-d tensor variable')

    # Distinct labels appearing in either vector; Unique(False, False, False)
    # returns only the unique values (no indices, no counts).
    order = extra_ops.Unique(False, False, False)(
        tensor.concatenate([actual, pred]))

    # Turn each vector into a column and broadcast it against the label
    # row to obtain one-hot encodings, then contract the encodings into
    # the per-pair count matrix.
    actual_col = actual.dimshuffle(0, 'x')
    pred_col = pred.dimshuffle(0, 'x')
    actual_one_hot = tensor.eq(actual_col, order).astype('int64')
    pred_one_hot = tensor.eq(pred_col, order).astype('int64')
    conf_mat = tensor.dot(actual_one_hot.T, pred_one_hot)

    return [conf_mat, order]
...@@ -32,7 +32,8 @@ from theano.tensor.nnet import (categorical_crossentropy, ...@@ -32,7 +32,8 @@ from theano.tensor.nnet import (categorical_crossentropy,
relu, relu,
h_softmax, h_softmax,
elu, elu,
binary_crossentropy) binary_crossentropy,
confusion_matrix)
from theano.tensor import matrix, vector, lvector, scalar from theano.tensor import matrix, vector, lvector, scalar
from theano.tensor.nnet.nnet import softsign from theano.tensor.nnet.nnet import softsign
from theano.tensor.tests.test_basic import (makeBroadcastTester, check_floatX, from theano.tensor.tests.test_basic import (makeBroadcastTester, check_floatX,
...@@ -1744,3 +1745,30 @@ SoftsignTester = makeBroadcastTester( ...@@ -1744,3 +1745,30 @@ SoftsignTester = makeBroadcastTester(
good=_good_broadcast_unary_normal_float_no_complex, good=_good_broadcast_unary_normal_float_no_complex,
name='SoftsignTester', name='SoftsignTester',
) )
def test_confusion_matrix():
    # Reference implementation in pure numpy, mirroring the symbolic op:
    # labels are the union of both vectors; one-hot encode each vector
    # against that label set and contract into the count matrix.
    def numpy_conf_mat(actual, pred):
        order = numpy.union1d(actual, pred)
        # Column vectors broadcast element-wise against the 1-d label
        # array. (numpy.matrix is deprecated; use explicit reshaping
        # and the == operator instead of calling __eq__ directly.)
        colA = numpy.asarray(actual).reshape(-1, 1)
        colP = numpy.asarray(pred).reshape(-1, 1)
        oneHotA = (colA == order).astype('int64')
        oneHotP = (colP == order).astype('int64')
        conf_mat = numpy.dot(oneHotA.T, oneHotP)
        return [conf_mat, order]

    x = tensor.vector()
    y = tensor.vector()
    f = theano.function([x, y], confusion_matrix(x, y))
    # Two label configurations: one where every label occurs in both
    # vectors, one where a label ('1') appears only in the actuals.
    list_inputs = [[[0, 1, 2, 1, 0], [0, 0, 2, 1, 2]],
                   [[2, 0, 2, 2, 0, 1], [0, 0, 2, 2, 0, 2]]]

    for case in list_inputs:
        a = numpy.asarray(case[0])
        b = numpy.asarray(case[1])
        out_exp = numpy_conf_mat(a, b)
        outs = f(case[0], case[1])
        for exp, out in zip(out_exp, outs):
            utt.assert_allclose(exp, out)
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论