Commit 9a0e0f39 authored by RadhikaG, committed by Bryn Keller

Refactored confusion_matrix op to function.

Parent 2a08577a
...@@ -17,7 +17,8 @@ from .nnet import ( ...@@ -17,7 +17,8 @@ from .nnet import (
graph_merge_softmax_with_crossentropy_softmax, h_softmax, graph_merge_softmax_with_crossentropy_softmax, h_softmax,
logsoftmax, logsoftmax_op, prepend_0_to_each_row, prepend_1_to_each_row, logsoftmax, logsoftmax_op, prepend_0_to_each_row, prepend_1_to_each_row,
prepend_scalar_to_each_row, relu, softmax, softmax_grad, softmax_graph, prepend_scalar_to_each_row, relu, softmax, softmax_grad, softmax_graph,
softmax_op, softmax_simplifier, softmax_with_bias, elu) softmax_op, softmax_simplifier, softmax_with_bias, elu,
confusion_matrix)
from . import opt from . import opt
from .conv import ConvOp from .conv import ConvOp
from .Conv3D import * from .Conv3D import *
......
...@@ -20,6 +20,8 @@ from six.moves import xrange ...@@ -20,6 +20,8 @@ from six.moves import xrange
import theano import theano
from theano import gof from theano import gof
from theano import scalar from theano import scalar
from theano.tensor import basic as tensor, subtensor, opt
from theano.tensor import extra_ops
from theano.gof.opt import copy_stack_trace from theano.gof.opt import copy_stack_trace
from theano.tensor import basic as tensor, subtensor, opt, elemwise from theano.tensor import basic as tensor, subtensor, opt, elemwise
from theano.tensor.type import (values_eq_approx_remove_inf, from theano.tensor.type import (values_eq_approx_remove_inf,
...@@ -32,7 +34,6 @@ from theano.gradient import DisconnectedType ...@@ -32,7 +34,6 @@ from theano.gradient import DisconnectedType
from theano.gradient import grad_not_implemented from theano.gradient import grad_not_implemented
from theano.tensor.nnet.blocksparse import sparse_block_dot from theano.tensor.nnet.blocksparse import sparse_block_dot
############ ############
# #
# TENSOR OPS # TENSOR OPS
...@@ -2409,15 +2410,15 @@ scalar_softsign = ScalarSoftsign(theano.scalar.upgrade_to_float, ...@@ -2409,15 +2410,15 @@ scalar_softsign = ScalarSoftsign(theano.scalar.upgrade_to_float,
softsign = elemwise.Elemwise(scalar_softsign, name='softsign') softsign = elemwise.Elemwise(scalar_softsign, name='softsign')
def confusion_matrix(actual, pred):
    """
    Computes the confusion matrix of given vectors containing
    actual observations and predicted observations.

    Parameters
    ----------
    actual : 1-d tensor variable
    pred : 1-d tensor variable

    Returns
    -------
    conf_mat
        Confusion matrix of actual and predicted observations:
        conf_mat[i, j] counts samples whose actual label is order[i]
        and predicted label is order[j].
    order
        Order of entries (unique labels) in terms of the original data.

    Examples
    --------
    >>> import theano
    >>> from theano.tensor.nnet import confusion_matrix

    >>> x = theano.tensor.vector()
    >>> y = theano.tensor.vector()
    >>> f = theano.function([x, y], confusion_matrix(x, y))
    >>> a = [0, 1, 2, 1, 0]
    >>> b = [0, 0, 2, 1, 2]
    >>> print(f(a, b))
    [array([[0, 0, 1],
           [2, 1, 0],
           [0, 0, 1]]), array([ 0.,  1.,  2.])]
    """
    # Rank is static metadata, so it CAN be validated at graph-build time.
    if actual.type.ndim != 1:
        raise ValueError('actual must be 1-d tensor variable')
    if pred.type.ndim != 1:
        raise ValueError('pred must be 1-d tensor variable')
    # NOTE(review): deliberately NO `if actual.shape[0] != pred.shape[0]:`
    # check here. The shapes of symbolic variables are themselves symbolic,
    # so that comparison builds a tensor whose truth value is undefined at
    # graph-construction time (TensorVariable.__bool__ raises), making the
    # function fail on every call. A length mismatch surfaces at runtime
    # via the elementwise ops below instead.

    # Unique labels appearing in either vector; numpy.unique semantics,
    # so the result is sorted and defines the row/column order.
    order = extra_ops.Unique(False, False, False)(
        tensor.concatenate([actual, pred]))

    # One-hot encode each vector against `order` by broadcasting an
    # (N, 1) column against the (K,) label vector -> (N, K) matrices.
    colA = actual.dimshuffle(0, 'x')
    colP = pred.dimshuffle(0, 'x')
    oneHotA = tensor.eq(colA, order).astype('int64')
    oneHotP = tensor.eq(colP, order).astype('int64')

    # (K, N) x (N, K) -> (K, K) count matrix.
    conf_mat = tensor.dot(oneHotA.T, oneHotP)

    return [conf_mat, order]
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment