Commit 002a62bb authored by James Bergstra

Softmax accepts a vector argument

Parent: 7eb3caf2
...@@ -317,12 +317,12 @@ class Softmax(gof.Op): ...@@ -317,12 +317,12 @@ class Softmax(gof.Op):
def make_node(self, x):
    """Build the Apply node computing the softmax of `x`.

    `x` must be a 1-d or 2-d tensor of float32/float64. A vector input
    is left-padded into a single-row matrix before the op is applied,
    so the node's output type matches the (possibly padded) input —
    NOTE(review): this means a vector argument presumably yields a
    2-d (1, n) result; confirm against callers.

    :raises ValueError: if `x` is not a 1-d/2-d float tensor.
    """
    x = tensor.as_tensor_variable(x)
    ndim_ok = x.type.ndim in (1, 2)
    dtype_ok = x.type.dtype in ['float32', 'float64']
    if not (ndim_ok and dtype_ok):
        raise ValueError('x must be 1-d or 2-d tensor of floats')
    if x.ndim == 1:
        # Promote the vector to a one-row matrix so the op always sees 2-d.
        x = tensor.shape_padleft(x, n_ones=1)
    return gof.Apply(self, [x], [x.type()])
def perform(self, node, input_storage, output_storage): def perform(self, node, input_storage, output_storage):
x, = input_storage x, = input_storage
......
...@@ -42,6 +42,13 @@ class T_Softmax(unittest.TestCase): ...@@ -42,6 +42,13 @@ class T_Softmax(unittest.TestCase):
return softmax(a)[:,3] return softmax(a)[:,3]
utt.verify_grad(f, [numpy.random.rand(3,4)]) utt.verify_grad(f, [numpy.random.rand(3,4)])
def test_vector(self):
    """Softmax of a symbolic vector agrees with the numpy reference value."""
    inp = T.vector()
    fn = theano.function([inp], softmax(inp))
    data = numpy.random.randn(6)
    expected = numpy.exp(data) / numpy.exp(data).sum()
    assert numpy.allclose(fn(data), expected)
class T_SoftmaxWithBias(unittest.TestCase): class T_SoftmaxWithBias(unittest.TestCase):
def setUp(self): def setUp(self):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论