提交 56888c31 authored 作者: Nicolas Bouchard's avatar Nicolas Bouchard 提交者: Frederic

Move tests of sp_sum.

上级 85c30d7b
...@@ -1287,11 +1287,11 @@ neg = Neg() ...@@ -1287,11 +1287,11 @@ neg = Neg()
class SpSum(gof.op.Op): class SpSum(gof.op.Op):
""" """TODO: rewrite
TODO: rewrite
Scale each columns of a sparse matrix by the Scale each columns of a sparse matrix by the
corresponding element of a dense vector corresponding element of a dense vector
""" """
axis = None axis = None
sparse_grad = False sparse_grad = False
...@@ -1351,7 +1351,7 @@ class SpSum(gof.op.Op): ...@@ -1351,7 +1351,7 @@ class SpSum(gof.op.Op):
if self.axis == 0: if self.axis == 0:
if x.format == 'csc': if x.format == 'csc':
z[0] = numpy.asarray(x.sum(axis=self.axis)).reshape( z[0] = numpy.asarray(x.sum(axis=self.axis)).reshape(
(x.shape[1],)) (x.shape[1], ))
else: else:
z[0] = numpy.asarray(x.asformat(x.format).sum( z[0] = numpy.asarray(x.asformat(x.format).sum(
axis=self.axis)).reshape((x.shape[1],)) axis=self.axis)).reshape((x.shape[1],))
......
...@@ -362,72 +362,6 @@ class TestSP(unittest.TestCase): ...@@ -362,72 +362,6 @@ class TestSP(unittest.TestCase):
# symbolic stuff # symbolic stuff
utt.verify_grad(d, [kvals]) utt.verify_grad(d, [kvals])
def test_sp_sum(self):
    """Test sp_sum on all axes and axes 0/1 for csc and csr formats.

    Checks the output broadcastable pattern and compares the numeric
    result against the corresponding scipy.sparse ``sum``.
    """
    from theano.sparse import SpSum
    # TODO: test both grads (dense and sparse).
    rng = numpy.random.RandomState(42)
    from theano.sparse.basic import SparseFromDense, DenseFromSparse
    cases = [("csc", scipy.sparse.csc_matrix),
             ("csr", scipy.sparse.csr_matrix)]
    for format, cast in cases:
        x = theano.sparse.SparseType(format=format,
                                     dtype=theano.config.floatX)()
        x_data = numpy.arange(20).reshape(5, 4).astype(theano.config.floatX)

        # Sum over all axes: result is a 0-d (scalar) tensor.
        z = theano.sparse.sandbox.sp.sp_sum(x)
        assert z.type.broadcastable == ()
        f = theano.function([x], z)
        x_val = cast(x_data)
        out = f(x_val)
        expected = x_val.sum()
        assert out == expected

        # Sum on axis 0: result is a vector of length x.shape[1].
        z = theano.sparse.sandbox.sp.sp_sum(x, axis=0)
        assert z.type.broadcastable == (False,)
        f = theano.function([x], z)
        x_val = cast(x_data)
        out = f(x_val)
        expected = x_val.sum(axis=0)
        assert (out == expected).all()

        # Sum on axis 1: result is a vector of length x.shape[0].
        # scipy returns a (n, 1) matrix here, so flatten the expectation.
        z = theano.sparse.sandbox.sp.sp_sum(x, axis=1)
        assert z.type.broadcastable == (False,)
        f = theano.function([x], z)
        x_val = cast(x_data)
        out = f(x_val)
        expected = numpy.asarray(x_val.sum(axis=1)).reshape(x_val.shape[0])
        assert (out == expected).all()

        # Sparse gradient of the sum over all axes.
        # Unfinished, and suspended until verify_grad handles sparse data.
        if False:
            def fun(x):
                # verify_grad does not handle sparse data, so cast the
                # dense input to sparse, apply the op, and let the dense
                # scalar result flow back out.
                sparse_var = SparseFromDense(format)(x)
                dense_sum = theano.sparse.sandbox.sp.SpSum(
                    axis=None, sparse_grad=False)(sparse_var)
                return dense_sum
            x_val = x_data.copy()
            tensor.verify_grad(fun, [x_val], rng=rng)
def test_diag(): def test_diag():
m = theano.sparse.csc_matrix() m = theano.sparse.csc_matrix()
......
...@@ -1329,6 +1329,73 @@ def test_size(): ...@@ -1329,6 +1329,73 @@ def test_size():
check() check()
def test_sp_sum():
    """Test sp_sum on all axes and axes 0/1 for csc and csr formats.

    Checks the output broadcastable pattern and compares the numeric
    result against the corresponding scipy.sparse ``sum``.
    """
    from theano.sparse import SpSum
    # TODO: test both grads (dense and sparse).
    rng = numpy.random.RandomState(42)
    from theano.sparse.basic import SparseFromDense, DenseFromSparse
    cases = [("csc", scipy.sparse.csc_matrix),
             ("csr", scipy.sparse.csr_matrix)]
    for format, cast in cases:
        x = theano.sparse.SparseType(format=format,
                                     dtype=theano.config.floatX)()
        x_data = numpy.arange(20).reshape(5, 4).astype(theano.config.floatX)

        # Sum over all axes: result is a 0-d (scalar) tensor.
        z = theano.sparse.sp_sum(x)
        assert z.type.broadcastable == ()
        f = theano.function([x], z)
        x_val = cast(x_data)
        out = f(x_val)
        expected = x_val.sum()
        assert out == expected

        # Sum on axis 0: result is a vector of length x.shape[1].
        z = theano.sparse.sp_sum(x, axis=0)
        assert z.type.broadcastable == (False,)
        f = theano.function([x], z)
        x_val = cast(x_data)
        out = f(x_val)
        expected = x_val.sum(axis=0)
        assert (out == expected).all()

        # Sum on axis 1: result is a vector of length x.shape[0].
        # scipy returns a (n, 1) matrix here, so flatten the expectation.
        z = theano.sparse.sp_sum(x, axis=1)
        assert z.type.broadcastable == (False,)
        f = theano.function([x], z)
        x_val = cast(x_data)
        out = f(x_val)
        expected = numpy.asarray(x_val.sum(axis=1)).reshape(x_val.shape[0])
        assert (out == expected).all()

        # Sparse gradient of the sum over all axes.
        # Unfinished, and suspended until verify_grad handles sparse data.
        if False:
            def fun(x):
                # verify_grad does not handle sparse data, so cast the
                # dense input to sparse, apply the op, and let the dense
                # scalar result flow back out.
                sparse_var = SparseFromDense(format)(x)
                dense_sum = theano.sparse.SpSum(
                    axis=None, sparse_grad=False)(sparse_var)
                return dense_sum
            x_val = x_data.copy()
            tensor.verify_grad(fun, [x_val], rng=rng)
class Remove0Tester(utt.InferShapeTester): class Remove0Tester(utt.InferShapeTester):
def setUp(self): def setUp(self):
super(Remove0Tester, self).setUp() super(Remove0Tester, self).setUp()
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论