Commit 34caad67, authored by nouiz

Merge pull request #335 from vlb/master

CCW#37: REVIEW/TEST/DOC theano/sparse/sandbox/sp.py:RemoveO
......@@ -462,9 +462,10 @@ class Function(object):
try:
s = finder[item]
except KeyError:
raise TypeError("Unknown input or state: %s" % item)
raise TypeError("Unknown input or state: %s" % str(item))
if s is DUPLICATE:
raise TypeError("Ambiguous name: %s - please check the names of the inputs of your function for duplicates." % item)
raise TypeError("Ambiguous name: %s - please check the names "\
"of the inputs of your function for duplicates." % str(item))
if isinstance(s, gof.Container):
return s.value
else:
......@@ -475,9 +476,10 @@ class Function(object):
except KeyError:
# Print informative error message.
msg = get_info_on_inputs(named_inputs, n_unnamed_inputs)
raise TypeError("Unknown input or state: %s. %s" % (item, msg))
raise TypeError("Unknown input or state: %s. %s" % (str(item), msg))
if s is DUPLICATE:
raise TypeError("Ambiguous name: %s - please check the names of the inputs of your function for duplicates." % item)
raise TypeError("Ambiguous name: %s - please check the names "\
"of the inputs of your function for duplicates." % str(item))
if isinstance(s, gof.Container):
s.value = value
s.provided += 1
......
......@@ -244,32 +244,35 @@ class Remove0(Op):
"""
Remove explicit zeros from a sparse matrix, and resort indices
"""
def make_node(self, x):
return gof.Apply(self, [x], [x.type()])
def perform(self,node, (x,), (z,)):
if x.format != 'csc':
raise TypeError('Remove0 only works on csc matrices')
def __init__(self, inplace=False, *args, **kwargs):
    """Build a Remove0 op.

    :param inplace: if True, zeros are eliminated directly on the
        input matrix instead of on a copy (see ``perform``), and the
        op declares via ``destroy_map`` that it destroys input 0.
    """
    Op.__init__(self, *args, **kwargs)
    self.inplace = inplace
    if self.inplace:
        # Inform the optimizer that output 0 overwrites input 0.
        self.destroy_map = {0: [0]}
M, N = x.shape
def __eq__(self, other):
    """Two Remove0 ops compare equal iff they share the exact class
    and the same ``inplace`` flag."""
    same_class = type(self) == type(other)
    return same_class and self.inplace == other.inplace
data = x.data
indices = x.indices
indptr = x.indptr
def __hash__(self):
    """Hash consistent with ``__eq__``: combines the class and the
    ``inplace`` flag with a fixed salt."""
    mixed = hash(type(self)) ^ hash(self.inplace)
    return mixed ^ 64153
#TODO: try using ndarrays and then prune() on the result
new_data = []
new_indices = []
new_indptr = [0]
def __str__(self):
    """Render as e.g. ``Remove0{}`` or ``Remove0{inplace}``."""
    flags = ['inplace'] if self.inplace else []
    return '%s{%s}' % (self.__class__.__name__, ', '.join(flags))
for j in xrange(0, N):
for i_idx in xrange(indptr[j], indptr[j+1]):
if data[i_idx] != 0:
new_data.append(data[i_idx])
new_indices.append(indices[i_idx])
new_indptr.append(len(new_indices))
def make_node(self, x):
    # The output variable has the same sparse type as the input.
    return gof.Apply(self, [x], [x.type()])
z[0] = sparse.csc_matrix((new_data, new_indices, new_indptr), (M,N))
def perform(self,node, (x,), (z,)):
    # NOTE: Python 2 tuple-unpacking parameter syntax; this file
    # predates Python 3.
    if self.inplace:
        # Mutate the input matrix directly (declared via destroy_map
        # in __init__).
        c = x
    else:
        c = x.copy()
    # scipy sparse matrices drop their explicitly stored zeros here,
    # operating in place on `c`.
    c.eliminate_zeros()
    z[0] = c
def grad(self, (x,), (gz,)):
    # Removing explicit zeros does not change any stored value, so the
    # gradient flows through unchanged.
    return [gz]
......@@ -303,7 +306,7 @@ class EnsureSortedIndices(Op):
def infer_shape(self, node, i0_shapes):
    # Sorting indices does not change the matrix dimensions, so the
    # output shapes are exactly the input shapes.
    return i0_shapes
def __str__(self):
if self.inplace:
return self.__class__.__name__ + "{inplace}"
......
......@@ -16,7 +16,6 @@ from theano import function, tensor
import theano
from theano.sparse.sandbox import sp
from theano.tests import unittest_tools as utt
from theano.sparse.tests.test_basic import random_lil
class TestSP(unittest.TestCase):
......@@ -27,43 +26,44 @@ class TestSP(unittest.TestCase):
# fixed parameters
bsize = 10 # batch size
imshp = (28,28)
kshp = (5,5)
imshp = (28, 28)
kshp = (5, 5)
nkern = 5
ssizes = ((1,1),(2,2),(3,3),(4,4))
convmodes = ('full','valid')
ssizes = ((1, 1), (2, 2), (3, 3), (4, 4))
convmodes = ('full', 'valid')
# symbolic stuff
bias = tensor.dvector()
kerns = tensor.dmatrix()
input = tensor.dmatrix()
rng = numpy.random.RandomState(3423489)
filters = rng.randn(nkern,numpy.prod(kshp))
filters = rng.randn(nkern, numpy.prod(kshp))
biasvals = rng.randn(nkern)
for mode in ('FAST_COMPILE','FAST_RUN'): #, profmode):
for mode in ('FAST_COMPILE', 'FAST_RUN'): # , profmode):
ttot, ntot = 0, 0
for conv_mode in convmodes:
for ss in ssizes:
output, outshp = sp.convolve(kerns, kshp, nkern, input,\
output, outshp = sp.convolve(kerns, kshp, nkern, input,\
imshp, ss, bias=bias, mode=conv_mode)
f = function([kerns, bias, input], output, mode=mode)
# now test with real values
img2d = numpy.arange(bsize*numpy.prod(imshp)).reshape((bsize,)+imshp)
img1d = img2d.reshape(bsize,-1)
img2d = numpy.arange(bsize * numpy.prod(imshp)).reshape(( \
bsize,) + imshp)
img1d = img2d.reshape(bsize, -1)
# create filters (need to be flipped to use convolve2d)
filtersflipped = numpy.zeros((nkern,)+kshp)
filtersflipped = numpy.zeros((nkern,) + kshp)
for k in range(nkern):
it = reversed(filters[k,:])
it = reversed(filters[k, :])
for i in range(kshp[0]):
for j in range(kshp[1]):
filtersflipped[k,i,j] = it.next()
# compute output with convolve2d
if conv_mode=='valid':
if conv_mode == 'valid':
fulloutshp = numpy.array(imshp) - numpy.array(kshp) + 1
else:
fulloutshp = numpy.array(imshp) + numpy.array(kshp) - 1
......@@ -71,11 +71,11 @@ class TestSP(unittest.TestCase):
refout = numpy.zeros((bsize,)+tuple(fulloutshp)+(nkern,))
for b in range(bsize):
for n in range(nkern):
refout[b,...,n] = convolve2d(\
img2d[b,:,:], filtersflipped[n,...],conv_mode)
refout[b,...,n] = convolve2d(img2d[b,:,:],
filtersflipped[n,...],
conv_mode)
ntot += time.time() - ntime1
# need to flatten images
bench1 = refout[:,0::ss[0],0::ss[1],:].reshape(bsize,-1,nkern)
bench1 += biasvals.reshape(1,1,nkern)
......@@ -426,6 +426,42 @@ class TestSP(unittest.TestCase):
#utt.verify_grad(SpSum(axis=None), [x_val])
print 'ok'
def test_remove0():
    # Python 2 test: exercises sp.Remove0 on both CSC and CSR formats
    # and checks that the inplace substitution is applied by the
    # optimizer in optimizing modes.
    print
    print 'test_remove0()'
    configs=[
        # structure type, numpy matching class
        ('csc',scipy.sparse.csc_matrix),
        ('csr',scipy.sparse.csr_matrix),
        ]

    for format,matrix_class in configs:
        print 'config: format=\'%(format)s\', matrix_class=%(matrix_class)s'%locals()

        # real
        origin = (numpy.arange(9) + 1).reshape((3, 3)).astype(theano.config.floatX)
        mat = matrix_class(origin).astype(theano.config.floatX)

        # Introduce explicitly stored zeros; .size stays 9 until they
        # are eliminated.
        mat[0,1] = mat[1,0] = mat[2,2] = 0

        assert mat.size == 9

        # symbolic
        x = theano.sparse.SparseType(format=format, dtype=theano.config.floatX)()

        # the In thingy has to be there because theano has as rule not to optimize inputs
        f = theano.function([theano.In(x, borrow=True, mutable=True)], sp.Remove0()(x))

        # assert optimization is applied in modes with optimization
        if theano.config.mode not in ['FAST_COMPILE']:
            # list of apply nodes in the optimized graph.
            nodes = f.maker.env.toposort()
            v = [True for node in nodes if isinstance(node.op, sp.Remove0) and node.op.inplace]
            assert len(v), 'Inplacing optimization should have been applied.'

        # checking
        # makes sense to change its name
        target = mat
        result = f(mat)

        # Compare against scipy's own zero elimination on the same
        # matrix.
        mat.eliminate_zeros()

        assert result.size == target.size, 'Matrices sizes differ. Have zeros been removed ?'
def test_diagonal():
for K in 1, 5:
......@@ -456,13 +492,13 @@ def test_ensure_sorted_indices():
# csr
input_tensor = theano.sparse.csr_dmatrix()
sample = scipy.sparse.csr_matrix(random_lil((x,y),'float64',sparsity))
sort_op = sp.ensure_sorted_indices(input_tensor)
f = theano.function([input_tensor], sort_op)
sorted_scipy = sample.sorted_indices()
sorted_theano = f(sample)
assert numpy.all(sorted_theano.todense() == sorted_scipy.todense())
def test_diagonal_grad():
def d(x):
return sp.sp_sum(sp.square_diagonal(x), sparse_grad=True)
......@@ -532,6 +568,9 @@ def test_col_scale():
print >> sys.stderr, "WARNING: skipping gradient test because verify_grad doesn't support sparse arguments"
if __name__ == '__main__':
if 0:
test_remove0()
exit()
if 1:
testcase = TestSP
suite = unittest.TestLoader()
......
......@@ -731,7 +731,7 @@ class ShapeFeature(object):
def default_infer_shape(self, node, i_shapes):
"""Return a list of shape tuple or None for the outputs of node.
This function is used for Ops that don't implement infer_shape.
Ops that do implement infer_shape should use the i_shapes parameter,
but this default implementation ignores it.
......@@ -746,7 +746,7 @@ class ShapeFeature(object):
def unpack(self, s_i):
"""Return a symbolic integer scalar for the shape element s_i.
The s_i argument was produced by the infer_shape() of an Op subclass.
"""
# unpack the s_i that the Op returned
......@@ -777,7 +777,7 @@ class ShapeFeature(object):
def set_shape(self, r, s):
"""Assign the shape `s` to previously un-shaped variable `r`.
:type r: a variable
:type s: None or a tuple of symbolic integers
"""
......@@ -1948,6 +1948,21 @@ compile.optdb.register('local_inplace_incsubtensor1',
failure_callback=TopoOptimizer.warn_inplace),
60, 'fast_run', 'inplace') # DEBUG
@gof.local_optimizer([None])
def local_inplace_remove0(node):
    """Substitute the in-place version of Remove0 for the pure one.

    Returns a one-element list with the replacement node, or False
    when the optimization does not apply.
    """
    op = node.op
    if not isinstance(op, theano.sparse.sandbox.sp.Remove0):
        return False
    if op.inplace:
        # Already the in-place variant; nothing to do.
        return False
    inplace_op = op.__class__(inplace=True)
    return [inplace_op(*node.inputs)]
# Register the in-place substitution at position 60 (late, with the
# other inplace optimizations) under the 'fast_run' and 'inplace' tags.
compile.optdb.register('local_inplace_remove0',
                       TopoOptimizer(local_inplace_remove0,
                                     failure_callback=TopoOptimizer.warn_inplace), 60,
                       'fast_run', 'inplace')
@register_canonicalize
@register_stabilize
......
Markdown formatting is supported.
0%
You are adding 0 people to this discussion. Please proceed with caution.
Finish editing this comment first!
Register or sign in to post a comment.