提交 87c38921 authored 作者: lamblin's avatar lamblin

Merge pull request #557 from nouiz/mixed

Mixed
......@@ -2,6 +2,10 @@
Updates in the Trunk since the last release:
Sparse Sandbox bugfix
* Fix the grad of theano.sparse.sandbox.sp.row_scale. It didn't
return the right number of elements. (Frederic B.)
Documentation
* Added to the tutorial documentation on how to extend Theano.
This explains how to make a Theano Op from a Python function.
......@@ -35,6 +39,10 @@ New Features
* MRG random now raises an error with a clear message when the passed shape
contains dimensions with bad value like 0. (Frédéric B. reported by Ian G.)
Sparse
* Implement theano.sparse.mul(sparse1, sparse2) when both inputs don't
have the same sparsity pattern. (Frederic B.)
Sparse Sandbox graduate
* Remove0 op: it removes stored elements with value 0. (Frederic B.)
......
......@@ -542,7 +542,7 @@ def debugprint(r, prefix='', depth=-1, done=None, print_type=False,
def get_id_str(obj):
if obj in done:
id_str = "[@%s]" % done[obj]
id_str = done[obj]
elif ids == "id":
id_str = "[@%s]" % str(id(r))
elif ids == "int":
......
......@@ -18,7 +18,7 @@ import numpy
import theano
from theano.gof import Op, Apply, local_optimizer, EquilibriumDB
from theano.sandbox.cuda import GpuElemwise, CudaNdarrayType
from theano.sandbox.cuda import GpuElemwise, CudaNdarrayType, GpuOp
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable, gpu_contiguous
from theano.sandbox.cuda.opt import gpu_seqopt
......@@ -71,7 +71,7 @@ class TheanoElementwiseKernel(pycuda.elementwise.ElementwiseKernel):
self.func.prepared_call(_grid, *invocation_args)
class PycudaElemwiseSourceModuleOp(Op):
class PycudaElemwiseSourceModuleOp(GpuOp):
nin = property(lambda self: self.scalar_op.nin)
nout = property(lambda self: self.scalar_op.nout)
......@@ -145,7 +145,7 @@ class PycudaElemwiseSourceModuleOp(Op):
self.pycuda_fct(inputs[0], inputs[1], z[0], numpy.intc(inputs[1].size), block=block, grid=grid)
class PycudaElemwiseKernelOp(Op):
class PycudaElemwiseKernelOp(GpuOp):
nin = property(lambda self: self.scalar_op.nin)
nout = property(lambda self: self.scalar_op.nout)
......
......@@ -118,7 +118,8 @@ def test_sum():
theano.tensor.basic.float32_rtol = 2e-5
assert _allclose(f2(val), f(val)), ('shape', shape,
'pattern', pattern,
sum([shape[i] for i in pattern]))
sum([shape[i] for i in pattern]),
f2(val), f(val), val)
finally:
theano.tensor.basic.float32_rtol = orig_rtol
......
......@@ -1180,12 +1180,9 @@ class MulSS(gof.op.Op):
assert _is_sparse(x) and _is_sparse(y)
assert len(x.shape) == 2
assert y.shape == x.shape
if (numpy.all(y.indptr == x.indptr) and
numpy.all(y.indices == x.indices)):
out[0] = y.copy()
out[0].data *= x.data
else:
raise NotImplementedError() # RowScale / ColScale
# This call the element-wise multiple
# x * y call dot...
out[0] = x.multiply(y)
def grad(self, (x, y), (gz,)):
return y * gz, x * gz
......
......@@ -263,7 +263,7 @@ class RowScaleCSC(Op):
z[0] = scipy_sparse.csc_matrix((y_data, indices, indptr), (M, N))
def grad(self, (x, s), (gz,)):
return [row_scale(gz, s), sp_sum(x * gz, axis=0)]
return [row_scale(gz, s), sp_sum(x * gz, axis=1)]
def col_scale(x, s):
......
......@@ -806,9 +806,8 @@ class SamplingDotCsr(gof.Op):
raise NotImplementedError(
'Complex types are not supported for pattern')
# TODO: why 2 times the same inputs?
dot_out = scalar.upcast(node.inputs[0].type.dtype,
node.inputs[0].type.dtype)
node.inputs[1].type.dtype)
if dot_out == "float32":
conv_type = "float"
......
......@@ -17,6 +17,7 @@ import theano
from theano.sparse.sandbox import sp
from theano.sparse.tests.test_basic import random_lil
from theano.tests import unittest_tools as utt
from theano.sparse import verify_grad_sparse
class TestSP(unittest.TestCase):
......@@ -493,19 +494,17 @@ def test_diag_grad():
utt.verify_grad(d, [diag_mat],
mode=theano.Mode(linker='py', optimizer='fast_compile'))
def test_row_scale():
x = theano.sparse.csc_dmatrix()
s = theano.tensor.dvector()
def d(x,s):
return sp.sp_sum(sp.row_scale(x, s), sparse_grad=True)
rng = numpy.random.RandomState(8723)
R = 5
C = 8
x_val_dense = numpy.zeros((R, C),dtype='d')
for idx in [(0,0), (4, 1), (2,1), (3, 3), (4, 4), (3, 7), (2, 7)]:
x_val_dense = numpy.zeros((R, C), dtype='d')
for idx in [(0, 0), (4, 1), (2, 1), (3, 3), (4, 4), (3, 7), (2, 7)]:
x_val_dense.__setitem__(idx, rng.randn())
x_val = scipy.sparse.csc_matrix(x_val_dense)
......@@ -518,25 +517,19 @@ def test_row_scale():
assert numpy.all(f(x_val, s_val).toarray() == (x_val_dense.T * s_val).T)
if 0:
tensor.verify_grad(None, d, [x_val, s_val],
mode=theano.Mode(linker='py', optimizer='fast_compile'))
else:
print >> sys.stderr, "WARNING: skipping gradient test because verify_grad doesn't support sparse arguments"
verify_grad_sparse(sp.row_scale, [x_val, s_val], structured=False)
def test_col_scale():
x = theano.sparse.csc_dmatrix()
s = theano.tensor.dvector()
def d(x,s):
return sp.sp_sum(sp.col_scale(x, s), sparse_grad=True)
rng = numpy.random.RandomState(8723)
R = 5
C = 8
x_val_dense = numpy.zeros((R, C),dtype='d')
for idx in [(0,0), (4, 1), (2,1), (3, 3), (4, 4), (3, 7), (2, 7)]:
x_val_dense = numpy.zeros((R, C), dtype='d')
for idx in [(0, 0), (4, 1), (2, 1), (3, 3), (4, 4), (3, 7), (2, 7)]:
x_val_dense.__setitem__(idx, rng.randn())
x_val = scipy.sparse.csc_matrix(x_val_dense)
......@@ -549,11 +542,7 @@ def test_col_scale():
assert numpy.all(f(x_val, s_val).toarray() == (x_val_dense * s_val))
if 0:
tensor.verify_grad(None, d, [x_val, s_val],
mode=theano.Mode(linker='py', optimizer='fast_compile'))
else:
print >> sys.stderr, "WARNING: skipping gradient test because verify_grad doesn't support sparse arguments"
verify_grad_sparse(sp.col_scale, [x_val, s_val], structured=False)
if __name__ == '__main__':
if 0:
......
......@@ -323,17 +323,17 @@ class T_AddMul(unittest.TestCase):
def testMulSS(self):
self._testSS(mul,
numpy.array([[1., 0], [3, 0], [0, 6]]),
numpy.array([[1., 0], [3, 0], [0, 6]]))
numpy.array([[1., 2], [3, 0], [0, 6]]))
def testMulSD(self):
self._testSD(mul,
numpy.array([[1., 0], [3, 0], [0, 6]]),
numpy.array([[1., 0], [3, 0], [0, 6]]))
numpy.array([[1., 2], [3, 0], [0, 6]]))
def testMulDS(self):
self._testDS(mul,
numpy.array([[1., 0], [3, 0], [0, 6]]),
numpy.array([[1., 0], [3, 0], [0, 6]]))
numpy.array([[1., 2], [3, 0], [0, 6]]))
def _testSS(self, op, array1=numpy.array([[1., 0], [3, 0], [0, 6]]),
array2=numpy.asarray([[0, 2.], [0, 4], [5, 0]])):
......@@ -361,15 +361,12 @@ class T_AddMul(unittest.TestCase):
val = eval_outputs([apb])
self.assertTrue(val.shape == (3, 2))
if op is add:
self.assertTrue(numpy.all(val.todense() == (a + b).todense()))
ans = numpy.array([[1., 2], [3, 4], [5, 6]])
self.assertTrue(numpy.all(val.todense() == ans))
self.assertTrue(numpy.all(val.todense() == (array1 + array2)))
verify_grad_sparse(op, [a, b], structured=False)
elif op is mul:
self.assertTrue(numpy.all(val.todense()
== (a.multiply(b)).todense()))
ans = numpy.array([[1, 0], [9, 0], [0, 36]])
self.assertTrue(numpy.all(val.todense() == ans))
== (array1 * array2)))
verify_grad_sparse(op, [a, b], structured=False)
def _testSD(self, op, array1=numpy.array([[1., 0], [3, 0], [0, 6]]),
array2=numpy.asarray([[0, 2.], [0, 4], [5, 0]])):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论