Commit b1c43646 authored by David Warde-Farley

Merge.

......@@ -6,11 +6,10 @@ To read about different sparse formats, see U{http://www-users.cs.umn.edu/~saad/
@todo: Automatic methods for determining best sparse format?
"""
import sys, operator
import sys
import numpy, theano
import scipy.sparse
from theano.printing import Print
from theano import gof
from theano import tensor
......@@ -101,6 +100,15 @@ def as_sparse_variable(x, name=None):
as_sparse = as_sparse_variable
def as_sparse_or_tensor_variable(x, name=None):
    """Coerce `x` into a Theano variable, preferring a sparse one.

    First attempts sparse conversion via `as_sparse_variable`; when that
    conversion rejects the input, falls back to building a dense tensor
    variable instead.
    """
    try:
        return as_sparse_variable(x, name)
    except (ValueError, TypeError):
        # Not representable as sparse -- hand it to the tensor machinery.
        return theano.tensor.as_tensor_variable(x, name)
def constant(x, name=None):
if not isinstance(x, scipy.sparse.spmatrix):
......@@ -610,7 +618,7 @@ class AddSD(gof.op.Op):
def grad(self, (x, y), (gz,)):
assert _is_sparse_variable(x) and _is_dense_variable(y)
assert _is_dense_variable(gz)
return sp_one_like(x) * gz, gz
return sp_ones_like(x) * gz, gz
add_s_d = AddSD()
def add(x,y):
"""
......@@ -633,7 +641,7 @@ def sub(x,y):
class MulSS(gof.op.Op):
''' Elementwise multiply a sparse and a ndarray '''
''' Elementwise multiply a sparse and a sparse '''
def __eq__(self, other):
return (type(self) == type(other))
def __hash__(self):
......@@ -663,6 +671,12 @@ class MulSD(gof.op.Op):
return hash(type(self))
def make_node(self, x, y):
x, y = as_sparse_variable(x), tensor.as_tensor_variable(y)
#upcast the tensor. Is the cast of sparse done implemented?
dtype = scalar.upcast(x.type.dtype, y.type.dtype)
if y.type.dtype != dtype:
y = tensor.cast(y,dtype)
if x.type.dtype != y.type.dtype:
raise NotImplementedError()
# The magic number two here arises because L{scipy.sparse}
......@@ -720,8 +734,8 @@ def mul(x,y):
"""
Multiply (elementwise) two matrices, at least one of which is sparse.
"""
if hasattr(x, 'getnnz'): x = as_sparse_variable(x)
if hasattr(y, 'getnnz'): y = as_sparse_variable(y)
x = as_sparse_or_tensor_variable(x)
y = as_sparse_or_tensor_variable(y)
x_is_sparse_variable = _is_sparse_variable(x)
y_is_sparse_variable = _is_sparse_variable(y)
......
......@@ -18,7 +18,7 @@ if enable_sparse == False:
from theano.sparse.basic import _is_dense, _is_sparse, _is_dense_variable, _is_sparse_variable
from theano.sparse.basic import _mtypes
from theano.sparse import as_sparse_variable, CSC, CSR, CSM, CSMProperties, SparseType, StructuredDotCSC
from theano.sparse import add, structured_dot, transpose
from theano.sparse import add, mul, structured_dot, transpose
from theano.sparse import csc_from_dense, csr_from_dense, dense_from_sparse
from theano.tests import unittest_tools as utt
......@@ -72,22 +72,43 @@ class T_transpose(unittest.TestCase):
vta = eval_outputs([ta])
self.failUnless(vta.shape == (3,5))
class T_Add(unittest.TestCase):
def testSS(self):
class T_AddMul(unittest.TestCase):
def testAddSS(self):
self._testSS(add)
def testAddSD(self):
self._testSD(add)
def testAddDS(self):
self._testDS(add)
def testMulSS(self):
self._testSS(mul,
numpy.array([[1., 0], [3, 0], [0, 6]]),
numpy.array([[1., 0], [3, 0], [0, 6]]))
def testMulSD(self):
self._testSD(mul,
numpy.array([[1., 0], [3, 0], [0, 6]]),
numpy.array([[1., 0], [3, 0], [0, 6]]))
def testMulDS(self):
self._testDS(mul,
numpy.array([[1., 0], [3, 0], [0, 6]]),
numpy.array([[1., 0], [3, 0], [0, 6]]))
def _testSS(self, op, array1 = numpy.array([[1., 0], [3, 0], [0, 6]]),
array2 = numpy.asarray([[0, 2.], [0, 4], [5, 0]])):
for mtype in _mtypes:
a = mtype(numpy.array([[1., 0], [3, 0], [0, 6]]))
a = mtype(array1)
aR = as_sparse_variable(a)
self.failIf(aR.data is a)
self.failUnless(_is_sparse(a))
self.failUnless(_is_sparse_variable(aR))
b = mtype(numpy.asarray([[0, 2.], [0, 4], [5, 0]]))
b = mtype(array2)
bR = as_sparse_variable(b)
self.failIf(bR.data is b)
self.failUnless(_is_sparse(b))
self.failUnless(_is_sparse_variable(bR))
apb = add(aR, bR)
apb = op(aR, bR)
self.failUnless(_is_sparse_variable(apb))
self.failUnless(apb.type.dtype == aR.type.dtype, apb.type.dtype)
......@@ -97,58 +118,77 @@ class T_Add(unittest.TestCase):
val = eval_outputs([apb])
self.failUnless(val.shape == (3,2))
self.failUnless(numpy.all(val.todense() == (a + b).todense()))
self.failUnless(numpy.all(val.todense() == numpy.array([[1., 2], [3, 4], [5, 6]])))
def testSD(self):
if op is add:
self.failUnless(numpy.all(val.todense() == (a + b).todense()))
self.failUnless(numpy.all(val.todense() == numpy.array([[1., 2], [3, 4], [5, 6]])))
elif op is mul:
self.failUnless(numpy.all(val.todense() == (a.multiply(b)).todense()))
self.failUnless(numpy.all(val.todense() == numpy.array([[1, 0], [9, 0], [0, 36]])))
def _testSD(self, op, array1 = numpy.array([[1., 0], [3, 0], [0, 6]]),
array2 = numpy.asarray([[0, 2.], [0, 4], [5, 0]])):
for mtype in _mtypes:
a = numpy.array([[1., 0], [3, 0], [0, 6]])
a = numpy.array(array1)
aR = tensor.as_tensor_variable(a)
self.failIf(aR.data is a) #constants are copied
self.failUnless(_is_dense(a))
self.failUnless(_is_dense_variable(aR))
b = mtype(numpy.asarray([[0, 2.], [0, 4], [5, 0]]))
b = mtype(array2)
bR = as_sparse_variable(b)
self.failIf(bR.data is b) #constants are copied
self.failUnless(_is_sparse(b))
self.failUnless(_is_sparse_variable(bR))
apb = add(aR, bR)
self.failUnless(_is_dense_variable(apb))
apb = op(aR, bR)
self.failUnless(apb.type.dtype == aR.type.dtype, apb.type.dtype)
self.failUnless(apb.type.dtype == bR.type.dtype, apb.type.dtype)
val = eval_outputs([apb])
self.failUnless(val.shape == (3, 2))
self.failUnless(numpy.all(val == (a + b)))
self.failUnless(numpy.all(val == numpy.array([[1., 2], [3, 4], [5, 6]])))
def testDS(self):
if op is add:
self.failUnless(_is_dense_variable(apb))
self.failUnless(numpy.all(val == (a + b)))
self.failUnless(numpy.all(val == numpy.array([[1., 2], [3, 4], [5, 6]])))
elif op is mul:
self.failUnless(_is_sparse_variable(apb))
self.failUnless(numpy.all(val.todense() == (b.multiply(a))))
self.failUnless(numpy.all(val.todense() == numpy.array([[1, 0],
[9, 0], [0, 36]])))
def _testDS(self, op, array1 = numpy.array([[1., 0], [3, 0], [0, 6]]),
array2 = numpy.asarray([[0, 2.], [0, 4], [5, 0]])):
for mtype in _mtypes:
a = mtype(numpy.array([[1., 0], [3, 0], [0, 6]]))
a = mtype(array1)
aR = as_sparse_variable(a)
self.failIf(aR.data is a)
self.failUnless(_is_sparse(a))
self.failUnless(_is_sparse_variable(aR))
b = numpy.asarray([[0, 2.], [0, 4], [5, 0]])
b = numpy.asarray(array2)
bR = tensor.as_tensor_variable(b)
self.failIf(bR.data is b)
self.failUnless(_is_dense(b))
self.failUnless(_is_dense_variable(bR))
apb = add(aR, bR)
self.failUnless(_is_dense_variable(apb))
apb = op(aR, bR)
self.failUnless(apb.type.dtype == aR.type.dtype, apb.type.dtype)
self.failUnless(apb.type.dtype == bR.type.dtype, apb.type.dtype)
val = eval_outputs([apb])
self.failUnless(val.shape == (3, 2))
self.failUnless(numpy.all(val == (a + b)))
self.failUnless(numpy.all(val == numpy.array([[1., 2], [3, 4], [5, 6]])))
if op is add:
self.failUnless(_is_dense_variable(apb))
self.failUnless(numpy.all(val == (a + b)))
self.failUnless(numpy.all(val == numpy.array([[1., 2], [3, 4], [5, 6]])))
elif op is mul:
self.failUnless(_is_sparse_variable(apb))
self.failUnless(numpy.all(val.todense() == (a.multiply(b))))
self.failUnless(numpy.all(val.todense() == numpy.array([[1, 0],
[9, 0], [0, 36]])))
class T_conversion(unittest.TestCase):
def setUp(self):
......
Markdown format supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment