Commit ae90c5a2 authored by James Bergstra

fixed sparse tests, added structureddotcsc optimization

Parent 7b396f11
......@@ -440,6 +440,7 @@ class AddSS(gof.op.Op):
format = x.type.format).make_result()])
def perform(self, node, (x, y), (out, )):
    """Compute the element-wise sum of two sparse matrices.

    Stores ``x + y`` (scipy sparse addition) into the single output
    storage cell ``out[0]``.  Python 2 tuple-parameter syntax.
    """
    # Both operands must already be scipy sparse matrices.
    assert _is_sparse(x) and _is_sparse(y)
    # Element-wise addition is only defined for identically-shaped operands.
    assert x.shape == y.shape
    out[0] = x + y
def grad(self, (x, y), (gz,)):
assert _is_sparse_result(x) and _is_sparse_result(y)
......@@ -626,7 +627,7 @@ class TrueDot(gof.op.Op):
def grad(self, (x, y), (gz,)):
assert _is_sparse_result(gz)
assert _is_sparse_result(x)
rval = [dot(gz, y.T), dot(x.T, gz)]
rval = [true_dot(gz, y.T), true_dot(x.T, gz)]
if _is_dense_result(y):
if self.grad_preserves_dense:
rval[1] = dense_from_sparse(rval[1])
......@@ -649,10 +650,10 @@ def true_dot(x, y, grad_preserves_dense=True):
if not x_is_sparse_result and not y_is_sparse_result:
raise TypeError()
if x_is_sparse_result:
return Dot(grad_preserves_dense)(x, y)
return TrueDot(grad_preserves_dense)(x, y)
else:
assert y_is_sparse_result
return transpose(Dot(grad_preserves_dense)(y.T, x.T))
return transpose(TrueDot(grad_preserves_dense)(y.T, x.T))
###############
......@@ -845,6 +846,17 @@ class StructuredDotCSC(gof.Op):
sd_csc = StructuredDotCSC()

@gof.local_optimizer([_structured_dot])
def local_structured_dot_csc(node):
    """Specialize StructuredDot into StructuredDotCSC.

    When the left operand of a ``_structured_dot`` node is stored in CSC
    format, rewrite the node to the specialized ``sd_csc`` op, which works
    directly on the raw CSC components (data, indices, indptr) plus the
    row count.  Returns ``False`` when the node does not match.
    """
    if node.op != _structured_dot:
        return False
    lhs, rhs = node.inputs
    if lhs.type.format != 'csc':
        return False
    # Decompose the sparse operand into its CSC building blocks.
    data, indices, indptr, shape = csm_properties(lhs)
    nrows = shape[0]
    return [sd_csc(data, indices, indptr, nrows, rhs)]

register_specialize(local_structured_dot_csc)
class StructuredDotGrad(gof.Op):
def make_node(self, a, b, g_ab):
......
......@@ -147,7 +147,7 @@ class T_conversion(unittest.TestCase):
self.failUnless(numpy.all(val[0] == [1,0,0,0,0]))
class test_dot(unittest.TestCase):
class test_true_dot(unittest.TestCase):
def setUp(self):
numpy.random.seed(44)
......@@ -161,7 +161,7 @@ class test_dot(unittest.TestCase):
xT = x.T
self.failUnless(_is_sparse_result(xT))
zop = dot(x,xT)
zop = true_dot(x,xT)
self.failUnless(_is_sparse_result(zop))
z = eval_outputs([zop])
self.failUnless(_is_sparse(z))
......@@ -192,7 +192,7 @@ class test_dot(unittest.TestCase):
y = tensor.as_tensor([[1., 2], [3, 4], [2, 1]])
self.failUnless(_is_dense_result(y))
zop = dot(x,y)
zop = true_dot(x,y)
self.failUnless(_is_sparse_result(zop))
z = eval_outputs([zop])
self.failUnless(_is_sparse(z))
......@@ -228,8 +228,8 @@ class test_dot(unittest.TestCase):
x.data = x.data.T
y.data = y.data.T
# zop = dot(y, x)
zop = transpose(dot(y, x))
# zop = true_dot(y, x)
zop = transpose(true_dot(y, x))
self.failUnless(_is_sparse_result(zop))
z = eval_outputs([zop])
self.failUnless(_is_sparse(z))
......@@ -258,8 +258,8 @@ class test_dot(unittest.TestCase):
for mtype in _mtypes:
x = tensor.matrix('x') #Tensor('float64', broadcastable=[False,False], name='x')
w = Sparse(dtype = 'float64', format = _mtype_to_str[mtype]).make_result()
xw = dense_from_sparse(dot(w, x))
y = dense_from_sparse(dot(w.T, xw))
xw = dense_from_sparse(true_dot(w, x))
y = dense_from_sparse(true_dot(w.T, xw))
diff = x-y
loss = tensor.sum(tensor.sqr(diff))
gw = tensor.grad(loss, w)
......@@ -285,8 +285,8 @@ class test_dot(unittest.TestCase):
for mtype in _mtypes:
x = tensor.matrix('x')
w = Sparse(dtype = 'float64', format = _mtype_to_str[mtype]).make_result()
xw = dense_from_sparse(dot(w, x))
y = dense_from_sparse(dot(w.T, xw))
xw = dense_from_sparse(true_dot(w, x))
y = dense_from_sparse(true_dot(w.T, xw))
diff = x-y
loss = tensor.sum(tensor.sqr(diff))
gw = tensor.grad(loss, w)
......
Markdown formatting is supported
0%
You are about to add 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment