提交 b2b1d7f7 authored 作者: Pascal Lamblin's avatar Pascal Lamblin

Add tests for -1 strides in blas ops.

上级 f2693c57
...@@ -286,6 +286,14 @@ class TestVectorMatrixDot(TestCase): ...@@ -286,6 +286,14 @@ class TestVectorMatrixDot(TestCase):
assert sum([node.op is gpu_gemv_inplace for node in assert sum([node.op is gpu_gemv_inplace for node in
gpu_f2.maker.env.toposort() ]) == 1 gpu_f2.maker.env.toposort() ]) == 1
# Check double-strided m
m.set_value(
m.get_value(borrow=True, return_internal_type=True)[::-1, ::-1],
borrow=True)
assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)
def test_dot_mv(self): def test_dot_mv(self):
''' Test matrix dot vector ''' ''' Test matrix dot vector '''
v = theano.shared( numpy.array(numpy.random.rand(2), dtype='float32')) v = theano.shared( numpy.array(numpy.random.rand(2), dtype='float32'))
......
...@@ -878,6 +878,11 @@ class TestGemv(TestCase, unittest_tools.TestOptimizationMixin): ...@@ -878,6 +878,11 @@ class TestGemv(TestCase, unittest_tools.TestOptimizationMixin):
# Assert they produce the same output # Assert they produce the same output
assert numpy.allclose(f(), numpy.dot(v.get_value(), m.get_value())) assert numpy.allclose(f(), numpy.dot(v.get_value(), m.get_value()))
# Assert it works when m has no contiguous dimension
m.set_value(
m.get_value(borrow=True)[::-1, ::-1],
borrow=True)
assert numpy.allclose(f(), numpy.dot(v.get_value(), m.get_value()))
def test_dot_mv(self): def test_dot_mv(self):
...@@ -894,6 +899,11 @@ class TestGemv(TestCase, unittest_tools.TestOptimizationMixin): ...@@ -894,6 +899,11 @@ class TestGemv(TestCase, unittest_tools.TestOptimizationMixin):
# Assert they produce the same output # Assert they produce the same output
assert numpy.allclose(f(), numpy.dot(m.get_value(), v.get_value())) assert numpy.allclose(f(), numpy.dot(m.get_value(), v.get_value()))
# Assert it works when m has no contiguous dimension
m.set_value(
m.get_value(borrow=True)[::-1, ::-1],
borrow=True)
assert numpy.allclose(f(), numpy.dot(m.get_value(), v.get_value()))
@staticmethod @staticmethod
def t_gemv1(m_shp): def t_gemv1(m_shp):
...@@ -915,19 +925,30 @@ class TestGemv(TestCase, unittest_tools.TestOptimizationMixin): ...@@ -915,19 +925,30 @@ class TestGemv(TestCase, unittest_tools.TestOptimizationMixin):
assert topo[0].op.inplace==False assert topo[0].op.inplace==False
#test the inplace version #test the inplace version
f = theano.function([], [], updates={v2:v2+theano.dot(m,v1)} g = theano.function([], [], updates={v2:v2+theano.dot(m,v1)}
, mode = mode_blas_opt) , mode = mode_blas_opt)
# Assert they produce the same output # Assert they produce the same output
f() g()
assert numpy.allclose(v2.get_value(), assert numpy.allclose(v2.get_value(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig) numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
topo = f.maker.env.toposort() topo = g.maker.env.toposort()
assert len(topo)==1 assert len(topo)==1
assert isinstance(topo[0].op, Gemv) assert isinstance(topo[0].op, Gemv)
if config.mode != 'FAST_COMPILE': if config.mode != 'FAST_COMPILE':
assert topo[0].op.inplace==True assert topo[0].op.inplace==True
# Do the same tests with a matrix with strides in both dimensions
m.set_value(
m.get_value(borrow=True)[::-1, ::-1],
borrow=True)
v2.set_value(v2_orig)
assert numpy.allclose(f(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
g()
assert numpy.allclose(v2.get_value(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
def test_gemv1(self): def test_gemv1(self):
self.t_gemv1((3,2)) self.t_gemv1((3,2))
self.t_gemv1((0,2)) self.t_gemv1((0,2))
...@@ -952,18 +973,29 @@ class TestGemv(TestCase, unittest_tools.TestOptimizationMixin): ...@@ -952,18 +973,29 @@ class TestGemv(TestCase, unittest_tools.TestOptimizationMixin):
assert topo[-1].op.inplace==False assert topo[-1].op.inplace==False
#test the inplace version #test the inplace version
f = theano.function([], [], updates={v2:v2+theano.dot(v1,m)} g = theano.function([], [], updates={v2:v2+theano.dot(v1,m)}
, mode = mode_blas_opt) , mode = mode_blas_opt)
# Assert they produce the same output # Assert they produce the same output
f() g()
assert numpy.allclose(v2.get_value(), assert numpy.allclose(v2.get_value(),
numpy.dot(v1.get_value(), m.get_value()) + v2_orig) numpy.dot(v1.get_value(), m.get_value()) + v2_orig)
topo = f.maker.env.toposort() topo = g.maker.env.toposort()
assert sum(isinstance(node.op, Gemv) for node in topo)==1 assert sum(isinstance(node.op, Gemv) for node in topo)==1
if config.mode != 'FAST_COMPILE': if config.mode != 'FAST_COMPILE':
assert topo[-1].op.inplace==True assert topo[-1].op.inplace==True
# Do the same tests with a matrix with strides in both dimensions
m.set_value(
m.get_value(borrow=True)[::-1, ::-1],
borrow=True)
v2.set_value(v2_orig)
assert numpy.allclose(f(),
numpy.dot(v1.get_value(), m.get_value()) + v2.get_value())
g()
assert numpy.allclose(v2.get_value(),
numpy.dot(v1.get_value(), m.get_value()) + v2_orig)
def test_gemv_dimensions(self): def test_gemv_dimensions(self):
A = T.matrix('A') A = T.matrix('A')
x, y = T.vectors('x', 'y') x, y = T.vectors('x', 'y')
...@@ -984,6 +1016,7 @@ class TestGemv(TestCase, unittest_tools.TestOptimizationMixin): ...@@ -984,6 +1016,7 @@ class TestGemv(TestCase, unittest_tools.TestOptimizationMixin):
ones_6 = numpy.ones(6, dtype=config.floatX) ones_6 = numpy.ones(6, dtype=config.floatX)
f(A_val, ones_3, ones_5) f(A_val, ones_3, ones_5)
f(A_val[::-1, ::-1], ones_3, ones_5)
self.assertRaises(ValueError, f, A_val, ones_4, ones_5) self.assertRaises(ValueError, f, A_val, ones_4, ones_5)
self.assertRaises(ValueError, f, A_val, ones_3, ones_6) self.assertRaises(ValueError, f, A_val, ones_3, ones_6)
self.assertRaises(ValueError, f, A_val, ones_4, ones_6) self.assertRaises(ValueError, f, A_val, ones_4, ones_6)
...@@ -1144,6 +1177,46 @@ class BaseGemv(object): ...@@ -1144,6 +1177,46 @@ class BaseGemv(object):
oy_v = oy_func() oy_v = oy_func()
assert_array_almost_equal(desired_oy, oy_v) assert_array_almost_equal(desired_oy, oy_v)
def test_a_strides(self):
    """Gemv must give correct results when `a` has negative strides
    on both dimensions (no contiguous dimension)."""
    data = self.get_data()
    alpha_v, beta_v, a_v, x_v, y_v = data
    alpha, beta, a, x, y = map(shared, data)

    # Flip `a` along both axes so the shared value has -1 strides in
    # each dimension; mirror the same flip on the numpy copy so the
    # reference result matches.
    a_v = a_v[::-1, ::-1]
    internal = a.get_value(borrow=True, return_internal_type=True)
    a.set_value(internal[::-1, ::-1], borrow=True)

    # Reference value computed with numpy on the flipped array.
    expected = alpha_v * matrixmultiply(a_v, x_v) + beta_v * y_v

    out = alpha * T.dot(a, x) + beta * y
    fn = theano.function([], out, mode=self.mode)

    # The optimized graph must contain exactly one gemv node.
    self.assertFunctionContains1(fn, self.gemv)

    assert_array_almost_equal(expected, fn())
def test_a_strides_transpose(self):
    """Gemv must give correct results when the transposed `a` has
    negative strides on both dimensions (no contiguous dimension)."""
    data = self.get_data()
    alpha_v, beta_v, a_v, x_v, y_v = data
    alpha, beta, a, x, y = map(shared, data)

    # Flip `a` along both axes so the shared value has -1 strides in
    # each dimension; mirror the same flip on the numpy copy so the
    # reference result matches.
    a_v = a_v[::-1, ::-1]
    internal = a.get_value(borrow=True, return_internal_type=True)
    a.set_value(internal[::-1, ::-1], borrow=True)

    # Reference value computed with numpy on the transposed flipped array.
    expected = alpha_v * matrixmultiply(transpose(a_v), x_v) + beta_v * y_v

    out = alpha * T.dot(a.T, x) + beta * y
    fn = theano.function([], out, mode=self.mode)

    # The optimized graph must contain exactly one gemv node.
    self.assertFunctionContains1(fn, self.gemv)

    assert_array_almost_equal(expected, fn())
def test_upcasting_scalar_nogemv(self): def test_upcasting_scalar_nogemv(self):
# Test that the optimization does not crash when the scale has # Test that the optimization does not crash when the scale has
# an incorrect dtype, and forces upcasting of the result # an incorrect dtype, and forces upcasting of the result
...@@ -1332,6 +1405,9 @@ class TestGer(TestCase, unittest_tools.TestOptimizationMixin): ...@@ -1332,6 +1405,9 @@ class TestGer(TestCase, unittest_tools.TestOptimizationMixin):
f(numpy.random.rand(5, 4).astype(self.dtype), f(numpy.random.rand(5, 4).astype(self.dtype),
numpy.random.rand(5).astype(self.dtype), numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype)) numpy.random.rand(4).astype(self.dtype))
f(numpy.random.rand(5, 4).astype(self.dtype)[::-1, ::-1],
numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype))
def test_A_plus_scaled_outer(self): def test_A_plus_scaled_outer(self):
f = self.function([self.A, self.x, self.y], f = self.function([self.A, self.x, self.y],
...@@ -1340,6 +1416,9 @@ class TestGer(TestCase, unittest_tools.TestOptimizationMixin): ...@@ -1340,6 +1416,9 @@ class TestGer(TestCase, unittest_tools.TestOptimizationMixin):
f(numpy.random.rand(5, 4).astype(self.dtype), f(numpy.random.rand(5, 4).astype(self.dtype),
numpy.random.rand(5).astype(self.dtype), numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype)) numpy.random.rand(4).astype(self.dtype))
f(numpy.random.rand(5, 4).astype(self.dtype)[::-1, ::-1],
numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype))
def test_scaled_A_plus_scaled_outer(self): def test_scaled_A_plus_scaled_outer(self):
f = self.function([self.A, self.x, self.y], f = self.function([self.A, self.x, self.y],
...@@ -1352,6 +1431,9 @@ class TestGer(TestCase, unittest_tools.TestOptimizationMixin): ...@@ -1352,6 +1431,9 @@ class TestGer(TestCase, unittest_tools.TestOptimizationMixin):
f(numpy.random.rand(5, 4).astype(self.dtype), f(numpy.random.rand(5, 4).astype(self.dtype),
numpy.random.rand(5).astype(self.dtype), numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype)) numpy.random.rand(4).astype(self.dtype))
f(numpy.random.rand(5, 4).astype(self.dtype)[::-1, ::-1],
numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype))
def given_dtype(self, dtype, M, N): def given_dtype(self, dtype, M, N):
""" test corner case shape and dtype""" """ test corner case shape and dtype"""
...@@ -1362,6 +1444,9 @@ class TestGer(TestCase, unittest_tools.TestOptimizationMixin): ...@@ -1362,6 +1444,9 @@ class TestGer(TestCase, unittest_tools.TestOptimizationMixin):
f(numpy.random.rand(M, N).astype(self.dtype), f(numpy.random.rand(M, N).astype(self.dtype),
numpy.random.rand(M).astype(self.dtype), numpy.random.rand(M).astype(self.dtype),
numpy.random.rand(N).astype(self.dtype)) numpy.random.rand(N).astype(self.dtype))
f(numpy.random.rand(M, N).astype(self.dtype)[::-1, ::-1],
numpy.random.rand(M).astype(self.dtype),
numpy.random.rand(N).astype(self.dtype))
def test_f32_0_0(self): def test_f32_0_0(self):
return self.given_dtype('float32', 0, 0) return self.given_dtype('float32', 0, 0)
...@@ -1401,3 +1486,7 @@ class TestGer(TestCase, unittest_tools.TestOptimizationMixin): ...@@ -1401,3 +1486,7 @@ class TestGer(TestCase, unittest_tools.TestOptimizationMixin):
self.assertFunctionContains(f, self.ger_destructive) self.assertFunctionContains(f, self.ger_destructive)
f(numpy.random.rand(4).astype(self.dtype), f(numpy.random.rand(4).astype(self.dtype),
numpy.random.rand(5).astype(self.dtype)) numpy.random.rand(5).astype(self.dtype))
A.set_value(A.get_value(borrow=True)[::-1, ::-1], borrow=True)
f(numpy.random.rand(4).astype(self.dtype),
numpy.random.rand(5).astype(self.dtype))
...@@ -41,7 +41,8 @@ class TestCGer(TestCase, TestOptimizationMixin): ...@@ -41,7 +41,8 @@ class TestCGer(TestCase, TestOptimizationMixin):
) )
def run_f(self, f): def run_f(self, f):
return f(self.Aval, self.xval, self.yval) f(self.Aval, self.xval, self.yval)
f(self.Aval[::-1, ::-1], self.xval[::-1], self.yval[::-1])
def b(self, bval): def b(self, bval):
return tensor.as_tensor_variable(numpy.asarray(bval, dtype=self.dtype)) return tensor.as_tensor_variable(numpy.asarray(bval, dtype=self.dtype))
...@@ -132,6 +133,10 @@ class TestCGemv(TestCase, TestOptimizationMixin): ...@@ -132,6 +133,10 @@ class TestCGemv(TestCase, TestOptimizationMixin):
assert numpy.allclose(f(self.xval, self.Aval), assert numpy.allclose(f(self.xval, self.Aval),
numpy.dot(self.xval, self.Aval)) numpy.dot(self.xval, self.Aval))
# Test with negative strides on 2 dims
assert numpy.allclose(f(self.xval, self.Aval[::-1, ::-1]),
numpy.dot(self.xval, self.Aval[::-1, ::-1]))
def test_optimizations_mv(self): def test_optimizations_mv(self):
''' Test matrix dot vector ''' ''' Test matrix dot vector '''
f = theano.function([self.A, self.y], f = theano.function([self.A, self.y],
...@@ -145,6 +150,10 @@ class TestCGemv(TestCase, TestOptimizationMixin): ...@@ -145,6 +150,10 @@ class TestCGemv(TestCase, TestOptimizationMixin):
# Assert they produce the same output # Assert they produce the same output
assert numpy.allclose(f(self.Aval, self.yval), assert numpy.allclose(f(self.Aval, self.yval),
numpy.dot(self.Aval, self.yval)) numpy.dot(self.Aval, self.yval))
# Test with negative strides on 2 dims
assert numpy.allclose(f(self.Aval[::-1, ::-1], self.yval),
numpy.dot(self.Aval[::-1, ::-1], self.yval))
def t_gemv1(self, m_shp): def t_gemv1(self, m_shp):
''' test vector2 + dot(matrix, vector1) ''' ''' test vector2 + dot(matrix, vector1) '''
...@@ -164,17 +173,28 @@ class TestCGemv(TestCase, TestOptimizationMixin): ...@@ -164,17 +173,28 @@ class TestCGemv(TestCase, TestOptimizationMixin):
assert topo == [CGemv(inplace=False)], topo assert topo == [CGemv(inplace=False)], topo
#test the inplace version #test the inplace version
f = theano.function([], [], g = theano.function([], [],
updates={v2:v2+theano.dot(m,v1)}, updates={v2:v2+theano.dot(m,v1)},
mode=self.mode) mode=self.mode)
# Assert they produce the same output # Assert they produce the same output
f() g()
assert numpy.allclose(v2.get_value(), assert numpy.allclose(v2.get_value(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig) numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
topo = [n.op for n in f.maker.env.toposort()] topo = [n.op for n in g.maker.env.toposort()]
assert topo == [CGemv(inplace=True)] assert topo == [CGemv(inplace=True)]
# Do the same tests with a matrix with strides in both dimensions
m.set_value(
m.get_value(borrow=True)[::-1, ::-1],
borrow=True)
v2.set_value(v2_orig)
assert numpy.allclose(f(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
g()
assert numpy.allclose(v2.get_value(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
def test_gemv1(self): def test_gemv1(self):
self.t_gemv1((3,2)) self.t_gemv1((3,2))
self.t_gemv1((0,2)) self.t_gemv1((0,2))
...@@ -200,6 +220,7 @@ class TestCGemv(TestCase, TestOptimizationMixin): ...@@ -200,6 +220,7 @@ class TestCGemv(TestCase, TestOptimizationMixin):
ones_6 = numpy.ones(6, dtype=dtype) ones_6 = numpy.ones(6, dtype=dtype)
f(A_val, ones_3, ones_5) f(A_val, ones_3, ones_5)
f(A_val[::-1, ::-1], ones_3, ones_5)
self.assertRaises(ValueError, f, A_val, ones_4, ones_5) self.assertRaises(ValueError, f, A_val, ones_4, ones_5)
self.assertRaises(ValueError, f, A_val, ones_3, ones_6) self.assertRaises(ValueError, f, A_val, ones_3, ones_6)
self.assertRaises(ValueError, f, A_val, ones_4, ones_6) self.assertRaises(ValueError, f, A_val, ones_4, ones_6)
......
...@@ -30,6 +30,7 @@ class TestScipyGer(TestCase, TestOptimizationMixin): ...@@ -30,6 +30,7 @@ class TestScipyGer(TestCase, TestOptimizationMixin):
def run_f(self, f): def run_f(self, f):
f(self.Aval, self.xval, self.yval) f(self.Aval, self.xval, self.yval)
f(self.Aval[::-1, ::-1], self.xval[::-1], self.yval[::-1])
def b(self, bval): def b(self, bval):
return tensor.as_tensor_variable(numpy.asarray(bval, dtype=self.dtype)) return tensor.as_tensor_variable(numpy.asarray(bval, dtype=self.dtype))
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论