Commit 421af281 authored by Pascal Lamblin

Remove redundant tests.

Parent 15e577fd
...@@ -172,363 +172,6 @@ class TestBlasStridesGpu(TestBlasStrides): ...@@ -172,363 +172,6 @@ class TestBlasStridesGpu(TestBlasStrides):
shared = staticmethod(tcn.shared_constructor) shared = staticmethod(tcn.shared_constructor)
mode = mode_with_gpu mode = mode_with_gpu
def test_dot22_strides():
    """Check GPU dot22 against numpy.dot for every stride sign/step
    combination and every transposed layout of both operands."""
    def check(shp_b, shp_c):
        # Destination buffer; each compiled function overwrites it in place.
        out_v = numpy.zeros((0, 0), dtype='float32')
        b_v = my_rand(*shp_b)
        c_v = my_rand(*shp_c)
        out = tcn.shared_constructor(out_v, 'a')
        b = tcn.shared_constructor(b_v, 'b')
        c = tcn.shared_constructor(c_v, 'c')
        b_T = tcn.shared_constructor(b_v.T, 'b.T')
        c_T = tcn.shared_constructor(c_v.T, 'c.T')
        # Pristine device-side copies, re-sliced on every iteration below.
        b_dev = b.get_value(borrow=False, return_internal_type=True)
        c_dev = c.get_value(borrow=False, return_internal_type=True)
        bT_dev = b_T.get_value(borrow=False, return_internal_type=True)
        cT_dev = c_T.get_value(borrow=False, return_internal_type=True)
        # One function per normal/transposed operand combination; all four
        # compute the same mathematical product.
        fns = [pfunc([], [], updates={out: tensor.dot(b, c)},
                     mode=mode_with_gpu),
               pfunc([], [], updates={out: tensor.dot(b, c_T.T)},
                     mode=mode_with_gpu),
               pfunc([], [], updates={out: tensor.dot(b_T.T, c)},
                     mode=mode_with_gpu),
               pfunc([], [], updates={out: tensor.dot(b_T.T, c_T.T)},
                     mode=mode_with_gpu)]
        # Try with all stride patterns, and all transposed patterns.
        for signs in itertools.product((-1, 1), repeat=4):
            for mag in (1, 2):
                sb1, sb2, sc1, sc2 = [s * mag for s in signs]
                b.set_value(b_dev.copy()[::sb1, ::sb2], borrow=True)
                c.set_value(c_dev.copy()[::sc1, ::sc2], borrow=True)
                b_T.set_value(bT_dev.copy()[::sb2, ::sb1], borrow=True)
                c_T.set_value(cT_dev.copy()[::sc2, ::sc1], borrow=True)
                # Host-side reference result.
                expected = numpy.dot(b_v[::sb1, ::sb2], c_v[::sc1, ::sc2])
                for f in fns:
                    f()
                    assert numpy.allclose(out.get_value(), expected)

    # Regular, unit-dimension and zero-dimension shapes.
    for shp_b, shp_c in [((3, 4), (4, 5)), ((1, 4), (4, 5)),
                         ((3, 4), (4, 1)), ((3, 1), (1, 1)),
                         ((1, 4), (4, 1)), ((3, 1), (1, 5)),
                         ((0, 4), (4, 5)), ((0, 4), (4, 1)),
                         ((0, 1), (1, 5)), ((3, 4), (4, 0)),
                         ((3, 0), (0, 5)), ((0, 4), (4, 0)),
                         ((0, 0), (0, 0))]:
        check(shp_b, shp_c)
def test_dot22scalar_strides():
    """Check GPU dot22scalar (scalar * dot(b, c)) against numpy for every
    stride sign/step combination and every transposed operand layout."""
    def check(shp_b, shp_c):
        # Destination buffer; each compiled function overwrites it in place.
        out_v = numpy.zeros((0, 0), dtype='float32')
        b_v = my_rand(*shp_b)
        c_v = my_rand(*shp_c)
        alpha = numpy.float32(0.2)
        out = tcn.shared_constructor(out_v, 'a')
        b = tcn.shared_constructor(b_v, 'b')
        c = tcn.shared_constructor(c_v, 'c')
        b_T = tcn.shared_constructor(b_v.T, 'b.T')
        c_T = tcn.shared_constructor(c_v.T, 'c.T')
        # Pristine device-side copies, re-sliced on every iteration below.
        b_dev = b.get_value(borrow=False, return_internal_type=True)
        c_dev = c.get_value(borrow=False, return_internal_type=True)
        bT_dev = b_T.get_value(borrow=False, return_internal_type=True)
        cT_dev = c_T.get_value(borrow=False, return_internal_type=True)
        # One function per normal/transposed operand combination; all four
        # compute the same scaled product.
        fns = [pfunc([], [], updates={out: alpha * tensor.dot(b, c)},
                     mode=mode_with_gpu),
               pfunc([], [], updates={out: alpha * tensor.dot(b, c_T.T)},
                     mode=mode_with_gpu),
               pfunc([], [], updates={out: alpha * tensor.dot(b_T.T, c)},
                     mode=mode_with_gpu),
               pfunc([], [], updates={out: alpha * tensor.dot(b_T.T, c_T.T)},
                     mode=mode_with_gpu)]
        # Try with all stride patterns, and all transposed patterns.
        for signs in itertools.product((-1, 1), repeat=4):
            for mag in (1, 2):
                sb1, sb2, sc1, sc2 = [s * mag for s in signs]
                b.set_value(b_dev.copy()[::sb1, ::sb2], borrow=True)
                c.set_value(c_dev.copy()[::sc1, ::sc2], borrow=True)
                b_T.set_value(bT_dev.copy()[::sb2, ::sb1], borrow=True)
                c_T.set_value(cT_dev.copy()[::sc2, ::sc1], borrow=True)
                # Host-side reference result.
                expected = alpha * numpy.dot(b_v[::sb1, ::sb2],
                                             c_v[::sc1, ::sc2])
                for f in fns:
                    f()
                    assert numpy.allclose(out.get_value(), expected)

    # Regular, unit-dimension and zero-dimension shapes.
    for shp_b, shp_c in [((3, 4), (4, 5)), ((1, 4), (4, 5)),
                         ((3, 4), (4, 1)), ((3, 1), (1, 1)),
                         ((1, 4), (4, 1)), ((3, 1), (1, 5)),
                         ((0, 4), (4, 5)), ((0, 4), (4, 1)),
                         ((0, 1), (1, 5)), ((3, 4), (4, 0)),
                         ((3, 0), (0, 5)), ((0, 4), (4, 0)),
                         ((0, 0), (0, 0))]:
        check(shp_b, shp_c)
def test_gemm_strides():
    # GEMM update (a <- l * a + dot(b, c)) on the GPU, checked against numpy
    # for every stride sign/step combination and every transposed layout of
    # the output and both operands.
    def cmp(a_shp, b_shp, c_shp):
        # Host reference values.
        av = my_rand(*a_shp)
        bv = my_rand(*b_shp)
        cv = my_rand(*c_shp)
        l = numpy.float32(0.2)
        # Shared variables for the output and operands, plus their
        # transposes stored as separate shared variables.
        a = tcn.shared_constructor(av, 'a')
        b = tcn.shared_constructor(bv, 'b')
        c = tcn.shared_constructor(cv, 'c')
        a_t = tcn.shared_constructor(av.T, 'a.T')
        b_t = tcn.shared_constructor(bv.T, 'b.T')
        c_t = tcn.shared_constructor(cv.T, 'c.T')
        # Pristine device-side copies; each loop iteration re-slices these so
        # the shared variables get fresh strided views.
        a_gpu = a.get_value(borrow=False, return_internal_type=True)
        b_gpu = b.get_value(borrow=False, return_internal_type=True)
        c_gpu = c.get_value(borrow=False, return_internal_type=True)
        bt_gpu = b_t.get_value(borrow=False, return_internal_type=True)
        ct_gpu = c_t.get_value(borrow=False, return_internal_type=True)
        # One compiled function per normal/transposed combination of
        # (a, b, c); 'n' = normal, 't' = transposed.
        f_nnn = pfunc([], [], updates={a: (l * a + tensor.dot(b, c))},
                      mode=mode_with_gpu)
        f_nnt = pfunc([], [], updates={a: (l * a + tensor.dot(b, c_t.T))},
                      mode=mode_with_gpu)
        f_ntn = pfunc([], [], updates={a: (l * a + tensor.dot(b_t.T, c))},
                      mode=mode_with_gpu)
        f_ntt = pfunc([], [], updates={a: (l * a + tensor.dot(b_t.T, c_t.T))},
                      mode=mode_with_gpu)
        f_tnn = pfunc([], [], updates={a_t: (l * a_t + tensor.dot(b, c).T)},
                      mode=mode_with_gpu)
        f_tnt = pfunc([], [], updates={a_t: (l * a_t + tensor.dot(b, c_t.T).T)},
                      mode=mode_with_gpu)
        f_ttn = pfunc([], [], updates={a_t: (l * a_t + tensor.dot(b_t.T, c).T)},
                      mode=mode_with_gpu)
        f_ttt = pfunc([], [], updates={a_t: (l * a_t + tensor.dot(b_t.T, c_t.T).T)},
                      mode=mode_with_gpu)
        # Try with all stride patterns, and all transposed pattern
        for step_signs in itertools.product((-1, 1), repeat=6):
            for step in (1, 2):
                a_step1, a_step2, b_step1, b_step2, c_step1, c_step2 = \
                    (s * step for s in step_signs)
                b.set_value(b_gpu.copy()[::b_step1, ::b_step2], borrow=True)
                c.set_value(c_gpu.copy()[::c_step1, ::c_step2], borrow=True)
                b_t.set_value(bt_gpu.copy()[::b_step2, ::b_step1], borrow=True)
                c_t.set_value(ct_gpu.copy()[::c_step2, ::c_step1], borrow=True)
                # Numpy results
                a_n = (l * av[::a_step1, ::a_step2]
                       + numpy.dot(bv[::b_step1, ::b_step2],
                                   cv[::c_step1, ::c_step2]))
                at_n = (l * av[::a_step1, ::a_step2].T
                        + numpy.dot(bv[::b_step1, ::b_step2],
                                    cv[::c_step1, ::c_step2]).T)
                # a's value is updated, so we need to reinitialize it each time
                a.set_value(a_gpu.copy()[::a_step1, ::a_step2], borrow=True)
                f_nnn()
                assert numpy.allclose(a.get_value(), a_n)
                a.set_value(a_gpu.copy()[::a_step1, ::a_step2], borrow=True)
                f_nnt()
                assert numpy.allclose(a.get_value(), a_n)
                a.set_value(a_gpu.copy()[::a_step1, ::a_step2], borrow=True)
                f_ntn()
                assert numpy.allclose(a.get_value(), a_n)
                a.set_value(a_gpu.copy()[::a_step1, ::a_step2], borrow=True)
                f_ntt()
                assert numpy.allclose(a.get_value(), a_n)
                # Same checks through the transposed output shared variable,
                # which is likewise reinitialized before every call.
                a_t.set_value(transpose(a_gpu.copy())[::a_step2, ::a_step1],
                              borrow=True)
                f_tnn()
                assert numpy.allclose(a_t.get_value(), at_n)
                a_t.set_value(transpose(a_gpu.copy())[::a_step2, ::a_step1],
                              borrow=True)
                f_tnt()
                assert numpy.allclose(a_t.get_value(), at_n)
                a_t.set_value(transpose(a_gpu.copy())[::a_step2, ::a_step1],
                              borrow=True)
                f_ttn()
                assert numpy.allclose(a_t.get_value(), at_n)
                a_t.set_value(transpose(a_gpu.copy())[::a_step2, ::a_step1],
                              borrow=True)
                f_ttt()
                assert numpy.allclose(a_t.get_value(), at_n)

    # Regular, unit-dimension and zero-dimension shapes.
    cmp((3, 5), (3, 4), (4, 5))
    cmp((1, 5), (1, 4), (4, 5))
    cmp((3, 1), (3, 4), (4, 1))
    cmp((3, 1), (3, 1), (1, 1))
    cmp((1, 1), (1, 4), (4, 1))
    cmp((3, 5), (3, 1), (1, 5))
    cmp((0, 5), (0, 4), (4, 5))
    cmp((0, 1), (0, 4), (4, 1))
    cmp((0, 5), (0, 1), (1, 5))
    cmp((3, 0), (3, 4), (4, 0))
    cmp((3, 5), (3, 0), (0, 5))
    cmp((0, 0), (0, 4), (4, 0))
    cmp((0, 0), (0, 0), (0, 0))
def test_gemv_strides():
    """Check GPU gemv (a <- a + l * dot(b, c)) against numpy for every
    stride sign/step combination, with normal and transposed matrix input."""
    def check(shp_a, shp_b, shp_c):
        a_v = my_rand(shp_a)
        b_v = my_rand(*shp_b)
        c_v = my_rand(shp_c)
        alpha = numpy.float32(0.2)
        a = tcn.shared_constructor(a_v, 'a')
        b = tcn.shared_constructor(b_v, 'b')
        c = tcn.shared_constructor(c_v, 'c')
        b_T = tcn.shared_constructor(b_v.T, 'b.T')
        # Pristine device-side copies, re-sliced on every iteration below.
        a_dev = a.get_value(borrow=False, return_internal_type=True)
        b_dev = b.get_value(borrow=False, return_internal_type=True)
        c_dev = c.get_value(borrow=False, return_internal_type=True)
        f_normal = pfunc([], [],
                         updates={a: (a + alpha * tensor.dot(b, c))},
                         mode=mode_with_gpu)
        f_trans = pfunc([], [],
                        updates={a: (a + alpha * tensor.dot(b_T.T, c))},
                        mode=mode_with_gpu)
        # Try with all stride patterns, and all transposed patterns.
        for signs in itertools.product((1, -1), repeat=4):
            for mag in (1, 2):
                sa, sb1, sb2, sc = [s * mag for s in signs]
                b.set_value(b_dev.copy()[::sb1, ::sb2],
                            borrow=True)
                b_T.set_value(transpose(b_dev.copy())[::sb2, ::sb1],
                              borrow=True)
                c.set_value(c_dev.copy()[::sc], borrow=True)
                expected = (a_v[::sa]
                            + alpha * numpy.dot(b_v[::sb1, ::sb2],
                                                c_v[::sc]))
                # `a` accumulates in place, so reinitialize it before each
                # compiled function runs.
                for f in (f_normal, f_trans):
                    a.set_value(a_dev.copy()[::sa], borrow=True)
                    f()
                    assert numpy.allclose(a.get_value(), expected), \
                        (a.get_value(), expected)

    # Regular, unit and empty sizes.
    for shp_a, shp_b, shp_c in [(3, (3, 5), 5), (1, (1, 5), 5),
                                (3, (3, 1), 1), (0, (0, 5), 5),
                                (3, (3, 0), 0), (0, (0, 1), 1),
                                (1, (1, 0), 0), (0, (0, 0), 0)]:
        check(shp_a, shp_b, shp_c)
def test_ger_strides():
    # GER (rank-1 update: a <- a + l * outer(b, c)) on the GPU, checked
    # against numpy.outer for every stride sign/step combination, through
    # both the normal and the transposed output shared variable.
    def cmp(a_shp, b_shp, c_shp):
        # Host reference values (a is a matrix, b and c are vectors).
        av = my_rand(*a_shp)
        bv = my_rand(b_shp)
        cv = my_rand(c_shp)
        l = numpy.float32(0.2)
        a = tcn.shared_constructor(av, 'a')
        b = tcn.shared_constructor(bv, 'b')
        c = tcn.shared_constructor(cv, 'c')
        a_t = tcn.shared_constructor(av.T, 'a.T')
        # Pristine device-side copies, re-sliced on every iteration below.
        a_gpu = a.get_value(borrow=False, return_internal_type=True)
        b_gpu = b.get_value(borrow=False, return_internal_type=True)
        c_gpu = c.get_value(borrow=False, return_internal_type=True)
        f_n = pfunc([], [], updates={a: (a + l * tensor.outer(b, c))},
                    mode=mode_with_gpu)
        f_t = pfunc([], [], updates={a_t: (a_t + l * tensor.outer(b, c).T)},
                    mode=mode_with_gpu)
        # Try with all stride patterns, and all transposed patterns
        for step_signs in itertools.product((1, -1), repeat=4):
            for step in (1, 2):
                a_step1, a_step2, b_step, c_step = (s * step
                                                    for s in step_signs)
                # Both output variables are updated in place, so they are
                # reinitialized from the pristine copies every iteration.
                a.set_value(a_gpu.copy()[::a_step1, ::a_step2], borrow=True)
                a_t.set_value(transpose(a_gpu.copy())[::a_step1, ::a_step2],
                              borrow=True)
                b.set_value(b_gpu.copy()[::b_step], borrow=True)
                c.set_value(c_gpu.copy()[::c_step], borrow=True)
                f_n()
                n_n = (av[::a_step1, ::a_step2]
                       + l * numpy.outer(bv[::b_step], cv[::c_step]))
                assert numpy.allclose(a.get_value(), n_n), (a.get_value(), n_n)
                f_t()
                n_t = (av.T[::a_step1, ::a_step2]
                       + l * numpy.outer(bv[::b_step], cv[::c_step]).T)
                assert numpy.allclose(a_t.get_value(), n_t),\
                    (a_t.get_value(), n_t)

    # Regular, unit and empty sizes.
    cmp((3, 5), 3, 5)
    cmp((1, 5), 1, 5)
    cmp((3, 1), 3, 1)
    cmp((0, 5), 0, 5)
    cmp((3, 0), 3, 0)
    cmp((0, 1), 0, 1)
    cmp((1, 0), 1, 0)
    cmp((0, 0), 0, 0)
def test_outer(): def test_outer():
x = tcn.shared_constructor(my_rand(8,), 'x') x = tcn.shared_constructor(my_rand(8,), 'x')
......
Markdown is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Finish editing this comment first!
Register or sign in to post a comment