Commit 65f5d0c7 authored by Marc-Alexandre Cote

Add a 2D version of cumsum

Parent: 6dbb2457
@@ -17,7 +17,7 @@ from theano import tensor as T
 import numpy as np
 import theano
 from theano import config
-from theano.tensor.extra_ops import cumsum
+from theano.tensor.extra_ops import cumsum, diff
 from mlpython.misc.utils import Timer
@@ -26,123 +26,148 @@ class TestGpuCumsum(theano.tensor.tests.test_extra_ops.TestCumsumOp):
     op = GpuCumsum
     dtypes = ['float32']
 
-    def test_GpuCumsum(self):
-        ### Test 1D case ###
-        x = T.vector('x')
-        f = theano.function([x], cumsum(x))
-        # # Even number of elements
-        # a = np.random.random((18,)).astype(config.floatX)
-        # assert np.allclose(np.cumsum(a), f(a))
-        # # Odd number of elements
-        # a = np.random.random((7,)).astype(config.floatX)
-        # assert np.allclose(np.cumsum(a), f(a))
-        # # Use multiple GPU threadblocks
-        # a = np.random.random((2048+2,)).astype(config.floatX)
-        # assert np.allclose(np.cumsum(a), f(a))
-        # # Use multiple GPU threadblocks
-        # a = np.random.random((2048*75+2,)).astype(config.floatX)
-        # assert np.allclose(np.cumsum(a), f(a))
-        # # Use multiple GPU gridblocks
-        # a = np.ones((2048*2048+2,)).astype(config.floatX)
-        # assert np.allclose(np.cumsum(a), f(a))
-        print "\nBenchmark:"
-        import timeit as t
-        #theano_time = t.timeit("np.ones((100,))", "import numpy as np", number=1000)
-        stmt = "f(a)"
-        setup = """
-import numpy as np
-import theano
-import theano.tensor as T
-from theano.tensor.extra_ops import cumsum
-from theano import config
-x = T.vector('x')
-f = theano.function([x], cumsum(x))
-a = np.ones((100000,), dtype=config.floatX)
-""".replace(" ", "")
-        theano_time = t.timeit(stmt, setup, number=1000)
-        print "Theano:\t", theano_time
-        stmt = "np.cumsum(a)"
-        setup = """
-import numpy as np
-from theano import config
-a = np.ones((100000,), dtype=config.floatX)
-""".replace(" ", "")
-        numpy_time = t.timeit(stmt, setup, number=1000)
-        print "Numpy:\t", numpy_time
-        print "Speedup: {0}x".format(numpy_time/theano_time)
-        # # Extensive testing
-        # i = 0;
-        # while True:
-        #     a = np.ones((i,), dtype=config.floatX)
-        #     fa = f(a)
-        #     npa = np.cumsum(a)
-        #     if not np.allclose(npa, fa):
-        #         print i, np.allclose(npa, fa)  # Test axis=None
-        #         print fa
-        #         print npa
-        #         assert False
-        #     if i % 1000 == 0:
-        #         print i
-        #     i += 1
-        # ### Test 2D case - axis=1 ###
-        # x = T.matrix('x')
-        # f = theano.function([x], cumsum(x, axis=1))
-        # # # Even number of elements
-        # # print "\n# Even number of elements"
-        # # a = np.random.random((18,18)).astype(config.floatX)
-        # # assert np.allclose(np.cumsum(a, axis=1), f(a))
-        # # # Odd number of elements
-        # # print "\n# Odd number of elements"
-        # # assert np.allclose(np.cumsum(a, axis=1), f(a))
-        # # # Use multiple GPU threadblocks
-        # # print "\n# Use multiple GPU threadblocks"
-        # # a = np.random.random((2048+2,2048+2)).astype(config.floatX)
-        # # assert np.allclose(np.cumsum(a, axis=1), f(a))
-        # # # Use multiple GPU threadblocks
-        # # print "\n# Use multiple GPU threadblocks"
-        # # a = np.ones((10,2048*75+3)).astype(config.floatX)
-        # # assert np.allclose(np.cumsum(a, axis=1), f(a))
-        # # # Use multiple GPU gridblocks
-        # # print "\n# Use multiple GPU gridblocks"
-        # # a = np.ones((11,2048*2048+3)).astype(config.floatX)
-        # # assert np.allclose(np.cumsum(a, axis=1), f(a))
-        # # Extensive testing
-        # i = 19000;
-        # while True:
-        #     a = np.ones((11,i), dtype=config.floatX)
-        #     fa = f(a)
-        #     npa = np.cumsum(a, axis=1)
-        #     if not np.allclose(npa, fa):
-        #         print i, np.allclose(npa, fa)  # Test axis=None
-        #         print fa
-        #         print npa
-        #         assert False
-        #     if i % 1000 == 0:
-        #         print i
-        #     i += 1
+    def test_benchmark_1D_vs_2D(self):
+        print "\nBenchmark:"
+
+        from theano import sandbox, Out
+        import time
+
+        vlen = 40 * 1024 * 2048  # 10 x # cores x # threads per core
+        iters = 25
+
+        x = theano.shared(np.ones((vlen,), dtype=config.floatX), borrow=False)
+        res = Out(sandbox.cuda.basic_ops.gpu_from_host(cumsum(x)), borrow=True)
+        f = theano.function([], res)
+        print f.maker.fgraph.toposort()
+        t0 = time.time()
+        for i in xrange(iters):
+            r = f()
+        t1 = time.time()
+        print 'Looping %d times took' % iters, t1 - t0, 'seconds'
+        print 'Result is', r
+        print 'Numpy result is', np.asarray(r)
+
+        # x = theano.shared(np.ones((1,vlen), dtype=config.floatX), borrow=True)
+        # f = theano.function([], Out(sandbox.cuda.basic_ops.gpu_from_host(cumsum(x,axis=1)), borrow=True))
+        # print f.maker.fgraph.toposort()
+        # t0 = time.time()
+        # for i in xrange(iters):
+        #     r = f()
+        # t1 = time.time()
+        # print 'Looping %d times took' % iters, t1 - t0, 'seconds'
+        # print 'Result is', r
+        # print 'Numpy result is', np.asarray(r)
+
+        # print 'Used the', config.device
+
+    def test_GpuCumsum(self):
+        ### Test 1D case ###
+        x = T.vector('x')
+        f = theano.function([x], cumsum(x))
+
+        # Even number of elements
+        a = np.random.random((18,)).astype(config.floatX)
+        print f(a)
+        print np.cumsum(a)
+        assert np.allclose(np.cumsum(a), f(a))
+
+        # Odd number of elements
+        a = np.random.random((7,)).astype(config.floatX)
+        assert np.allclose(np.cumsum(a), f(a))
+
+        # Use multiple GPU threadblocks
+        a = np.random.random((2048+2,)).astype(config.floatX)
+        assert np.allclose(np.cumsum(a), f(a))
+
+        # Use multiple GPU threadblocks
+        a = np.random.random((2048*75+2,)).astype(config.floatX)
+        assert np.allclose(np.cumsum(a), f(a))
+
+        # Use multiple GPU gridblocks
+        a = np.ones((2048*2048+2,)).astype(config.floatX)
+        assert np.allclose(np.cumsum(a), f(a))
+
+        # Extensive testing
+        for i in xrange(int(1e3)*5):
+            a = np.ones((i,), dtype=config.floatX)
+            fa = f(a)
+            npa = np.cumsum(a)
+            if not np.allclose(npa, fa):
+                print i, np.allclose(npa, fa)  # Test axis=None
+                print fa
+                print npa
+                assert False
+            if i % 1000 == 0:
+                print i
+
+        #for axis in xrange(2):
+        for axis in xrange(2):
+            ### Test 2D case - axis=1 ###
+            x = T.matrix('x')
+            f = theano.function([x], cumsum(x, axis=axis))
+
+            # Even number of elements
+            print "\n# Even number of elements (axis={0})".format(axis)
+            a = np.random.random((18,18)).astype(config.floatX)
+            assert np.allclose(np.cumsum(a, axis=axis), f(a))
+
+            # Odd number of elements
+            print "\n# Odd number of elements (axis={0})".format(axis)
+            a = np.random.random((21,21)).astype(config.floatX)
+            assert np.allclose(np.cumsum(a, axis=axis), f(a))
+
+            # Use two GPU threadblocks
+            print "\n# Use two GPU threadblocks (axis={0})".format(axis)
+            a = np.random.random((2048+2,2048+2)).astype(config.floatX)
+            assert np.allclose(np.cumsum(a, axis=axis), f(a))
+
+            # Use multiple GPU threadblocks
+            print "\n# Use multiple GPU threadblocks (axis={0})".format(axis)
+            a = np.ones((10,2048*75+3)).astype(config.floatX)
+            assert np.allclose(np.cumsum(a, axis=axis), f(a))
+
+            a = np.ones((2048*75+3,10)).astype(config.floatX)
+            assert np.allclose(np.cumsum(a, axis=axis), f(a))
+
+            # Use multiple GPU gridblocks
+            print "\n# Use multiple GPU gridblocks (axis={0})".format(axis)
+            a = np.ones((11,2048*2048+3)).astype(config.floatX)
+            assert np.allclose(np.cumsum(a, axis=axis), f(a))
+            a = np.ones((2048*2048+3,11)).astype(config.floatX)
+            assert np.allclose(np.cumsum(a, axis=axis), f(a))
+
+            # Extensive testing for the first 10k sizes
+            for i in xrange(int(1e3)*5):
+                a = np.ones((11,i), dtype=config.floatX)
+                fa = f(a)
+                npa = np.cumsum(a, axis=axis)
+                if not np.allclose(npa, fa):
+                    print i, np.allclose(npa, fa)  # Test axis=None
+                    print fa
+                    print npa
+                    assert False
+
+                a = np.ones((i,11), dtype=config.floatX)
+                fa = f(a)
+                npa = np.cumsum(a, axis=axis)
+                if not np.allclose(npa, fa):
+                    print i, np.allclose(npa, fa)  # Test axis=None
+                    print fa
+                    print npa
+                    assert False
+
+                if i % 1000 == 0:
+                    print i
\ No newline at end of file
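
The new test_benchmark_1D_vs_2D follows the usual GPU timing pattern: compile the Theano function once, then time a tight loop of calls. For comparison, a minimal NumPy-only sketch of the same harness; the bench helper is hypothetical, and the 100000-element size is taken from the timeit benchmark this commit deletes:

    import time
    import numpy as np

    def bench(fn, arg, iters=25):
        # Warm-up call so one-time costs (allocation, and in the Theano case
        # compilation and host-to-GPU transfer) are not counted in the loop.
        fn(arg)
        t0 = time.time()
        for _ in range(iters):
            fn(arg)
        return (time.time() - t0) / iters

    a = np.ones((100000,), dtype=np.float32)  # size from the deleted timeit setup
    print('np.cumsum baseline: %g s/call' % bench(np.cumsum, a))

Separating the warm-up call from the timed loop is what makes the 25-iteration average comparable between the GPU path and a plain NumPy baseline.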
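The correctness oracle throughout the test is np.cumsum with an explicit axis argument. A minimal NumPy-only sketch of the property the new 2D loop checks for both axes (the 2x3 array here is illustrative, not a shape used in the test):

    import numpy as np

    a = np.arange(6, dtype=np.float32).reshape(2, 3)  # [[0,1,2],[3,4,5]]

    # axis=1: each row is scanned independently, left to right.
    assert np.allclose(np.cumsum(a, axis=1),
                       np.array([[0., 1., 3.],
                                 [3., 7., 12.]], dtype=np.float32))

    # axis=0: each column is scanned independently, top to bottom.
    assert np.allclose(np.cumsum(a, axis=0),
                       np.array([[0., 1., 2.],
                                 [3., 5., 7.]], dtype=np.float32))

    # axis=None flattens first, which is what the 1D test exercises.
    assert np.allclose(np.cumsum(a), np.cumsum(a.ravel()))

Checking both axes on non-square shapes such as (10, 2048*75+3) and its transpose, as the diff does, helps catch kernels that only handle scans along the innermost dimension.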