提交 26cc1f73 authored 作者: Frederic

pep8

上级 c6129572
......@@ -16,7 +16,7 @@ if cuda.cuda_available == False:
raise SkipTest('Optional package cuda disabled')
if theano.config.mode=='FAST_COMPILE':
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
......@@ -26,26 +26,28 @@ else:
def test_shape_i():
    """Check that x.shape[i] on a GPU tensor compiles to a single Shape_i op.

    Builds a 3-d float32 CUDA input, compiles a function returning its
    second dimension, and (outside FAST_COMPILE mode) asserts the graph
    was optimized down to exactly one ``Shape_i`` node.
    """
    x = cuda.ftensor3()
    v = cuda.CudaNdarray(numpy.zeros((3, 4, 5), dtype='float32'))
    f = theano.function([x], x.shape[1])
    topo = f.maker.fgraph.toposort()
    assert f(v) == 4
    if theano.config.mode != 'FAST_COMPILE':
        # The shape lookup should not require any actual computation node
        # beyond the single Shape_i introduced by the optimizer.
        assert len(topo) == 1
        assert isinstance(topo[0].op, T.opt.Shape_i)
def test_shape():
    """Check that x.shape on a GPU tensor optimizes to Shape_i + MakeVector.

    Builds a 3-d float32 CUDA input, compiles a function returning the
    full shape tuple, and (outside FAST_COMPILE mode) asserts the graph
    consists of one ``Shape_i`` per dimension followed by a ``MakeVector``
    that assembles them.
    """
    x = cuda.ftensor3()
    v = cuda.CudaNdarray(numpy.zeros((3, 4, 5), dtype='float32'))
    f = theano.function([x], x.shape)
    topo = f.maker.fgraph.toposort()
    assert numpy.all(f(v) == (3, 4, 5))
    if theano.config.mode != 'FAST_COMPILE':
        # 3 Shape_i nodes (one per dimension) + 1 MakeVector = 4 nodes.
        assert len(topo) == 4
        assert isinstance(topo[0].op, T.opt.Shape_i)
        assert isinstance(topo[1].op, T.opt.Shape_i)
        assert isinstance(topo[2].op, T.opt.Shape_i)
        assert isinstance(topo[3].op, T.opt.MakeVector)
def test_softmax_optimizations():
from theano.tensor.nnet.nnet import softmax, crossentropy_categorical_1hot
......@@ -66,16 +68,17 @@ def test_softmax_optimizations():
assert fgraph.outputs[0].owner.inputs[0].owner.op == cuda.host_from_gpu
assert fgraph.outputs[0].owner.inputs[0].owner.inputs[0].owner.op == cuda.nnet.gpu_crossentropy_softmax_argmax_1hot_with_bias
def test_may_share_memory_cuda():
from theano.misc.may_share_memory import may_share_memory
a = cuda.CudaNdarray(numpy.zeros((3,4),dtype='float32'))
b = cuda.CudaNdarray(numpy.zeros((3,4),dtype='float32'))
na = numpy.zeros((3,4))
nb = numpy.zeros((3,4))
a = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))
b = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))
na = numpy.zeros((3, 4))
nb = numpy.zeros((3, 4))
va = a.view()
vb = b.view()
ra = a.reshape((4,3))
rb = b.reshape((4,3))
ra = a.reshape((4, 3))
rb = b.reshape((4, 3))
#can't test the transpose as ta._strides = is not implemented
#manual transpose of a
......@@ -84,25 +87,28 @@ def test_may_share_memory_cuda():
#elem_size=elem_size = numpy.zeros(0,dtype=a.dtype).dtype.itemsize
#ta.gpudata += ta.size*elem_size
for a_,b_,rep in [(a,a,True),(b,b,True),(a,b,False),
(a,na,False),(b,nb,False),(na,b,False),(nb,a,False),
(a,va,True),(b,vb,True),(va,b,False),(a,vb,False),
(a,ra,True),(b,rb,True),(ra,b,False),(a,rb,False),
for a_, b_, rep in [(a, a, True), (b, b, True), (a, b, False),
(a, na, False), (b, nb, False),
(na, b, False), (nb, a, False),
(a, va, True), (b, vb, True),
(va, b, False), (a, vb, False),
(a, ra, True), (b, rb, True),
(ra, b, False), (a, rb, False),
]:
assert may_share_memory(a_,b_)==rep
assert may_share_memory(b_,a_)==rep
assert may_share_memory(a_, b_) == rep
assert may_share_memory(b_, a_) == rep
#test that it raise error when needed.
for a_,b_,rep in [(a,(0,),False),(a,1,False),(a,None,False)]:
assert may_share_memory(a_,b_,False)==rep
assert may_share_memory(b_,a_,False)==rep
for a_, b_, rep in [(a, (0,), False), (a, 1, False), (a, None, False)]:
assert may_share_memory(a_, b_, False) == rep
assert may_share_memory(b_, a_, False) == rep
try:
may_share_memory(a_,b_)
may_share_memory(a_, b_)
raise Exception("An error was expected")
except TypeError:
pass
try:
may_share_memory(b_,a_)
may_share_memory(b_, a_)
raise Exception("An error was expected")
except TypeError:
pass
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论