提交 0b75e14a authored 作者: Melanie Ducoffe's avatar Melanie Ducoffe

pep8

上级 4c4327cf
......@@ -156,7 +156,6 @@ def test_gpualloc():
def test_gpuallocempty():
f_gpu = theano.function([], tensor.AllocEmpty('float32')(2,3),
mode=mode_with_gpu)
l_gpu = f_gpu.maker.fgraph.toposort()
......
......@@ -5505,7 +5505,6 @@ class AllocEmpty(gof.Op):
out, = out_
sh = tuple([int(i) for i in inputs])
if out[0] is None or out[0].shape != sh:
# XXX: We could implement and call CudaNdarray.empty(sh) instead.
out[0] = numpy.empty(sh, dtype=self.dtype)
def c_code(self, node, name, inputs, out_, sub):
......@@ -5515,10 +5514,10 @@ class AllocEmpty(gof.Op):
shps = inputs
nd = len(shps)
str = "npy_intp dims[%(nd)s];\n" % locals()
for idx, sh in enumerate(shps):
str +="dims[%(idx)s] =" \
"((npy_intp)((dtype_%(sh)s*)PyArray_DATA(%(sh)s))[0]);\n" % locals()
str += "dims[%(idx)s] =" \
"((npy_intp)((dtype_%(sh)s*)" \
" PyArray_DATA(%(sh)s))[0]);\n" % locals()
# Validate that the output storage exists
str += "if(%(out)s==NULL\n" % locals()
......
......@@ -2090,7 +2090,7 @@ def local_dot22_to_dot22scalar(node):
d.owner.inputs[1].shape[1])
zero = T.as_tensor_variable(numpy.asarray(0, dtype=a.dtype))
return [T.mul(gemm(z, a, d.owner.inputs[0], d.owner.inputs[1],
zero), *o)]
zero), *o)]
# must happen after gemm as the gemm optimizer don't understant
# dot22scalar and gemm give more speed up then dot22scalar
blas_optdb.register('local_dot22_to_dot22scalar',
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论