Commit 1121fe45 authored by Frederic

Rename GpuCAReduce to GpuCAReduceCuda and reuse the name GpuCAReduce for GpuCAReduceCPY to allow reloading older pickled files.

Rename GpuCAReduce to GpuCAReduceCuda and reuse the name GpuCAReduce for GpuCAReduceCPY to allow reloading older pickled files.
Parent commit: a2afb9dd
......@@ -521,8 +521,8 @@ class GpuDimShuffle(HideC, DimShuffle):
return (4,)
class GpuCAReduce(HideC, CAReduce):
"""GpuCAReduce is a Reduction along some dimensions by a scalar op.
class GpuCAReduceCuda(HideC, CAReduce):
"""GpuCAReduceCuda is a Reduction along some dimensions by a scalar op.
The dimensions along which to reduce is specified by the
`reduce_mask` that you pass to the constructor. The `reduce_mask`
......@@ -584,12 +584,12 @@ class GpuCAReduce(HideC, CAReduce):
ax = ''
if self.axis is not None:
ax = '{%s}' % (', '.join(str(x) for x in self.axis),)
return "GpuCAReduce{%s}%s" % (str(self.scalar_op), ax)
return "GpuCAReduceCuda{%s}%s" % (str(self.scalar_op), ax)
def make_node(self, x):
x = as_gpuarray_variable(x)
assert x.dtype == "float32"
ret = super(GpuCAReduce, self).make_node(x)
ret = super(GpuCAReduceCuda, self).make_node(x)
self = copy.copy(self)
self.axis = ret.op.axis
if self.reduce_mask is None:
......@@ -762,7 +762,7 @@ class GpuCAReduce(HideC, CAReduce):
scalar_op = self.scalar_op
zero_shp = """
PyErr_Format(PyExc_NotImplementedError,
"GpuCAReduce not implemented when input shape is 0"
"GpuCAReduceCuda not implemented when input shape is 0"
" for this scalar_op: %(scalar_op)s");
%(fail)s;
""" % locals()
......@@ -1162,7 +1162,7 @@ class GpuCAReduce(HideC, CAReduce):
else:
zero_shp = """
PyErr_Format(PyExc_NotImplementedError,
"GpuCAReduce not implemented when input shape is 0 for this scalar_op");
"GpuCAReduceCuda not implemented when input shape is 0 for this scalar_op");
%(fail)s;
""" % locals()
......@@ -2567,3 +2567,5 @@ class GpuCAReduceCPY(GpuKernelBase, HideC, CAReduceDtype):
else:
output[0] = pygpu.gpuarray.array(input, copy=True,
dtype=node.outputs[0].type.dtype)
# To allow reloading old pickled files
GpuCAReduce = GpuCAReduceCPY
......@@ -22,7 +22,7 @@ from theano.sandbox.gpuarray.conv import GpuConv
from theano.sandbox.gpuarray.nnet import (GpuCrossentropySoftmaxArgmax1HotWithBias,
GpuCrossentropySoftmax1HotWithBiasDx)
from theano.sandbox.gpuarray.elemwise import (GpuElemwise, _is_scalar,
GpuDimShuffle, GpuCAReduce)
GpuDimShuffle, GpuCAReduceCuda)
from theano.sandbox.gpuarray.subtensor import GpuIncSubtensor, GpuSubtensor
from theano.sandbox.gpuarray.type import GpuArrayConstant
......@@ -248,7 +248,7 @@ def local_gpua_careduce(node):
if (isinstance(node.op.scalar_op, scalar.basic.Add) or
isinstance(node.op.scalar_op, scalar.basic.Mul)):
x, = node.inputs
greduce = GpuCAReduce(node.op.scalar_op, axis=node.op.axis)
greduce = GpuCAReduceCuda(node.op.scalar_op, axis=node.op.axis)
if x.dtype != "float32":
return
gvar = greduce(x)
......@@ -284,7 +284,7 @@ def local_gpua_careduce(node):
new_mask.append(reduce_mask[i])
new_in_shp.append(x_shape[i])
new_greduce = GpuCAReduce(new_mask, scalar_op)
new_greduce = GpuCAReduceCuda(new_mask, scalar_op)
reshaped_x = x.reshape(tensor.stack(*new_in_shp))
gpu_reshaped_x = gpu_from_host(reshaped_x)
reshaped_gpu_inputs = [gpu_reshaped_x]
......
......@@ -10,7 +10,7 @@ from theano.tensor.tests.test_elemwise import (test_Broadcast, test_DimShuffle,
from theano.sandbox.gpuarray.tests.test_basic_ops import rand_gpuarray
from theano.sandbox.gpuarray.elemwise import (GpuElemwise, GpuDimShuffle,
GpuCAReduce, GpuCAReduceCPY)
GpuCAReduceCuda, GpuCAReduceCPY)
from theano.sandbox.gpuarray.type import GpuArrayType
from pygpu.array import gpuarray
......@@ -71,7 +71,7 @@ class test_GpuCAReduceCPY(test_CAReduce):
test_CAReduce.test_infer_shape(self, dtype)
class test_GpuCAReduce(test_GpuCAReduceCPY):
class test_GpuCAReduceCuda(test_GpuCAReduceCPY):
dtypes = ["float32"]
bin_dtypes = ["uint8", "int8"]
bin_dtypes = []
......@@ -141,7 +141,7 @@ class test_GpuCAReduce(test_GpuCAReduceCPY):
# ((1100,2,3,4,5),[0,1,2,3,4]),((2,1100,3,4,5),[0,1,2,3,4]),((2,3,1100,4,5),[0,1,2,3,4]),((2,3,4,1100,5),[0,1,2,3,4]),((2,3,4,5,1100),[0,1,2,3,4]),#11111
# ((5,4,3,10,11),[1,2]),
]
op = GpuCAReduce
op = GpuCAReduceCuda
reds = [scalar.add, scalar.mul]
def test_perform(self):
......
......@@ -3,7 +3,7 @@ import numpy
import theano
from theano.tests import unittest_tools as utt
from theano.sandbox.gpuarray.basic_ops import GpuAlloc, GpuReshape, gpu_alloc
from theano.sandbox.gpuarray.elemwise import GpuCAReduce
from theano.sandbox.gpuarray.elemwise import GpuCAReduceCuda
import theano.sandbox.gpuarray
from theano.tests.unittest_tools import SkipTest
......@@ -69,8 +69,8 @@ def test_sum_prod():
res = f(val)
utt.assert_allclose(res, val.sum())
assert res.shape == ()
assert GpuCAReduce in [type(node.op)
for node in f.maker.fgraph.toposort()]
assert GpuCAReduceCuda in [type(node.op)
for node in f.maker.fgraph.toposort()]
def test_local_gpualloc_memset_0():
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论