提交 d9072951 authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron

context kind is now a bytes.

上级 87cd5536
......@@ -125,7 +125,7 @@ def dnn_available(context_name):
ctx = get_context(context_name)
- if not ctx.kind == 'cuda':
+ if not ctx.kind == b'cuda':
dnn_available.msg = "Not on a CUDA device."
return False
......
......@@ -554,7 +554,7 @@ class GpuCAReduceCuda(GpuKernelBase, HideC, CAReduceDtype):
def make_node(self, x):
x = as_gpuarray_variable(x, infer_context_name(x))
- if x.type.context.kind != 'cuda':
+ if x.type.context.kind != b'cuda':
raise TypeError("GpuCAReduceCuda doesn't work for non-cuda devices")
ret = super(GpuCAReduceCuda, self).make_node(x)
self = copy.copy(self)
......
......@@ -218,7 +218,7 @@ class GpuCumsum(GpuKernelBase, Op):
return kernels
def c_code(self, node, nodename, inp, out, sub):
- if node.inputs[0].type.context.kind != 'cuda':
+ if node.inputs[0].type.context.kind != b'cuda':
raise NotImplementedError("cuda only")
x, = inp
z, = out
......
......@@ -243,7 +243,7 @@ class GpuImages2Neibs(GpuKernelBase, Images2Neibs, Op):
return kernels
def c_code(self, node, name, inp, out, sub):
- if node.inputs[0].type.context.kind != 'cuda':
+ if node.inputs[0].type.context.kind != b'cuda':
raise NotImplementedError("cuda only")
dtype_ten4 = node.inputs[0].dtype
dtype_neib_shape = node.inputs[1].dtype
......
......@@ -189,7 +189,7 @@ class GpuCrossentropySoftmaxArgmax1HotWithBias(GpuKernelBase, Op):
flags=flags, objvar=k_var)]
def c_code(self, node, nodename, inp, out, sub):
- if node.inputs[0].type.context.kind != 'cuda':
+ if node.inputs[0].type.context.kind != b'cuda':
raise NotImplementedError('cuda only')
typecode_x = pygpu.gpuarray.dtype_to_typecode(node.inputs[0].dtype)
typecode_b = pygpu.gpuarray.dtype_to_typecode(node.inputs[1].dtype)
......@@ -375,7 +375,7 @@ class GpuCrossentropySoftmax1HotWithBiasDx(GpuKernelBase, Op):
return ['<numpy_compat.h>', '<gpuarray/types.h>']
def c_code(self, node, nodename, inp, out, sub):
- if node.inputs[0].type.context.kind != 'cuda':
+ if node.inputs[0].type.context.kind != b'cuda':
raise NotImplementedError("cuda only")
typecode_dx = pygpu.gpuarray.dtype_to_typecode(node.outputs[0].dtype)
itemsize_dnll = numpy.dtype(node.inputs[0].dtype).itemsize
......@@ -584,7 +584,7 @@ class GpuSoftmax(GpuKernelBase, Op):
return ['<numpy_compat.h>', '<gpuarray/types.h>']
def c_code(self, node, nodename, inp, out, sub):
- if node.inputs[0].type.context.kind != 'cuda':
+ if node.inputs[0].type.context.kind != b'cuda':
raise NotImplementedError("cuda only")
dtype_x = node.inputs[0].dtype
work_x = work_dtype(dtype_x)
......@@ -783,7 +783,7 @@ class GpuSoftmaxWithBias(GpuKernelBase, Op):
return ['<numpy_compat.h>', '<gpuarray/types.h>']
def c_code(self, node, nodename, inp, out, sub):
- if node.inputs[0].type.context.kind != 'cuda':
+ if node.inputs[0].type.context.kind != b'cuda':
raise NotImplementedError('cuda only')
dtype_x = node.inputs[0].dtype
dtype_b = node.inputs[1].dtype
......
......@@ -145,7 +145,7 @@ def op_lifter(OP, cuda_only=False):
# Check if we should replace
if (not replace or
(cuda_only and
- get_context(context_name).kind != 'cuda')):
+ get_context(context_name).kind != b'cuda')):
return False
# tag the inputs with the context in case
......@@ -642,7 +642,7 @@ def local_gpua_advanced_subtensor(node, context_name):
def local_gpua_advanced_incsubtensor(node, context_name):
context = get_context(context_name)
# This is disabled on non-cuda contexts
- if context.kind != 'cuda':
+ if context.kind != b'cuda':
return None
x, y, ilist = node.inputs
......@@ -673,12 +673,12 @@ def local_gpua_careduce(node, context_name):
if isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul,
scalar.Maximum, scalar.Minimum)):
ctx = get_context(context_name)
- if ctx.kind == 'opencl':
+ if ctx.kind == b'opencl':
op = GpuCAReduceCPY
if node.op.scalar_op not in [scalar.add, scalar.mul]:
# We don't support yet all reduction with cpy code.
return
- elif ctx.kind == 'cuda':
+ elif ctx.kind == b'cuda':
op = GpuCAReduceCuda
else:
return False
......
......@@ -599,7 +599,7 @@ class GpuAdvancedIncSubtensor1_dev20(GpuKernelBase, GpuAdvancedIncSubtensor1):
def c_code(self, node, name, inputs, outputs, sub):
ctx = self.get_params(node)
- if ctx.kind != 'cuda':
+ if ctx.kind != b'cuda':
raise NotImplementedError("cuda only")
if (self.set_instead_of_inc or
node.inputs[0].ndim != node.inputs[1].ndim or
......
......@@ -197,7 +197,7 @@ class test_GpuCAReduceCuda(test_GpuCAReduceCPY):
def setUp(self):
super(test_GpuCAReduceCuda, self).setUp()
- if get_context(test_ctx_name).kind != 'cuda':
+ if get_context(test_ctx_name).kind != b'cuda':
raise SkipTest("Cuda specific tests")
......@@ -212,7 +212,7 @@ class T_gpureduce_dtype(test_elemwise.T_reduce_dtype):
'float32', 'float64']
def setUp(self):
- if get_context(test_ctx_name).kind != 'cuda':
+ if get_context(test_ctx_name).kind != b'cuda':
raise SkipTest("Cuda specific tests")
......
......@@ -24,7 +24,7 @@ class TestGpuCumsum(theano.tensor.tests.test_extra_ops.TestCumsumOp):
def setUp(self):
super(TestGpuCumsum, self).setUp()
test_ctx = get_context(test_ctx_name)
- if test_ctx.kind != 'cuda':
+ if test_ctx.kind != b'cuda':
raise SkipTest("Cuda specific tests")
self.max_threads_dim0 = test_ctx.maxlsize0
self.max_grid_size1 = test_ctx.maxgsize2
......
......@@ -124,7 +124,7 @@ def test_reduce():
topo = f.maker.fgraph.toposort()
ops = [type(node.op) for node in topo]
- if kind == 'opencl' and method in ["max", "min"]:
+ if kind == b'opencl' and method in ["max", "min"]:
assert not(GpuCAReduceCuda in ops or GpuCAReduceCPY in ops)
else:
assert GpuCAReduceCuda in ops or GpuCAReduceCPY in ops
......
Markdown 格式
0%
您即将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论