提交 c13192c0 authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron

Miscellaneous fixes all over, discovered in the first real test.

上级 d8cc425e
......@@ -3,6 +3,7 @@ import logging
import theano
from theano.configparser import config, AddConfigVar, StrParam, \
BoolParam, IntParam
from theano.compile import optdb
_logger_name = 'theano.sandbox.gpuarray'
_logger = logging.getLogger(_logger_name)
......@@ -29,6 +30,7 @@ AddConfigVar('gpuarray.init_device',
# This is for documentation not to depend on the availability of pygpu
from type import (GpuArrayType, GpuArrayVariable, GpuArrayConstant,
GpuArraySharedVariable, gpuarray_shared_constructor)
import opt
def init_dev(dev):
......
......@@ -434,10 +434,10 @@ class GpuAlloc(Op):
def make_node(self, value, *shape):
v = as_gpuarray_variable(value)
sh = [tensor.as_tensor_variable(s) for s in shape]
if v.ndim = len(shape):
if v.ndim != len(shape):
raise TypeError(
'GpuAlloc requires value of same dimensions as shape',
value len(shape))
value, len(shape))
bcast = []
for s in sh:
if s.type.dtype[:3] not in ('int', 'uint'):
......
import theano
from theano import tensor
from theano.compile import optdb
from theano.gof import (local_optimizer, EquilibriumDB, SequenceDB, ProxyDB,
Optimizer, toolbox, DestroyHandler,
......@@ -6,6 +8,8 @@ from theano.gof import (local_optimizer, EquilibriumDB, SequenceDB, ProxyDB,
from theano.gof.python25 import all, any
from theano.sandbox.gpuarray.type import GpuArrayType
from basic_ops import host_from_gpu, gpu_from_host, gpu_alloc
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
......@@ -16,18 +20,19 @@ gpu_seqopt.register('gpuarray_local_optimiziations', gpu_optimizer, 1,
gpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2,
'fast_run', 'gpuarray')
# do not add 'fast_run' to these two as this would always enable gpuarray mode
optdb.register('gpuarray_opt', gpu_seqopt,
optdb.__position__.get('add_destroy_handler', 49.5) - 1,
'gpu')
'gpuarray')
optdb.register('gpuarray_after_fusion', ProxyDB(gpu_seqopt),
optdb.__position__.get('elemwise_fusion', 71) + 1,
'gpu')
'gpuarray')
def register_opt(*tags, **kwargs):
    """Decorator factory: register a local optimizer in ``gpu_optimizer``.

    Parameters
    ----------
    *tags : str
        Extra tags under which to register the optimizer (in addition to
        the always-applied 'fast_run' and 'gpuarray' tags).
    name : str, optional (keyword only)
        Registration name; defaults to the optimizer's ``__name__``.

    Returns
    -------
    callable
        A decorator that registers its argument and returns it unchanged.
    """
    def f(local_opt):
        # pop with a default: the previous `kwargs and kwargs.pop('name')`
        # raised KeyError when kwargs was non-empty but had no 'name' key.
        name = kwargs.pop('name', None) or local_opt.__name__
        gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray', *tags)
        return local_opt
    return f
......
......@@ -8,11 +8,12 @@ from theano.compile import SharedVariable
# (it will not work though)
try:
import pygpu
except ImportError:
pygpu = None
if pygpu:
from pygpu import gpuarray
from pygpu.elemwise import compare, elemwise2
from basic_ops import host_from_gpu, gpu_from_host
except ImportError:
pass
class GpuArrayType(Type):
......@@ -97,7 +98,7 @@ class GpuArrayType(Type):
return (hash(self.typecode) ^ hash(self.broadcastable))
def __str__(self):
return "GpuArray[%s, %s]<%s>" % (self.dtype,)
return "GpuArray<%s>" % (self.dtype,)
def c_declare(self, name, sub):
    """Return C code declaring the storage variable for this type.

    `name` is the C identifier to declare; `sub` (standard c_code
    substitution dict) is unused here.
    """
    declaration = "GpuArrayObject *%s;" % (name,)
    return declaration
......@@ -153,6 +154,7 @@ class GpuArrayType(Type):
class _operators(tensor.basic._tensor_py_operators):
def _as_TensorVariable(self):
    # Convert this GPU variable into a host TensorVariable by inserting
    # a host_from_gpu transfer Op into the graph.
    # NOTE(review): the import is deferred to call time, presumably to
    # break a circular import between this module and basic_ops — confirm.
    from basic_ops import host_from_gpu
    return host_from_gpu(self)
def _as_GpuArrayVariable(self):
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论