提交 ba4178f7 authored 作者: Frederic Bastien's avatar Frederic Bastien

Backported to Python 2.4: replaced relative imports (`from .x import ...`) with absolute `theano.sandbox.cuda.*` imports, and replaced the builtin `any()` (added in Python 2.5) with `numpy.any()`.

上级 28dfa5e3
from .type import CudaNdarrayType
from theano.sandbox.cuda.type import CudaNdarrayType
from .var import (CudaNdarrayVariable,
from theano.sandbox.cuda.var import (CudaNdarrayVariable,
CudaNdarrayConstant,
CudaNdarraySharedVariable,
shared_constructor)
......
......@@ -4,10 +4,10 @@ import numpy
from theano import Op, Type, Apply, Variable, Constant
from theano import tensor, scalar
from .type import CudaNdarrayType
from .type_support import filter as type_support_filter
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.sandbox.cuda.type_support import filter as type_support_filter
from .elemwise import NaiveAlgo
from theano.sandbox.cuda.elemwise import NaiveAlgo
import logging, copy
_logger_name = 'theano_cuda_ndarray.basic_ops'
......
......@@ -3,7 +3,7 @@ from theano import tensor, scalar
import StringIO
import cuda_ndarray
from .type import CudaNdarrayType
from theano.sandbox.cuda.type import CudaNdarrayType
class GpuDot22(Op):
def __str__(self):
......
......@@ -3,9 +3,9 @@ from theano import tensor, scalar
import StringIO
import cuda_ndarray
from .type import CudaNdarrayType
from theano.sandbox.cuda.type import CudaNdarrayType
from .kernel_codegen import nvcc_kernel, inline_reduce_max, inline_reduce_sum, inline_softmax
from theano.sandbox.cuda.kernel_codegen import nvcc_kernel, inline_reduce_max, inline_reduce_sum, inline_softmax
class GpuCrossentropySoftmaxArgmax1HotWithBias (Op):
nin=3
......
import sys
import theano
import numpy
from theano import tensor, scalar, compile
from theano.gof import local_optimizer, EquilibriumDB, SequenceDB
......@@ -47,8 +48,8 @@ gpu_cut_copies.register('cut_gpu_constant_transfers', tensor.opt.constant_foldin
@local_optimizer([])
def local_gpu_elemwise_0(node):
if isinstance(node.op, tensor.Elemwise):
if any(hasattr(i.owner, 'op') and isinstance(i.owner.op, HostFromGpu) for i in node.inputs):
if any(o.type.dtype == 'float64' for o in node.outputs):
if numpy.any(hasattr(i.owner, 'op') and isinstance(i.owner.op, HostFromGpu) for i in node.inputs):
if numpy.any(o.type.dtype == 'float64' for o in node.outputs):
print 'WARNING: THERE ARE STILL float64s in your graph local_gpu_elemwise_0', node
else:
# move the add to a GpuAdd
......@@ -110,7 +111,7 @@ def local_gpu_dot(node):
x, y = host_input.owner.inputs
return [gpu_dot22(gpu_from_host(x), gpu_from_host(y))]
if node.op == tensor.blas._dot22:
if any((i.owner and i.owner.op == host_from_gpu) for i in node.inputs):
if numpy.any((i.owner and i.owner.op == host_from_gpu) for i in node.inputs):
x, y = node.inputs
return [host_from_gpu(gpu_dot22(gpu_from_host(x), gpu_from_host(y)))]
return False
......
......@@ -9,9 +9,9 @@ import theano.config as config
import cuda_ndarray
from .type_support import filter as type_support_filter
from theano.sandbox.cuda.type_support import filter as type_support_filter
from .nvcc_compiler import nvcc_module_compile_str
from theano.sandbox.cuda.nvcc_compiler import nvcc_module_compile_str
class CudaNdarrayType(Type):
......
......@@ -4,10 +4,10 @@ from theano import Op, Type, Apply, Variable, Constant
from theano import tensor
from theano.compile.sandbox.sharedvalue import shared, SharedVariable, shared_constructor
from .type import CudaNdarrayType
from .type_support import filter as type_support_filter
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.sandbox.cuda.type_support import filter as type_support_filter
from .basic_ops import HostFromGpu, GpuFromHost
from theano.sandbox.cuda.basic_ops import HostFromGpu, GpuFromHost
class _operators(tensor.basic._tensor_py_operators):
"""Define a few properties and conversion methods for CudaNdarray Variables.
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论