提交 03071a68 authored 作者: Frederic's avatar Frederic

Fix import of sandbox.cuda when nvcc is not available. Needed for doc generation.

上级 92362104
...@@ -9,7 +9,7 @@ import sys ...@@ -9,7 +9,7 @@ import sys
import theano import theano
from theano.compat import get_unbound_function from theano.compat import get_unbound_function
from theano.compile import optdb from theano.compile import optdb
from theano.gof import EquilibriumDB from theano.gof import EquilibriumDB, SequenceDB
from theano.gof.cmodule import get_lib_extension from theano.gof.cmodule import get_lib_extension
from theano.gof.compilelock import get_lock, release_lock from theano.gof.compilelock import get_lock, release_lock
from theano.configparser import config, AddConfigVar, StrParam, BoolParam from theano.configparser import config, AddConfigVar, StrParam, BoolParam
...@@ -19,6 +19,7 @@ import nvcc_compiler ...@@ -19,6 +19,7 @@ import nvcc_compiler
# we use for optimization. Otherwise, we can iterate 100s of time on # we use for optimization. Otherwise, we can iterate 100s of time on
# the graph and apply only a few optimizations each time. # the graph and apply only a few optimizations each time.
gpu_optimizer = EquilibriumDB(ignore_newtrees=False) gpu_optimizer = EquilibriumDB(ignore_newtrees=False)
gpu_seqopt = SequenceDB()
def register_opt(*tags, **kwargs): def register_opt(*tags, **kwargs):
......
...@@ -2,7 +2,7 @@ import os ...@@ -2,7 +2,7 @@ import os
import theano import theano
from theano import Apply, gof, tensor from theano import Apply, gof, tensor
from theano.gof import Optimizer from theano.gof import Optimizer, local_optimizer
from theano.gof.type import CDataType from theano.gof.type import CDataType
from theano.compat import PY3 from theano.compat import PY3
from theano.tensor.nnet import SoftmaxGrad from theano.tensor.nnet import SoftmaxGrad
...@@ -13,7 +13,7 @@ from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable, ...@@ -13,7 +13,7 @@ from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,
from theano.sandbox.cuda.blas import (GpuConv, GpuDownsampleFactorMax, from theano.sandbox.cuda.blas import (GpuConv, GpuDownsampleFactorMax,
GpuDownsampleFactorMaxGrad) GpuDownsampleFactorMaxGrad)
from theano.sandbox.cuda.nnet import GpuSoftmax from theano.sandbox.cuda.nnet import GpuSoftmax
from theano.sandbox.cuda import register_opt from theano.sandbox.cuda import gpu_seqopt, register_opt
from theano.sandbox.cuda.nvcc_compiler import NVCC_compiler from theano.sandbox.cuda.nvcc_compiler import NVCC_compiler
...@@ -1145,12 +1145,8 @@ err%(name)s = cudnnSoftmaxBackward( ...@@ -1145,12 +1145,8 @@ err%(name)s = cudnnSoftmaxBackward(
""" """
# We need this since other stuff from opt is not importable. # Intentation for history
if cuda_available: if True:
from theano.sandbox.cuda.opt import (
local_optimizer, gpu_optimizer, gpu_seqopt)
#@register_opt('cudnn') # this optimizer is registered in opt.py instead. #@register_opt('cudnn') # this optimizer is registered in opt.py instead.
@local_optimizer([GpuConv]) @local_optimizer([GpuConv])
def local_conv_dnn(node): def local_conv_dnn(node):
......
...@@ -3,9 +3,8 @@ import copy ...@@ -3,9 +3,8 @@ import copy
from theano import Op from theano import Op
from theano.gof import local_optimizer from theano.gof import local_optimizer
from theano.sandbox.cuda import cuda_available, GpuOp from theano.sandbox.cuda import cuda_available, GpuOp
from theano.sandbox.cuda.basic_ops import GpuFlatten
from theano.tensor.extra_ops import CumsumOp from theano.tensor.extra_ops import CumsumOp
from theano.sandbox.cuda import GpuFlatten
if cuda_available: if cuda_available:
from theano.sandbox.cuda import CudaNdarrayType from theano.sandbox.cuda import CudaNdarrayType
......
...@@ -13,7 +13,7 @@ from theano import config, tensor, gof ...@@ -13,7 +13,7 @@ from theano import config, tensor, gof
import theano.ifelse import theano.ifelse
from theano.compile import optdb from theano.compile import optdb
from theano.gof import (local_optimizer, EquilibriumDB, SequenceDB, ProxyDB, from theano.gof import (local_optimizer, EquilibriumDB, ProxyDB,
Optimizer, toolbox) Optimizer, toolbox)
from theano.gof.python25 import all, any from theano.gof.python25 import all, any
from theano.sandbox.cuda.basic_ops import ( from theano.sandbox.cuda.basic_ops import (
...@@ -42,7 +42,7 @@ from theano.sandbox.cuda.elemwise import SupportCodeError ...@@ -42,7 +42,7 @@ from theano.sandbox.cuda.elemwise import SupportCodeError
from theano.scalar.basic_scipy import Erfinv from theano.scalar.basic_scipy import Erfinv
from theano.sandbox.cuda.elemwise import erfinv_gpu from theano.sandbox.cuda.elemwise import erfinv_gpu
from theano.sandbox.cuda.var import CudaNdarrayConstant from theano.sandbox.cuda.var import CudaNdarrayConstant
from theano.sandbox.cuda import gpu_optimizer, register_opt from theano.sandbox.cuda import gpu_optimizer, register_opt, gpu_seqopt
from theano.scan_module import scan_utils, scan_op, scan_opt from theano.scan_module import scan_utils, scan_op, scan_opt
from theano.tensor.blas import _is_real_vector, _is_real_matrix from theano.tensor.blas import _is_real_vector, _is_real_matrix
from theano.tensor import nlinalg from theano.tensor import nlinalg
...@@ -58,7 +58,6 @@ except ImportError: ...@@ -58,7 +58,6 @@ except ImportError:
#optdb.print_summary() # shows what is currently registered #optdb.print_summary() # shows what is currently registered
gpu_cut_copies = EquilibriumDB() gpu_cut_copies = EquilibriumDB()
gpu_seqopt = SequenceDB()
gpu_seqopt.register('gpu_local_optimizations', gpu_optimizer, 1, gpu_seqopt.register('gpu_local_optimizations', gpu_optimizer, 1,
'fast_run', 'fast_compile', 'inplace', 'gpu') 'fast_run', 'fast_compile', 'inplace', 'gpu')
gpu_seqopt.register('gpu_cut_transfers', gpu_cut_copies, 2, gpu_seqopt.register('gpu_cut_transfers', gpu_cut_copies, 2,
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论