Commit bd16aee9 authored by Razvan Pascanu

remove safe_to_cpu, and re-wrote safe_new

One thing that I've tried is to avoid importing cuda.
Parent: 6a6ac9b2
@@ -30,24 +30,31 @@ import theano
# Logging function for sending warning or info
_logger = logging.getLogger('theano.scan_utils')
def safe_new(x, tag=''):
    """
    Internal function that constructs a new variable from x with the same
    type, but with a different name (old name + tag). This function is used
    by gradient, or the R-op to construct new variables for the inputs of
    the inner graph such that there is no interference between the original
    graph and the newly constructed graph.

    :param x: variable (or raw value convertible by
        ``tensor.as_tensor_variable``) to duplicate
    :param tag: string appended to ``x.name`` to form the new variable's
        name (default ``''``)
    :return: a fresh, unnamed-or-renamed variable of the same type as ``x``
        (constants are returned as clones)
    """
    # Preserve the original name (suffixed with ``tag``) so cloned graphs
    # remain readable when debugging.
    if hasattr(x, 'name') and x.name is not None:
        nw_name = x.name + tag
    else:
        nw_name = None
    # BUG FIX: the instance check must be on ``x`` itself, not ``x.type``.
    # ``Constant`` is a Variable subclass, while ``x.type`` is a Type
    # instance (e.g. TensorType), so the original branch could never fire.
    if isinstance(x, tensor.Constant):
        return x.clone()
    else:
        try:
            x = tensor.as_tensor_variable(x)
        except TypeError:
            # This could happen for example for random states, and I really
            # want to avoid the convoluted logic that checks for cuda
            # ndarrays
            pass
    # ``x.type()`` builds a new variable of the same Theano type.
    nw_x = x.type()
    nw_x.name = nw_name
    return nw_x
def traverse(out, x,x_copy, d): def traverse(out, x,x_copy, d):
......
Markdown formatting is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment