提交 eec6ce70 authored 作者: Razvan Pascanu's avatar Razvan Pascanu

create fake shared variables for aux inputs

Even if we do not have access to the memory buffer, if we are dealing with Tensor Types we can create a fake shared variable (all we need to know is the number of dimensions and the dtype). I have written a function that creates such a shared variable (which can later take the real values) and used it for the auxiliary inputs (parameters and inputs/sequences).
上级 ed03b08b
...@@ -197,11 +197,30 @@ class ScanOp(PureOp): ...@@ -197,11 +197,30 @@ class ScanOp(PureOp):
aux_buffers = node_input_storage[1 + len(base_inputs):] aux_buffers = node_input_storage[1 + len(base_inputs):]
# 2.1 First the auxiliary arguments, those that are parameters or # 2.1 First the auxiliary arguments, those that are parameters or
# input # input
def fake_shared(var):
    """Build a placeholder shared variable matching ``var``.

    We may not have the real memory buffer yet, but for tensor
    variables the number of dimensions and the dtype are enough to
    construct a stand-in shared variable; the real value can be
    swapped in later with ``set_value``.

    :param var: a symbolic variable exposing ``ndim``, ``dtype`` and
        ``name`` attributes.
    :return: a ``theano.shared`` variable holding a zero array of
        shape ``(1,) * var.ndim``.
    """
    # Equivalent to the nested-list trick (wrapping 0 in a list
    # var.ndim times, then asarray-ing it), but explicit: a zero
    # array with var.ndim unit-length dimensions.
    val = numpy.zeros((1,) * var.ndim, dtype=var.dtype)
    return theano.shared(val, name=var.name)
# Partition the auxiliary inputs (parameters / non-sequence inputs)
# by how their runtime value can be supplied to the inner function.
non_tensor_args = []     # placeholder inputs with no wrappable value
non_tensor_buffers = []  # their corresponding storage cells
aux_buffers = []         # (fake shared var, storage cell) pairs to fill later
# BUG FIX: the previous code rebound ``aux_buffers`` to [] and then
# iterated over it, so this loop could never run (and it appended to
# the very list being iterated). Iterate the storage slice directly.
for mem_buf, var in izip(node_input_storage[1 + len(base_inputs):],
                         aux_inputs):
    if mem_buf[0] is not None:
        # A concrete value is already available: wrap it directly.
        givens[var] = theano.shared(mem_buf[0], name=var.name,
                                    borrow=True)
    elif isinstance(var, TensorType):
        # NOTE(review): this tests the *variable* against TensorType;
        # verify it should not be ``isinstance(var.type, TensorType)``.
        # No value yet, but ndim/dtype suffice for a placeholder.
        givens[var] = fake_shared(var)
        aux_buffers.append((givens[var], mem_buf))
    else:
        # Neither a value nor a tensor type: keep it as an explicit
        # (non-tensor) input to the compiled function.
        givens[var] = var.type()
        non_tensor_args.append(givens[var])
        non_tensor_buffers.append(mem_buf)
# 2.2. Next the states (numeric) and the outputs
updates = {} updates = {}
n_numeric_values = len(self.lengths) n_numeric_values = len(self.lengths)
for pos, (mem_buf, var, expr) in enumerate( for pos, (mem_buf, var, expr) in enumerate(
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论