提交 1a06538d authored 作者: James Bergstra's avatar James Bergstra

tensor.sharedvar - copy value in constructor

This is to match the new rules I proposed for where shared / non-shared memory is allocated.
上级 1652e97e
...@@ -27,7 +27,7 @@ def tensor_constructor(value, name=None, strict=False, broadcastable=None): ...@@ -27,7 +27,7 @@ def tensor_constructor(value, name=None, strict=False, broadcastable=None):
if broadcastable is None: if broadcastable is None:
broadcastable = (False,)*len(value.shape) broadcastable = (False,)*len(value.shape)
type = TensorType(value.dtype, broadcastable=broadcastable) type = TensorType(value.dtype, broadcastable=broadcastable)
return TensorSharedVariable(type=type, value=value, name=name, strict=strict) return TensorSharedVariable(type=type, value=numpy.array(value,copy=True), name=name, strict=strict)
# TensorSharedVariable brings in the tensor operators, is not ideal, but works as long as we # TensorSharedVariable brings in the tensor operators, is not ideal, but works as long as we
# dont do purely scalar-scalar operations # dont do purely scalar-scalar operations
...@@ -56,7 +56,7 @@ def scalar_constructor(value, name=None, strict=False): ...@@ -56,7 +56,7 @@ def scalar_constructor(value, name=None, strict=False):
# Do not pass the dtype to asarray because we want this to fail if # Do not pass the dtype to asarray because we want this to fail if
# strict is True and the types do not match. # strict is True and the types do not match.
rval = ScalarSharedVariable(type=tensor_type, rval = ScalarSharedVariable(type=tensor_type,
value=numpy.asarray(value), value=numpy.array(value, copy=True),
name=name, strict=strict) name=name, strict=strict)
return rval return rval
except: except:
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论