Commit 5281bc31 authored by James Bergstra

merge

......@@ -69,13 +69,18 @@ def __oplist_tag(thing, tag):
thing.__oplist_tags = tags
def as_cuda_or_tensor_variable(x, name=None, ndim=None):
    """Return `x` as a CudaNdarrayVariable or a TensorVariable.

    Works like `as_tensor_variable`, but does NOT force a transfer of
    the value to the GPU (or back): if `x` already knows how to present
    itself as a CudaNdarrayVariable, that representation is returned
    unchanged; otherwise we fall back to `as_tensor_variable`.

    :param x: the object to convert to a Variable
    :param name: optional name for the resulting variable
    :param ndim: optional required number of dimensions
    """
    if hasattr(x, '_as_CudaNdarrayVariable'):
        # TODO: pass the name and ndim arguments through here as well
        return x._as_CudaNdarrayVariable()
    return as_tensor_variable(x, name, ndim)
# Disabled copy of as_cuda_or_tensor_variable -- kept under `if 0:` so it is
# never defined, per the review note below.
if 0:
    # this starts to feel like we're enumerating all the types
    # the one place where this is used we should also allow for sparse
    # variables
    # - JB 20100226
    def as_cuda_or_tensor_variable(x, name = None, ndim=None):
        """Return `x` as a CudaNdarrayVariable or TensorVariable.

        Same contract as `as_tensor_variable`, but does not transfer the
        value to the GPU: objects exposing `_as_CudaNdarrayVariable` are
        returned in that form instead.
        """
        if hasattr(x, '_as_CudaNdarrayVariable'):
            return x._as_CudaNdarrayVariable() #TODO: pass name and ndim arguments
        return as_tensor_variable(x, name, ndim)
def as_tensor_variable(x, name = None, ndim=None):
"""Return `x`, transformed into a `TensorType`
......
......@@ -279,10 +279,11 @@ class Shape_i(T.Op):
def __str__(self):
return '%s{%i}'%(self.__class__.__name__, self.i)
def make_node(self, x):
    """Build the Apply node that reads dimension `self.i` of `x`'s shape.

    `x` could be one of a number of Variable types; the only things we
    require are that the variable has an `ndim` attribute and that its
    runtime value has a `.shape`.  We therefore do NOT convert/transfer
    `x` to any particular type here (the old call to
    `as_cuda_or_tensor_variable` forced a CPU transfer for unsupported
    types, and that helper has been removed).

    :param x: a Variable with an `ndim` attribute
    :returns: an Apply with output type lscalar
    :raises TypeError: if `x` is not a Variable, or `self.i` is not a
        valid dimension index for it
    """
    if not isinstance(x, T.Variable):
        raise TypeError('x must be Variable with ndim attribute', x)
    if x.ndim <= self.i:
        raise TypeError('x has too few dimensions for Shape_i', (x, self.i))
    return T.Apply(self, [x], [T.lscalar()])
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论