Commit 5454b126 authored by James Bergstra

Shape_i - requires Variable argument to make_node, doesn't convert with as_tensor

Parent a3b4fd8c
...@@ -69,7 +69,12 @@ def __oplist_tag(thing, tag): ...@@ -69,7 +69,12 @@ def __oplist_tag(thing, tag):
thing.__oplist_tags = tags thing.__oplist_tags = tags
def as_cuda_or_tensor_variable(x, name = None, ndim=None): if 0:
# this starts to feel like we're enumerating all the types
# the one place where this is used we should also allow for sparse
# variables
# - JB 20100226
def as_cuda_or_tensor_variable(x, name = None, ndim=None):
""" """
This function do the same as_tensor_variable, but don't transfert the value on the gpu This function do the same as_tensor_variable, but don't transfert the value on the gpu
""" """
......
...@@ -279,10 +279,11 @@ class Shape_i(T.Op): ...@@ -279,10 +279,11 @@ class Shape_i(T.Op):
def __str__(self): def __str__(self):
return '%s{%i}'%(self.__class__.__name__, self.i) return '%s{%i}'%(self.__class__.__name__, self.i)
def make_node(self, x): def make_node(self, x):
#we use as_cuda_or_tensor_variable as we want this op to work for # x could be one of a number of types
# TensorVariable AND CudaNdarrayVariable. Otherwise, we force the transfert # the only thing we require is that the variable have a .ndim,
# of the variable to the cpu. # and that the value have a .shape
x = T.as_cuda_or_tensor_variable(x) if not isinstance(x, T.Variable):
raise TypeError('x must be Variable with ndim attribute', x)
if x.ndim <= self.i: if x.ndim <= self.i:
raise TypeError('x has too few dimensions for Shape_i', (x, self.i)) raise TypeError('x has too few dimensions for Shape_i', (x, self.i))
return T.Apply(self, [x], [T.lscalar()]) return T.Apply(self, [x], [T.lscalar()])
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论