提交 de826376，作者：Frédéric Bastien (Commit de826376, authored by Frédéric Bastien)

Merge pull request #3146 from nouiz/make_variable

Don't use type.make_variable() to have more stack trace
...@@ -1451,7 +1451,7 @@ class GpuDnnSoftmaxGrad(GpuDnnSoftmaxBase): ...@@ -1451,7 +1451,7 @@ class GpuDnnSoftmaxGrad(GpuDnnSoftmaxBase):
sm = as_cuda_ndarray_variable(sm) sm = as_cuda_ndarray_variable(sm)
assert dy.ndim == 4 assert dy.ndim == 4
assert sm.ndim == 4 assert sm.ndim == 4
return Apply(self, [dy, sm], [sm.type.make_variable()]) return Apply(self, [dy, sm], [sm.type()])
def method(self): def method(self):
return """ return """
......
...@@ -1454,7 +1454,7 @@ class GpuDnnSoftmaxGrad(GpuDnnSoftmaxBase): ...@@ -1454,7 +1454,7 @@ class GpuDnnSoftmaxGrad(GpuDnnSoftmaxBase):
sm = as_gpuarray_variable(sm) sm = as_gpuarray_variable(sm)
assert dy.ndim == 4 assert dy.ndim == 4
assert sm.ndim == 4 assert sm.ndim == 4
return Apply(self, [dy, sm], [sm.type.make_variable()]) return Apply(self, [dy, sm], [sm.type()])
def method(self): def method(self):
return """ return """
......
...@@ -131,7 +131,7 @@ class Binomial(gof.op.Op): ...@@ -131,7 +131,7 @@ class Binomial(gof.op.Op):
shape = tensor.as_tensor_variable(shape) shape = tensor.as_tensor_variable(shape)
return gof.Apply(self, [n, p, shape], return gof.Apply(self, [n, p, shape],
[SparseType(dtype=self.dtype, [SparseType(dtype=self.dtype,
format=self.format).make_variable()]) format=self.format)()])
def perform(self, node, inputs, outputs): def perform(self, node, inputs, outputs):
(n, p, shape) = inputs (n, p, shape) = inputs
......
...@@ -70,7 +70,7 @@ class SoftmaxWithBias(gof.Op): ...@@ -70,7 +70,7 @@ class SoftmaxWithBias(gof.Op):
or x.type.dtype not in tensor.float_dtypes: or x.type.dtype not in tensor.float_dtypes:
raise ValueError('b must be 1-d tensor of floats') raise ValueError('b must be 1-d tensor of floats')
sm = x.type.make_variable() sm = x.type()
return Apply(self, [x, b], [sm]) return Apply(self, [x, b], [sm])
def perform(self, node, input_storage, output_storage): def perform(self, node, input_storage, output_storage):
...@@ -298,7 +298,7 @@ class SoftmaxGrad(gof.Op): ...@@ -298,7 +298,7 @@ class SoftmaxGrad(gof.Op):
dy = tensor.shape_padleft(dy, n_ones=1) dy = tensor.shape_padleft(dy, n_ones=1)
if sm.ndim == 1: if sm.ndim == 1:
sm = tensor.shape_padleft(sm, n_ones=1) sm = tensor.shape_padleft(sm, n_ones=1)
return Apply(self, [dy, sm], [sm.type.make_variable()]) return Apply(self, [dy, sm], [sm.type()])
def perform(self, node, input_storage, output_storage): def perform(self, node, input_storage, output_storage):
dy, sm = input_storage dy, sm = input_storage
...@@ -857,10 +857,10 @@ class CrossentropySoftmaxArgmax1HotWithBias(gof.Op): ...@@ -857,10 +857,10 @@ class CrossentropySoftmaxArgmax1HotWithBias(gof.Op):
# TODO: Is this correct? It used to be y, not y_idx # TODO: Is this correct? It used to be y, not y_idx
nll = tensor.TensorType(x.type.dtype, nll = tensor.TensorType(x.type.dtype,
y_idx.type.broadcastable).make_variable() y_idx.type.broadcastable)()
# nll = TensorType(x.dtype, y.broadcastable) # nll = TensorType(x.dtype, y.broadcastable)
sm = x.type.make_variable() sm = x.type()
am = y_idx.type.make_variable() am = y_idx.type()
return Apply(self, [x, b, y_idx], [nll, sm, am]) return Apply(self, [x, b, y_idx], [nll, sm, am])
def perform(self, node, input_storage, output_storage): def perform(self, node, input_storage, output_storage):
...@@ -1084,7 +1084,7 @@ class CrossentropySoftmax1HotWithBiasDx(gof.Op): ...@@ -1084,7 +1084,7 @@ class CrossentropySoftmax1HotWithBiasDx(gof.Op):
if (y_idx.type.ndim != 1 or if (y_idx.type.ndim != 1 or
y_idx.type.dtype not in tensor.discrete_dtypes): y_idx.type.dtype not in tensor.discrete_dtypes):
raise ValueError('y_idx must be 1-d tensor of [u]ints', y_idx.type) raise ValueError('y_idx must be 1-d tensor of [u]ints', y_idx.type)
return Apply(self, [dy, sm, y_idx], [sm.type.make_variable()]) return Apply(self, [dy, sm, y_idx], [sm.type()])
def perform(self, node, input_storage, output_storage): def perform(self, node, input_storage, output_storage):
dy, sm, y_idx = input_storage dy, sm, y_idx = input_storage
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论