提交 1625e634 作者: Dustin Webb

Incorporated reviewer comments.

上级 ddb712a5
......@@ -887,7 +887,7 @@ cudnnTensor4dDescriptor_t %(name)s_%(id)d;
%(name)s_%(id)d = NULL;
if ((err%(id)d = cudnnCreateTensor4dDescriptor(&%(name)s_%(id)d)) != CUDNN_STATUS_SUCCESS) {
PyErr_Format(PyExc_MemoryError, "could not allocate tensor4d descriptor "
"(inp): %%s", cudnnGetErrorString(err%(id)d));
"%%s", cudnnGetErrorString(err%(id)d));
%(fail)s
}
""" % dict(name=name, id=id, fail=fail)
......@@ -921,9 +921,6 @@ cudnnStatus_t err%(id)d;
def c_code(self, node, name, inputs, outputs, sub):
ins = inputs
if type(inputs) is not list:
ins = [ins]
outs, = outputs
if self.tensor_format == 'b01c':
......@@ -1058,7 +1055,7 @@ err%(name)s = cudnnSoftmaxForward(
class GpuDnnSoftmaxGrad(GpuDnnSoftmaxBase):
softmax_inputs = ['softmax_var', 'softmax_input']
softmax_inputs = ['softmax_gout', 'softmax_input']
def make_node(self, dy, sm):
dy = as_cuda_ndarray_variable(dy)
......
......@@ -321,10 +321,11 @@ class test_SoftMax(unittest.TestCase):
)
# Verify the grad operation
n = 3
m = 5
data = numpy.arange(n * m, dtype='float32').reshape(n, m)
gdata = numpy.asarray(data)[:, :, None, None]
dims = (2, 3, 4, 5)
gdata = numpy.arange(
numpy.product(dims),
dtype='float32'
).reshape(dims)
T.verify_grad(f_gpu, [gdata], rng=numpy.random)
def check_types(graph, graph_gpu):
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论