提交 fae01b90 authored 作者: Arnaud Bergeron's avatar Arnaud Bergeron

Make sure to warn people that tensor_format doesn't work.

上级 acad83a3
......@@ -1778,7 +1778,7 @@ class GpuDnnSoftmaxBase(DnnBase):
Parameters
----------
tensor_format
Whether the data format is 'bc01' or 'b01c'.
Always set this to 'bc01'.
algo
'fast', 'accurate' or 'log' indicating whether, respectively, computations
should be optimized for speed, for accuracy, or if CuDNN should rather
......@@ -1793,7 +1793,13 @@ class GpuDnnSoftmaxBase(DnnBase):
__props__ = ('tensor_format', 'mode', 'algo')
def __init__(self, tensor_format, algo, mode):
assert(tensor_format in ('bc01', 'b01c'))
if tensor_format != 'bc01':
raise ValueError(
"It was discovered that since December 2014, the "
"tensor_format parameter was ignored and the equivalent of "
"'bc01' is always used. Since your code seems to be using "
"another value, this might have affected previous results "
"ran with this code.")
DnnBase.__init__(self)
self.tensor_format = tensor_format
......@@ -1935,7 +1941,7 @@ class GpuDnnSoftmax(GpuDnnSoftmaxBase):
Parameters
----------
tensor_format
Whether the data format is 'bc01' or 'b01c'.
Always set to 'bc01'.
algo
'fast' or 'accurate' indicating whether computations should be
optimized for speed or accuracy respectively.
......@@ -2003,7 +2009,7 @@ class GpuDnnSoftmaxGrad(GpuDnnSoftmaxBase):
Parameters
----------
tensor_format
Whether the data format is 'bc01' or 'b01c'.
Always set to 'bc01'.
algo
'fast' or 'accurate' indicating whether computations should be
optimized for speed or accuracy respectively.
......
......@@ -1078,8 +1078,6 @@ class GpuDnnSoftmaxBase(DnnBase):
Parameters
----------
tensor_format
*deprecated* Ignored, will look at the strides of the input(s).
algo
'fast' or 'accurate' indicating whether computations should be
optimized for speed or accuracy respectively.
......@@ -1092,7 +1090,7 @@ class GpuDnnSoftmaxBase(DnnBase):
__props__ = ('mode', 'algo')
def __init__(self, _, algo, mode):
def __init__(self, algo, mode):
DnnBase.__init__(self, [self.file], self.c_func)
assert(algo in ('fast', 'accurate', 'log'))
......@@ -1129,8 +1127,6 @@ class GpuDnnSoftmax(GpuDnnSoftmaxBase):
"""
Op for the cuDNN Softmax.
tensor_format
*deprecated* Ignored, will look at input strides.
algo
'fast' or 'accurate' indicating whether computations should be
optimized for speed or accuracy respectively.
......@@ -1154,7 +1150,6 @@ class GpuDnnSoftmax(GpuDnnSoftmaxBase):
g_sm, = grads
sm = self.make_node(x).outputs[0]
return [GpuDnnSoftmaxGrad(
None,
self.algo,
self.mode
)(g_sm, sm)]
......@@ -1166,8 +1161,6 @@ class GpuDnnSoftmaxGrad(GpuDnnSoftmaxBase):
Parameters
----------
tensor_format
*deprecated* Ignored, will look at the input strides.
algo
'fast' or 'accurate' indicating whether computations should be
optimized for speed or accuracy respectively.
......@@ -1393,7 +1386,7 @@ def local_softmax_dnn(node):
if isinstance(node.op, GpuSoftmax):
ins = node.inputs[0].dimshuffle(0, 1, 'x', 'x')
ins = gpu_contiguous(ins)
out = GpuDnnSoftmax('bc01', 'accurate', 'channel')(ins)
out = GpuDnnSoftmax('accurate', 'channel')(ins)
out = as_gpuarray_variable(out.dimshuffle(0, 1))
return [out]
......@@ -1410,7 +1403,7 @@ def local_log_softmax_dnn(node):
isinstance(node.inputs[0].owner.op, GpuDnnSoftmax) and
len(node.inputs[0].clients) == 1):
softmax_node = node.inputs[0].owner
new_softmax = GpuDnnSoftmax(None, 'log', softmax_node.op.mode)
new_softmax = GpuDnnSoftmax('log', softmax_node.op.mode)
return [new_softmax(softmax_node.inputs[0])]
......@@ -1444,6 +1437,6 @@ def local_softmax_dnn_grad(node):
return
ins.append(n.dimshuffle(0, 1, 'x', 'x'))
out = GpuDnnSoftmaxGrad('bc01', 'accurate', 'channel')(
out = GpuDnnSoftmaxGrad('accurate', 'channel')(
gpu_contiguous(ins[0]), gpu_contiguous(ins[1]))
return [out.dimshuffle(0, 1)]
......@@ -349,7 +349,7 @@ class TestDnnInferShapes(utt.InferShapeTester):
)
self._compile_and_check(
[t],
[dnn.GpuDnnSoftmax('bc01', 'accurate', 'channel')(t)],
[dnn.GpuDnnSoftmax('accurate', 'channel')(t)],
[rand_tensor],
dnn.GpuDnnSoftmax
)
......@@ -359,7 +359,6 @@ class TestDnnInferShapes(utt.InferShapeTester):
[
T.grad(
dnn.GpuDnnSoftmax(
'bc01',
'accurate',
'channel'
)(t).mean(),
......@@ -707,7 +706,6 @@ class test_SoftMax(test_nnet.test_SoftMax):
x_gpu = T.tensor4('x_gpu', 'float32')
f_z = T.nnet.softmax_op
f_gpu = dnn.GpuDnnSoftmax(
'bc01',
'accurate',
'channel'
)
......@@ -808,7 +806,7 @@ class test_SoftMax(test_nnet.test_SoftMax):
raise SkipTest("Log-softmax is only in cudnn v3+")
x = T.ftensor4()
softmax_out = dnn.GpuDnnSoftmax('bc01', 'accurate', 'channel')(x)
softmax_out = dnn.GpuDnnSoftmax('accurate', 'channel')(x)
log_out = T.log(T.as_tensor_variable(softmax_out))
f = theano.function([x], log_out, mode=mode_with_gpu)
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论