Commit 766fe8a3, authored by Cesar Laurent

Added args check.

Parent commit: c27117f0
...@@ -245,12 +245,6 @@ class Pool(OpenMPOp): ...@@ -245,12 +245,6 @@ class Pool(OpenMPOp):
def make_node(self, x, ws, stride=None, pad=(0, 0)): def make_node(self, x, ws, stride=None, pad=(0, 0)):
# TODO: consider restricting the dtype? # TODO: consider restricting the dtype?
x = tensor.as_tensor_variable(x) x = tensor.as_tensor_variable(x)
# TODO CESAR: How can we check the theano variables?
if isinstance(ws, (tuple, list)):
if not all([isinstance(w, integer_types) for w in ws]):
raise ValueError(
"Pool downsample parameters must be ints."
" Got %s" % str(ws))
if stride is None: if stride is None:
stride = ws stride = ws
if isinstance(pad, (tuple, list)): if isinstance(pad, (tuple, list)):
...@@ -258,8 +252,9 @@ class Pool(OpenMPOp): ...@@ -258,8 +252,9 @@ class Pool(OpenMPOp):
if pad != (0, 0) and not self.ignore_border: if pad != (0, 0) and not self.ignore_border:
raise NotImplementedError( raise NotImplementedError(
'padding works only with ignore_border=True') 'padding works only with ignore_border=True')
# TODO CESAR: Again, how can we check against theano variables? if isinstance(ws, (tuple, list)):
if pad[0] >= ws[0] or pad[1] >= ws[1]: #TODO CESAR this is wrong if ws is a theano variable ws = tuple(ws)
if pad[0] >= ws[0] or pad[1] >= ws[1]:
raise NotImplementedError( raise NotImplementedError(
'padding_h and padding_w must be smaller than strides') 'padding_h and padding_w must be smaller than strides')
ws = tensor.as_tensor_variable(ws) ws = tensor.as_tensor_variable(ws)
...@@ -270,6 +265,12 @@ class Pool(OpenMPOp): ...@@ -270,6 +265,12 @@ class Pool(OpenMPOp):
assert pad.ndim == 1 assert pad.ndim == 1
if x.type.ndim != 4: if x.type.ndim != 4:
raise TypeError() raise TypeError()
if not ws.dtype.startswith('int'):
raise TypeError('Pool downsample parameters must be ints.')
if not stride.dtype.startswith('int'):
raise TypeError('Stride parameters must be ints.')
if not pad.dtype.startswith('int'):
raise TypeError('Padding parameters must be ints.')
# If the input shape are broadcastable we can have 0 in the output shape # If the input shape are broadcastable we can have 0 in the output shape
broad = x.broadcastable[:2] + (False, False) broad = x.broadcastable[:2] + (False, False)
out = tensor.TensorType(x.dtype, broad) out = tensor.TensorType(x.dtype, broad)
...@@ -660,7 +661,9 @@ class MaxPoolGrad(PoolGrad): ...@@ -660,7 +661,9 @@ class MaxPoolGrad(PoolGrad):
assert isinstance(x, Variable) and x.ndim == 4 assert isinstance(x, Variable) and x.ndim == 4
assert isinstance(maxout, Variable) and maxout.ndim == 4 assert isinstance(maxout, Variable) and maxout.ndim == 4
assert isinstance(gz, Variable) and gz.ndim == 4 assert isinstance(gz, Variable) and gz.ndim == 4
#TODO CESAR: ASSERT assert isinstance(ws, Variable) and ws.ndim == 1
assert isinstance(stride, Variable) and stride.ndim == 1
assert isinstance(pad, Variable) and pad.ndim == 1
return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()]) return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()])
def perform(self, node, inp, out): def perform(self, node, inp, out):
...@@ -840,7 +843,7 @@ class AveragePoolGrad(PoolGrad): ...@@ -840,7 +843,7 @@ class AveragePoolGrad(PoolGrad):
# of MaxPoolGrad. They have to keep the same interface because of # of MaxPoolGrad. They have to keep the same interface because of
# the DownsampleFactorMaxGrad trick to keep old scripts working # the DownsampleFactorMaxGrad trick to keep old scripts working
# (see downsample.py for details on this). # (see downsample.py for details on this).
def make_node(self, x, gz, ws, stride=None, pad=(0, 0), dummy=None): # TODO CESAR check if it works! def make_node(self, x, gz, ws, stride=None, pad=(0, 0), dummy=None):
# make_node should only be called by the grad function of # make_node should only be called by the grad function of
# Pool, so these asserts should not fail. # Pool, so these asserts should not fail.
x = tensor.as_tensor_variable(x) x = tensor.as_tensor_variable(x)
...@@ -852,7 +855,9 @@ class AveragePoolGrad(PoolGrad): ...@@ -852,7 +855,9 @@ class AveragePoolGrad(PoolGrad):
pad = tensor.as_tensor_variable(pad) pad = tensor.as_tensor_variable(pad)
assert isinstance(x, Variable) and x.ndim == 4 assert isinstance(x, Variable) and x.ndim == 4
assert isinstance(gz, Variable) and gz.ndim == 4 assert isinstance(gz, Variable) and gz.ndim == 4
# TODO CESAR assert assert isinstance(ws, Variable) and ws.ndim == 1
assert isinstance(stride, Variable) and stride.ndim == 1
assert isinstance(pad, Variable) and pad.ndim == 1
return Apply(self, [x, gz, ws, stride, pad], [x.type()]) return Apply(self, [x, gz, ws, stride, pad], [x.type()])
def perform(self, node, inp, out): def perform(self, node, inp, out):
...@@ -940,13 +945,6 @@ class DownsampleFactorMaxGradGrad(OpenMPOp): ...@@ -940,13 +945,6 @@ class DownsampleFactorMaxGradGrad(OpenMPOp):
assert x.ndim == 4 assert x.ndim == 4
assert maxout.ndim == 4 assert maxout.ndim == 4
assert gz.ndim == 4 assert gz.ndim == 4
# TODO CESAR: How can we check the theano variables?
if isinstance(ws, (tuple, list)):
if not all([isinstance(w, integer_types) for w in ws]):
raise ValueError(
"Pool downsample parameters must be ints."
" Got %s" % str(ws))
if stride is None: if stride is None:
stride = ws stride = ws
if isinstance(pad, (tuple, list)): if isinstance(pad, (tuple, list)):
...@@ -954,8 +952,9 @@ class DownsampleFactorMaxGradGrad(OpenMPOp): ...@@ -954,8 +952,9 @@ class DownsampleFactorMaxGradGrad(OpenMPOp):
if pad != (0, 0) and not self.ignore_border: if pad != (0, 0) and not self.ignore_border:
raise NotImplementedError( raise NotImplementedError(
'padding works only with ignore_border=True') 'padding works only with ignore_border=True')
# TODO CESAR: Again, how can we check against theano variables? if isinstance(ws, (tuple, list)):
if pad[0] >= ws[0] or pad[1] >= ws[1]: #TODO CESAR this is wrong if ws is a theano variable ws = tuple(ws)
if pad[0] >= ws[0] or pad[1] >= ws[1]:
raise NotImplementedError( raise NotImplementedError(
'padding_h and padding_w must be smaller than strides') 'padding_h and padding_w must be smaller than strides')
ws = tensor.as_tensor_variable(ws) ws = tensor.as_tensor_variable(ws)
...@@ -964,7 +963,12 @@ class DownsampleFactorMaxGradGrad(OpenMPOp): ...@@ -964,7 +963,12 @@ class DownsampleFactorMaxGradGrad(OpenMPOp):
assert ws.ndim == 1 assert ws.ndim == 1
assert stride.ndim == 1 assert stride.ndim == 1
assert pad.ndim == 1 assert pad.ndim == 1
if not ws.dtype.startswith('int'):
raise TypeError('Pool downsample parameters must be ints.')
if not stride.dtype.startswith('int'):
raise TypeError('Stride parameters must be ints.')
if not pad.dtype.startswith('int'):
raise TypeError('Padding parameters must be ints.')
return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()]) return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()])
def perform(self, node, inp, out): def perform(self, node, inp, out):
......
Markdown formatting is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment