提交 e980fdd0 authored 作者: Nicolas Ballas's avatar Nicolas Ballas 提交者: Pascal Lamblin

fix flake8

上级 136153f4
import unittest
import numpy
import copy
import itertools
import theano
import theano.tensor as T
from theano.tests import unittest_tools as utt
from nose.plugins.skip import SkipTest
import theano.tensor.nnet.conv as conv_ref
import theano.tensor.nnet.abstract_conv2d as conv
from theano.sandbox.cuda import float32_shared_constructor as gpu_shared
from theano.compile import shared as cpu_shared
from theano.sandbox.cuda.tests.test_conv_cuda_ndarray import py_conv
from theano.sandbox.cuda.dnn import dnn_available, dnn_conv, dnn_gradweight, dnn_gradinput
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
......@@ -29,8 +20,8 @@ else:
class TestConv2d(unittest.TestCase):
def setUp(self):
super(TestConv2d, self).setUp()
super(TestConv2d, self).setUp()
self.inputs_shapes = [(8, 1, 12, 12), (8, 1, 18, 18), (2, 1, 4, 4),
(6, 1, 10, 11), (2, 1, 6, 5), (1, 5, 9, 9)]
self.filters_shapes = [(5, 1, 2, 2), (4, 1, 3, 3), (2, 1, 3, 3),
......@@ -39,8 +30,8 @@ class TestConv2d(unittest.TestCase):
self.border_modes = ["valid", "full", (0, 0), (1, 1), (5, 5), (5, 2)]
self.filters_flip = [True, False]
def get_output_shape(self, inputs_shape, filters_shape, subsample, border_mode):
if border_mode == "valid":
border_mode = (0, 0)
if border_mode == "full":
......@@ -49,7 +40,7 @@ class TestConv2d(unittest.TestCase):
num_filters = filters_shape[0]
return (batch_size, num_filters,) \
+ tuple(None if i is None or k is None
else ((i + 2*pad - k) // d + 1)
else ((i + 2 * pad - k) // d + 1)
for i, k, d, pad in zip(inputs_shape[2:], filters_shape[2:],
subsample, border_mode))
......@@ -79,7 +70,7 @@ class TestConv2d(unittest.TestCase):
c_ref = ref(inputs, filters,
border_mode=border_mode,
subsample=subsample,
conv_mode = conv_mode)
conv_mode=conv_mode)
c = conv.conv2d(inputs, filters,
border_mode=border_mode,
subsample=subsample,
......@@ -123,7 +114,7 @@ class TestConv2d(unittest.TestCase):
c = conv.AbstractConv2d_gradWeights(border_mode=border_mode,
filters_flip=filters_flip,
subsample=subsample,
imshp = imshp, kshp = kshp)
imshp=imshp, kshp=kshp)
c = c(inputs, output, filters_shape[-2:])
c_ref = ref(inputs, output,
filters_shape,
......@@ -144,12 +135,10 @@ class TestConv2d(unittest.TestCase):
utt.verify_grad(abstract_conv2d_gradweight, [inputs_val, output_val],
mode=mode, eps=1)
def run_gradinput(self, inputs_shape, filters_shape, output_shape, ref=dnn_gradinput,
subsample=(1, 1), filters_flip=True, verify_grad=True, mode=mode_without_gpu,
border_mode='valid', device='cpu', provide_shape = False):
output_val = numpy.random.random(output_shape).astype('float32')
filters_val = numpy.random.random(filters_shape).astype('float32')
if device == 'gpu':
......@@ -189,11 +178,10 @@ class TestConv2d(unittest.TestCase):
utt.verify_grad(abstract_conv2d_gradinputs, [filters_val, output_val],
mode=mode, eps=1)
def test_dnn_conv(self):
if not dnn_available():
return
mode=mode_with_gpu
mode = mode_with_gpu
# provide_shape is not used by the CuDNN implementation
provide_shape = False
......
......@@ -6,17 +6,14 @@ __docformat__ = "restructuredtext en"
import logging
import numpy
import theano
from theano.tensor import (as_tensor_variable, blas, get_scalar_constant_value,
patternbroadcast, NotScalarConstantError)
from theano.tensor import (as_tensor_variable, patternbroadcast)
from theano.tensor import TensorType
from theano.gof import Apply, Op
from theano.gof import local_optimizer
from theano.tensor.opt import register_specialize_device
## Cpu implementation
# Cpu implementation
from theano.tensor.nnet import conv2d as cpu_conv2d, ConvOp
from theano.tensor.nnet.ConvGrad3D import convGrad3D
from theano.tensor.nnet.ConvTransp3D import convTransp3D
......@@ -90,21 +87,18 @@ def conv2d(inputs,
of shape (batch size, output channels, output rows, output columns)
"""
### FIXME input shape/kernel shape
conv_op = AbstractConv2d(imshp=inputs_shape,
kshp=filters_shape,
bsize=batch_size,
border_mode=border_mode,
subsample=subsample,
filters_flip = filters_flip)
filters_flip=filters_flip)
return conv_op(inputs, filters)
class BaseAbstractConv2d(Op):
"""Base class for ConvInferace
FIXME
"""
Base class for ConvInterface
"""
check_broadcast = False
__props__ = ('border_mode', 'subsample', 'filters_flip', 'imshp', 'kshp', 'bsize')
......@@ -151,11 +145,8 @@ class BaseAbstractConv2d(Op):
return flops
class AbstractConv2d(BaseAbstractConv2d):
"""
FIXME
"""
def __init__(self,
imshp=None,
kshp=None,
......@@ -172,12 +163,10 @@ class AbstractConv2d(BaseAbstractConv2d):
if kern.type.ndim != 4:
raise TypeError('kern must be 4D tensor')
broadcastable=[img.broadcastable[0],
kern.broadcastable[0],
False, False]
#output = img.type.__class__(dtype=img.type.dtype,
# broadcastable=broadcastable)()
output = img.type.clone( broadcastable=broadcastable)()
broadcastable = [img.broadcastable[0],
kern.broadcastable[0],
False, False]
output = img.type.clone(broadcastable=broadcastable)()
return Apply(self, [img, kern], [output])
def perform(self, node, inp, out_):
......@@ -219,7 +208,7 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d):
super(AbstractConv2d_gradWeights, self).__init__(imshp, kshp, bsize,
border_mode, subsample, filters_flip)
## Update shape/height_width
# Update shape/height_width
def make_node(self, img, topgrad, shape):
if img.type.ndim != 4:
raise TypeError('img must be 4D tensor')
......@@ -231,9 +220,9 @@ class AbstractConv2d_gradWeights(BaseAbstractConv2d):
' or border_mode == "half"')
shape = as_tensor_variable(shape)
broadcastable=[topgrad.broadcastable[1],
img.broadcastable[1],
False, False]
broadcastable = [topgrad.broadcastable[1],
img.broadcastable[1],
False, False]
output = img.type.clone(broadcastable=broadcastable)()
return Apply(self, [img, topgrad, shape], [output])
......@@ -280,7 +269,7 @@ class AbstractConv2d_gradInputs(BaseAbstractConv2d):
super(AbstractConv2d_gradInputs, self).__init__(imshp, kshp, bsize,
border_mode, subsample, filters_flip)
## Update shape/height_width
# Update shape/height_width
def make_node(self, kern, topgrad, shape):
if kern.type.ndim != 4:
raise TypeError('kern must be 4D tensor')
......@@ -289,7 +278,6 @@ class AbstractConv2d_gradInputs(BaseAbstractConv2d):
if self.subsample != (1, 1) and shape is None:
raise ValueError('shape must be given if subsample != (1, 1)')
shape = as_tensor_variable(shape)
broadcastable = [topgrad.type.broadcastable[0],
kern.type.broadcastable[1],
......@@ -297,7 +285,6 @@ class AbstractConv2d_gradInputs(BaseAbstractConv2d):
output = kern.type.clone(broadcastable=broadcastable)()
return Apply(self, [kern, topgrad, shape], [output])
def perform(self, node, inp, out_):
    """Never executed: this is an abstract placeholder Op.

    ``AbstractConv2d_gradInputs`` is meant to be replaced by a concrete
    implementation (CPU ``ConvOp`` / cuDNN) during graph optimization.
    Reaching ``perform`` means no optimization matched this node.

    Raises
    ------
    NotImplementedError
        Always — with a message identifying the failing Op.
    """
    # Fixed: the message previously said "AbstractConv2d_gradWeight",
    # but this method belongs to AbstractConv2d_gradInputs.
    raise NotImplementedError(
        'AbstractConv2d_gradInputs theano optimization failed')
......@@ -316,7 +303,8 @@ class AbstractConv2d_gradInputs(BaseAbstractConv2d):
def connection_pattern(self, node):
    """Report which inputs are connected to the output.

    The kernel and top-gradient inputs carry gradients; the third input
    (the spatial height/width shape) is not differentiable.
    """
    # kern -> connected, topgrad -> connected, shape -> disconnected
    connected = (True, True, False)
    return [[1 if flag else 0] for flag in connected]
### Cpu Optmization
# Cpu Optimization
@local_optimizer([AbstractConv2d])
def local_conv2d_cpu(node):
......@@ -324,8 +312,8 @@ def local_conv2d_cpu(node):
return None
img, kern = node.inputs
if (not isinstance(img.type, TensorType) or
not isinstance(kern.type, TensorType)):
if ((not isinstance(img.type, TensorType) or
not isinstance(kern.type, TensorType))):
return None
if node.op.border_mode not in ['full', 'valid']:
return None
......@@ -346,8 +334,8 @@ def local_conv2d_gradweight_cpu(node):
img, topgrad, shape = node.inputs
if (not isinstance(img.type, TensorType) or
not isinstance(topgrad.type, TensorType)):
if ((not isinstance(img.type, TensorType) or
not isinstance(topgrad.type, TensorType))):
return None
if node.op.border_mode not in ['full', 'valid']:
return None
......@@ -397,11 +385,11 @@ def local_conv2d_gradweight_cpu(node):
# We cannot infer the shapes
return None
####### Determine gradient on kernels ########
# Determine gradient on kernels
assert len(op_imshp) == 4 and len(op_kshp) == 4
outshp = ConvOp.getOutputShape(op_imshp[2:],
op_kshp[2:], node.op.subsample,
op_kshp[2:], node.op.subsample,
node.op.border_mode)
fulloutshp = ConvOp.getOutputShape(op_imshp[2:],
op_kshp[2:], (1, 1),
......@@ -455,8 +443,8 @@ register_specialize_device(local_conv2d_gradweight_cpu)
def local_conv2d_gradinputs_cpu(node):
kern, topgrad, shape = node.inputs
if (not isinstance(kern.type, TensorType) or
not isinstance(topgrad.type, TensorType)):
if ((not isinstance(kern.type, TensorType) or
not isinstance(topgrad.type, TensorType))):
return None
if node.op.border_mode not in ['full', 'valid']:
return None
......@@ -464,7 +452,7 @@ def local_conv2d_gradinputs_cpu(node):
# Not tested yet
return None
### Conv 3d implementation, needed when subsample > 2
# Conv 3d implementation, needed when subsample > 2
if node.op.border_mode == 'valid' and node.op.subsample != (1, 1):
kern = kern[:, :, ::-1, ::-1]
shuffled_kern = kern.dimshuffle(0, 2, 3, 'x', 1)
......@@ -479,7 +467,7 @@ def local_conv2d_gradinputs_cpu(node):
rval = patternbroadcast(rval, node.outputs[0].broadcastable)
return [rval]
### Conv2d Implementation
# Conv2d Implementation
dx, dy = node.op.subsample
if dx not in (1, 2) or dy not in (1, 2):
# Not implemented in the gradient of ConvOp
......@@ -506,7 +494,7 @@ def local_conv2d_gradinputs_cpu(node):
filters = filters[:, :, ::-1, ::-1]
outshp = ConvOp.getOutputShape(op_imshp[2:],
op_kshp[2:], node.op.subsample,
op_kshp[2:], node.op.subsample,
node.op.border_mode)
fulloutshp = ConvOp.getOutputShape(op_imshp[2:],
op_kshp[2:], (1, 1),
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论