Commit 692f9012, authored by Iban Harlouchet, committed by Frederic

flake8 for theano/tensor/nnet/Conv3D.py

Parent commit: 70b5f2c1
from __future__ import print_function
import numpy as N
from six.moves import xrange
from . import ConvGrad3D
from . import ConvTransp3D
import theano
from theano.tensor import basic as T
import numpy as N
#from util import strutil
# from util import strutil
from theano.tensor.blas_headers import blas_header_text, blas_header_version
from theano.tensor.blas import ldflags
from theano.misc import strutil
......@@ -72,12 +77,12 @@ class Conv3D(theano.Op):
def grad(self, inputs, output_gradients):
V, W, b, d = inputs
dCdH , = output_gradients
dCdH, = output_gradients
# make all of these ops support broadcasting of scalar b to vector b and eplace the zeros_like in all their grads
# print dCdH.broadcastable
# print "dCdH.broadcastable"
# quit(-1)
#dCdH = printing.Print("dCdH = ",["shape"])
# dCdH = printing.Print("dCdH = ",["shape"])
# Make sure the broadcasting pattern of the gradient is the the same
# as the initial variable
......@@ -88,10 +93,11 @@ class Conv3D(theano.Op):
dCdW = T.patternbroadcast(dCdW, W.broadcastable)
dCdb = T.sum(dCdH, axis=(0, 1, 2, 3))
dCdb = T.patternbroadcast(dCdb, b.broadcastable)
dCdd = grad_undefined(self, 3, inputs[3],
"The gradient of Conv3D with respect to the convolution" +\
" stride is undefined because Conv3D is only defined for" +\
" integer strides.")
dCdd = grad_undefined(
self, 3, inputs[3],
"The gradient of Conv3D with respect to the convolution"
" stride is undefined because Conv3D is only defined for"
" integer strides.")
if 'name' in dir(dCdH) and dCdH.name is not None:
dCdH_name = dCdH.name
......@@ -113,11 +119,13 @@ class Conv3D(theano.Op):
else:
b_name = 'anon_b'
dCdV.name = 'Conv3D_dCdV(dCdH='+dCdH_name+',V='+V_name+')'
dCdW.name = 'Conv3D_dCdW(dCdH='+dCdH_name+',V='+V_name+',W='+W_name+')'
dCdb.name = 'Conv3D_dCdb(dCdH='+dCdH_name+',V='+V_name+',W='+W_name+',b='+b_name+')'
dCdV.name = 'Conv3D_dCdV(dCdH=' + dCdH_name + ',V=' + V_name + ')'
dCdW.name = ('Conv3D_dCdW(dCdH=' + dCdH_name + ',V=' + V_name +
',W=' + W_name + ')')
dCdb.name = ('Conv3D_dCdb(dCdH=' + dCdH_name + ',V=' + V_name +
',W=' + W_name + ',b=' + b_name + ')')
return [ dCdV, dCdW, dCdb, dCdd ]
return [dCdV, dCdW, dCdb, dCdd]
def perform(self, node, inputs, output_storage):
V, W, b, d = inputs
......@@ -144,7 +152,7 @@ class Conv3D(theano.Op):
output_width = T.floor((vidWidth - filterWidth) // dc) + 1
output_dur = T.floor((vidDur - filterDur) // dt) + 1
rval = (batch_size, output_height, output_width, output_dur, output_channels )
rval = (batch_size, output_height, output_width, output_dur, output_channels)
return [rval]
......@@ -155,7 +163,7 @@ class Conv3D(theano.Op):
return ldflags()
def c_compile_args(self):
    """Return the compiler flags used when building this Op's C code.

    Delegates to ``theano.tensor.blas.ldflags`` with ``libs=False`` and
    ``flags=True`` so that only compile-time flags are returned; the
    link-time library list is provided separately (see ``c_libraries``,
    which calls ``ldflags()`` with its defaults).
    """
    # The scraped diff showed this assignment duplicated (old + new line
    # of the same hunk); a single call is all that is needed.
    return ldflags(libs=False, flags=True)
def c_lib_dirs(self):
......@@ -170,7 +178,7 @@ class Conv3D(theano.Op):
H = outputs[0]
codeSource = """
codeSource = """
///////////// < code generated by Conv3D >
//printf("\t\t\t\tConv3D c code\\n");
......@@ -320,13 +328,13 @@ class Conv3D(theano.Op):
VV, WV, bv, dv = node.inputs
HV = node.outputs[0]
if (theano.config.blas.ldflags and
VV.dtype == WV.dtype and HV.dtype == VV.dtype):
VV.dtype == WV.dtype and HV.dtype == VV.dtype):
if VV.dtype == 'float64':
gemv = 'dgemv_'
elif VV.dtype == 'float32':
gemv = 'sgemv_'
else:
raise Exception('Unrecognized dtype for convolution '+V.value.dtype)
raise Exception('Unrecognized dtype for convolution ' + V.value.dtype)
codeSource += """
if (inputChannels > 20 && outputChannels > 20 && ws4 == sizeof(ELEM_AT(%(W)s,0)))
......@@ -571,7 +579,7 @@ def computeH(V, W, b, d):
outputChannels = W.shape[0]
inputChannels = V.shape[4]
if W.shape[4] != inputChannels:
raise Exception("W.shape[4] = "+str(W.shape[4])+" but inputChannels = "+str(inputChannels))
raise Exception("W.shape[4] = " + str(W.shape[4]) + " but inputChannels = " + str(inputChannels))
filterHeight = W.shape[1]
filterWidth = W.shape[2]
filterDur = W.shape[3]
......@@ -586,12 +594,12 @@ def computeH(V, W, b, d):
assert dy > 0
assert dt > 0
outputHeight = int( (vidHeight - filterHeight) / dx )+1
outputWidth = int( (vidWidth - filterWidth) / dy )+1
outputDur = int( (vidDur - filterDur) / dt ) + 1
outputHeight = int((vidHeight - filterHeight) / dx) + 1
outputWidth = int((vidWidth - filterWidth) / dy) + 1
outputDur = int((vidDur - filterDur) / dt) + 1
H = N.zeros( (batchSize, outputHeight,
outputWidth, outputDur, outputChannels ), dtype=V.dtype )
H = N.zeros((batchSize, outputHeight,
outputWidth, outputDur, outputChannels), dtype=V.dtype)
# H[i,j,x,y,t] = b_j + sum_k sum_l sum_m sum_z W[j,z,k,l,m] V[i,z, dx*x+k,dy*y+l,dt*t+m]
for i in xrange(0, H.shape[0]):
......@@ -610,12 +618,8 @@ def computeH(V, W, b, d):
# if (i,j,x,y,t) == (0,0,0,0,0):
# print (( W[j,z,k,l,m] , V[i,z,d[0]*x+k,d[1]*y+l,d[2]*t+m] ), (k,l,m) )
w = W[j, k, l, m, z]
v = V[i, d[0]*x+k, d[1]*y+l, d[2]*t+m, z]
v = V[i, d[0] * x + k, d[1] * y + l, d[2] * t + m, z]
# if i == 0 and x == 0 and y == 0 and t == 0 and j == 0:
# print 'setting H[0] += '+str(w*v)+' W['+str((j,z,k,l,m))+']='+str(w)+' V['+str((i,d[0]*x+k,d[1]*y+l,d[2]*t+m,z))+']='+str(v)
H[i, x, y, t, j] += w * v
return H
from . import ConvGrad3D
from . import ConvTransp3D
......@@ -88,7 +88,6 @@ whitelist_flake8 = [
"tensor/signal/conv.py",
"tensor/signal/tests/test_conv.py",
"tensor/signal/tests/test_downsample.py",
"tensor/nnet/Conv3D.py",
"tensor/nnet/__init__.py",
"tensor/nnet/ConvTransp3D.py",
"tensor/nnet/sigm.py",
......
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment