Commit 54b5450d authored by Ian Goodfellow

fixed a bug in Conv3D where the wrong axis is marked as broadcastable

fixed a bug in ConvTransp3D where infer_shape transposes two of the axes
Parent 33be0600
...@@ -62,7 +62,7 @@ class Conv3D(theano.Op): ...@@ -62,7 +62,7 @@ class Conv3D(theano.Op):
b_ = T.as_tensor_variable(b) b_ = T.as_tensor_variable(b)
d_ = T.as_tensor_variable(d) d_ = T.as_tensor_variable(d)
node = theano.Apply(self, inputs=[V_, W_,b_,d_], outputs = [ T.TensorType(V_.dtype, (V_.broadcastable[0],W_.broadcastable[0],False,False,False))() ] ) node = theano.Apply(self, inputs=[V_, W_,b_,d_], outputs = [ T.TensorType(V_.dtype, (V_.broadcastable[0],False,False,False, W_.broadcastable[0]))() ] )
return node return node
...@@ -108,8 +108,10 @@ class Conv3D(theano.Op): ...@@ -108,8 +108,10 @@ class Conv3D(theano.Op):
output_width = T.floor( (vidWidth - filterWidth) / dc )+1 output_width = T.floor( (vidWidth - filterWidth) / dc )+1
output_dur = T.floor( (vidDur - filterDur) / dt ) +1 output_dur = T.floor( (vidDur - filterDur) / dt ) +1
rval = (batch_size, output_height, output_width, output_dur, output_channels )
return [(batch_size, output_height, output_width, output_dur, output_channels )] return [ rval ]
def c_support_code(self):
    """Return C support code required by this Op's generated C implementation.

    Provides the BLAS header declarations so the emitted C code can call
    BLAS routines. (Reconstructed: the diff viewer had fused duplicate
    copies of these lines into invalid Python.)
    """
    return blas_header_text()
...@@ -179,7 +181,7 @@ class Conv3D(theano.Op): ...@@ -179,7 +181,7 @@ class Conv3D(theano.Op):
if (%(W)s->dimensions[4] != inputChannels) if (%(W)s->dimensions[4] != inputChannels)
{ {
PyErr_Format(PyExc_ValueError, "Conv3D: W operates on a %%li channel image but the image has %%i channels.",%(W)s->dimensions[4],inputChannels); PyErr_Format(PyExc_ValueError, "Conv3D: W operates on a %%ld channel image but the image has %%d channels.",%(W)s->dimensions[4],inputChannels);
%(fail)s %(fail)s
} }
......
...@@ -37,7 +37,7 @@ class ConvTransp3D(theano.Op): ...@@ -37,7 +37,7 @@ class ConvTransp3D(theano.Op):
def infer_shape(self, node, input_shapes):
    """Infer the output shape of ConvTransp3D without running the Op.

    Returns a one-element list holding the output shape tuple, laid out as
    (batch, height, width, duration, channels): the batch size comes from
    H's shape, the three spatial/temporal extents come from the RShape
    input variable, and the channel count comes from W's axis 1.

    Fix per this commit: the channel axis (W_shape[1]) belongs LAST, after
    the RShape extents — the previous version transposed two of the axes
    by placing it second.

    NOTE(review): extents are read from the RShape *variable* in
    node.inputs (it directly holds the requested output sizes), not from
    RShape_shape, which is only the shape of that 3-vector.
    """
    W, b, d, H, RShape = node.inputs
    W_shape, b_shape, d_shape, H_shape, RShape_shape = input_shapes
    return [(H_shape[0], RShape[0], RShape[1], RShape[2], W_shape[1])]
def grad(self,inputs, output_gradients): def grad(self,inputs, output_gradients):
W,b,d,H, RShape = inputs W,b,d,H, RShape = inputs
......
Markdown formatting is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment