提交 a41fc8b0 authored 作者: Vincent Michalski's avatar Vincent Michalski

first try

上级 dd41f3e9
...@@ -36,4 +36,4 @@ distribute-*.tar.gz ...@@ -36,4 +36,4 @@ distribute-*.tar.gz
Theano.suo Theano.suo
.ipynb_checkpoints .ipynb_checkpoints
.pydevproject .pydevproject
.ropeproject
...@@ -81,6 +81,7 @@ class TestConv2d(unittest.TestCase): ...@@ -81,6 +81,7 @@ class TestConv2d(unittest.TestCase):
filter_flip=filter_flip, filter_flip=filter_flip,
input_shape=imshp, input_shape=imshp,
filter_shape=kshp) filter_shape=kshp)
self.assertTrue(hasattr(c.tag, 'trace'))
f_ref = theano.function([], c_ref, mode=mode) f_ref = theano.function([], c_ref, mode=mode)
f = theano.function([], c, mode) f = theano.function([], c, mode)
...@@ -124,6 +125,7 @@ class TestConv2d(unittest.TestCase): ...@@ -124,6 +125,7 @@ class TestConv2d(unittest.TestCase):
filter_flip=filter_flip, filter_flip=filter_flip,
subsample=subsample, subsample=subsample,
imshp=imshp, kshp=kshp) imshp=imshp, kshp=kshp)
self.assertTrue(hasattr(c.tag, 'trace'))
c = c(inputs, output, filters_shape[-2:]) c = c(inputs, output, filters_shape[-2:])
c_ref = ref(inputs, output, c_ref = ref(inputs, output,
filters_shape, filters_shape,
...@@ -176,6 +178,7 @@ class TestConv2d(unittest.TestCase): ...@@ -176,6 +178,7 @@ class TestConv2d(unittest.TestCase):
subsample=subsample, subsample=subsample,
filter_flip=filter_flip, filter_flip=filter_flip,
imshp=imshp, kshp=kshp) imshp=imshp, kshp=kshp)
self.assertTrue(hasattr(c.tag, 'trace'))
c = c(filters, output, inputs_shape[-2:]) c = c(filters, output, inputs_shape[-2:])
c_ref = ref(filters, output, inputs_shape, c_ref = ref(filters, output, inputs_shape,
border_mode=border_mode, subsample=subsample, border_mode=border_mode, subsample=subsample,
......
...@@ -17,7 +17,8 @@ from theano.tensor.nnet.abstract_conv import (AbstractConv2d, ...@@ -17,7 +17,8 @@ from theano.tensor.nnet.abstract_conv import (AbstractConv2d,
AbstractConv2d_gradWeights, AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs) AbstractConv2d_gradInputs)
from theano.tensor.nnet.abstract_conv import get_conv_output_shape from theano.tensor.nnet.abstract_conv import get_conv_output_shape
from theano.tensor.opt import register_specialize_device from theano.tensor.opt import (copy_stack_trace,
register_specialize_device)
from theano.tensor import TensorType from theano.tensor import TensorType
# Cpu implementation # Cpu implementation
...@@ -75,6 +76,7 @@ def local_abstractconv_gemm(node): ...@@ -75,6 +76,7 @@ def local_abstractconv_gemm(node):
kern = kern[:, :, ::-1, ::-1] kern = kern[:, :, ::-1, ::-1]
rval = CorrMM(border_mode=node.op.border_mode, rval = CorrMM(border_mode=node.op.border_mode,
subsample=node.op.subsample)(img, kern) subsample=node.op.subsample)(img, kern)
copy_stack_trace(node.outputs[0], rval)
return [rval] return [rval]
...@@ -96,6 +98,7 @@ def local_abstractconv_gradweight_gemm(node): ...@@ -96,6 +98,7 @@ def local_abstractconv_gradweight_gemm(node):
if node.op.filter_flip: if node.op.filter_flip:
rval = rval[:, :, ::-1, ::-1] rval = rval[:, :, ::-1, ::-1]
rval = theano.tensor.patternbroadcast(rval, node.outputs[0].broadcastable) rval = theano.tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
copy_stack_trace(node.outputs[0], rval)
return [rval] return [rval]
...@@ -117,6 +120,7 @@ def local_abstractconv_gradinputs_gemm(node): ...@@ -117,6 +120,7 @@ def local_abstractconv_gradinputs_gemm(node):
rval = CorrMM_gradInputs(border_mode=node.op.border_mode, rval = CorrMM_gradInputs(border_mode=node.op.border_mode,
subsample=node.op.subsample)(kern, topgrad, subsample=node.op.subsample)(kern, topgrad,
shape) shape)
copy_stack_trace(node.outputs[0], rval)
return [rval] return [rval]
...@@ -141,6 +145,8 @@ def local_conv2d_cpu(node): ...@@ -141,6 +145,8 @@ def local_conv2d_cpu(node):
node.op.imshp, node.op.kshp, node.op.imshp, node.op.kshp,
border_mode=node.op.border_mode, border_mode=node.op.border_mode,
subsample=node.op.subsample) subsample=node.op.subsample)
copy_stack_trace(node.outputs[0], rval)
return [rval] return [rval]
...@@ -181,6 +187,7 @@ def local_conv2d_gradweight_cpu(node): ...@@ -181,6 +187,7 @@ def local_conv2d_gradweight_cpu(node):
rval = rval[:, :, ::-1, ::-1] rval = rval[:, :, ::-1, ::-1]
rval = theano.tensor.patternbroadcast(rval, rval = theano.tensor.patternbroadcast(rval,
node.outputs[0].broadcastable) node.outputs[0].broadcastable)
copy_stack_trace(node.outputs[0], rval)
return [rval] return [rval]
dx, dy = node.op.subsample dx, dy = node.op.subsample
...@@ -251,6 +258,8 @@ def local_conv2d_gradweight_cpu(node): ...@@ -251,6 +258,8 @@ def local_conv2d_gradweight_cpu(node):
res = res[:, :, ::-1, ::-1] res = res[:, :, ::-1, ::-1]
res = theano.tensor.patternbroadcast(res, node.outputs[0].broadcastable) res = theano.tensor.patternbroadcast(res, node.outputs[0].broadcastable)
copy_stack_trace(node.outputs[0], res)
return [res] return [res]
...@@ -284,6 +293,8 @@ def local_conv2d_gradinputs_cpu(node): ...@@ -284,6 +293,8 @@ def local_conv2d_gradinputs_cpu(node):
rval = rval.dimshuffle(0, 4, 1, 2) rval = rval.dimshuffle(0, 4, 1, 2)
rval = theano.tensor.patternbroadcast(rval, rval = theano.tensor.patternbroadcast(rval,
node.outputs[0].broadcastable) node.outputs[0].broadcastable)
copy_stack_trace(node.outputs[0], rval)
return [rval] return [rval]
# Conv2d Implementation # Conv2d Implementation
...@@ -333,6 +344,7 @@ def local_conv2d_gradinputs_cpu(node): ...@@ -333,6 +344,7 @@ def local_conv2d_gradinputs_cpu(node):
direction_hint='bprop inputs') direction_hint='bprop inputs')
din = din(topgrad, filters) din = din(topgrad, filters)
din = theano.tensor.patternbroadcast(din, node.outputs[0].broadcastable) din = theano.tensor.patternbroadcast(din, node.outputs[0].broadcastable)
copy_stack_trace(node.outputs[0], din)
return [din] return [din]
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论