提交 bc0f3a93 authored 作者: Reyhane Askari

removed with_stack_trace and other small changes

上级 5d23dc6f
......@@ -2903,7 +2903,7 @@ def pre_greedy_local_optimizer(list_optimizations, out):
def copy_stack_trace(from_var, to_var):
"""
Copies the stack trace from one or more tensor variables to
one or more tensor variables.
one or more tensor variables and returns the destination variables.
Parameters
----------
......@@ -2947,34 +2947,6 @@ def copy_stack_trace(from_var, to_var):
# Copy over stack traces from from_var to each variable to
# to_var, including the stack_trace of the to_var before
to_var.tag.trace = getattr(to_var.tag, 'trace', []) + tr
def with_stack_trace(from_var, to_var):
    """
    Copies the stack trace from one or more tensor variables to
    one or more tensor variables and returns the destination variables.

    Parameters
    ----------
    from_var
        Tensor variable or list of tensor variables to copy stack traces from.
    to_var
        Tensor variable or list of tensor variables to copy stack traces to.

    Returns
    -------
    tensor variable or list of tensor variables
        `to_var`, augmented with the stack traces from `from_var`.

    Notes
    -----
    The stacktrace is assumed to be of the form of a list of lists
    of tuples. Each tuple contains the filename, line number, function name
    and so on. Each list of tuples contains the tuples belonging to a
    particular variable.
    """
    # copy_stack_trace mutates to_var's tag in place; this wrapper only
    # adds the fluent-style return of the destination variable(s).
    copy_stack_trace(from_var, to_var)
    return to_var
......
......@@ -15,7 +15,7 @@ from theano.tensor.basic import (
from theano.gof import HideC, COp, ParamsType
from theano.gof.utils import MethodNotDefined
from theano.gof.opt import with_stack_trace
from theano.gof.opt import copy_stack_trace
from collections import deque
......@@ -76,11 +76,11 @@ def as_gpuarray_variable(x, context_name):
# If we couldn't deal with transfers, then maybe it's a tensor
if isinstance(x.type, tensor.TensorType):
return with_stack_trace(x, GpuFromHost(context_name)(x))
return copy_stack_trace(x, GpuFromHost(context_name)(x))
# Try _as_GpuArrayVariable if possible
if hasattr(x, '_as_GpuArrayVariable'):
return with_stack_trace(x, x._as_GpuArrayVariable(context_name))
return copy_stack_trace(x, x._as_GpuArrayVariable(context_name))
# If it didn't work try for a constant
ctx = get_context(context_name)
......
......@@ -1831,20 +1831,17 @@ class GpuCorr3dMM_gradInputs(BaseGpuCorr3dMM):
@inplace_allocempty(GpuGemv, 0)
def local_inplace_gpuagemv(node, inputs):
with inherit_stack_trace(node.outputs):
return [gpugemv_inplace(*inputs)]
return [gpugemv_inplace(*inputs)]
@inplace_allocempty(GpuGemm, 0)
def local_inplace_gpuagemm(node, inputs):
with inherit_stack_trace(node.outputs):
return [gpugemm_inplace(*inputs)]
return [gpugemm_inplace(*inputs)]
@inplace_allocempty(GpuGer, 0)
def local_inplace_gpuager(node, inputs):
with inherit_stack_trace(node.outputs):
return [gpuger_inplace(*inputs)]
return [gpuger_inplace(*inputs)]
@inplace_allocempty(GpuGemmBatch, 0)
......
......@@ -16,7 +16,7 @@ from theano.gof import (local_optimizer, EquilibriumDB, TopoOptimizer,
LocalGroupDB,
SequenceDB, Optimizer, DB, toolbox, graph)
from theano.gof.opt import (LocalMetaOptimizer, copy_stack_trace,
with_stack_trace, inherit_stack_trace)
inherit_stack_trace)
from theano.ifelse import IfElse
from theano.misc.ordered_set import OrderedSet
......@@ -468,7 +468,7 @@ class GraphToGPU(Optimizer):
new_o.owner.inputs[0].type == o.type):
new_o = new_o.owner.inputs[0]
else:
new_o = with_stack_trace(o, safe_to_cpu(new_o))
new_o = copy_stack_trace(o, safe_to_cpu(new_o))
new_nodes.append(new_o)
fgraph.replace_all_validate(zip(fgraph.outputs, new_nodes),
reason=self.__class__.__name__)
......
......@@ -43,7 +43,7 @@ from theano.tensor import DimShuffle, Subtensor
from theano.tensor.opt import register_uncanonicalize
from theano import scalar as scal
from theano.gof.opt import copy_stack_trace, with_stack_trace
from theano.gof.opt import copy_stack_trace
_logger = logging.getLogger('theano.tensor.opt')
......@@ -89,7 +89,7 @@ def local_max_to_min(node):
neg = max.owner.inputs[0]
if neg.owner and neg.owner.op == T.neg:
new = CAReduce(scal.minimum, max.owner.op.axis)(neg.owner.inputs[0])
return [with_stack_trace(node.outputs[0], new)]
return [copy_stack_trace(node.outputs[0], new)]
return False
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论