Commit 82d4e9a7, authored by carriepl

Merge pull request #2909 from aalmah/ticket_2347

improving debugprint for scan ops
...@@ -25,8 +25,7 @@ from theano.configparser import (config, AddConfigVar, BoolParam, IntParam, ...@@ -25,8 +25,7 @@ from theano.configparser import (config, AddConfigVar, BoolParam, IntParam,
StrParam) StrParam)
from theano.compile.function_module import ( from theano.compile.function_module import (
FunctionMaker, Function, infer_reuse_pattern, FunctionMaker, Function, infer_reuse_pattern,
SymbolicInputKit, SymbolicOutput, Supervisor, std_fgraph SymbolicInputKit, SymbolicOutput, Supervisor, std_fgraph)
)
from theano.compile.mode import Mode, register_mode from theano.compile.mode import Mode, register_mode
from theano.compile.ops import OutputGuard from theano.compile.ops import OutputGuard
...@@ -521,7 +520,8 @@ def debugprint(r, prefix='', depth=-1, done=None, print_type=False, ...@@ -521,7 +520,8 @@ def debugprint(r, prefix='', depth=-1, done=None, print_type=False,
file=sys.stdout, print_destroy_map=False, file=sys.stdout, print_destroy_map=False,
print_view_map=False, order=None, ids='CHAR', print_view_map=False, order=None, ids='CHAR',
stop_on_name=False, prefix_child=None, stop_on_name=False, prefix_child=None,
scan_ops=None, profile=None): scan_ops=None, profile=None,
scan_inner_to_outer_inputs=None):
"""Print the graph leading to `r` to given depth. """Print the graph leading to `r` to given depth.
:param r: Variable instance :param r: Variable instance
...@@ -544,6 +544,9 @@ def debugprint(r, prefix='', depth=-1, done=None, print_type=False, ...@@ -544,6 +544,9 @@ def debugprint(r, prefix='', depth=-1, done=None, print_type=False,
we don't print anything below it. we don't print anything below it.
:param scan_ops: Scan ops in the graph will be added inside this list :param scan_ops: Scan ops in the graph will be added inside this list
for later printing purposes. for later printing purposes.
:param scan_inner_to_outer_inputs: a dictionary mapping a scan op's
inner-function inputs to the scan op's inputs (outer inputs), for
printing purposes.
""" """
if depth == 0: if depth == 0:
...@@ -578,6 +581,7 @@ def debugprint(r, prefix='', depth=-1, done=None, print_type=False, ...@@ -578,6 +581,7 @@ def debugprint(r, prefix='', depth=-1, done=None, print_type=False,
elif ids == "": elif ids == "":
id_str = "" id_str = ""
done[obj] = id_str done[obj] = id_str
return id_str return id_str
if hasattr(r.owner, 'op'): if hasattr(r.owner, 'op'):
...@@ -678,16 +682,30 @@ def debugprint(r, prefix='', depth=-1, done=None, print_type=False, ...@@ -678,16 +682,30 @@ def debugprint(r, prefix='', depth=-1, done=None, print_type=False,
theano.scan_module.scan_op.Scan): theano.scan_module.scan_op.Scan):
scan_ops.append(i) scan_ops.append(i)
debugprint(i, new_prefix, depth=depth - 1, done=done, debugprint(
print_type=print_type, file=file, order=order, i, new_prefix, depth=depth - 1, done=done,
ids=ids, stop_on_name=stop_on_name, print_type=print_type, file=file, order=order,
prefix_child=new_prefix_child, ids=ids, stop_on_name=stop_on_name,
scan_ops=scan_ops, profile=profile) prefix_child=new_prefix_child, scan_ops=scan_ops,
profile=profile,
scan_inner_to_outer_inputs=scan_inner_to_outer_inputs)
else: else:
# this is an input variable if scan_inner_to_outer_inputs is not None and\
id_str = get_id_str(r) r in scan_inner_to_outer_inputs:
print('%s%s %s%s' % (prefix, r, id_str, type_str), file=file)
id_str = get_id_str(r)
outer_r = scan_inner_to_outer_inputs[r]
if hasattr(outer_r.owner, 'op'):
outer_id_str = get_id_str(outer_r.owner)
else:
outer_id_str = get_id_str(outer_r)
print('%s%s %s%s -> %s' % (prefix, r, id_str, type_str,
outer_id_str), file=file)
else:
# this is an input variable
id_str = get_id_str(r)
print('%s%s %s%s' % (prefix, r, id_str, type_str), file=file)
return file return file
...@@ -1601,7 +1619,7 @@ class _VariableEquivalenceTracker(object): ...@@ -1601,7 +1619,7 @@ class _VariableEquivalenceTracker(object):
r, r,
debugprint(r, prefix=' ', depth=6, debugprint(r, prefix=' ', depth=6,
file=StringIO(), done=done).getvalue(), file=StringIO(), done=done).getvalue(),
debugprint(new_r, prefix=' ', depth=6, debugprint(new_r, prefix=' ', depth=6,
file=StringIO(), done=done).getvalue())) file=StringIO(), done=done).getvalue()))
self.replaced_by[r].append((reason, new_r)) self.replaced_by[r].append((reason, new_r))
......
...@@ -149,6 +149,7 @@ N.B.: ...@@ -149,6 +149,7 @@ N.B.:
file=_file, order=order, ids=ids, file=_file, order=order, ids=ids,
scan_ops=scan_ops, stop_on_name=stop_on_name, scan_ops=scan_ops, stop_on_name=stop_on_name,
profile=p) profile=p)
if len(scan_ops) > 0: if len(scan_ops) > 0:
print("", file=_file) print("", file=_file)
new_prefix = ' >' new_prefix = ' >'
...@@ -156,27 +157,47 @@ N.B.: ...@@ -156,27 +157,47 @@ N.B.:
print("Inner graphs of the scan ops:", file=_file) print("Inner graphs of the scan ops:", file=_file)
for s in scan_ops: for s in scan_ops:
# prepare a dict which maps the scan op's inner inputs
# to its outer inputs.
if hasattr(s.owner.op, 'fn'):
# If the op was compiled, print the optimized version.
inner_inputs = s.owner.op.fn.maker.fgraph.inputs
else:
inner_inputs = s.owner.op.inputs
outer_inputs = s.owner.inputs
inner_to_outer_inputs = \
dict([(inner_inputs[i], outer_inputs[o])
for i, o in
s.owner.op.var_mappings['outer_inp_from_inner_inp']
.items()])
print("", file=_file) print("", file=_file)
debugmode.debugprint(s, depth=depth, done=done, debugmode.debugprint(
print_type=print_type, s, depth=depth, done=done,
file=_file, ids=ids, print_type=print_type,
scan_ops=scan_ops, stop_on_name=stop_on_name) file=_file, ids=ids,
scan_ops=scan_ops,
stop_on_name=stop_on_name,
scan_inner_to_outer_inputs=inner_to_outer_inputs)
if hasattr(s.owner.op, 'fn'): if hasattr(s.owner.op, 'fn'):
# If the op was compiled, print the optimized version. # If the op was compiled, print the optimized version.
outputs = s.owner.op.fn.maker.fgraph.outputs outputs = s.owner.op.fn.maker.fgraph.outputs
else: else:
outputs = s.owner.op.outputs outputs = s.owner.op.outputs
for idx, i in enumerate(outputs): for idx, i in enumerate(outputs):
if hasattr(i, 'owner') and hasattr(i.owner, 'op'): if hasattr(i, 'owner') and hasattr(i.owner, 'op'):
if isinstance(i.owner.op, theano.scan_module.scan_op.Scan): if isinstance(i.owner.op, theano.scan_module.scan_op.Scan):
scan_ops.append(i) scan_ops.append(i)
debugmode.debugprint(r=i, prefix=new_prefix, debugmode.debugprint(
depth=depth, done=done, r=i, prefix=new_prefix,
print_type=print_type, file=_file, depth=depth, done=done,
ids=ids, stop_on_name=stop_on_name, print_type=print_type, file=_file,
prefix_child=new_prefix_child, ids=ids, stop_on_name=stop_on_name,
scan_ops=scan_ops) prefix_child=new_prefix_child,
scan_ops=scan_ops,
scan_inner_to_outer_inputs=inner_to_outer_inputs)
if file is _file: if file is _file:
return file return file
......
...@@ -11,6 +11,7 @@ import theano.tensor as tensor ...@@ -11,6 +11,7 @@ import theano.tensor as tensor
from theano.printing import min_informative_str, debugprint from theano.printing import min_informative_str, debugprint
from theano.compat.six import StringIO from theano.compat.six import StringIO
import numpy
def test_pydotprint_cond_highlight(): def test_pydotprint_cond_highlight():
...@@ -248,3 +249,457 @@ def test_debugprint(): ...@@ -248,3 +249,457 @@ def test_debugprint():
print('--' + reference + '--') print('--' + reference + '--')
assert s == reference assert s == reference
def test_scan_debugprint1():
    """debugprint of a simple scan (A**k by repeated elementwise product).

    Checks that the "Inner graphs of the scan ops" section maps each
    inner-graph input back to its outer input via the "-> [@X]" suffix.
    """
    k = tensor.iscalar("k")
    A = tensor.vector("A")

    # Symbolic description of the result: multiply the accumulator by A
    # at every step, k times.
    result, updates = theano.scan(
        fn=lambda prior_result, A: prior_result * A,
        outputs_info=tensor.ones_like(A),
        non_sequences=A,
        n_steps=k)

    final_result = result[-1]

    output_str = theano.printing.debugprint(final_result, file='str')
    # str.split already yields the list of lines; no manual append loop.
    lines = output_str.split('\n')

    expected_output = """Subtensor{int64} [@A] ''
|Subtensor{int64::} [@B] ''
| |for{cpu,scan_fn} [@C] ''
| | |k [@D]
| | |IncSubtensor{Set;:int64:} [@E] ''
| | | |Alloc [@F] ''
| | | | |TensorConstant{0.0} [@G]
| | | | |Elemwise{add,no_inplace} [@H] ''
| | | | | |k [@D]
| | | | | |Subtensor{int64} [@I] ''
| | | | | |Shape [@J] ''
| | | | | | |Rebroadcast{0} [@K] ''
| | | | | | |DimShuffle{x,0} [@L] ''
| | | | | | |Elemwise{second,no_inplace} [@M] ''
| | | | | | |A [@N]
| | | | | | |DimShuffle{x} [@O] ''
| | | | | | |TensorConstant{1.0} [@P]
| | | | | |Constant{0} [@Q]
| | | | |Subtensor{int64} [@R] ''
| | | | |Shape [@S] ''
| | | | | |Rebroadcast{0} [@K] ''
| | | | |Constant{1} [@T]
| | | |Rebroadcast{0} [@K] ''
| | | |ScalarFromTensor [@U] ''
| | | |Subtensor{int64} [@I] ''
| | |A [@N]
| |Constant{1} [@V]
|Constant{-1} [@W]

Inner graphs of the scan ops:

for{cpu,scan_fn} [@C] ''
>Elemwise{mul,no_inplace} [@X] ''
> |<TensorType(float64, vector)> [@Y] -> [@E]
> |A_copy [@Z] -> [@N]"""

    # NOTE(review): zip() stops at the shorter sequence, so extra
    # trailing lines on either side are not compared — confirm intended.
    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
def test_scan_debugprint2():
    """debugprint of a scan over two sequences (polynomial evaluation).

    Checks the main graph and the inner-to-outer input mapping
    ("-> [@X]") printed for the scan op's inner graph.
    """
    coefficients = theano.tensor.vector("coefficients")
    x = tensor.scalar("x")
    max_coefficients_supported = 10000

    # Generate the components of the polynomial:
    # coefficient * x ** power for each (coefficient, power) pair.
    components, updates = theano.scan(
        fn=lambda coefficient, power, free_variable:
            coefficient * (free_variable ** power),
        outputs_info=None,
        sequences=[coefficients,
                   theano.tensor.arange(max_coefficients_supported)],
        non_sequences=x)

    # Sum them up
    polynomial = components.sum()

    output_str = theano.printing.debugprint(polynomial, file='str')
    # str.split already yields the list of lines; no manual append loop.
    lines = output_str.split('\n')

    expected_output = """Sum{acc_dtype=float64} [@A] ''
|for{cpu,scan_fn} [@B] ''
|Elemwise{minimum,no_inplace} [@C] ''
| |Subtensor{int64} [@D] ''
| | |Shape [@E] ''
| | | |Subtensor{int64::} [@F] 'coefficients[0:]'
| | | |coefficients [@G]
| | | |Constant{0} [@H]
| | |Constant{0} [@I]
| |Subtensor{int64} [@J] ''
| |Shape [@K] ''
| | |Subtensor{int64::} [@L] ''
| | |ARange [@M] ''
| | | |TensorConstant{0} [@N]
| | | |TensorConstant{10000} [@O]
| | | |TensorConstant{1} [@P]
| | |Constant{0} [@Q]
| |Constant{0} [@R]
|Subtensor{:int64:} [@S] ''
| |Subtensor{int64::} [@F] 'coefficients[0:]'
| |ScalarFromTensor [@T] ''
| |Elemwise{minimum,no_inplace} [@C] ''
|Subtensor{:int64:} [@U] ''
| |Subtensor{int64::} [@L] ''
| |ScalarFromTensor [@V] ''
| |Elemwise{minimum,no_inplace} [@C] ''
|Elemwise{minimum,no_inplace} [@C] ''
|x [@W]

Inner graphs of the scan ops:

for{cpu,scan_fn} [@B] ''
>Elemwise{mul,no_inplace} [@X] ''
> |coefficients[t] [@Y] -> [@S]
> |Elemwise{pow,no_inplace} [@Z] ''
> |x_copy [@BA] -> [@W]
> |<TensorType(int16, scalar)> [@BB] -> [@U]"""

    # NOTE(review): zip() stops at the shorter sequence, so extra
    # trailing lines on either side are not compared — confirm intended.
    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
def test_scan_debugprint3():
    """debugprint of a scan nested inside another scan.

    Both inner graphs must be printed, each with its own
    inner-to-outer input mapping ("-> [@X]").
    """
    coefficients = theano.tensor.vector("coefficients")
    max_coefficients_supported = 10

    k = tensor.iscalar("k")
    A = tensor.vector("A")

    # compute A**k with an inner scan
    def compute_A_k(A, k):
        # Symbolic description of the result
        result, updates = theano.scan(
            fn=lambda prior_result, A: prior_result * A,
            outputs_info=tensor.ones_like(A),
            non_sequences=A,
            n_steps=k)

        A_k = result[-1]

        return A_k

    # Generate the components of the polynomial
    components, updates = theano.scan(
        fn=lambda coefficient, power, some_A, some_k:
            coefficient * (compute_A_k(some_A, some_k) ** power),
        outputs_info=None,
        sequences=[coefficients,
                   theano.tensor.arange(max_coefficients_supported)],
        non_sequences=[A, k])

    # Sum them up
    polynomial = components.sum()

    final_result = polynomial

    output_str = theano.printing.debugprint(final_result, file='str')
    # str.split already yields the list of lines; no manual append loop.
    lines = output_str.split('\n')

    expected_output = """Sum{acc_dtype=float64} [@A] ''
|for{cpu,scan_fn} [@B] ''
|Elemwise{minimum,no_inplace} [@C] ''
| |Subtensor{int64} [@D] ''
| | |Shape [@E] ''
| | | |Subtensor{int64::} [@F] 'coefficients[0:]'
| | | |coefficients [@G]
| | | |Constant{0} [@H]
| | |Constant{0} [@I]
| |Subtensor{int64} [@J] ''
| |Shape [@K] ''
| | |Subtensor{int64::} [@L] ''
| | |ARange [@M] ''
| | | |TensorConstant{0} [@N]
| | | |TensorConstant{10} [@O]
| | | |TensorConstant{1} [@P]
| | |Constant{0} [@Q]
| |Constant{0} [@R]
|Subtensor{:int64:} [@S] ''
| |Subtensor{int64::} [@F] 'coefficients[0:]'
| |ScalarFromTensor [@T] ''
| |Elemwise{minimum,no_inplace} [@C] ''
|Subtensor{:int64:} [@U] ''
| |Subtensor{int64::} [@L] ''
| |ScalarFromTensor [@V] ''
| |Elemwise{minimum,no_inplace} [@C] ''
|Elemwise{minimum,no_inplace} [@C] ''
|A [@W]
|k [@X]

Inner graphs of the scan ops:

for{cpu,scan_fn} [@B] ''
>Elemwise{mul,no_inplace} [@Y] ''
> |DimShuffle{x} [@Z] ''
> | |coefficients[t] [@BA] -> [@S]
> |Elemwise{pow,no_inplace} [@BB] ''
> |Subtensor{int64} [@BC] ''
> | |Subtensor{int64::} [@BD] ''
> | | |for{cpu,scan_fn} [@BE] ''
> | | | |k_copy [@BF] -> [@X]
> | | | |IncSubtensor{Set;:int64:} [@BG] ''
> | | | | |Alloc [@BH] ''
> | | | | | |TensorConstant{0.0} [@BI]
> | | | | | |Elemwise{add,no_inplace} [@BJ] ''
> | | | | | | |k_copy [@BF] -> [@X]
> | | | | | | |Subtensor{int64} [@BK] ''
> | | | | | | |Shape [@BL] ''
> | | | | | | | |Rebroadcast{0} [@BM] ''
> | | | | | | | |DimShuffle{x,0} [@BN] ''
> | | | | | | | |Elemwise{second,no_inplace} [@BO] ''
> | | | | | | | |A_copy [@BP] -> [@W]
> | | | | | | | |DimShuffle{x} [@BQ] ''
> | | | | | | | |TensorConstant{1.0} [@BR]
> | | | | | | |Constant{0} [@BS]
> | | | | | |Subtensor{int64} [@BT] ''
> | | | | | |Shape [@BU] ''
> | | | | | | |Rebroadcast{0} [@BM] ''
> | | | | | |Constant{1} [@BV]
> | | | | |Rebroadcast{0} [@BM] ''
> | | | | |ScalarFromTensor [@BW] ''
> | | | | |Subtensor{int64} [@BK] ''
> | | | |A_copy [@BP] -> [@W]
> | | |Constant{1} [@BX]
> | |Constant{-1} [@BY]
> |DimShuffle{x} [@BZ] ''
> |<TensorType(int8, scalar)> [@CA] -> [@U]

for{cpu,scan_fn} [@BE] ''
>Elemwise{mul,no_inplace} [@CB] ''
> |<TensorType(float64, vector)> [@CC] -> [@BG]
> |A_copy [@CD] -> [@BP]"""

    # NOTE(review): zip() stops at the shorter sequence, so extra
    # trailing lines on either side are not compared — confirm intended.
    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
def test_scan_debugprint4():
    """debugprint of a scan with two outputs and multiple taps.

    Each of the scan's two inner outputs must be printed, with inner
    inputs mapped to their outer counterparts ("-> [@X]").
    """
    def fn(a_m2, a_m1, b_m2, b_m1):
        return a_m1 + a_m2, b_m1 + b_m2

    a0 = theano.shared(numpy.arange(2))
    b0 = theano.shared(numpy.arange(2))

    (a, b), _ = theano.scan(
        fn,
        outputs_info=[{'initial': a0, 'taps': [-2, -1]},
                      {'initial': b0, 'taps': [-2, -1]}],
        n_steps=5)

    final_result = a + b

    output_str = theano.printing.debugprint(final_result, file='str')
    # str.split already yields the list of lines; no manual append loop.
    lines = output_str.split('\n')

    expected_output = """Elemwise{add,no_inplace} [@A] ''
|Subtensor{int64::} [@B] ''
| |for{cpu,scan_fn}.0 [@C] ''
| | |TensorConstant{5} [@D]
| | |IncSubtensor{Set;:int64:} [@E] ''
| | | |Alloc [@F] ''
| | | | |TensorConstant{0} [@G]
| | | | |Elemwise{add,no_inplace} [@H] ''
| | | | |TensorConstant{5} [@D]
| | | | |Subtensor{int64} [@I] ''
| | | | |Shape [@J] ''
| | | | | |Subtensor{:int64:} [@K] ''
| | | | | |<TensorType(int64, vector)> [@L]
| | | | | |Constant{2} [@M]
| | | | |Constant{0} [@N]
| | | |Subtensor{:int64:} [@K] ''
| | | |ScalarFromTensor [@O] ''
| | | |Subtensor{int64} [@I] ''
| | |IncSubtensor{Set;:int64:} [@P] ''
| | |Alloc [@Q] ''
| | | |TensorConstant{0} [@G]
| | | |Elemwise{add,no_inplace} [@R] ''
| | | |TensorConstant{5} [@D]
| | | |Subtensor{int64} [@S] ''
| | | |Shape [@T] ''
| | | | |Subtensor{:int64:} [@U] ''
| | | | |<TensorType(int64, vector)> [@V]
| | | | |Constant{2} [@W]
| | | |Constant{0} [@X]
| | |Subtensor{:int64:} [@U] ''
| | |ScalarFromTensor [@Y] ''
| | |Subtensor{int64} [@S] ''
| |Constant{2} [@Z]
|Subtensor{int64::} [@BA] ''
|for{cpu,scan_fn}.1 [@C] ''
|Constant{2} [@BB]

Inner graphs of the scan ops:

for{cpu,scan_fn}.0 [@C] ''
>Elemwise{add,no_inplace} [@BC] ''
> |<TensorType(int64, scalar)> [@BD] -> [@E]
> |<TensorType(int64, scalar)> [@BE] -> [@E]
>Elemwise{add,no_inplace} [@BF] ''
> |<TensorType(int64, scalar)> [@BG] -> [@P]
> |<TensorType(int64, scalar)> [@BH] -> [@P]

for{cpu,scan_fn}.1 [@C] ''
>Elemwise{add,no_inplace} [@BC] ''
>Elemwise{add,no_inplace} [@BF] ''"""

    # NOTE(review): zip() stops at the shorter sequence, so extra
    # trailing lines on either side are not compared — confirm intended.
    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
def test_scan_debugprint5():
    """debugprint of the gradient of a scan.

    The grad graph contains the original scan plus a grad-of-scan op;
    every inner-graph section must carry the inner-to-outer input
    mapping ("-> [@X]").
    """
    k = tensor.iscalar("k")
    A = tensor.vector("A")

    # Symbolic description of the result: A**k via repeated product.
    result, updates = theano.scan(
        fn=lambda prior_result, A: prior_result * A,
        outputs_info=tensor.ones_like(A),
        non_sequences=A,
        n_steps=k)

    final_result = tensor.grad(result[-1].sum(), A)

    output_str = theano.printing.debugprint(final_result, file='str')
    # str.split already yields the list of lines; no manual append loop.
    lines = output_str.split('\n')

    expected_output = """Subtensor{int64} [@A] ''
|for{cpu,grad_of_scan_fn}.1 [@B] ''
| |Elemwise{sub,no_inplace} [@C] ''
| | |Subtensor{int64} [@D] ''
| | | |Shape [@E] ''
| | | | |for{cpu,scan_fn} [@F] ''
| | | | |k [@G]
| | | | |IncSubtensor{Set;:int64:} [@H] ''
| | | | | |Alloc [@I] ''
| | | | | | |TensorConstant{0.0} [@J]
| | | | | | |Elemwise{add,no_inplace} [@K] ''
| | | | | | | |k [@G]
| | | | | | | |Subtensor{int64} [@L] ''
| | | | | | | |Shape [@M] ''
| | | | | | | | |Rebroadcast{0} [@N] ''
| | | | | | | | |DimShuffle{x,0} [@O] ''
| | | | | | | | |Elemwise{second,no_inplace} [@P] ''
| | | | | | | | |A [@Q]
| | | | | | | | |DimShuffle{x} [@R] ''
| | | | | | | | |TensorConstant{1.0} [@S]
| | | | | | | |Constant{0} [@T]
| | | | | | |Subtensor{int64} [@U] ''
| | | | | | |Shape [@V] ''
| | | | | | | |Rebroadcast{0} [@N] ''
| | | | | | |Constant{1} [@W]
| | | | | |Rebroadcast{0} [@N] ''
| | | | | |ScalarFromTensor [@X] ''
| | | | | |Subtensor{int64} [@L] ''
| | | | |A [@Q]
| | | |Constant{0} [@Y]
| | |TensorConstant{1} [@Z]
| |Subtensor{:int64:} [@BA] ''
| | |Subtensor{::int64} [@BB] ''
| | | |Subtensor{:int64:} [@BC] ''
| | | | |for{cpu,scan_fn} [@F] ''
| | | | |Constant{-1} [@BD]
| | | |Constant{-1} [@BE]
| | |ScalarFromTensor [@BF] ''
| | |Elemwise{sub,no_inplace} [@C] ''
| |Subtensor{:int64:} [@BG] ''
| | |Subtensor{:int64:} [@BH] ''
| | | |Subtensor{::int64} [@BI] ''
| | | | |for{cpu,scan_fn} [@F] ''
| | | | |Constant{-1} [@BJ]
| | | |Constant{-1} [@BK]
| | |ScalarFromTensor [@BL] ''
| | |Elemwise{sub,no_inplace} [@C] ''
| |Subtensor{::int64} [@BM] ''
| | |IncSubtensor{Inc;int64::} [@BN] ''
| | | |Elemwise{second,no_inplace} [@BO] ''
| | | | |for{cpu,scan_fn} [@BP] ''
| | | | | |k [@G]
| | | | | |IncSubtensor{Set;:int64:} [@H] ''
| | | | | |A [@Q]
| | | | |DimShuffle{x,x} [@BQ] ''
| | | | |TensorConstant{0.0} [@J]
| | | |IncSubtensor{Inc;int64} [@BR] ''
| | | | |Elemwise{second,no_inplace} [@BS] ''
| | | | | |Subtensor{int64::} [@BT] ''
| | | | | | |for{cpu,scan_fn} [@BP] ''
| | | | | | |Constant{1} [@BU]
| | | | | |DimShuffle{x,x} [@BV] ''
| | | | | |TensorConstant{0.0} [@J]
| | | | |Elemwise{second} [@BW] ''
| | | | | |Subtensor{int64} [@BX] ''
| | | | | | |Subtensor{int64::} [@BT] ''
| | | | | | |Constant{-1} [@BY]
| | | | | |DimShuffle{x} [@BZ] ''
| | | | | |Elemwise{second,no_inplace} [@CA] ''
| | | | | |Sum{acc_dtype=float64} [@CB] ''
| | | | | | |Subtensor{int64} [@BX] ''
| | | | | |TensorConstant{1.0} [@S]
| | | | |Constant{-1} [@BY]
| | | |Constant{1} [@BU]
| | |Constant{-1} [@CC]
| |Alloc [@CD] ''
| | |TensorConstant{0.0} [@J]
| | |Elemwise{add,no_inplace} [@CE] ''
| | | |Elemwise{sub,no_inplace} [@C] ''
| | | |TensorConstant{1} [@Z]
| | |Subtensor{int64} [@CF] ''
| | |Shape [@CG] ''
| | | |A [@Q]
| | |Constant{0} [@CH]
| |A [@Q]
|Constant{-1} [@CI]

Inner graphs of the scan ops:

for{cpu,grad_of_scan_fn}.1 [@B] ''
>Elemwise{add,no_inplace} [@CJ] ''
> |Elemwise{mul} [@CK] ''
> | |<TensorType(float64, vector)> [@CL] -> [@BM]
> | |A_copy [@CM] -> [@Q]
> |<TensorType(float64, vector)> [@CN] -> [@BM]
>Elemwise{add,no_inplace} [@CO] ''
> |Elemwise{mul} [@CP] ''
> | |<TensorType(float64, vector)> [@CL] -> [@BM]
> | |<TensorType(float64, vector)> [@CQ] -> [@BA]
> |<TensorType(float64, vector)> [@CR] -> [@CD]

for{cpu,scan_fn} [@F] ''
>Elemwise{mul,no_inplace} [@CS] ''
> |<TensorType(float64, vector)> [@CT] -> [@H]
> |A_copy [@CU] -> [@Q]

for{cpu,scan_fn} [@F] ''
>Elemwise{mul,no_inplace} [@CS] ''

for{cpu,scan_fn} [@F] ''
>Elemwise{mul,no_inplace} [@CS] ''

for{cpu,scan_fn} [@BP] ''
>Elemwise{mul,no_inplace} [@CS] ''

for{cpu,scan_fn} [@BP] ''
>Elemwise{mul,no_inplace} [@CS] ''"""

    # NOTE(review): zip() stops at the shorter sequence, so extra
    # trailing lines on either side are not compared — confirm intended.
    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论