提交 7d737cca authored 作者: Amjad Almahairi's avatar Amjad Almahairi

added tests

上级 6b620e2f
......@@ -167,7 +167,6 @@ N.B.:
inner_to_outer_inputs = dict([(inner_inputs[i],outer_inputs[o])
for i,o in enumerate(
s.owner.op.get_outer_iidx_from_inner_iidx_seq())])
#import pdb; pdb.set_trace()
print("", file=_file)
debugmode.debugprint(s, depth=depth, done=done,
......
......@@ -11,6 +11,7 @@ import theano.tensor as tensor
from theano.printing import min_informative_str, debugprint
from theano.compat.six import StringIO
import numpy
def test_pydotprint_cond_highlight():
......@@ -248,3 +249,452 @@ def test_debugprint():
print('--' + reference + '--')
assert s == reference
def test_scan_debugprint1():
    """Check debugprint output for a simple scan computing A**k.

    Builds `result[i] = result[i-1] * A` for k steps, debugprints the
    final value, and compares the printed outer + inner graphs against a
    hard-coded reference, line by line (whitespace-insensitive).
    """
    k = tensor.iscalar("k")
    A = tensor.vector("A")

    # Symbolic description of the result: repeated elementwise multiply by A.
    result, updates = theano.scan(
        fn=lambda prior_result, A: prior_result * A,
        outputs_info=tensor.ones_like(A),
        non_sequences=A,
        n_steps=k)

    final_result = result[-1]

    output_str = theano.printing.debugprint(final_result, file='str')
    # split() already yields the list of lines; no manual append loop needed.
    lines = output_str.split('\n')

    expected_output = """Subtensor{int64} [@A] ''
|Subtensor{int64::} [@B] ''
| |for{cpu,scan_fn} [@C] ''
| | |k [@D]
| | |IncSubtensor{Set;:int64:} [@E] ''
| | | |Alloc [@F] ''
| | | | |TensorConstant{0.0} [@G]
| | | | |Elemwise{add,no_inplace} [@H] ''
| | | | | |k [@D]
| | | | | |Subtensor{int64} [@I] ''
| | | | | |Shape [@J] ''
| | | | | | |Rebroadcast{0} [@K] ''
| | | | | | |DimShuffle{x,0} [@L] ''
| | | | | | |Elemwise{second,no_inplace} [@M] ''
| | | | | | |A [@N]
| | | | | | |DimShuffle{x} [@O] ''
| | | | | | |TensorConstant{1.0} [@P]
| | | | | |Constant{0} [@Q]
| | | | |Subtensor{int64} [@R] ''
| | | | |Shape [@S] ''
| | | | | |Rebroadcast{0} [@K] ''
| | | | |Constant{1} [@T]
| | | |Rebroadcast{0} [@K] ''
| | | |ScalarFromTensor [@U] ''
| | | |Subtensor{int64} [@I] ''
| | |A [@N]
| |Constant{1} [@V]
|Constant{-1} [@W]
Inner graphs of the scan ops:
for{cpu,scan_fn} [@C] ''
>Elemwise{mul,no_inplace} [@X] ''
> |<TensorType(float64, vector)> [@Y] -> [@E]
> |A_copy [@Z] -> [@N]"""

    # NOTE(review): zip truncates at the shorter sequence, so extra trailing
    # output lines would go unchecked; kept as-is to preserve test semantics.
    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
def test_scan_debugprint2():
    """Check debugprint output for a scan over sequences (polynomial eval).

    Computes sum_i coefficients[i] * x**i via scan over the coefficients
    and an arange of powers, then compares the printed outer + inner
    graphs against a hard-coded reference, line by line.
    """
    coefficients = theano.tensor.vector("coefficients")
    x = tensor.scalar("x")

    max_coefficients_supported = 10000

    # Generate the components of the polynomial: one term per coefficient.
    components, updates = theano.scan(
        fn=lambda coefficient, power, free_variable:
            coefficient * (free_variable ** power),
        outputs_info=None,
        sequences=[coefficients,
                   theano.tensor.arange(max_coefficients_supported)],
        non_sequences=x)

    # Sum them up
    polynomial = components.sum()

    output_str = theano.printing.debugprint(polynomial, file='str')
    # split() already yields the list of lines; no manual append loop needed.
    lines = output_str.split('\n')

    expected_output = """Sum{acc_dtype=float64} [@A] ''
|for{cpu,scan_fn} [@B] ''
|Elemwise{minimum,no_inplace} [@C] ''
| |Subtensor{int64} [@D] ''
| | |Shape [@E] ''
| | | |Subtensor{int64::} [@F] 'coefficients[0:]'
| | | |coefficients [@G]
| | | |Constant{0} [@H]
| | |Constant{0} [@I]
| |Subtensor{int64} [@J] ''
| |Shape [@K] ''
| | |Subtensor{int64::} [@L] ''
| | |ARange [@M] ''
| | | |TensorConstant{0} [@N]
| | | |TensorConstant{10000} [@O]
| | | |TensorConstant{1} [@P]
| | |Constant{0} [@Q]
| |Constant{0} [@R]
|Subtensor{:int64:} [@S] ''
| |Subtensor{int64::} [@F] 'coefficients[0:]'
| |ScalarFromTensor [@T] ''
| |Elemwise{minimum,no_inplace} [@C] ''
|Subtensor{:int64:} [@U] ''
| |Subtensor{int64::} [@L] ''
| |ScalarFromTensor [@V] ''
| |Elemwise{minimum,no_inplace} [@C] ''
|Elemwise{minimum,no_inplace} [@C] ''
|x [@W]
Inner graphs of the scan ops:
for{cpu,scan_fn} [@B] ''
>Elemwise{mul,no_inplace} [@X] ''
> |coefficients[t] [@Y] -> [@S]
> |Elemwise{pow,no_inplace} [@Z] ''
> |x_copy [@BA] -> [@W]
> |<TensorType(int16, scalar)> [@BB] -> [@U]"""

    # Whitespace-insensitive line-by-line comparison against the reference.
    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
def test_scan_debugprint3():
    """Check debugprint output for a scan with a nested inner scan.

    The outer scan evaluates a polynomial whose variable is A**k, where
    A**k is itself computed by an inner scan; verifies that both levels
    of inner graphs are printed, comparing against a hard-coded reference.
    """
    coefficients = theano.tensor.vector("coefficients")
    max_coefficients_supported = 10

    k = tensor.iscalar("k")
    A = tensor.vector("A")

    # compute A**k
    def compute_A_k(A, k):
        # Symbolic description of the result: inner scan multiplying by A.
        result, updates = theano.scan(
            fn=lambda prior_result, A: prior_result * A,
            outputs_info=tensor.ones_like(A),
            non_sequences=A,
            n_steps=k)

        A_k = result[-1]

        return A_k

    # Generate the components of the polynomial: each term calls the
    # inner scan through compute_A_k.
    components, updates = theano.scan(
        fn=lambda coefficient, power, some_A, some_k:
            coefficient * (compute_A_k(some_A, some_k) ** power),
        outputs_info=None,
        sequences=[coefficients,
                   theano.tensor.arange(max_coefficients_supported)],
        non_sequences=[A, k])

    # Sum them up
    polynomial = components.sum()

    final_result = polynomial

    output_str = theano.printing.debugprint(final_result, file='str')
    # split() already yields the list of lines; no manual append loop needed.
    lines = output_str.split('\n')

    expected_output = """Sum{acc_dtype=float64} [@A] ''
|for{cpu,scan_fn} [@B] ''
|Elemwise{minimum,no_inplace} [@C] ''
| |Subtensor{int64} [@D] ''
| | |Shape [@E] ''
| | | |Subtensor{int64::} [@F] 'coefficients[0:]'
| | | |coefficients [@G]
| | | |Constant{0} [@H]
| | |Constant{0} [@I]
| |Subtensor{int64} [@J] ''
| |Shape [@K] ''
| | |Subtensor{int64::} [@L] ''
| | |ARange [@M] ''
| | | |TensorConstant{0} [@N]
| | | |TensorConstant{10} [@O]
| | | |TensorConstant{1} [@P]
| | |Constant{0} [@Q]
| |Constant{0} [@R]
|Subtensor{:int64:} [@S] ''
| |Subtensor{int64::} [@F] 'coefficients[0:]'
| |ScalarFromTensor [@T] ''
| |Elemwise{minimum,no_inplace} [@C] ''
|Subtensor{:int64:} [@U] ''
| |Subtensor{int64::} [@L] ''
| |ScalarFromTensor [@V] ''
| |Elemwise{minimum,no_inplace} [@C] ''
|Elemwise{minimum,no_inplace} [@C] ''
|A [@W]
|k [@X]
Inner graphs of the scan ops:
for{cpu,scan_fn} [@B] ''
>Elemwise{mul,no_inplace} [@Y] ''
> |DimShuffle{x} [@Z] ''
> | |coefficients[t] [@BA] -> [@S]
> |Elemwise{pow,no_inplace} [@BB] ''
> |Subtensor{int64} [@BC] ''
> | |Subtensor{int64::} [@BD] ''
> | | |for{cpu,scan_fn} [@BE] ''
> | | | |k_copy [@BF] -> [@X]
> | | | |IncSubtensor{Set;:int64:} [@BG] ''
> | | | | |Alloc [@BH] ''
> | | | | | |TensorConstant{0.0} [@BI]
> | | | | | |Elemwise{add,no_inplace} [@BJ] ''
> | | | | | | |k_copy [@BF] -> [@X]
> | | | | | | |Subtensor{int64} [@BK] ''
> | | | | | | |Shape [@BL] ''
> | | | | | | | |Rebroadcast{0} [@BM] ''
> | | | | | | | |DimShuffle{x,0} [@BN] ''
> | | | | | | | |Elemwise{second,no_inplace} [@BO] ''
> | | | | | | | |A_copy [@BP] -> [@W]
> | | | | | | | |DimShuffle{x} [@BQ] ''
> | | | | | | | |TensorConstant{1.0} [@BR]
> | | | | | | |Constant{0} [@BS]
> | | | | | |Subtensor{int64} [@BT] ''
> | | | | | |Shape [@BU] ''
> | | | | | | |Rebroadcast{0} [@BM] ''
> | | | | | |Constant{1} [@BV]
> | | | | |Rebroadcast{0} [@BM] ''
> | | | | |ScalarFromTensor [@BW] ''
> | | | | |Subtensor{int64} [@BK] ''
> | | | |A_copy [@BP] -> [@W]
> | | |Constant{1} [@BX]
> | |Constant{-1} [@BY]
> |DimShuffle{x} [@BZ] ''
> |<TensorType(int8, scalar)> [@CA] -> [@U]
for{cpu,scan_fn} [@BE] ''
>Elemwise{mul,no_inplace} [@CB] ''
> |<TensorType(float64, vector)> [@CC] -> [@BG]
> |A_copy [@CD] -> [@BP]"""

    # Whitespace-insensitive line-by-line comparison against the reference.
    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
def test_scan_debugprint4():
    """Check debugprint output for a scan with two outputs and taps [-2, -1].

    Two parallel Fibonacci-like recurrences share one scan op; verifies
    the printed graph shows both outputs (.0/.1) and that the second
    output's inner graph is printed without re-expanding shared nodes.
    """

    def fn(a_m2, a_m1, b_m2, b_m1):
        return a_m1 + a_m2, b_m1 + b_m2

    a0 = theano.shared(numpy.arange(2))
    b0 = theano.shared(numpy.arange(2))

    (a, b), _ = theano.scan(
        fn,
        outputs_info=[{'initial': a0, 'taps': [-2, -1]},
                      {'initial': b0, 'taps': [-2, -1]}],
        n_steps=5)

    final_result = a + b

    output_str = theano.printing.debugprint(final_result, file='str')
    # split() already yields the list of lines; no manual append loop needed.
    lines = output_str.split('\n')

    expected_output = """Elemwise{add,no_inplace} [@A] ''
|Subtensor{int64::} [@B] ''
| |for{cpu,scan_fn}.0 [@C] ''
| | |TensorConstant{5} [@D]
| | |IncSubtensor{Set;:int64:} [@E] ''
| | | |Alloc [@F] ''
| | | | |TensorConstant{0} [@G]
| | | | |Elemwise{add,no_inplace} [@H] ''
| | | | |TensorConstant{5} [@D]
| | | | |Subtensor{int64} [@I] ''
| | | | |Shape [@J] ''
| | | | | |Subtensor{:int64:} [@K] ''
| | | | | |<TensorType(int64, vector)> [@L]
| | | | | |Constant{2} [@M]
| | | | |Constant{0} [@N]
| | | |Subtensor{:int64:} [@K] ''
| | | |ScalarFromTensor [@O] ''
| | | |Subtensor{int64} [@I] ''
| | |IncSubtensor{Set;:int64:} [@P] ''
| | |Alloc [@Q] ''
| | | |TensorConstant{0} [@G]
| | | |Elemwise{add,no_inplace} [@R] ''
| | | |TensorConstant{5} [@D]
| | | |Subtensor{int64} [@S] ''
| | | |Shape [@T] ''
| | | | |Subtensor{:int64:} [@U] ''
| | | | |<TensorType(int64, vector)> [@V]
| | | | |Constant{2} [@W]
| | | |Constant{0} [@X]
| | |Subtensor{:int64:} [@U] ''
| | |ScalarFromTensor [@Y] ''
| | |Subtensor{int64} [@S] ''
| |Constant{2} [@Z]
|Subtensor{int64::} [@BA] ''
|for{cpu,scan_fn}.1 [@C] ''
|Constant{2} [@BB]
Inner graphs of the scan ops:
for{cpu,scan_fn}.0 [@C] ''
>Elemwise{add,no_inplace} [@BC] ''
> |<TensorType(int64, scalar)> [@BD] -> [@E]
> |<TensorType(int64, scalar)> [@BE] -> [@E]
>Elemwise{add,no_inplace} [@BF] ''
> |<TensorType(int64, scalar)> [@BG] -> [@P]
> |<TensorType(int64, scalar)> [@BH] -> [@P]
for{cpu,scan_fn}.1 [@C] ''
>Elemwise{add,no_inplace} [@BC] ''
>Elemwise{add,no_inplace} [@BF] ''"""

    # Whitespace-insensitive line-by-line comparison against the reference.
    # (Removed dead commented-out debug code that dumped output to a file.)
    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
def test_scan_debugprint5():
    """Check debugprint output for the gradient of a scan.

    Takes grad of sum(A**k) w.r.t. A, which produces a grad_of_scan_fn
    scan; verifies the printed outer graph and all (repeated) inner
    graphs match a hard-coded reference, line by line.
    """

    k = tensor.iscalar("k")
    A = tensor.vector("A")

    # Symbolic description of the result: repeated elementwise multiply by A.
    result, updates = theano.scan(
        fn=lambda prior_result, A: prior_result * A,
        outputs_info=tensor.ones_like(A),
        non_sequences=A,
        n_steps=k)

    final_result = tensor.grad(result[-1].sum(), A)

    output_str = theano.printing.debugprint(final_result, file='str')
    # split() already yields the list of lines; no manual append loop needed.
    lines = output_str.split('\n')

    expected_output = """Subtensor{int64} [@A] ''
|for{cpu,grad_of_scan_fn}.1 [@B] ''
| |Elemwise{sub,no_inplace} [@C] ''
| | |Subtensor{int64} [@D] ''
| | | |Shape [@E] ''
| | | | |for{cpu,scan_fn} [@F] ''
| | | | |k [@G]
| | | | |IncSubtensor{Set;:int64:} [@H] ''
| | | | | |Alloc [@I] ''
| | | | | | |TensorConstant{0.0} [@J]
| | | | | | |Elemwise{add,no_inplace} [@K] ''
| | | | | | | |k [@G]
| | | | | | | |Subtensor{int64} [@L] ''
| | | | | | | |Shape [@M] ''
| | | | | | | | |Rebroadcast{0} [@N] ''
| | | | | | | | |DimShuffle{x,0} [@O] ''
| | | | | | | | |Elemwise{second,no_inplace} [@P] ''
| | | | | | | | |A [@Q]
| | | | | | | | |DimShuffle{x} [@R] ''
| | | | | | | | |TensorConstant{1.0} [@S]
| | | | | | | |Constant{0} [@T]
| | | | | | |Subtensor{int64} [@U] ''
| | | | | | |Shape [@V] ''
| | | | | | | |Rebroadcast{0} [@N] ''
| | | | | | |Constant{1} [@W]
| | | | | |Rebroadcast{0} [@N] ''
| | | | | |ScalarFromTensor [@X] ''
| | | | | |Subtensor{int64} [@L] ''
| | | | |A [@Q]
| | | |Constant{0} [@Y]
| | |TensorConstant{1} [@Z]
| |Subtensor{:int64:} [@BA] ''
| | |Subtensor{::int64} [@BB] ''
| | | |Subtensor{:int64:} [@BC] ''
| | | | |for{cpu,scan_fn} [@F] ''
| | | | |Constant{-1} [@BD]
| | | |Constant{-1} [@BE]
| | |ScalarFromTensor [@BF] ''
| | |Elemwise{sub,no_inplace} [@C] ''
| |Subtensor{:int64:} [@BG] ''
| | |Subtensor{:int64:} [@BH] ''
| | | |Subtensor{::int64} [@BI] ''
| | | | |for{cpu,scan_fn} [@F] ''
| | | | |Constant{-1} [@BJ]
| | | |Constant{-1} [@BK]
| | |ScalarFromTensor [@BL] ''
| | |Elemwise{sub,no_inplace} [@C] ''
| |Subtensor{::int64} [@BM] ''
| | |IncSubtensor{Inc;int64::} [@BN] ''
| | | |Elemwise{second,no_inplace} [@BO] ''
| | | | |for{cpu,scan_fn} [@BP] ''
| | | | | |k [@G]
| | | | | |IncSubtensor{Set;:int64:} [@H] ''
| | | | | |A [@Q]
| | | | |DimShuffle{x,x} [@BQ] ''
| | | | |TensorConstant{0.0} [@J]
| | | |IncSubtensor{Inc;int64} [@BR] ''
| | | | |Elemwise{second,no_inplace} [@BS] ''
| | | | | |Subtensor{int64::} [@BT] ''
| | | | | | |for{cpu,scan_fn} [@BP] ''
| | | | | | |Constant{1} [@BU]
| | | | | |DimShuffle{x,x} [@BV] ''
| | | | | |TensorConstant{0.0} [@J]
| | | | |Elemwise{second} [@BW] ''
| | | | | |Subtensor{int64} [@BX] ''
| | | | | | |Subtensor{int64::} [@BT] ''
| | | | | | |Constant{-1} [@BY]
| | | | | |DimShuffle{x} [@BZ] ''
| | | | | |Elemwise{second,no_inplace} [@CA] ''
| | | | | |Sum{acc_dtype=float64} [@CB] ''
| | | | | | |Subtensor{int64} [@BX] ''
| | | | | |TensorConstant{1.0} [@S]
| | | | |Constant{-1} [@BY]
| | | |Constant{1} [@BU]
| | |Constant{-1} [@CC]
| |Alloc [@CD] ''
| | |TensorConstant{0.0} [@J]
| | |Elemwise{add,no_inplace} [@CE] ''
| | | |Elemwise{sub,no_inplace} [@C] ''
| | | |TensorConstant{1} [@Z]
| | |Subtensor{int64} [@CF] ''
| | |Shape [@CG] ''
| | | |A [@Q]
| | |Constant{0} [@CH]
| |A [@Q]
|Constant{-1} [@CI]
Inner graphs of the scan ops:
for{cpu,grad_of_scan_fn}.1 [@B] ''
>Elemwise{add,no_inplace} [@CJ] ''
> |Elemwise{mul} [@CK] ''
> | |<TensorType(float64, vector)> [@CL] -> [@BM]
> | |A_copy [@CM] -> [@Q]
> |<TensorType(float64, vector)> [@CN] -> [@BM]
>Elemwise{add,no_inplace} [@CO] ''
> |Elemwise{mul} [@CP] ''
> | |<TensorType(float64, vector)> [@CL] -> [@BM]
> | |<TensorType(float64, vector)> [@CQ] -> [@BA]
> |<TensorType(float64, vector)> [@CR] -> [@CD]
for{cpu,scan_fn} [@F] ''
>Elemwise{mul,no_inplace} [@CS] ''
> |<TensorType(float64, vector)> [@CT] -> [@H]
> |A_copy [@CU] -> [@Q]
for{cpu,scan_fn} [@F] ''
>Elemwise{mul,no_inplace} [@CS] ''
for{cpu,scan_fn} [@F] ''
>Elemwise{mul,no_inplace} [@CS] ''
for{cpu,scan_fn} [@BP] ''
>Elemwise{mul,no_inplace} [@CS] ''
for{cpu,scan_fn} [@BP] ''
>Elemwise{mul,no_inplace} [@CS] ''"""

    # Whitespace-insensitive line-by-line comparison against the reference.
    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论