提交 ad4e2a09 —— 作者：Frédéric Bastien

Merge pull request #3521 from carriepl/scan_speedup_cgt

Scan replace Allocs with AllocEmpty
......@@ -627,7 +627,7 @@ def scan(fn,
# the initial state over. We do this using the expand function
# defined in scan utils
sit_sot_scan_inputs.append(
scan_utils.expand(
scan_utils.expand_empty(
tensor.unbroadcast(
tensor.shape_padleft(actual_arg), 0),
actual_n_steps
......@@ -653,8 +653,8 @@ def scan(fn,
idx_offset = abs(numpy.min(init_out['taps']))
# Sequence
mit_sot_scan_inputs.append(
scan_utils.expand(init_out['initial'][:mintap],
actual_n_steps))
scan_utils.expand_empty(init_out['initial'][:mintap],
actual_n_steps))
if i in return_steps:
mit_sot_return_steps[n_mit_sot] = return_steps[i]
......@@ -866,7 +866,7 @@ def scan(fn,
if isinstance(new_var.type, ops.expandable_types):
sit_sot_inner_inputs.append(new_var)
sit_sot_scan_inputs.append(
scan_utils.expand(
scan_utils.expand_empty(
tensor.unbroadcast(
tensor.shape_padleft(input.variable), 0),
actual_n_steps))
......
......@@ -1499,7 +1499,7 @@ class ScanSaveMem(gof.Optimizer):
tmp_idx)
tmp = pre_constant_merge([tmp])[0]
nw_input = scan_utils.expand(_nw_input, tmp)
nw_input = scan_utils.expand_empty(_nw_input, tmp)
else:
tmp = tensor.as_tensor_variable(val)
initl = tensor.as_tensor_variable(init_l[i])
......@@ -1550,8 +1550,8 @@ class ScanSaveMem(gof.Optimizer):
nw_inputs[in_idx].owner.op.idx_list[0],
slice))):
_nw_input = nw_inputs[in_idx].owner.inputs[1]
nw_input = scan_utils.expand(_nw_input,
nw_steps)
nw_input = scan_utils.expand_empty(_nw_input,
nw_steps)
nw_inputs[in_idx] = nw_input
else:
nw_input = nw_inputs[in_idx][:(initl+nw_steps)]
......
......@@ -607,19 +607,18 @@ def isNaN_or_Inf_or_None(x):
return isNone or isNaN or isInf or isStr
def expand(tensor_var, size):
def expand_empty(tensor_var, size):
"""
Transoforms the shape of a tensor from (d1, d2 ... ) to ( d1+size, d2, ..)
by adding 0s at the end of the tensor.
Transforms the shape of a tensor from (d1, d2 ... ) to ( d1+size, d2, ..)
by adding uninitialized memory at the end of the tensor.
"""
# Corner case that I might use in an optimization
if size == 0:
return tensor_var
shapes = [tensor_var.shape[x] for x in xrange(tensor_var.ndim)]
zeros_shape = [size + shapes[0]] + shapes[1:]
empty = tensor.zeros(zeros_shape,
dtype=tensor_var.dtype)
new_shape = [size + shapes[0]] + shapes[1:]
empty = tensor.AllocEmpty(tensor_var.dtype)(*new_shape)
return tensor.set_subtensor(empty[:shapes[0]], tensor_var)
......
......@@ -6099,3 +6099,12 @@ class AllocEmpty(gof.Op):
def do_constant_folding(self, node):
    """Tell the optimizer never to constant-fold this node.

    The op produces uninitialized memory, so "folding" it into a
    constant would bake arbitrary garbage values into the graph.
    """
    return False
def connection_pattern(self, node):
    """Report every input as disconnected from the single output.

    The inputs are only shape scalars; they never influence the
    (uninitialized) output values, so gradients do not flow through.
    """
    return [[False] for _ in node.inputs]
def grad(self, inputs, grads):
    """Return a disconnected gradient for every (shape) input.

    Consistent with ``connection_pattern``: the output carries no
    information from the inputs, so each gradient is DisconnectedType.
    """
    return [DisconnectedType()() for _ in inputs]
def R_op(self, inputs, eval_points):
    # R-operator: the output does not depend differentiably on the shape
    # inputs, so the directional derivative is a zero tensor.
    # NOTE(review): `zeros` here presumably builds a tensor of shape
    # `inputs` with dtype `self.dtype` (theano.tensor.zeros) — confirm
    # against the enclosing module's imports.
    return [zeros(inputs, self.dtype)]
......@@ -291,36 +291,35 @@ def test_scan_debugprint1():
| |for{cpu,scan_fn} [@C] ''
| | |k [@D]
| | |IncSubtensor{Set;:int64:} [@E] ''
| | | |Alloc [@F] ''
| | | | |TensorConstant{0.0} [@G]
| | | | |Elemwise{add,no_inplace} [@H] ''
| | | |AllocEmpty{dtype='float64'} [@F] ''
| | | | |Elemwise{add,no_inplace} [@G] ''
| | | | | |k [@D]
| | | | | |Subtensor{int64} [@I] ''
| | | | | |Shape [@J] ''
| | | | | | |Rebroadcast{0} [@K] ''
| | | | | | |DimShuffle{x,0} [@L] ''
| | | | | | |Elemwise{second,no_inplace} [@M] ''
| | | | | | |A [@N]
| | | | | | |DimShuffle{x} [@O] ''
| | | | | | |TensorConstant{1.0} [@P]
| | | | | |Constant{0} [@Q]
| | | | |Subtensor{int64} [@R] ''
| | | | |Shape [@S] ''
| | | | | |Rebroadcast{0} [@K] ''
| | | | |Constant{1} [@T]
| | | |Rebroadcast{0} [@K] ''
| | | |ScalarFromTensor [@U] ''
| | | |Subtensor{int64} [@I] ''
| | |A [@N]
| |Constant{1} [@V]
|Constant{-1} [@W]
| | | | | |Subtensor{int64} [@H] ''
| | | | | |Shape [@I] ''
| | | | | | |Rebroadcast{0} [@J] ''
| | | | | | |DimShuffle{x,0} [@K] ''
| | | | | | |Elemwise{second,no_inplace} [@L] ''
| | | | | | |A [@M]
| | | | | | |DimShuffle{x} [@N] ''
| | | | | | |TensorConstant{1.0} [@O]
| | | | | |Constant{0} [@P]
| | | | |Subtensor{int64} [@Q] ''
| | | | |Shape [@R] ''
| | | | | |Rebroadcast{0} [@J] ''
| | | | |Constant{1} [@S]
| | | |Rebroadcast{0} [@J] ''
| | | |ScalarFromTensor [@T] ''
| | | |Subtensor{int64} [@H] ''
| | |A [@M]
| |Constant{1} [@U]
|Constant{-1} [@V]
Inner graphs of the scan ops:
for{cpu,scan_fn} [@C] ''
>Elemwise{mul,no_inplace} [@X] ''
> |<TensorType(float64, vector)> [@Y] -> [@E]
> |A_copy [@Z] -> [@N]"""
>Elemwise{mul,no_inplace} [@W] ''
> |<TensorType(float64, vector)> [@X] -> [@E]
> |A_copy [@Y] -> [@M]"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
......@@ -475,36 +474,35 @@ def test_scan_debugprint3():
> | | |for{cpu,scan_fn} [@BE] ''
> | | | |k_copy [@BF] -> [@X]
> | | | |IncSubtensor{Set;:int64:} [@BG] ''
> | | | | |Alloc [@BH] ''
> | | | | | |TensorConstant{0.0} [@BI]
> | | | | | |Elemwise{add,no_inplace} [@BJ] ''
> | | | | |AllocEmpty{dtype='float64'} [@BH] ''
> | | | | | |Elemwise{add,no_inplace} [@BI] ''
> | | | | | | |k_copy [@BF] -> [@X]
> | | | | | | |Subtensor{int64} [@BK] ''
> | | | | | | |Shape [@BL] ''
> | | | | | | | |Rebroadcast{0} [@BM] ''
> | | | | | | | |DimShuffle{x,0} [@BN] ''
> | | | | | | | |Elemwise{second,no_inplace} [@BO] ''
> | | | | | | | |A_copy [@BP] -> [@W]
> | | | | | | | |DimShuffle{x} [@BQ] ''
> | | | | | | | |TensorConstant{1.0} [@BR]
> | | | | | | |Constant{0} [@BS]
> | | | | | |Subtensor{int64} [@BT] ''
> | | | | | |Shape [@BU] ''
> | | | | | | |Rebroadcast{0} [@BM] ''
> | | | | | |Constant{1} [@BV]
> | | | | |Rebroadcast{0} [@BM] ''
> | | | | |ScalarFromTensor [@BW] ''
> | | | | |Subtensor{int64} [@BK] ''
> | | | |A_copy [@BP] -> [@W]
> | | |Constant{1} [@BX]
> | |Constant{-1} [@BY]
> |DimShuffle{x} [@BZ] ''
> |<TensorType(int64, scalar)> [@CA] -> [@U]
> | | | | | | |Subtensor{int64} [@BJ] ''
> | | | | | | |Shape [@BK] ''
> | | | | | | | |Rebroadcast{0} [@BL] ''
> | | | | | | | |DimShuffle{x,0} [@BM] ''
> | | | | | | | |Elemwise{second,no_inplace} [@BN] ''
> | | | | | | | |A_copy [@BO] -> [@W]
> | | | | | | | |DimShuffle{x} [@BP] ''
> | | | | | | | |TensorConstant{1.0} [@BQ]
> | | | | | | |Constant{0} [@BR]
> | | | | | |Subtensor{int64} [@BS] ''
> | | | | | |Shape [@BT] ''
> | | | | | | |Rebroadcast{0} [@BL] ''
> | | | | | |Constant{1} [@BU]
> | | | | |Rebroadcast{0} [@BL] ''
> | | | | |ScalarFromTensor [@BV] ''
> | | | | |Subtensor{int64} [@BJ] ''
> | | | |A_copy [@BO] -> [@W]
> | | |Constant{1} [@BW]
> | |Constant{-1} [@BX]
> |DimShuffle{x} [@BY] ''
> |<TensorType(int64, scalar)> [@BZ] -> [@U]
for{cpu,scan_fn} [@BE] ''
>Elemwise{mul,no_inplace} [@CB] ''
> |<TensorType(float64, vector)> [@CC] -> [@BG]
> |A_copy [@CD] -> [@BP]"""
>Elemwise{mul,no_inplace} [@CA] ''
> |<TensorType(float64, vector)> [@CB] -> [@BG]
> |A_copy [@CC] -> [@BO]"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
......@@ -534,51 +532,49 @@ def test_scan_debugprint4():
| |for{cpu,scan_fn}.0 [@C] ''
| | |TensorConstant{5} [@D]
| | |IncSubtensor{Set;:int64:} [@E] ''
| | | |Alloc [@F] ''
| | | | |TensorConstant{0} [@G]
| | | | |Elemwise{add,no_inplace} [@H] ''
| | | |AllocEmpty{dtype='int64'} [@F] ''
| | | | |Elemwise{add,no_inplace} [@G] ''
| | | | |TensorConstant{5} [@D]
| | | | |Subtensor{int64} [@I] ''
| | | | |Shape [@J] ''
| | | | | |Subtensor{:int64:} [@K] ''
| | | | | |<TensorType(int64, vector)> [@L]
| | | | | |Constant{2} [@M]
| | | | |Constant{0} [@N]
| | | |Subtensor{:int64:} [@K] ''
| | | |ScalarFromTensor [@O] ''
| | | |Subtensor{int64} [@I] ''
| | |IncSubtensor{Set;:int64:} [@P] ''
| | |Alloc [@Q] ''
| | | |TensorConstant{0} [@G]
| | | |Elemwise{add,no_inplace} [@R] ''
| | | | |Subtensor{int64} [@H] ''
| | | | |Shape [@I] ''
| | | | | |Subtensor{:int64:} [@J] ''
| | | | | |<TensorType(int64, vector)> [@K]
| | | | | |Constant{2} [@L]
| | | | |Constant{0} [@M]
| | | |Subtensor{:int64:} [@J] ''
| | | |ScalarFromTensor [@N] ''
| | | |Subtensor{int64} [@H] ''
| | |IncSubtensor{Set;:int64:} [@O] ''
| | |AllocEmpty{dtype='int64'} [@P] ''
| | | |Elemwise{add,no_inplace} [@Q] ''
| | | |TensorConstant{5} [@D]
| | | |Subtensor{int64} [@S] ''
| | | |Shape [@T] ''
| | | | |Subtensor{:int64:} [@U] ''
| | | | |<TensorType(int64, vector)> [@V]
| | | | |Constant{2} [@W]
| | | |Constant{0} [@X]
| | |Subtensor{:int64:} [@U] ''
| | |ScalarFromTensor [@Y] ''
| | |Subtensor{int64} [@S] ''
| |Constant{2} [@Z]
|Subtensor{int64::} [@BA] ''
| | | |Subtensor{int64} [@R] ''
| | | |Shape [@S] ''
| | | | |Subtensor{:int64:} [@T] ''
| | | | |<TensorType(int64, vector)> [@U]
| | | | |Constant{2} [@V]
| | | |Constant{0} [@W]
| | |Subtensor{:int64:} [@T] ''
| | |ScalarFromTensor [@X] ''
| | |Subtensor{int64} [@R] ''
| |Constant{2} [@Y]
|Subtensor{int64::} [@Z] ''
|for{cpu,scan_fn}.1 [@C] ''
|Constant{2} [@BB]
|Constant{2} [@BA]
Inner graphs of the scan ops:
for{cpu,scan_fn}.0 [@C] ''
>Elemwise{add,no_inplace} [@BC] ''
>Elemwise{add,no_inplace} [@BB] ''
> |<TensorType(int64, scalar)> [@BC] -> [@E]
> |<TensorType(int64, scalar)> [@BD] -> [@E]
> |<TensorType(int64, scalar)> [@BE] -> [@E]
>Elemwise{add,no_inplace} [@BF] ''
> |<TensorType(int64, scalar)> [@BG] -> [@P]
> |<TensorType(int64, scalar)> [@BH] -> [@P]
>Elemwise{add,no_inplace} [@BE] ''
> |<TensorType(int64, scalar)> [@BF] -> [@O]
> |<TensorType(int64, scalar)> [@BG] -> [@O]
for{cpu,scan_fn}.1 [@C] ''
>Elemwise{add,no_inplace} [@BC] ''
>Elemwise{add,no_inplace} [@BF] ''"""
>Elemwise{add,no_inplace} [@BB] ''
>Elemwise{add,no_inplace} [@BE] ''"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
......@@ -603,122 +599,121 @@ def test_scan_debugprint5():
lines += [line]
expected_output = """Subtensor{int64} [@A] ''
|for{cpu,grad_of_scan_fn}.1 [@B] ''
| |Elemwise{sub,no_inplace} [@C] ''
| | |Subtensor{int64} [@D] ''
| | | |Shape [@E] ''
| | | | |for{cpu,scan_fn} [@F] ''
| | | | |k [@G]
| | | | |IncSubtensor{Set;:int64:} [@H] ''
| | | | | |Alloc [@I] ''
| | | | | | |TensorConstant{0.0} [@J]
| | | | | | |Elemwise{add,no_inplace} [@K] ''
| | | | | | | |k [@G]
| | | | | | | |Subtensor{int64} [@L] ''
| | | | | | | |Shape [@M] ''
| | | | | | | | |Rebroadcast{0} [@N] ''
| | | | | | | | |DimShuffle{x,0} [@O] ''
| | | | | | | | |Elemwise{second,no_inplace} [@P] ''
| | | | | | | | |A [@Q]
| | | | | | | | |DimShuffle{x} [@R] ''
| | | | | | | | |TensorConstant{1.0} [@S]
| | | | | | | |Constant{0} [@T]
| | | | | | |Subtensor{int64} [@U] ''
| | | | | | |Shape [@V] ''
| | | | | | | |Rebroadcast{0} [@N] ''
| | | | | | |Constant{1} [@W]
| | | | | |Rebroadcast{0} [@N] ''
| | | | | |ScalarFromTensor [@X] ''
| | | | | |Subtensor{int64} [@L] ''
| | | | |A [@Q]
| | | |Constant{0} [@Y]
| | |TensorConstant{1} [@Z]
| |Subtensor{:int64:} [@BA] ''
| | |Subtensor{::int64} [@BB] ''
| | | |Subtensor{:int64:} [@BC] ''
| | | | |for{cpu,scan_fn} [@F] ''
| | | | |Constant{-1} [@BD]
| | | |Constant{-1} [@BE]
| | |ScalarFromTensor [@BF] ''
| | |Elemwise{sub,no_inplace} [@C] ''
| |Subtensor{:int64:} [@BG] ''
| | |Subtensor{:int64:} [@BH] ''
| | | |Subtensor{::int64} [@BI] ''
| | | | |for{cpu,scan_fn} [@F] ''
| | | | |Constant{-1} [@BJ]
| | | |Constant{-1} [@BK]
| | |ScalarFromTensor [@BL] ''
| | |Elemwise{sub,no_inplace} [@C] ''
| |Subtensor{::int64} [@BM] ''
| | |IncSubtensor{Inc;int64::} [@BN] ''
| | | |Elemwise{second,no_inplace} [@BO] ''
| | | | |for{cpu,scan_fn} [@BP] ''
| | | | | |k [@G]
| | | | | |IncSubtensor{Set;:int64:} [@H] ''
| | | | | |A [@Q]
| | | | |DimShuffle{x,x} [@BQ] ''
| | | | |TensorConstant{0.0} [@J]
| | | |IncSubtensor{Inc;int64} [@BR] ''
| | | | |Elemwise{second,no_inplace} [@BS] ''
| | | | | |Subtensor{int64::} [@BT] ''
| | | | | | |for{cpu,scan_fn} [@BP] ''
| | | | | | |Constant{1} [@BU]
| | | | | |DimShuffle{x,x} [@BV] ''
| | | | | |TensorConstant{0.0} [@J]
| | | | |Elemwise{second} [@BW] ''
| | | | | |Subtensor{int64} [@BX] ''
| | | | | | |Subtensor{int64::} [@BT] ''
| | | | | | |Constant{-1} [@BY]
| | | | | |DimShuffle{x} [@BZ] ''
| | | | | |Elemwise{second,no_inplace} [@CA] ''
| | | | | |Sum{acc_dtype=float64} [@CB] ''
| | | | | | |Subtensor{int64} [@BX] ''
| | | | | |TensorConstant{1.0} [@S]
| | | | |Constant{-1} [@BY]
| | | |Constant{1} [@BU]
| | |Constant{-1} [@CC]
| |Alloc [@CD] ''
| | |TensorConstant{0.0} [@J]
| | |Elemwise{add,no_inplace} [@CE] ''
| | | |Elemwise{sub,no_inplace} [@C] ''
| | | |TensorConstant{1} [@Z]
| | |Subtensor{int64} [@CF] ''
| | |Shape [@CG] ''
| | | |A [@Q]
| | |Constant{0} [@CH]
| |A [@Q]
|Constant{-1} [@CI]
|for{cpu,grad_of_scan_fn}.1 [@B] ''
| |Elemwise{sub,no_inplace} [@C] ''
| | |Subtensor{int64} [@D] ''
| | | |Shape [@E] ''
| | | | |for{cpu,scan_fn} [@F] ''
| | | | |k [@G]
| | | | |IncSubtensor{Set;:int64:} [@H] ''
| | | | | |AllocEmpty{dtype='float64'} [@I] ''
| | | | | | |Elemwise{add,no_inplace} [@J] ''
| | | | | | | |k [@G]
| | | | | | | |Subtensor{int64} [@K] ''
| | | | | | | |Shape [@L] ''
| | | | | | | | |Rebroadcast{0} [@M] ''
| | | | | | | | |DimShuffle{x,0} [@N] ''
| | | | | | | | |Elemwise{second,no_inplace} [@O] ''
| | | | | | | | |A [@P]
| | | | | | | | |DimShuffle{x} [@Q] ''
| | | | | | | | |TensorConstant{1.0} [@R]
| | | | | | | |Constant{0} [@S]
| | | | | | |Subtensor{int64} [@T] ''
| | | | | | |Shape [@U] ''
| | | | | | | |Rebroadcast{0} [@M] ''
| | | | | | |Constant{1} [@V]
| | | | | |Rebroadcast{0} [@M] ''
| | | | | |ScalarFromTensor [@W] ''
| | | | | |Subtensor{int64} [@K] ''
| | | | |A [@P]
| | | |Constant{0} [@X]
| | |TensorConstant{1} [@Y]
| |Subtensor{:int64:} [@Z] ''
| | |Subtensor{::int64} [@BA] ''
| | | |Subtensor{:int64:} [@BB] ''
| | | | |for{cpu,scan_fn} [@F] ''
| | | | |Constant{-1} [@BC]
| | | |Constant{-1} [@BD]
| | |ScalarFromTensor [@BE] ''
| | |Elemwise{sub,no_inplace} [@C] ''
| |Subtensor{:int64:} [@BF] ''
| | |Subtensor{:int64:} [@BG] ''
| | | |Subtensor{::int64} [@BH] ''
| | | | |for{cpu,scan_fn} [@F] ''
| | | | |Constant{-1} [@BI]
| | | |Constant{-1} [@BJ]
| | |ScalarFromTensor [@BK] ''
| | |Elemwise{sub,no_inplace} [@C] ''
| |Subtensor{::int64} [@BL] ''
| | |IncSubtensor{Inc;int64::} [@BM] ''
| | | |Elemwise{second,no_inplace} [@BN] ''
| | | | |for{cpu,scan_fn} [@BO] ''
| | | | | |k [@G]
| | | | | |IncSubtensor{Set;:int64:} [@H] ''
| | | | | |A [@P]
| | | | |DimShuffle{x,x} [@BP] ''
| | | | |TensorConstant{0.0} [@BQ]
| | | |IncSubtensor{Inc;int64} [@BR] ''
| | | | |Elemwise{second,no_inplace} [@BS] ''
| | | | | |Subtensor{int64::} [@BT] ''
| | | | | | |for{cpu,scan_fn} [@BO] ''
| | | | | | |Constant{1} [@BU]
| | | | | |DimShuffle{x,x} [@BV] ''
| | | | | |TensorConstant{0.0} [@BQ]
| | | | |Elemwise{second} [@BW] ''
| | | | | |Subtensor{int64} [@BX] ''
| | | | | | |Subtensor{int64::} [@BT] ''
| | | | | | |Constant{-1} [@BY]
| | | | | |DimShuffle{x} [@BZ] ''
| | | | | |Elemwise{second,no_inplace} [@CA] ''
| | | | | |Sum{acc_dtype=float64} [@CB] ''
| | | | | | |Subtensor{int64} [@BX] ''
| | | | | |TensorConstant{1.0} [@R]
| | | | |Constant{-1} [@BY]
| | | |Constant{1} [@BU]
| | |Constant{-1} [@CC]
| |Alloc [@CD] ''
| | |TensorConstant{0.0} [@BQ]
| | |Elemwise{add,no_inplace} [@CE] ''
| | | |Elemwise{sub,no_inplace} [@C] ''
| | | |TensorConstant{1} [@Y]
| | |Subtensor{int64} [@CF] ''
| | |Shape [@CG] ''
| | | |A [@P]
| | |Constant{0} [@CH]
| |A [@P]
|Constant{-1} [@CI]
Inner graphs of the scan ops:
for{cpu,grad_of_scan_fn}.1 [@B] ''
>Elemwise{add,no_inplace} [@CJ] ''
> |Elemwise{mul} [@CK] ''
> | |<TensorType(float64, vector)> [@CL] -> [@BM]
> | |A_copy [@CM] -> [@Q]
> |<TensorType(float64, vector)> [@CN] -> [@BM]
>Elemwise{add,no_inplace} [@CO] ''
> |Elemwise{mul} [@CP] ''
> | |<TensorType(float64, vector)> [@CL] -> [@BM]
> | |<TensorType(float64, vector)> [@CQ] -> [@BA]
> |<TensorType(float64, vector)> [@CR] -> [@CD]
>Elemwise{add,no_inplace} [@CJ] ''
> |Elemwise{mul} [@CK] ''
> | |<TensorType(float64, vector)> [@CL] -> [@BL]
> | |A_copy [@CM] -> [@P]
> |<TensorType(float64, vector)> [@CN] -> [@BL]
>Elemwise{add,no_inplace} [@CO] ''
> |Elemwise{mul} [@CP] ''
> | |<TensorType(float64, vector)> [@CL] -> [@BL]
> | |<TensorType(float64, vector)> [@CQ] -> [@Z]
> |<TensorType(float64, vector)> [@CR] -> [@CD]
for{cpu,scan_fn} [@F] ''
>Elemwise{mul,no_inplace} [@CS] ''
> |<TensorType(float64, vector)> [@CT] -> [@H]
> |A_copy [@CU] -> [@Q]
>Elemwise{mul,no_inplace} [@CS] ''
> |<TensorType(float64, vector)> [@CT] -> [@H]
> |A_copy [@CU] -> [@P]
for{cpu,scan_fn} [@F] ''
>Elemwise{mul,no_inplace} [@CS] ''
>Elemwise{mul,no_inplace} [@CS] ''
for{cpu,scan_fn} [@F] ''
>Elemwise{mul,no_inplace} [@CS] ''
>Elemwise{mul,no_inplace} [@CS] ''
for{cpu,scan_fn} [@BP] ''
>Elemwise{mul,no_inplace} [@CS] ''
for{cpu,scan_fn} [@BO] ''
>Elemwise{mul,no_inplace} [@CS] ''
for{cpu,scan_fn} [@BP] ''
>Elemwise{mul,no_inplace} [@CS] ''"""
for{cpu,scan_fn} [@BO] ''
>Elemwise{mul,no_inplace} [@CS] ''"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
......
Markdown 格式
0%
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论