testgroup / pytensor · Commits

Commit eb2b9afb
Authored April 20, 2022 by Ricardo
Committed by Brandon T. Willard, July 07, 2022
Remove unnecessary use of patternbroadcast
The behavior was already accounted for by filter_variable, which is called directly as a fallback by the optimizer routines.
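As a rough illustration of the pattern behind this change (the helper and variable names below are placeholders, not code from this diff): instead of forcing broadcast flags with patternbroadcast, a rewrite can hand its replacement to the expected output's type, whose filter_variable performs any needed check or conversion; this is the same call the rewrite machinery already makes as a fallback.

# Hypothetical sketch only; `node` is an Apply node being rewritten and
# `new_out` is the candidate replacement for its first output.
def coerce_replacement(node, new_out):
    expected = node.outputs[0]
    # Old style: new_out = patternbroadcast(new_out, expected.broadcastable)
    # New style: let the expected type validate/convert the replacement.
    if not expected.type.is_super(new_out.type):
        new_out = expected.type.filter_variable(new_out)
    return [new_out]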
Parent: e6d204ec
Showing 6 changed files with 5 additions and 95 deletions (+5 −95)
aesara/sparse/opt.py                  +2  −8
aesara/tensor/basic_opt.py            +1  −24
aesara/tensor/nnet/abstract_conv.py   +1  −29
aesara/tensor/nnet/batchnorm.py       +0  −11
aesara/tensor/nnet/opt.py             +1  −7
aesara/tensor/subtensor_opt.py        +0  −16
aesara/sparse/opt.py

@@ -19,7 +19,7 @@ from aesara.sparse.basic import (
     usmm,
 )
 from aesara.tensor import blas
-from aesara.tensor.basic import as_tensor_variable, cast, patternbroadcast
+from aesara.tensor.basic import as_tensor_variable, cast
 from aesara.tensor.basic_opt import register_canonicalize, register_specialize
 from aesara.tensor.math import mul, neg, sub
 from aesara.tensor.shape import shape, specify_shape
@@ -42,13 +42,7 @@ def local_csm_properties_csm(fgraph, node):
     if node.op == csm_properties:
         (csm,) = node.inputs
         if csm.owner and (csm.owner.op == CSC or csm.owner.op == CSR):
-            # csm.owner.inputs could be broadcastable. In that case, we have
-            # to adjust the broadcasting flag here.
-            ret_var = [
-                patternbroadcast(i, o.broadcastable)
-                for i, o in zip(csm.owner.inputs, node.outputs)
-            ]
-            return ret_var
+            return csm.owner.inputs

     return False
aesara/tensor/basic_opt.py

@@ -61,7 +61,6 @@ from aesara.tensor.basic import (
     get_scalar_constant_value,
     join,
     ones_like,
-    patternbroadcast,
     stack,
     switch,
     tensor_copy,
@@ -2425,15 +2424,6 @@ def local_join_empty(fgraph, node):
         # by an error in the old join op.
         copy_stack_trace(node.outputs, ret)

-        if not o.type.is_super(ret.type):
-            assert ret.dtype == o.dtype
-            assert ret.ndim == o.ndim
-            ret = patternbroadcast(ret, node.outputs[0].broadcastable)
-
-        # Copy over stacktrace from previous output
-        # (after patternbroadcast op) for same reasons as before.
-        copy_stack_trace(node.outputs, ret)
-
         return [ret]
@@ -2832,20 +2822,7 @@ def local_reshape_lift(fgraph, node):
         # Copy stacktrace from both previous Reshape and UnaryElemwise op
         # because an error in new cg could have been caused by either ops.
         copy_stack_trace(node.outputs + node.inputs, e)
-
-        # In rare case the original broadcast was (False, True), but
-        # the new one is (False, False). So don't crash in that case.
-        if not node.outputs[0].type.is_super(e.type):
-            re = patternbroadcast(e, node.outputs[0].broadcastable)
-
-            # Copy over stack trace.
-            # If the graph fails it is usually due to the fact that a dimension
-            # that should be broadcastable does not actually have length 1,
-            copy_stack_trace(e, re)
-        else:
-            re = e
-        return [re]
+        return [e]


 register_canonicalize(OpRemove(tensor_copy), name="remove_tensor_copy")
aesara/tensor/nnet/abstract_conv.py

@@ -30,11 +30,7 @@ from aesara.configdefaults import config
 from aesara.graph.basic import Apply, Variable
 from aesara.graph.op import Op
 from aesara.raise_op import Assert
-from aesara.tensor.basic import (
-    as_tensor_variable,
-    get_scalar_constant_value,
-    patternbroadcast,
-)
+from aesara.tensor.basic import as_tensor_variable, get_scalar_constant_value
 from aesara.tensor.exceptions import NotScalarConstantError
 from aesara.tensor.var import TensorConstant, TensorVariable
@@ -2704,11 +2700,7 @@ class AbstractConv2d(AbstractConv):
         # Make sure that the broadcastable pattern of the inputs is used
         # for the gradients, even if the grad opts are not able to infer
         # that the dimensions are broadcastable.
-        # Also make sure that the gradient lives on the same device than
-        # the corresponding input.
-        d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)
         d_bottom = bottom.type.filter_variable(d_bottom)
-        d_weights = patternbroadcast(d_weights, weights.broadcastable)
         d_weights = weights.type.filter_variable(d_weights)

         return d_bottom, d_weights
@@ -2765,11 +2757,7 @@ class AbstractConv3d(AbstractConv):
         # Make sure that the broadcastable pattern of the inputs is used
         # for the gradients, even if the grad opts are not able to infer
         # that the dimensions are broadcastable.
-        # Also make sure that the gradient lives on the same device than
-        # the corresponding input.
-        d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)
         d_bottom = bottom.type.filter_variable(d_bottom)
-        d_weights = patternbroadcast(d_weights, weights.broadcastable)
         d_weights = weights.type.filter_variable(d_weights)

         return d_bottom, d_weights
@@ -3062,11 +3050,7 @@ class AbstractConv2d_gradWeights(AbstractConv_gradWeights):
         # Make sure that the broadcastable pattern of the inputs is used
         # for the gradients, even if the grad opts are not able to infer
         # that the dimensions are broadcastable.
-        # Also make sure that the gradient lives on the same device than
-        # the corresponding input.
-        d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)
         d_bottom = bottom.type.filter_variable(d_bottom)
-        d_top = patternbroadcast(d_top, top.broadcastable)
         d_top = top.type.filter_variable(d_top)

         d_height_width = (aesara.gradient.DisconnectedType()(),)
@@ -3129,11 +3113,7 @@ class AbstractConv3d_gradWeights(AbstractConv_gradWeights):
         # Make sure that the broadcastable pattern of the inputs is used
         # for the gradients, even if the grad opts are not able to infer
         # that the dimensions are broadcastable.
-        # Also make sure that the gradient lives on the same device than
-        # the corresponding input.
-        d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)
         d_bottom = bottom.type.filter_variable(d_bottom)
-        d_top = patternbroadcast(d_top, top.broadcastable)
         d_top = top.type.filter_variable(d_top)

         d_depth_height_width = (aesara.gradient.DisconnectedType()(),)
@@ -3452,11 +3432,7 @@ class AbstractConv2d_gradInputs(AbstractConv_gradInputs):
         # Make sure that the broadcastable pattern of the inputs is used
         # for the gradients, even if the grad opts are not able to infer
         # that the dimensions are broadcastable.
-        # Also make sure that the gradient lives on the same device than
-        # the corresponding input.
-        d_weights = patternbroadcast(d_weights, weights.broadcastable)
         d_weights = weights.type.filter_variable(d_weights)
-        d_top = patternbroadcast(d_top, top.broadcastable)
         d_top = top.type.filter_variable(d_top)

         d_height_width = (aesara.gradient.DisconnectedType()(),)
@@ -3519,11 +3495,7 @@ class AbstractConv3d_gradInputs(AbstractConv_gradInputs):
         # Make sure that the broadcastable pattern of the inputs is used
         # for the gradients, even if the grad opts are not able to infer
         # that the dimensions are broadcastable.
-        # Also make sure that the gradient lives on the same device than
-        # the corresponding input.
-        d_weights = patternbroadcast(d_weights, weights.broadcastable)
         d_weights = weights.type.filter_variable(d_weights)
-        d_top = patternbroadcast(d_top, top.broadcastable)
         d_top = top.type.filter_variable(d_top)

         d_depth_height_width = (aesara.gradient.DisconnectedType()(),)
aesara/tensor/nnet/batchnorm.py

@@ -823,11 +823,6 @@ def local_abstract_batch_norm_train(fgraph, node):
     )
     results.append(running_var)

-    results = [
-        at.patternbroadcast(r, r_orig.broadcastable)
-        for (r, r_orig) in zip(results, node.outputs)
-    ]
-
     for var in aesara.graph.basic.vars_between(node.inputs, results):
         if var not in node.inputs:
             copy_stack_trace(node.outputs[0], var)
@@ -862,11 +857,6 @@ def local_abstract_batch_norm_train_grad(fgraph, node):
     g_wrt_bias = at_sum(dy, axis=axes, keepdims=True)
     results = [g_wrt_inputs, g_wrt_scale, g_wrt_bias]

-    results = [
-        at.patternbroadcast(r, r_orig.broadcastable)
-        for (r, r_orig) in zip(results, node.outputs)
-    ]
-
     for var in aesara.graph.basic.vars_between(node.inputs, results):
         if var not in node.inputs:
             copy_stack_trace(node.outputs[0], var)
@@ -895,7 +885,6 @@ def local_abstract_batch_norm_inference(fgraph, node):
         epsilon = epsilon.astype("float32")

     result = (x - estimated_mean) * (scale / sqrt(estimated_variance + epsilon)) + bias
-    result = at.patternbroadcast(result, node.outputs[0].broadcastable)

     for var in aesara.graph.basic.vars_between(node.inputs, [result]):
         if var not in node.inputs:
aesara/tensor/nnet/opt.py

@@ -164,7 +164,6 @@ def local_abstractconv_gradweight_gemm(fgraph, node):
     if node.op.filter_flip:
         flip = (slice(None),) * (rval.ndim - 2) + (slice(None, None, -1),) * 2
         rval = rval[flip]
-    rval = aesara.tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
     copy_stack_trace(node.outputs[0], rval)

     return [rval]
@@ -193,7 +192,6 @@ def local_abstractconv3d_gradweight_gemm(fgraph, node):
     # need to flip the kernel if necessary
     if node.op.filter_flip:
         rval = rval[:, :, ::-1, ::-1, ::-1]
-    rval = aesara.tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
     copy_stack_trace(node.outputs[0], rval)

     return [rval]
@@ -393,10 +391,8 @@ def local_conv2d_gradweight_cpu(fgraph, node):
     if node.op.border_mode == "valid":
         res = res.dimshuffle((1, 0, 2, 3))
         res = res[:, :, ::-1, ::-1]

-    res = aesara.tensor.patternbroadcast(res, node.outputs[0].broadcastable)
-
     copy_stack_trace(node.outputs[0], res)
     return [res]
@@ -485,8 +481,6 @@ def local_conv2d_gradinputs_cpu(fgraph, node):
     )
     din = din(topgrad, filters)
     copy_stack_trace(node.outputs[0], din)
-    din = aesara.tensor.patternbroadcast(din, node.outputs[0].broadcastable)
-    copy_stack_trace(node.outputs[0], din)
     return [din]
aesara/tensor/subtensor_opt.py

@@ -23,7 +23,6 @@ from aesara.tensor.basic import (
     concatenate,
     extract_constant,
     get_scalar_constant_value,
-    patternbroadcast,
     switch,
 )
 from aesara.tensor.basic_opt import (
@@ -533,14 +532,6 @@ def local_subtensor_merge(fgraph, node):
             # because of either of the two original slicing operations
             orig_out = node.outputs[0]
             copy_stack_trace([orig_out, node.inputs[0]], out)

-            # Restore original broadcastable dimensions that `subtens()` may
-            # have been unable to infer again
-            if not orig_out.type.is_super(out.type):
-                assert out.dtype == orig_out.dtype
-                assert out.ndim == orig_out.ndim
-                out = patternbroadcast(out, orig_out.broadcastable)
-                copy_stack_trace([orig_out, node.inputs[0]], out)
-
             return [out]
@@ -658,11 +649,6 @@ def local_subtensor_of_alloc(fgraph, node):
     rval = alloc(nw_val, *nw_dims)
     if not isinstance(rval, (list, tuple)):
         rval = [rval]

-    if not node.outputs[0].type.is_super(rval[0].type):
-        # It happen that the make_node() isn't able to infer the same pattern.
-        # We know it is safe, so fix that.
-        rval[0] = patternbroadcast(rval[0], node.outputs[0].broadcastable)
-
     return rval
@@ -766,7 +752,6 @@ def local_subtensor_make_vector(fgraph, node):
             values = list(map(int, list(idx.value)))
             ret = make_vector_op(*[x.owner.inputs[v] for v in values])
             copy_stack_trace(node.outputs[0], ret)
-            ret = patternbroadcast(ret, node.outputs[0].broadcastable)
             return [ret]
     elif isinstance(idx, slice):
         # The index is a slice. If it's a constant slice, we can perform the
@@ -777,7 +762,6 @@ def local_subtensor_make_vector(fgraph, node):
             )[0]
             ret = make_vector_op(*x.owner.inputs[const_slice])
             copy_stack_trace(node.outputs, ret)
-            ret = patternbroadcast(ret, node.outputs[0].broadcastable)
             return [ret]
         except NotScalarConstantError:
             pass