提交 ce8a772d authored 作者: Ricardo's avatar Ricardo 提交者: Thomas Wiecki

Remove `warn__gpu_set_subtensor1` flag

上级 423b9851
...@@ -1454,15 +1454,6 @@ def add_deprecated_configvars(): ...@@ -1454,15 +1454,6 @@ def add_deprecated_configvars():
in_c_key=False, in_c_key=False,
) )
config.add(
"warn__gpu_set_subtensor1",
"Warn if previous versions of Aesara (before 0.6) could have given "
"incorrect results when moving to the gpu "
"set_subtensor(x[int vector], new_value)",
BoolParam(_warn_default("0.6")),
in_c_key=False,
)
config.add( config.add(
"warn__vm_gc_bug", "warn__vm_gc_bug",
"There was a bug that existed in the default Aesara configuration," "There was a bug that existed in the default Aesara configuration,"
......
...@@ -1275,14 +1275,13 @@ class TestSubtensor(utt.OptimizationTestMixin): ...@@ -1275,14 +1275,13 @@ class TestSubtensor(utt.OptimizationTestMixin):
) )
# Actual test (we compile a single Aesara function to make it faster). # Actual test (we compile a single Aesara function to make it faster).
with config.change_flags(warn__gpu_set_subtensor1=False): f = self.function(
f = self.function( all_inputs_var,
all_inputs_var, all_outputs_var,
all_outputs_var, accept_inplace=True,
accept_inplace=True, op=AdvancedIncSubtensor1,
op=AdvancedIncSubtensor1, N=len(all_outputs_var),
N=len(all_outputs_var), )
)
f_outs = f(*all_inputs_num) f_outs = f(*all_inputs_num)
assert len(f_outs) == len(all_outputs_num) assert len(f_outs) == len(all_outputs_num)
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论