提交 423b9851 authored 作者: Ricardo's avatar Ricardo 提交者: Thomas Wiecki

Remove `warn__subtensor_merge_bug` flag

上级 47586163
...@@ -1454,15 +1454,6 @@ def add_deprecated_configvars():
        in_c_key=False,
    )
config.add(
"warn__subtensor_merge_bug",
"Warn if previous versions of Aesara (before 0.5rc2) could have given "
"incorrect results when indexing into a subtensor with negative "
"stride (for instance, for instance, x[a:b:-1][c]).",
BoolParam(_warn_default("0.5")),
in_c_key=False,
)
    config.add(
        "warn__gpu_set_subtensor1",
        "Warn if previous versions of Aesara (before 0.6) could have given "
...
...@@ -2910,13 +2910,6 @@ def merge_two_slices(fgraph, slice1, len1, slice2, len2):
            # the k-th element from sl.start but the k-th element from
            # sl.stop backwards
            n_val = sl1.stop - 1 - sl2 * sl1.step
if config.warn__subtensor_merge_bug:
warnings.warning(
"Your current code is fine, but Aesara versions "
"prior to 0.5rc2 might have given an incorrect result. "
"To disable this warning, set the Aesara flag "
"warn__subtensor_merge_bug to False."
)
            # we need to pick either n_val or p_val and then follow same
            # steps as above for covering the index error cases
            val = switch(lt(reverse1, 0), n_val, p_val)
...
...@@ -362,7 +362,6 @@ Note that if you want to use a random variable ``d`` that will not be updated th
   import aesara
   import aesara.tensor as aet
aesara.config.warn__subtensor_merge_bug = False
   k = aet.iscalar("k")
   A = aet.vector("A")
...@@ -396,7 +395,6 @@ Note that if you want to use a random variable ``d`` that will not be updated th
   import numpy
   import aesara
   import aesara.tensor as aet
aesara.config.warn__subtensor_merge_bug = False
   coefficients = aesara.tensor.vector("coefficients")
   x = aet.scalar("x")
...
...@@ -10,8 +10,6 @@ import aesara.tensor as aet
# 1. First example
aesara.config.warn__subtensor_merge_bug = False
k = aet.iscalar("k")
A = aet.vector("A")
...@@ -58,8 +56,6 @@ print(calculate_polynomial1(test_coeff, 3))
# 3. Reduction performed inside scan
aesara.config.warn__subtensor_merge_bug = False
coefficients = aet.vector("coefficients")
x = aet.scalar("x")
max_coefficients_supported = 10000
...
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论