Unverified commit b0b34b59, authored by Brandon T. Willard, committed via GitHub

Merge pull request #214 from michaelosthege/objectify-config

Get rid of singleton design and most module-level variables
......@@ -130,7 +130,7 @@ jobs:
run: |
if [[ $FAST_COMPILE == "1" ]]; then export THEANO_FLAGS=$THEANO_FLAGS,mode=FAST_COMPILE; fi
if [[ $FLOAT32 == "1" ]]; then export THEANO_FLAGS=$THEANO_FLAGS,floatX=float32; fi
export THEANO_FLAGS=$THEANO_FLAGS,warn.ignore_bug_before=all,on_opt_error=raise,on_shape_error=raise,gcc.cxxflags=-pipe
export THEANO_FLAGS=$THEANO_FLAGS,warn__ignore_bug_before=all,on_opt_error=raise,on_shape_error=raise,gcc__cxxflags=-pipe
python -m pytest -x -r A --verbose --runslow --cov=theano/ --cov-report=xml:coverage/coverage-${MATRIX_ID}.xml --no-cov-on-fail $PART
env:
MATRIX_ID: ${{ steps.matrix-id.outputs.id }}
......
......@@ -396,7 +396,7 @@ class TestOpFromGraph(unittest_tools.InferShapeTester):
OpFromGraph,
)
@theano.change_flags(compute_test_value="raise")
@config.change_flags(compute_test_value="raise")
def test_compute_test_value(self):
x = tt.scalar("x")
x.tag.test_value = np.array(1.0, dtype=config.floatX)
......
......@@ -190,7 +190,7 @@ wb1 = WeirdBrokenOp("times1")
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_badthunkoutput():
# Check if the c and python code is consistent.
......@@ -200,12 +200,12 @@ def test_badthunkoutput():
f_good = theano.function(
[a, b],
off_by_half(a, b),
mode=debugmode.DebugMode(check_c_code=theano.config.cxx),
mode=debugmode.DebugMode(check_c_code=config.cxx),
)
f_inconsistent = theano.function(
[a, b],
inconsistent(a, b),
mode=debugmode.DebugMode(check_c_code=theano.config.cxx),
mode=debugmode.DebugMode(check_c_code=config.cxx),
)
# this should evaluate with no error
......@@ -283,7 +283,7 @@ def test_badoptimization_opt_err():
with pytest.raises(
theano.gof.toolbox.BadOptimization, match=r"insert_bad_dtype"
) as einfo:
with theano.change_flags(on_opt_error="raise"):
with config.change_flags(on_opt_error="raise"):
f2 = theano.function(
[a, b],
a + b,
......@@ -337,7 +337,7 @@ def test_stochasticoptimization():
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_just_c_code():
x = theano.tensor.dvector()
......@@ -366,7 +366,7 @@ def test_baddestroymap():
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_baddestroymap_c():
x = theano.tensor.dvector()
......@@ -420,7 +420,7 @@ class TestViewMap:
f([1, 5, 1], [3, 4, 2, 1, 4])
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_badviewmap_c(self):
x = theano.tensor.dvector()
......@@ -748,7 +748,7 @@ class TestPreallocatedOutput:
f = theano.function([a, b], out, mode=mode)
if theano.config.cxx:
if config.cxx:
with pytest.raises(debugmode.BadThunkOutput):
f(a_val, b_val)
else:
......@@ -779,7 +779,7 @@ class TestPreallocatedOutput:
f = theano.function([a, b], out, mode=mode)
if theano.config.cxx:
if config.cxx:
with pytest.raises(debugmode.BadThunkOutput):
f(a_val, b_val)
else:
......
......@@ -5,7 +5,7 @@ import pytest
import theano
from tests import unittest_tools as utt
from theano import change_flags, config, function
from theano import config, function
from theano.compile.ops import Rebroadcast, SpecifyShape, as_op, shape, shape_i
from theano.gof.fg import FunctionGraph
from theano.tensor.basic import (
......@@ -228,11 +228,11 @@ class TestRebroadcast(utt.InferShapeTester):
)
@change_flags(compute_test_value="raise")
@config.change_flags(compute_test_value="raise")
def test_nonstandard_shapes():
a = tensor3(config.floatX)
a.tag.test_value = np.random.random((2, 3, 4)).astype(config.floatX)
b = tensor3(theano.config.floatX)
b = tensor3(config.floatX)
b.tag.test_value = np.random.random((2, 3, 4)).astype(config.floatX)
tl = make_list([a, b])
......
......@@ -12,7 +12,7 @@ from theano.tensor.basic import _allclose
@pytest.fixture(scope="module", autouse=True)
def set_theano_flags():
with theano.change_flags(compute_test_value="raise"):
with config.change_flags(compute_test_value="raise"):
yield
......@@ -92,26 +92,19 @@ class TestComputeTestValue:
y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)
# should skip computation of test value
theano.config.compute_test_value = "off"
z = tt.dot(x, y)
assert not hasattr(z.tag, "test_value")
with config.change_flags(compute_test_value="off"):
z = tt.dot(x, y)
assert not hasattr(z.tag, "test_value")
# should fail when asked by user
theano.config.compute_test_value = "raise"
with pytest.raises(ValueError):
with pytest.raises(ValueError), config.change_flags(compute_test_value="raise"):
tt.dot(x, y)
# test that a warning is raised if required
theano.config.compute_test_value = "warn"
warnings.simplefilter("error", UserWarning)
try:
with warnings.catch_warnings(), config.change_flags(compute_test_value="warn"):
warnings.simplefilter("error", UserWarning)
with pytest.raises(UserWarning):
tt.dot(x, y)
finally:
# Restore the default behavior.
# TODO There is a cleaner way to do this in Python 2.6, once
# Theano drops support of Python 2.4 and 2.5.
warnings.simplefilter("default", UserWarning)
def test_string_var(self):
x = tt.matrix("x")
......@@ -302,7 +295,7 @@ class TestComputeTestValue:
assert o.tag.test_value == 4
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_no_perform(self):
i = scalar.int32("i")
......
......@@ -3,7 +3,7 @@ from copy import copy
import pytest
from tests.unittest_tools import assertFailure_fast
from theano import change_flags
from theano import config
from theano.gof import destroyhandler, graph
from theano.gof.fg import FunctionGraph, InconsistencyError
from theano.gof.graph import Apply, Variable
......@@ -417,7 +417,7 @@ def test_value_repl():
assert g.consistent()
@change_flags(compute_test_value="off")
@config.change_flags(compute_test_value="off")
def test_value_repl_2():
x, y, z = inputs()
sy = sigmoid(y)
......
......@@ -4,7 +4,7 @@ import numpy as np
import pytest
from tests.gof.utils import MyVariable, MyVariable2, op1, op2, op3
from theano import change_flags
from theano import config
from theano.gof.fg import FunctionGraph, MissingInputError
from theano.gof.toolbox import BadOptimization
......@@ -186,7 +186,7 @@ class TestFunctionGraph:
assert var5.owner.inputs[1] is var1
assert (var5.owner, 1) not in fg.get_clients(var2)
@change_flags(compute_test_value="raise")
@config.change_flags(compute_test_value="raise")
def test_replace_test_value(self):
var1 = MyVariable("var1")
......
......@@ -5,7 +5,6 @@ import theano
import theano.gof.op as op
import theano.tensor as tt
from theano import config, scalar, shared
from theano.configparser import change_flags
from theano.gof.graph import Apply, Variable
from theano.gof.op import Op
from theano.gof.type import Generic, Type
......@@ -133,13 +132,13 @@ class TestOp:
assert rval == "test Op no input"
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_op_struct(self):
sop = StructOp()
c = sop(theano.tensor.constant(0))
mode = None
if theano.config.mode == "FAST_COMPILE":
if config.mode == "FAST_COMPILE":
mode = "FAST_RUN"
f = theano.function([], c, mode=mode)
rval = f()
......@@ -219,7 +218,7 @@ class TestMakeThunk:
thunk = o.owner.op.make_thunk(
o.owner, storage_map, compute_map, no_recycling=[]
)
if theano.config.cxx:
if config.cxx:
required = thunk()
# Check everything went OK
assert not required # We provided all inputs
......@@ -275,7 +274,7 @@ def test_test_value_shared():
assert np.all(v == np.zeros((5, 5)))
@change_flags(compute_test_value="raise")
@config.change_flags(compute_test_value="raise")
def test_test_value_op():
x = tt.log(np.ones((5, 5)))
......@@ -284,7 +283,7 @@ def test_test_value_op():
assert np.allclose(v, np.zeros((5, 5)))
@change_flags(compute_test_value="off")
@config.change_flags(compute_test_value="off")
def test_get_test_values_no_debugger():
"""Tests that `get_test_values` returns `[]` when debugger is off."""
......@@ -292,7 +291,7 @@ def test_get_test_values_no_debugger():
assert op.get_test_values(x) == []
@change_flags(compute_test_value="ignore")
@config.change_flags(compute_test_value="ignore")
def test_get_test_values_ignore():
"""Tests that `get_test_values` returns `[]` when debugger is set to "ignore" and some values are missing."""
......@@ -304,7 +303,7 @@ def test_get_test_values_success():
"""Tests that `get_test_values` returns values when available (and the debugger is on)."""
for mode in ["ignore", "warn", "raise"]:
with change_flags(compute_test_value=mode):
with config.change_flags(compute_test_value=mode):
x = tt.vector()
x.tag.test_value = np.zeros((4,), dtype=config.floatX)
y = np.zeros((5, 5))
......@@ -321,7 +320,7 @@ def test_get_test_values_success():
assert iters == 1
@change_flags(compute_test_value="raise")
@config.change_flags(compute_test_value="raise")
def test_get_test_values_exc():
"""Tests that `get_test_values` raises an exception when debugger is set to raise and a value is missing."""
......
import theano.tensor as tt
from tests.gof.utils import MyType, MyVariable, op1, op2, op3, op4, op5, op6, op_y, op_z
from theano import config
from theano.gof.fg import FunctionGraph
from theano.gof.graph import Apply, Constant
from theano.gof.op import Op
......@@ -10,7 +11,6 @@ from theano.gof.opt import (
OpSub,
PatternSub,
TopoOptimizer,
config,
logging,
pre_constant_merge,
pre_greedy_local_optimizer,
......@@ -305,12 +305,8 @@ class TestMergeOptimizer:
x = MyVariable("x")
y = Constant(MyType(), 2, name="y")
z = Constant(MyType(), 2, name="z")
ctv_backup = config.compute_test_value
config.compute_test_value = "off"
try:
with config.change_flags(compute_test_value="off"):
e1 = op1(y, z)
finally:
config.compute_test_value = ctv_backup
g = FunctionGraph([x, y, z], [e1])
MergeOptimizer().optimize(g)
strg = str(g)
......@@ -515,7 +511,7 @@ class TestEquilibrium:
opt.optimize(g)
assert str(g) == "FunctionGraph(Op2(x, y))"
@theano.change_flags(on_opt_error="ignore")
@config.change_flags(on_opt_error="ignore")
def test_low_use_ratio(self):
x, y, z = map(MyVariable, "xyz")
e = op3(op4(x, y))
......
......@@ -284,6 +284,6 @@ class TestEnumTypes:
assert val_billion == val_million * 1000
assert val_two_billions == val_billion * 2
@theano.change_flags(**{"cmodule__debug": True})
@theano.config.change_flags(**{"cmodule__debug": True})
def test_op_with_cenumtype_debug(self):
self.test_op_with_cenumtype()
[Diff collapsed.]
......@@ -7,7 +7,7 @@ from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu as mode
from tests.sandbox.test_rng_mrg import java_samples, rng_mrg_overflow
from tests.sandbox.test_rng_mrg import test_f16_nonzero as cpu_f16_nonzero
from theano import change_flags, tensor
from theano import config, tensor
from theano.gpuarray.rng_mrg import GPUA_mrg_uniform
from theano.gpuarray.type import gpuarray_shared_constructor
from theano.sandbox import rng_mrg
......@@ -161,7 +161,7 @@ def test_overflow_gpu_new_backend():
def test_validate_input_types_gpuarray_backend():
with change_flags(compute_test_value="raise"):
with config.change_flags(compute_test_value="raise"):
rstate = np.zeros((7, 6), dtype="int32")
rstate = gpuarray_shared_constructor(rstate)
rng_mrg.mrg_uniform.new(rstate, ndim=None, dtype="float32", size=(3,))
......
......@@ -130,7 +130,7 @@ def test_advinc_subtensor1_dtype():
assert np.allclose(rval, rep)
@theano.change_flags(deterministic="more")
@theano.config.change_flags(deterministic="more")
def test_deterministic_flag():
shp = (3, 4)
for dtype1, dtype2 in [("float32", "int8")]:
......
......@@ -14,7 +14,7 @@ from theano.gof.op import get_test_value # noqa: E402
@pytest.fixture(scope="module", autouse=True)
def set_theano_flags():
with theano.change_flags(cxx="", compute_test_value="ignore"):
with theano.config.change_flags(cxx="", compute_test_value="ignore"):
yield
......@@ -136,7 +136,7 @@ def test_jax_compile_ops():
compare_jax_and_py(x_fg, [])
with theano.change_flags(compute_test_value="off"):
with theano.config.change_flags(compute_test_value="off"):
x = theano.compile.ops.SpecifyShape()(tt.as_tensor_variable(x_np), (2, 3))
x_fg = theano.gof.FunctionGraph([], [x])
......@@ -151,7 +151,7 @@ def test_jax_compile_ops():
compare_jax_and_py(x_fg, [])
with theano.change_flags(compute_test_value="off"):
with theano.config.change_flags(compute_test_value="off"):
x = theano.compile.ops.Rebroadcast((0, True), (1, False), (2, False))(
tt.as_tensor_variable(x_np)
)
......
......@@ -7,7 +7,7 @@ import pytest
import theano
from tests import unittest_tools as utt
from theano import change_flags, config, tensor
from theano import config, tensor
from theano.sandbox import rng_mrg
from theano.sandbox.rng_mrg import MRG_RandomStreams
......@@ -78,9 +78,7 @@ def test_consistency_randomstreams():
def test_get_substream_rstates():
try:
orig = theano.config.compute_test_value
theano.config.compute_test_value = "raise"
with config.change_flags(compute_test_value="raise"):
n_streams = 100
dtype = "float32"
......@@ -88,9 +86,6 @@ def test_get_substream_rstates():
rng.get_substream_rstates(n_streams, dtype)
finally:
theano.config.compute_test_value = orig
def test_consistency_cpu_serial():
# Verify that the random numbers generated by mrg_uniform, serially,
......@@ -949,7 +944,7 @@ def test_overflow_cpu():
# run with THEANO_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32
rng = MRG_RandomStreams(np.random.randint(1234))
fct = rng.uniform
with change_flags(compute_test_value="off"):
with config.change_flags(compute_test_value="off"):
# should raise error as the size overflows
sizes = [
(2 ** 31,),
......
import numpy as np
from theano import change_flags
from theano import config
from theano.scalar.basic import (
IntDiv,
Scalar,
......@@ -33,7 +33,7 @@ def test_div_types():
def test_filter_float_subclass():
"""Make sure `Scalar.filter` can handle `float` subclasses."""
with change_flags(floatX="float64"):
with config.change_flags(floatX="float64"):
test_type = Scalar("float64")
nan = np.array([np.nan], dtype="float64")[0]
......@@ -42,7 +42,7 @@ def test_filter_float_subclass():
filtered_nan = test_type.filter(nan)
assert isinstance(filtered_nan, float)
with change_flags(floatX="float32"):
with config.change_flags(floatX="float32"):
# Try again, except this time `nan` isn't a `float`
test_type = Scalar("float32")
......
[Diff collapsed.]
......@@ -7,7 +7,7 @@ import numpy as np
from mpi4py import MPI
import theano
from theano.configparser import change_flags
from theano import config
from theano.gof.sched import sort_schedule_fn
from theano.tensor.io import mpi_cmps, recv, send
......@@ -31,7 +31,7 @@ dtype = "float32"
scheduler = sort_schedule_fn(*mpi_cmps)
mode = theano.Mode(optimizer=None, linker=theano.OpWiseCLinker(schedule=scheduler))
with change_flags(compute_test_value="off"):
with config.change_flags(compute_test_value="off"):
if rank == 0:
x = theano.tensor.matrix("x", dtype=dtype)
y = x + 1
......
......@@ -4,7 +4,7 @@ import pytest
import theano
import theano.tensor as tt
from tests import unittest_tools
from theano import change_flags, function, shared
from theano import config, function, shared
from theano.tensor.nnet.neighbours import Images2Neibs, images2neibs, neibs2images
......@@ -161,7 +161,7 @@ class TestImages2Neibs(unittest_tools.InferShapeTester):
# print g()
# assert numpy.allclose(images.get_value(borrow=True), g())
@change_flags(compute_test_value="off")
@config.change_flags(compute_test_value="off")
def test_neibs_bad_shape(self):
shape = (2, 3, 10, 10)
for dtype in self.dtypes:
......@@ -351,7 +351,7 @@ class TestImages2Neibs(unittest_tools.InferShapeTester):
f_full = theano.function([], x_using_full, mode=self.mode)
unittest_tools.assert_allclose(f_valid(), f_full())
@change_flags(compute_test_value="off")
@config.change_flags(compute_test_value="off")
def test_neibs_bad_shape_wrap_centered(self):
shape = (2, 3, 10, 10)
......
[Diff collapsed.]
......@@ -128,8 +128,8 @@ class TestGemm:
cmp_linker(copy(z), a, x, y, b, "c|py")
cmp_linker(copy(z), a, x, y, b, "py")
if not dtype.startswith("complex") and theano.config.cxx:
# If theano.config.blas__ldflags is empty, Theano will use
if not dtype.startswith("complex") and config.cxx:
# If config.blas__ldflags is empty, Theano will use
# a NumPy C implementation of [sd]gemm_.
cmp_linker(copy(z), a, x, y, b, "c")
......@@ -477,7 +477,7 @@ class TestGemmNoFlags:
C = self.get_value(C, transpose_C, slice_C)
return alpha * np.dot(A, B) + beta * C
@theano.change_flags({"blas__ldflags": ""})
@config.change_flags({"blas__ldflags": ""})
def run_gemm(
self,
dtype,
......@@ -819,13 +819,9 @@ def test_upcasting_scalar_nogemm():
t = tt.fmatrix("t")
alpha = tt.cscalar("a")
on_opt_error = config.on_opt_error
try:
config.on_opt_error = "raise"
with config.change_flags(on_opt_error="raise"):
rval = tt.dot(w, v) * alpha + t
f = theano.function([w, v, t, alpha], rval)
finally:
config.on_opt_error = on_opt_error
t = f.maker.fgraph.toposort()
assert np.sum([isinstance(n.op, Gemm) for n in t]) == 0
......
......@@ -387,7 +387,7 @@ class TestCGemvNoFlags:
ref_val += beta * y
return ref_val
@theano.change_flags({"blas__ldflags": ""})
@theano.config.change_flags(blas__ldflags="")
def run_cgemv(self, dtype, ALPHA, BETA, transpose_A, slice_tensors):
f = self.get_function(
dtype, transpose_A=transpose_A, slice_tensors=slice_tensors
......
......@@ -3,7 +3,7 @@ import pytest
import theano
from tests import unittest_tools as utt
from theano import change_flags, config, function
from theano import config, function
from theano import tensor as tt
from theano.tensor.extra_ops import (
Bartlett,
......@@ -320,8 +320,8 @@ class TestSqueeze(utt.InferShapeTester):
def test_op(self):
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
data = np.random.random(size=shape).astype(theano.config.floatX)
variable = tt.TensorType(theano.config.floatX, broadcast)()
data = np.random.random(size=shape).astype(config.floatX)
variable = tt.TensorType(config.floatX, broadcast)()
f = theano.function([variable], self.op(variable))
......@@ -333,8 +333,8 @@ class TestSqueeze(utt.InferShapeTester):
def test_infer_shape(self):
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
data = np.random.random(size=shape).astype(theano.config.floatX)
variable = tt.TensorType(theano.config.floatX, broadcast)()
data = np.random.random(size=shape).astype(config.floatX)
variable = tt.TensorType(config.floatX, broadcast)()
self._compile_and_check(
[variable], [self.op(variable)], [data], tt.DimShuffle, warn=False
......@@ -342,15 +342,15 @@ class TestSqueeze(utt.InferShapeTester):
def test_grad(self):
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
data = np.random.random(size=shape).astype(theano.config.floatX)
data = np.random.random(size=shape).astype(config.floatX)
utt.verify_grad(self.op, [data])
def test_var_interface(self):
# same as test_op, but use a_theano_var.squeeze.
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
data = np.random.random(size=shape).astype(theano.config.floatX)
variable = tt.TensorType(theano.config.floatX, broadcast)()
data = np.random.random(size=shape).astype(config.floatX)
variable = tt.TensorType(config.floatX, broadcast)()
f = theano.function([variable], variable.squeeze())
......@@ -361,17 +361,17 @@ class TestSqueeze(utt.InferShapeTester):
assert np.allclose(tested, expected)
def test_axis(self):
variable = tt.TensorType(theano.config.floatX, [False, True, False])()
variable = tt.TensorType(config.floatX, [False, True, False])()
res = squeeze(variable, axis=1)
assert res.broadcastable == (False, False)
variable = tt.TensorType(theano.config.floatX, [False, True, False])()
variable = tt.TensorType(config.floatX, [False, True, False])()
res = squeeze(variable, axis=(1,))
assert res.broadcastable == (False, False)
variable = tt.TensorType(theano.config.floatX, [False, True, False, True])()
variable = tt.TensorType(config.floatX, [False, True, False, True])()
res = squeeze(variable, axis=(1, 3))
assert res.broadcastable == (False, False)
......@@ -396,7 +396,7 @@ class TestCompress(utt.InferShapeTester):
def test_op(self):
for axis, cond, shape in zip(self.axis_list, self.cond_list, self.shape_list):
cond_var = theano.tensor.ivector()
data = np.random.random(size=shape).astype(theano.config.floatX)
data = np.random.random(size=shape).astype(config.floatX)
data_var = theano.tensor.matrix()
f = theano.function(
......@@ -713,7 +713,7 @@ def test_to_one_hot():
o = to_one_hot(v, 10)
f = theano.function([v], o)
out = f([1, 2, 3, 5, 6])
assert out.dtype == theano.config.floatX
assert out.dtype == config.floatX
assert np.allclose(
out,
[
......@@ -1318,7 +1318,7 @@ class TestBroadcastTo(utt.InferShapeTester):
self.op_class = BroadcastTo
self.op = broadcast_to
@change_flags(compute_test_value="raise")
@config.change_flags(compute_test_value="raise")
def test_perform(self):
a = tt.scalar()
a.tag.test_value = 5
......
......@@ -4,7 +4,7 @@ import subprocess
import pytest
import theano
from theano import change_flags
from theano import config
from theano.gof.sched import sort_schedule_fn
from theano.tensor.io import (
MPISend,
......@@ -22,7 +22,7 @@ mpi_linker = theano.OpWiseCLinker(schedule=mpi_scheduler)
mpi_mode = theano.Mode(linker=mpi_linker)
@change_flags(compute_test_value="off")
@config.change_flags(compute_test_value="off")
def test_recv():
x = recv((10, 10), "float64", 0, 11)
assert x.dtype == "float64"
......@@ -41,7 +41,7 @@ def test_send():
assert sendnode.op.tag == 11
@change_flags(compute_test_value="off")
@config.change_flags(compute_test_value="off")
def test_can_make_function():
x = recv((5, 5), "float32", 0, 11)
y = x + 1
......@@ -83,7 +83,7 @@ def test_mpi_send_wait_cmp():
assert mpi_send_wait_cmp(waitnode, addnode) > 0 # wait happens last
@change_flags(compute_test_value="off")
@config.change_flags(compute_test_value="off")
def test_mpi_tag_ordering():
x = recv((2, 2), "float32", 1, 12)
y = recv((2, 2), "float32", 1, 11)
......
......@@ -7,7 +7,6 @@ from numpy.testing import assert_array_almost_equal
import theano
from tests import unittest_tools as utt
from theano import config, function, tensor
from theano.configparser import change_flags
from theano.tensor.basic import _allclose
from theano.tensor.nlinalg import (
SVD,
......@@ -39,7 +38,7 @@ def test_pseudoinverse_correctness():
rng = np.random.RandomState(utt.fetch_seed())
d1 = rng.randint(4) + 2
d2 = rng.randint(4) + 2
r = rng.randn(d1, d2).astype(theano.config.floatX)
r = rng.randn(d1, d2).astype(config.floatX)
x = tensor.matrix()
xi = pinv(x)
......@@ -57,7 +56,7 @@ def test_pseudoinverse_grad():
rng = np.random.RandomState(utt.fetch_seed())
d1 = rng.randint(4) + 2
d2 = rng.randint(4) + 2
r = rng.randn(d1, d2).astype(theano.config.floatX)
r = rng.randn(d1, d2).astype(config.floatX)
utt.verify_grad(pinv, [r])
......@@ -71,7 +70,7 @@ class TestMatrixInverse(utt.InferShapeTester):
def test_inverse_correctness(self):
r = self.rng.randn(4, 4).astype(theano.config.floatX)
r = self.rng.randn(4, 4).astype(config.floatX)
x = tensor.matrix()
xi = self.op(x)
......@@ -88,7 +87,7 @@ class TestMatrixInverse(utt.InferShapeTester):
def test_infer_shape(self):
r = self.rng.randn(4, 4).astype(theano.config.floatX)
r = self.rng.randn(4, 4).astype(config.floatX)
x = tensor.matrix()
xi = self.op(x)
......@@ -102,7 +101,7 @@ def test_matrix_dot():
rs = []
xs = []
for k in range(n):
rs += [rng.randn(4, 4).astype(theano.config.floatX)]
rs += [rng.randn(4, 4).astype(config.floatX)]
xs += [tensor.matrix()]
sol = matrix_dot(*xs)
......@@ -117,8 +116,8 @@ def test_matrix_dot():
def test_qr_modes():
rng = np.random.RandomState(utt.fetch_seed())
A = tensor.matrix("A", dtype=theano.config.floatX)
a = rng.rand(4, 4).astype(theano.config.floatX)
A = tensor.matrix("A", dtype=config.floatX)
a = rng.rand(4, 4).astype(config.floatX)
f = function([A], qr(A))
t_qr = f(a)
......@@ -189,15 +188,15 @@ class TestSvd(utt.InferShapeTester):
def test_tensorsolve():
rng = np.random.RandomState(utt.fetch_seed())
A = tensor.tensor4("A", dtype=theano.config.floatX)
B = tensor.matrix("B", dtype=theano.config.floatX)
A = tensor.tensor4("A", dtype=config.floatX)
B = tensor.matrix("B", dtype=config.floatX)
X = tensorsolve(A, B)
fn = function([A, B], [X])
# slightly modified example from np.linalg.tensorsolve docstring
a = np.eye(2 * 3 * 4).astype(theano.config.floatX)
a = np.eye(2 * 3 * 4).astype(config.floatX)
a.shape = (2 * 3, 4, 2, 3 * 4)
b = rng.rand(2 * 3, 4).astype(theano.config.floatX)
b = rng.rand(2 * 3, 4).astype(config.floatX)
n_x = np.linalg.tensorsolve(a, b)
t_x = fn(a, b)
......@@ -233,7 +232,7 @@ def test_tensorsolve():
def test_inverse_singular():
singular = np.array([[1, 0, 0]] + [[0, 1, 0]] * 2, dtype=theano.config.floatX)
singular = np.array([[1, 0, 0]] + [[0, 1, 0]] * 2, dtype=config.floatX)
a = tensor.matrix()
f = function([a], matrix_inverse(a))
with pytest.raises(np.linalg.LinAlgError):
......@@ -527,20 +526,20 @@ class TestLstsq:
class TestMatrixPower:
@change_flags(compute_test_value="raise")
@config.change_flags(compute_test_value="raise")
@pytest.mark.parametrize("n", [-1, 0, 1, 2, 3, 4, 5, 11])
def test_numpy_compare(self, n):
a = np.array([[0.1231101, 0.72381381], [0.28748201, 0.43036511]]).astype(
theano.config.floatX
config.floatX
)
A = tensor.matrix("A", dtype=theano.config.floatX)
A = tensor.matrix("A", dtype=config.floatX)
A.tag.test_value = a
Q = matrix_power(A, n)
n_p = np.linalg.matrix_power(a, n)
assert np.allclose(n_p, Q.get_test_value())
def test_non_square_matrix(self):
A = tensor.matrix("A", dtype=theano.config.floatX)
A = tensor.matrix("A", dtype=config.floatX)
Q = matrix_power(A, 3)
f = function([A], [Q])
a = np.array(
......@@ -549,7 +548,7 @@ class TestMatrixPower:
[0.74387558, 0.31780172],
[0.54381007, 0.28153101],
]
).astype(theano.config.floatX)
).astype(config.floatX)
with pytest.raises(ValueError):
f(a)
......@@ -574,11 +573,11 @@ class TestNormTests:
def test_numpy_compare(self):
rng = np.random.RandomState(utt.fetch_seed())
M = tensor.matrix("A", dtype=theano.config.floatX)
V = tensor.vector("V", dtype=theano.config.floatX)
M = tensor.matrix("A", dtype=config.floatX)
V = tensor.vector("V", dtype=config.floatX)
a = rng.rand(4, 4).astype(theano.config.floatX)
b = rng.rand(4).astype(theano.config.floatX)
a = rng.rand(4, 4).astype(config.floatX)
b = rng.rand(4).astype(config.floatX)
A = (
[None, "fro", "inf", "-inf", 1, -1, None, "inf", "-inf", 0, 1, -1, 2, -2],
......@@ -597,12 +596,12 @@ class TestNormTests:
class TestTensorInv(utt.InferShapeTester):
def setup_method(self):
super().setup_method()
self.A = tensor.tensor4("A", dtype=theano.config.floatX)
self.B = tensor.tensor3("B", dtype=theano.config.floatX)
self.a = np.random.rand(4, 6, 8, 3).astype(theano.config.floatX)
self.b = np.random.rand(2, 15, 30).astype(theano.config.floatX)
self.A = tensor.tensor4("A", dtype=config.floatX)
self.B = tensor.tensor3("B", dtype=config.floatX)
self.a = np.random.rand(4, 6, 8, 3).astype(config.floatX)
self.b = np.random.rand(2, 15, 30).astype(config.floatX)
self.b1 = np.random.rand(30, 2, 15).astype(
theano.config.floatX
config.floatX
) # for ind=1 since we need prod(b1.shape[:ind]) == prod(b1.shape[ind:])
def test_infer_shape(self):
......
[Diff collapsed.]
......@@ -11,7 +11,7 @@ import theano.scalar as scal
import theano.tensor as tt
from tests import unittest_tools as utt
from tests.tensor.utils import inplace_func, rand, randint_ranged
from theano import change_flags, config
from theano import config
from theano.compile import DeepCopyOp
from theano.gof.op import get_test_value
from theano.gof.toolbox import is_same_graph
......@@ -70,10 +70,10 @@ class TestSubtensor(utt.OptimizationTestMixin):
def setup_method(self):
self.shared = _shared
self.dtype = theano.config.floatX
self.dtype = config.floatX
mode = theano.compile.mode.get_default_mode()
self.mode = mode.including("local_useless_subtensor")
self.fast_compile = theano.config.mode == "FAST_COMPILE"
self.fast_compile = config.mode == "FAST_COMPILE"
utt.seed_rng()
def function(
......@@ -124,7 +124,7 @@ class TestSubtensor(utt.OptimizationTestMixin):
with pytest.raises(IndexError):
n.__getitem__(0)
@change_flags(compute_test_value="off")
@config.change_flags(compute_test_value="off")
def test_err_bounds(self):
n = self.shared(np.ones(3, dtype=self.dtype))
t = n[7]
......@@ -204,7 +204,7 @@ class TestSubtensor(utt.OptimizationTestMixin):
assert tval.shape == (2,)
assert (tval == [0.0, 2.0]).all()
@change_flags(compute_test_value="off")
@config.change_flags(compute_test_value="off")
def test_err_bounds0(self):
n = self.shared(np.ones((2, 3), dtype=self.dtype) * 5)
for idx in [(0, 4), (0, -4)]:
......@@ -220,7 +220,7 @@ class TestSubtensor(utt.OptimizationTestMixin):
finally:
_logger.setLevel(oldlevel)
@change_flags(compute_test_value="off")
@config.change_flags(compute_test_value="off")
def test_err_bounds1(self):
n = self.shared(np.ones((2, 3), dtype=self.dtype) * 5)
t = n[4:5, 3]
......@@ -449,7 +449,7 @@ class TestSubtensor(utt.OptimizationTestMixin):
inc_subtensor(n4[test_array > 2, ..., 0, 1], 1).eval(),
)
with change_flags(compute_test_value="off"):
with config.change_flags(compute_test_value="off"):
# the boolean mask should have the correct shape
# - too large, padded with True
mask = np.array([True, False, True])
......@@ -783,7 +783,7 @@ class TestSubtensor(utt.OptimizationTestMixin):
g = tt.grad(h.sum(), W)
N = 2
if (
theano.config.mode == "FAST_COMPILE"
config.mode == "FAST_COMPILE"
and AdvancedIncSubtensor1 is AdvancedIncSubtensor1
):
N = 3
......@@ -1273,9 +1273,7 @@ class TestSubtensor(utt.OptimizationTestMixin):
)
# Actual test (we compile a single Theano function to make it faster).
orig_warn = theano.config.warn__gpu_set_subtensor1
try:
theano.config.warn__gpu_set_subtensor1 = False
with config.change_flags(warn__gpu_set_subtensor1=False):
f = self.function(
all_inputs_var,
all_outputs_var,
......@@ -1283,8 +1281,6 @@ class TestSubtensor(utt.OptimizationTestMixin):
op=AdvancedIncSubtensor1,
N=len(all_outputs_var),
)
finally:
theano.config.warn__gpu_set_subtensor1 = orig_warn
f_outs = f(*all_inputs_num)
assert len(f_outs) == len(all_outputs_num)
......@@ -1528,7 +1524,7 @@ class TestAdvancedSubtensor:
def setup_method(self):
self.shared = _shared
self.dtype = theano.config.floatX
self.dtype = config.floatX
self.mode = theano.compile.mode.get_default_mode()
self.s = iscalar()
......@@ -2183,7 +2179,7 @@ class TestInferShape(utt.InferShapeTester):
bivec.tag.test_value = bivec_val
# Make sure it doesn't complain about test values
with theano.change_flags(compute_test_value="raise"):
with config.change_flags(compute_test_value="raise"):
self._compile_and_check(
[admat, aivec],
[admat[1:3, aivec]],
......@@ -2215,7 +2211,7 @@ class TestInferShape(utt.InferShapeTester):
assert abs_res.broadcastable == (False,)
@change_flags(compute_test_value="raise")
@config.change_flags(compute_test_value="raise")
def test_basic_shape():
test_shape = (5, 4)
test_indices = (make_slice(1, 3, None),)
......@@ -2223,7 +2219,7 @@ def test_basic_shape():
assert get_test_value(res) == (2,)
@change_flags(compute_test_value="raise")
@config.change_flags(compute_test_value="raise")
def test_indexed_result_shape():
_test_idx = np.ix_(np.array([True, True]), np.array([True]), np.array([True, True]))
......
......@@ -4,7 +4,7 @@ from tempfile import mkdtemp
import numpy as np
import pytest
from theano import change_flags, config
from theano import config
from theano.tensor.type import TensorType
......@@ -42,7 +42,7 @@ def test_filter_ndarray_subclass():
def test_filter_float_subclass():
"""Make sure `TensorType.filter` can handle `float` subclasses."""
with change_flags(floatX="float64"):
with config.change_flags(floatX="float64"):
test_type = TensorType("float64", broadcastable=[])
nan = np.array([np.nan], dtype="float64")[0]
......@@ -51,7 +51,7 @@ def test_filter_float_subclass():
filtered_nan = test_type.filter(nan)
assert isinstance(filtered_nan, np.ndarray)
with change_flags(floatX="float32"):
with config.change_flags(floatX="float32"):
# Try again, except this time `nan` isn't a `float`
test_type = TensorType("float32", broadcastable=[])
......
......@@ -8,7 +8,7 @@ import pytest
import theano
from tests import unittest_tools as utt
from theano import change_flags, config, function, gof, shared, tensor
from theano import config, function, gof, shared, tensor
from theano.compile.mode import get_default_mode
from theano.tensor.type import TensorType
......@@ -16,7 +16,7 @@ from theano.tensor.type import TensorType
# Used to exclude random numbers too close to certain values
_eps = 1e-2
if theano.config.floatX == "float32":
if config.floatX == "float32":
angle_eps = 1e-4
else:
angle_eps = 1e-10
......@@ -572,7 +572,7 @@ def makeTester(
# instantiated on the following bad inputs: %s"
# % (self.op, testname, node, inputs))
@change_flags(compute_test_value="off")
@config.change_flags(compute_test_value="off")
@pytest.mark.skipif(skip, reason="Skipped")
def test_bad_runtime(self):
for testname, inputs in self.bad_runtime.items():
......
"""Test config options."""
import configparser as stdlib_configparser
import logging
from unittest.mock import patch
......@@ -6,13 +7,69 @@ import pytest
from theano import configdefaults, configparser
from theano.configdefaults import default_blas_ldflags
from theano.configparser import THEANO_FLAGS_DICT, AddConfigVar, ConfigParam
from theano.configparser import ConfigParam
def _create_test_config():
    """Build a fresh, fully isolated ``TheanoConfigParser`` for test use.

    The instance gets its own empty flags dict and its own (empty) stdlib
    config parsers, so tests never touch the global config singleton.
    """
    empty_flags = {}
    cfg = stdlib_configparser.ConfigParser()
    raw_cfg = stdlib_configparser.RawConfigParser()
    return configparser.TheanoConfigParser(
        flags_dict=empty_flags,
        theano_cfg=cfg,
        theano_raw_cfg=raw_cfg,
    )
def test_api_deprecation_warning():
    """The ``configparser.config`` alias must emit DeprecationWarnings,
    while ``configdefaults.config`` is the warning-free access path.
    """
    # accessing through configdefaults.config is the new best practice
    # NOTE(review): pytest.warns(None) is deprecated/removed in pytest>=7
    # (use warnings.catch_warnings) — confirm the pinned pytest version.
    with pytest.warns(None):
        root = configdefaults.config
        assert isinstance(str(root), str)
    # accessing through configparser.config is discouraged
    root = configparser.config
    # Both add() and change_flags() through the deprecated alias must warn,
    # with a message pointing at the replacement ("... instead").
    # NOTE(review): this permanently registers "test_deprecationwarning" on
    # the global config instance — there is no teardown for it.
    with pytest.warns(DeprecationWarning, match="instead"):
        root.add(
            "test_deprecationwarning",
            "A config var from a test case.",
            configparser.StrParam("test_default"),
        )
    with pytest.warns(DeprecationWarning, match="instead"):
        with root.change_flags(test_deprecationwarning="new_value"):
            pass
def test_api_redirect():
    """Dotted (section.option) access to ``__``-flattened config settings
    still resolves, but only through a deprecation-warning redirect.
    """
    cfg = _create_test_config()
    # One section level: "test__section_redirect" -> cfg.test.section_redirect
    cfg.add(
        "test__section_redirect",
        "A config var from a test case.",
        configparser.StrParam("test_default"),
    )
    assert hasattr(cfg, "test__section_redirect")
    assert cfg.test__section_redirect == "test_default"
    # The "test" attribute is a redirect object, not a real setting.
    assert hasattr(cfg, "test")
    assert isinstance(cfg.test, configparser._SectionRedirect)
    with pytest.warns(DeprecationWarning):
        assert cfg.test.section_redirect == "test_default"
    # Two section levels: "test__subsection__redirect" -> cfg.test.subsection.redirect
    cfg.add(
        "test__subsection__redirect",
        "A config var from a test case.",
        configparser.StrParam("test_default2"),
    )
    assert hasattr(cfg, "test__subsection__redirect")
    assert cfg.test__subsection__redirect == "test_default2"
    with pytest.warns(DeprecationWarning):
        assert cfg.test.subsection.redirect == "test_default2"
def test_invalid_default():
# Ensure an invalid default value found in the Theano code only causes
# a crash if it is not overridden by the user.
root = _create_test_config()
def validate(val):
if val == "invalid":
raise ValueError("Test-triggered")
......@@ -20,26 +77,25 @@ def test_invalid_default():
with pytest.raises(ValueError, match="Test-triggered"):
# This should raise a ValueError because the default value is
# invalid.
AddConfigVar(
"T_config__test_invalid_default_a",
root.add(
"test__test_invalid_default_a",
doc="unittest",
configparam=ConfigParam("invalid", validate=validate),
in_c_key=False,
)
THEANO_FLAGS_DICT["T_config__test_invalid_default_b"] = "ok"
root._flags_dict["test__test_invalid_default_b"] = "ok"
# This should succeed since we defined a proper value, even
# though the default was invalid.
AddConfigVar(
"T_config__test_invalid_default_b",
root.add(
"test__test_invalid_default_b",
doc="unittest",
configparam=ConfigParam("invalid", validate=validate),
in_c_key=False,
)
# TODO We should remove these dummy options on test exit.
# Check that the flag has been removed
assert "T_config__test_invalid_default_b" not in THEANO_FLAGS_DICT
assert "test__test_invalid_default_b" not in root._flags_dict
@patch("theano.configdefaults.try_blas_flag", return_value=None)
......@@ -82,26 +138,30 @@ def test_config_param_apply_and_validation():
def test_config_hash():
# TODO: use custom config instance for the test
root = configparser.config
configparser.AddConfigVar(
"test_config_hash",
root = _create_test_config()
root.add(
"test__config_hash",
"A config var from a test case.",
configparser.StrParam("test_default"),
root=root,
)
h0 = configparser.get_config_hash()
h0 = root.get_config_hash()
with configparser.change_flags(test_config_hash="new_value"):
assert root.test_config_hash == "new_value"
h1 = configparser.get_config_hash()
with root.change_flags(test__config_hash="new_value"):
assert root.test__config_hash == "new_value"
h1 = root.get_config_hash()
h2 = configparser.get_config_hash()
h2 = root.get_config_hash()
assert h1 != h0
assert h2 == h0
def test_config_print():
    """str() on the global config instance must render without raising."""
    rendered = str(configdefaults.config)
    assert isinstance(rendered, str)
class TestConfigTypes:
def test_bool(self):
valids = {
......@@ -139,26 +199,56 @@ class TestConfigTypes:
def test_config_context():
# TODO: use custom config instance for the test
root = configparser.config
configparser.AddConfigVar(
"test_config_context",
root = _create_test_config()
root.add(
"test__config_context",
"A config var from a test case.",
configparser.StrParam("test_default"),
root=root,
)
assert hasattr(root, "test_config_context")
assert root.test_config_context == "test_default"
with configparser.change_flags(test_config_context="new_value"):
assert root.test_config_context == "new_value"
assert root.test_config_context == "test_default"
assert hasattr(root, "test__config_context")
assert root.test__config_context == "test_default"
with root.change_flags(test__config_context="new_value"):
assert root.test__config_context == "new_value"
with root.change_flags({"test__config_context": "new_value2"}):
assert root.test__config_context == "new_value2"
assert root.test__config_context == "new_value"
assert root.test__config_context == "test_default"
def test_invalid_configvar_access():
    """Config vars registered on one ``TheanoConfigParser`` instance must not
    be readable through — or re-registrable on — a different instance.
    """
    root = configdefaults.config
    root_test = _create_test_config()
    # add a setting to the test instance
    root_test.add(
        "test__on_test_instance",
        "This config setting was added to the test instance.",
        configparser.IntParam(5),
    )
    assert hasattr(root_test, "test__on_test_instance")
    # While the property _actually_ exists on all instances,
    # accessing it through another instance raises an AttributeError.
    # (Properties live on the shared class; the accessor checks which
    # instance registered them — hence hasattr() is False on `root`.)
    assert not hasattr(root, "test__on_test_instance")
    # But we can make sure that nothing crazy happens when we access it:
    # direct attribute access raises the dedicated ConfigAccessViolation
    # rather than a generic AttributeError.
    with pytest.raises(configparser.ConfigAccessViolation, match="different instance"):
        print(root.test__on_test_instance)
    # And also that we can't add two configs of the same name to different instances:
    with pytest.raises(AttributeError, match="already registered"):
        root.add(
            "test__on_test_instance",
            "This config setting was already added to another instance.",
            configparser.IntParam(5),
        )
def test_no_more_dotting():
root = configdefaults.config
with pytest.raises(ValueError, match="Dot-based"):
AddConfigVar(
"T_config.something",
root.add(
"test.something",
doc="unittest",
configparam=ConfigParam("invalid"),
in_c_key=False,
......
......@@ -5,7 +5,7 @@ import pytest
import theano
from tests import unittest_tools as utt
from theano import change_flags, config, gof, gradient
from theano import config, gof, gradient
from theano.gof.null_type import NullType
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
......@@ -521,7 +521,7 @@ def test_known_grads_integers():
f = theano.function([g_expected], g_grad)
x = -3
gv = np.cast[theano.config.floatX](0.6)
gv = np.cast[config.floatX](0.6)
g_actual = f(gv)
......@@ -784,7 +784,7 @@ def test_grad_clip():
f = theano.function([x], outputs=[z, z2])
if theano.config.mode != "FAST_COMPILE":
if config.mode != "FAST_COMPILE":
topo = f.maker.fgraph.toposort()
assert not any([isinstance(node.op, gradient.GradClip) for node in topo])
out = f(2.0)
......@@ -800,7 +800,7 @@ def test_grad_scale():
f = theano.function([x], outputs=[z, z2])
if theano.config.mode != "FAST_COMPILE":
if config.mode != "FAST_COMPILE":
topo = f.maker.fgraph.toposort()
assert not any([isinstance(node.op, gradient.GradScale) for node in topo])
out = f(2.0)
......@@ -808,11 +808,11 @@ def test_grad_scale():
assert np.allclose(out, (8, 4))
@change_flags(compute_test_value="off")
@config.change_flags(compute_test_value="off")
def test_undefined_grad_opt():
# Make sure that undefined grad get removed in optimized graph.
random = RandomStreams(np.random.randint(1, 2147462579))
pvals = theano.shared(np.random.rand(10, 20).astype(theano.config.floatX))
pvals = theano.shared(np.random.rand(10, 20).astype(config.floatX))
pvals = pvals / pvals.sum(axis=1)
pvals = gradient.zero_grad(pvals)
samples = random.multinomial(pvals=pvals, n=1)
......
......@@ -64,7 +64,12 @@ for p in sys.path:
raise RuntimeError("You have the theano directory in your Python path.")
from theano.configdefaults import config
from theano.configparser import change_flags
from theano.utils import deprecated
change_flags = deprecated("Use theano.config.change_flags instead!")(
config.change_flags
)
# This is the api version for ops that generate C code. External ops
......
......@@ -401,7 +401,7 @@ class OpFromGraph(Op):
is_inline = self.is_inline
return "%(name)s{inline=%(is_inline)s}" % locals()
@theano.change_flags(compute_test_value="off")
@theano.config.change_flags(compute_test_value="off")
def _recompute_lop_op(self):
"""
converts self._lop_op from user supplied form to type(self) instance
......@@ -541,7 +541,7 @@ class OpFromGraph(Op):
self._lop_op_is_cached = True
self._lop_type = "lop"
@theano.change_flags(compute_test_value="off")
@theano.config.change_flags(compute_test_value="off")
def _recompute_rop_op(self):
"""
converts self._rop_op from user supplied form to type(self) instance
......
......@@ -18,7 +18,7 @@ from warnings import warn
import numpy as np
import theano
from theano import change_flags, config, gof
from theano import config, gof
from theano.compile.function.types import (
Function,
FunctionMaker,
......@@ -2446,7 +2446,7 @@ class _Maker(FunctionMaker): # inheritance buys a few helper functions
)
fgraph.equivalence_tracker = equivalence_tracker
with change_flags(compute_test_value=config.compute_test_value_opt):
with config.change_flags(compute_test_value=config.compute_test_value_opt):
optimizer(fgraph)
theano.compile.function.types.insert_deepcopy(
......@@ -2506,7 +2506,7 @@ class _Maker(FunctionMaker): # inheritance buys a few helper functions
file=sys.stderr,
)
self.fgraph = fgraph
if theano.config.cycle_detection == "regular":
if config.cycle_detection == "regular":
destroy_handler_added = False
for feature in fgraph._features:
if isinstance(feature, gof.DestroyHandler):
......@@ -2516,7 +2516,9 @@ class _Maker(FunctionMaker): # inheritance buys a few helper functions
fgraph.attach_feature(gof.DestroyHandler())
for o in fgraph.outputs:
try:
with change_flags(compute_test_value=config.compute_test_value_opt):
with config.change_flags(
compute_test_value=config.compute_test_value_opt
):
fgraph.replace_validate(
o, _output_guard(o), reason="output_guard"
)
......
......@@ -1153,7 +1153,7 @@ def _pickle_Function(f):
def _constructor_Function(maker, input_storage, inputs_data, trust_input=False):
if not theano.config.unpickle_function:
if not config.unpickle_function:
return None
f = maker.create(input_storage, trustme=True)
......@@ -1360,7 +1360,7 @@ class FunctionMaker:
from theano.gof.compilelock import get_lock, release_lock
graph_db_file = os.path.join(theano.config.compiledir, "optimized_graphs.pkl")
graph_db_file = os.path.join(config.compiledir, "optimized_graphs.pkl")
# the inputs, outputs, and size of the graph to be optimized
inputs_new = [inp.variable for inp in inputs]
......@@ -1379,12 +1379,12 @@ class FunctionMaker:
print(f"create new graph_db in {graph_db_file}")
# load the graph_db dictionary
try:
with open(graph_db_file, "rb") as f:
with open(graph_db_file, "rb") as f, config.change_flags(
unpickle_function=False
):
# Temporary hack to allow
# tests.scan.test_scan.T_Scan to
# finish. Should be changed in definitive version.
tmp = theano.config.unpickle_function
theano.config.unpickle_function = False
graph_db = pickle.load(f)
print("graph_db loaded and it is not empty")
except EOFError as e:
......@@ -1392,8 +1392,6 @@ class FunctionMaker:
print(e)
print("graph_db loaded and it is empty")
graph_db = {}
finally:
theano.config.unpickle_function = tmp
return graph_db
......@@ -1583,50 +1581,52 @@ class FunctionMaker:
# Fetch the optimizer and linker
optimizer, linker = mode.optimizer, copy.copy(mode.linker)
if need_opt:
compute_test_value_orig = theano.config.compute_test_value
limit_orig = theano.config.traceback__limit
# Why we add stack on node when it get done in output var?
try:
# optimize the fgraph
theano.config.compute_test_value = theano.config.compute_test_value_opt
theano.config.traceback__limit = theano.config.traceback__compile_limit
start_optimizer = time.time()
# In case there is an error during optimization.
optimizer_profile = None
opt_time = None
# now optimize the graph
if theano.config.cache_optimizations:
optimizer_profile = self.optimize_graph_with_cache(
optimizer, inputs, outputs
)
else:
optimizer_profile = optimizer(fgraph)
with config.change_flags(
compute_test_value=config.compute_test_value_opt,
traceback__limit=config.traceback__compile_limit,
):
# now optimize the graph
if config.cache_optimizations:
optimizer_profile = self.optimize_graph_with_cache(
optimizer, inputs, outputs
)
else:
optimizer_profile = optimizer(fgraph)
end_optimizer = time.time()
opt_time = end_optimizer - start_optimizer
_logger.debug(f"Optimizing took {opt_time:f} seconds")
end_optimizer = time.time()
opt_time = end_optimizer - start_optimizer
_logger.debug(f"Optimizing took {opt_time:f} seconds")
# Add deep copy to respect the memory interface
insert_deepcopy(fgraph, inputs, outputs + additional_outputs)
# Add deep copy to respect the memory interface
insert_deepcopy(fgraph, inputs, outputs + additional_outputs)
finally:
theano.config.compute_test_value = compute_test_value_orig
theano.config.traceback__limit = limit_orig
# If the optimizer got interrupted
if opt_time is None:
end_optimizer = time.time()
opt_time = end_optimizer - start_optimizer
theano.compile.profiling.total_graph_opt_time += opt_time
if profile:
if optimizer_profile is None and hasattr(optimizer, "pre_profile"):
optimizer_profile = optimizer.pre_profile
profile.optimizer_time += opt_time
if theano.config.profile_optimizer:
if config.profile_optimizer:
profile.optimizer_profile = (optimizer, optimizer_profile)
# IF False, if mean the profile for that function was explicitly disabled
elif theano.config.profile_optimizer and profile is not False:
# IF False, if mean the profile for that function was
# explicitly disabled
elif config.profile_optimizer and profile is not False:
warnings.warn(
(
"config.profile_optimizer requires config.profile to "
......@@ -1687,7 +1687,7 @@ class FunctionMaker:
def _check_unused_inputs(self, inputs, outputs, on_unused_input):
if on_unused_input is None:
on_unused_input = theano.config.on_unused_input
on_unused_input = config.on_unused_input
if on_unused_input == "ignore":
return
......@@ -1816,14 +1816,11 @@ class FunctionMaker:
# Get a function instance
start_linker = time.time()
start_import_time = theano.gof.cmodule.import_time
limit_orig = theano.config.traceback__limit
try:
theano.config.traceback__limit = theano.config.traceback__compile_limit
with config.change_flags(traceback__limit=config.traceback__compile_limit):
_fn, _i, _o = self.linker.make_thunk(
input_storage=input_storage_lists, storage_map=storage_map
)
finally:
theano.config.traceback__limit = limit_orig
end_linker = time.time()
......@@ -1857,8 +1854,8 @@ class FunctionMaker:
def _constructor_FunctionMaker(kwargs):
# Needed for old pickle
# Old pickle have at least the problem that output_keys where not saved.
if theano.config.unpickle_function:
if theano.config.reoptimize_unpickled_function:
if config.unpickle_function:
if config.reoptimize_unpickled_function:
del kwargs["fgraph"]
return FunctionMaker(**kwargs)
else:
......@@ -1965,7 +1962,7 @@ def orig_function(
output_keys=output_keys,
name=name,
)
with theano.change_flags(compute_test_value="off"):
with config.change_flags(compute_test_value="off"):
fn = m.create(defaults)
finally:
t2 = time.time()
......
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论