提交 40d0a283 authored 作者: Brandon T. Willard's avatar Brandon T. Willard 提交者: Thomas Wiecki

Use direct imports for theano.compile.io and theano.compile.mode objects

上级 54c106b7
......@@ -29,9 +29,9 @@ Conditions
z_lazy = ifelse(tt.lt(a,b), tt.mean(x), tt.mean(y))
f_switch = theano.function([a,b,x,y], z_switch,
mode=theano.Mode(linker='vm'))
mode=theano.compile.mode.Mode(linker='vm'))
f_lazyifelse = theano.function([a,b,x,y], z_lazy,
mode=theano.Mode(linker='vm'))
mode=theano.compile.mode.Mode(linker='vm'))
val1 = 0.
val2 = 1.
......
......@@ -232,11 +232,13 @@ that control how ``theano.function`` handles its argument[s] and return value[s]
.. testcode::
import theano, theano.tensor
import theano
import theano.tensor as tt
from theano.compile.io import In, Out
x = theano.tensor.matrix()
x = tt.matrix()
y = 2 * x
f = theano.function([theano.In(x, borrow=True)], theano.Out(y, borrow=True))
f = theano.function([In(x, borrow=True)], Out(y, borrow=True))
Borrowing an input means that Theano will treat the argument you provide as if
it were part of Theano's pool of temporaries. Consequently, your input
......
......@@ -31,9 +31,9 @@ IfElse vs Switch
z_lazy = ifelse(tt.lt(a, b), tt.mean(x), tt.mean(y))
f_switch = theano.function([a, b, x, y], z_switch,
mode=theano.Mode(linker='vm'))
mode=theano.compile.mode.Mode(linker='vm'))
f_lazyifelse = theano.function([a, b, x, y], z_lazy,
mode=theano.Mode(linker='vm'))
mode=theano.compile.mode.Mode(linker='vm'))
val1 = 0.
val2 = 1.
......
......@@ -210,7 +210,7 @@ The optimizers Theano provides are summarized below to indicate the trade-offs
one might make between compilation time and execution time.
These optimizers can be enabled globally with the Theano flag: ``optimizer=name``
or per call to theano functions with ``theano.function(...mode=theano.Mode(optimizer="name"))``.
or per call to theano functions with ``function(...mode=Mode(optimizer="name"))``.
================= ============ ============== ==================================================
optimizer Compile time Execution time Description
......
......@@ -11,6 +11,7 @@ import theano.tensor as tt
from theano.compile.function import function
from theano.compile.function.types import UnusedInputError
from theano.compile.io import In, Out
from theano.compile.mode import Mode
from theano.configdefaults import config
from theano.graph.basic import Constant
from theano.graph.fg import MissingInputError
......@@ -627,14 +628,12 @@ class TestFunction:
f(o + 0.1) # should not clobber the memory used to store four
assert np.all(four == 4)
f = function(
[a], Out(a * 4, borrow=True), mode=theano.Mode("c|py_nogc", "fast_run")
)
f = function([a], Out(a * 4, borrow=True), mode=Mode("c|py_nogc", "fast_run"))
o = np.ones((3, 3))
four = f(o)
assert np.all(four == 4)
f(o + 0.1) # should clobber the memory used to store four
if theano.config.cxx:
if config.cxx:
assert not np.all(four == 4)
else:
# The Elemwise.perform method doesn't reuse memory
......@@ -691,9 +690,7 @@ class TestFunction:
a, b = dscalars("a", "b")
c = a + b
func = theano.function(
[theano.In(a, name="first"), theano.In(b, value=1, name="second")], c
)
func = theano.function([In(a, name="first"), In(b, value=1, name="second")], c)
x = func(first=1)
try:
func(second=2)
......@@ -721,17 +718,17 @@ class TestFunction:
# Assert cases we should check for aliased inputs
for d in [
dict(
inputs=[theano.In(x1, borrow=True)],
inputs=[In(x1, borrow=True)],
outputs=[x1 + 1],
updates=[(s2, s2 + 3)],
),
dict(
inputs=[theano.In(x1, borrow=True, mutable=True)],
inputs=[In(x1, borrow=True, mutable=True)],
outputs=[x1 + 1],
updates=[(s2, s2 + 3)],
),
dict(
inputs=[theano.In(x1, mutable=True)],
inputs=[In(x1, mutable=True)],
outputs=[x1 + 1],
updates=[(s2, s2 + 3)],
),
......@@ -1146,12 +1143,12 @@ def test_empty_givens_updates():
# triggering useless crashes at compile time.
x = scalar()
y = x * 2
function([theano.In(x)], y, givens={})
function([theano.In(x)], y, updates={})
function([In(x)], y, givens={})
function([In(x)], y, updates={})
@pytest.mark.skipif(
not theano.gpuarray.pygpu_activated or theano.config.mode == "DEBUG_MODE",
not theano.gpuarray.pygpu_activated or config.mode == "DEBUG_MODE",
reason="DEBUG_MODE forces synchronous behaviour which breaks this test",
)
def test_sync_update():
......
......@@ -33,5 +33,5 @@ def test_no_output_from_implace():
def test_including():
mode = theano.Mode(optimizer="merge")
mode = Mode(optimizer="merge")
mode.including("fast_compile")
......@@ -5,7 +5,7 @@ Test compilation modes
import copy
import theano
from theano.compile import Mode
from theano.compile.mode import Mode
from theano.tensor.type import matrix, vector
......
......@@ -2,6 +2,7 @@ import numpy as np
import pytest
import theano
from theano.compile.mode import Mode
from theano.graph import fg
from theano.graph.basic import Apply, Constant, Variable
from theano.graph.op import COp
......@@ -220,7 +221,7 @@ def test_clinker_literal_inlining():
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_clinker_literal_cache():
mode = theano.Mode(linker="c")
mode = Mode(linker="c")
A = matrix()
input1 = vector()
......@@ -415,7 +416,7 @@ def test_shared_input_output():
state = theano.shared(0)
state.name = "state"
linker = CLinker()
mode = theano.Mode(linker=linker)
mode = Mode(linker=linker)
f = theano.function([inc], state, updates=[(state, state + inc)], mode=mode)
g = theano.function([inc], state, updates=[(state, state + inc)])
......
......@@ -3,8 +3,9 @@ from copy import deepcopy
import numpy as np
import theano
from theano.graph import basic, fg
from theano.graph.basic import Apply, Constant, Variable
from theano.compile.mode import Mode
from theano.graph import fg
from theano.graph.basic import Apply, Constant, Variable, clone
from theano.graph.op import Op
from theano.graph.type import Type
from theano.link.basic import Container, PerformLinker, WrapLinker
......@@ -121,7 +122,7 @@ class TestPerformLinker:
x, y, z = inputs()
a, d = add(x, y), div(x, y)
e = mul(a, d)
fn = perform_linker(FunctionGraph(*basic.clone([x, y, a], [e]))).make_function()
fn = perform_linker(FunctionGraph(*clone([x, y, a], [e]))).make_function()
assert fn(1.0, 2.0, 9.0) == 4.5
def test_skiphole(self):
......@@ -129,7 +130,7 @@ class TestPerformLinker:
a = add(x, y)
r = raise_err(a)
e = add(r, a)
fn = perform_linker(FunctionGraph(*basic.clone([x, y, r], [e]))).make_function()
fn = perform_linker(FunctionGraph(*clone([x, y, r], [e]))).make_function()
assert fn(1.0, 2.0, 4.5) == 7.5
......@@ -186,7 +187,7 @@ def test_sort_schedule_fn():
return cmp(str(a), str(b)) # lexicographical sort
linker = OpWiseCLinker(schedule=sort_schedule_fn(str_cmp))
mode = theano.Mode(linker=linker)
mode = Mode(linker=linker)
f = theano.function((x,), (y,), mode=mode)
nodes = f.maker.linker.make_all()[-1]
......
......@@ -8,7 +8,8 @@ import pytest
import theano
from theano import function
from theano import tensor as tt
from theano.compile import Mode
from theano.compile.io import In
from theano.compile.mode import Mode
from theano.configdefaults import config
from theano.graph.basic import Apply
from theano.graph.op import Op
......@@ -389,8 +390,8 @@ def test_vm_gc():
x = vector()
p = RunOnce()(x)
mode = theano.Mode(linker=VMLinker(lazy=True))
f = theano.function([theano.In(x, mutable=True)], [p + 1, p + 2], mode=mode)
mode = Mode(linker=VMLinker(lazy=True))
f = theano.function([In(x, mutable=True)], [p + 1, p + 2], mode=mode)
f([1, 2, 3])
p = RunOnce()(x)
......@@ -408,7 +409,7 @@ def test_reallocation():
VMLinker(allow_gc=False, lazy=False, use_cloop=False),
VMLinker(allow_gc=True, lazy=False, use_cloop=False),
]:
m = theano.compile.get_mode(theano.Mode(linker=linker))
m = theano.compile.get_mode(Mode(linker=linker))
m = m.excluding("fusion", "inplace")
f = theano.function([x, y], z, name="test_reduce_memory", mode=m)
......@@ -444,7 +445,7 @@ def test_no_recycling():
VMLinker(use_cloop=False, lazy=False, allow_gc=False),
]:
mode = theano.Mode(optimizer="fast_compile", linker=lnk)
mode = Mode(optimizer="fast_compile", linker=lnk)
f = theano.function([x], x + 1, mode=mode)
f2 = theano.function([x], (x + 1) * 2, mode=mode)
m1 = f.fn.thunks[0].thunk.module
......
from theano.compile import Mode
from theano.compile.mode import Mode
from theano.configdefaults import config
from theano.link.basic import WrapLinkerMany
from theano.link.vm import VMLinker
......
......@@ -13,6 +13,7 @@ import pytest
import tests.unittest_tools as utt
import theano
from theano.compile.mode import Mode
from theano.graph.fg import FunctionGraph
from theano.link.c.basic import DualLinker
from theano.scalar.basic import (
......@@ -453,7 +454,7 @@ def test_grad_abs():
a = fscalar("a")
b = theano.tensor.nnet.relu(a)
c = theano.grad(b, a)
f = theano.function([a], c, mode=theano.Mode(optimizer=None))
f = theano.function([a], c, mode=Mode(optimizer=None))
# Currently Theano returns 0.5, but it is not guaranteed that this
# will not change in the future.
ret = f(0.0)
......
......@@ -26,6 +26,8 @@ import theano.scalar.sharedvar
from tests import unittest_tools as utt
from theano import tensor as tt
from theano.compile.function.pfunc import rebuild_collect_shared
from theano.compile.io import In
from theano.compile.mode import FAST_RUN, Mode, get_default_mode, get_mode
from theano.configdefaults import config
from theano.misc.safe_asarray import _asarray
from theano.scan.basic import scan
......@@ -54,11 +56,11 @@ from theano.tensor.type import (
if config.mode == "FAST_COMPILE":
mode_with_opt = theano.compile.mode.get_mode("FAST_RUN")
mode_with_opt = get_mode("FAST_RUN")
else:
mode_with_opt = theano.compile.mode.get_default_mode()
mode_with_opt = get_default_mode()
if config.mode in ("DEBUG_MODE", "DebugMode"):
mode_nodebug = theano.compile.mode.get_mode("FAST_RUN")
mode_nodebug = get_mode("FAST_RUN")
else:
mode_nodebug = mode_with_opt
......@@ -234,9 +236,7 @@ class TestScan:
# generator network, only one output, type scalar; no sequence or
# non-sequence arguments
@pytest.mark.skipif(
isinstance(
theano.compile.mode.get_default_mode(), theano.compile.debugmode.DebugMode
),
isinstance(get_default_mode(), theano.compile.debugmode.DebugMode),
reason="This test fails in DebugMode, because it is not yet picklable.",
)
def test_pickling(self):
......@@ -920,14 +920,14 @@ class TestScan:
u0 = vector("u0")
u1 = vector("u1")
u2 = vector("u2")
mu0 = theano.In(u0, mutable=False)
mu1 = theano.In(u1, mutable=True)
mu2 = theano.In(u2, mutable=True)
mu0 = In(u0, mutable=False)
mu1 = In(u1, mutable=True)
mu2 = In(u2, mutable=True)
x0 = scalar("x0")
x1 = scalar("y0")
W_in = theano.shared(vW_in, "Win")
W = theano.shared(vW, "W")
mode = theano.compile.mode.get_mode(None).including("inplace")
mode = get_mode(None).including("inplace")
def f_rnn_shared(u0_t, u1_t, u2_t, x0_tm1, x1_tm1):
return [
......@@ -987,14 +987,14 @@ class TestScan:
u0 = vector("u0")
u1 = vector("u1")
u2 = vector("u2")
mu0 = theano.In(u0, mutable=True)
mu1 = theano.In(u1, mutable=True)
mu2 = theano.In(u2, mutable=True)
mu0 = In(u0, mutable=True)
mu1 = In(u1, mutable=True)
mu2 = In(u2, mutable=True)
x0 = scalar("x0")
x1 = scalar("y0")
W_in = theano.shared(vW_in, "Win")
W = theano.shared(vW, "W")
mode = theano.compile.mode.get_mode(None).including("inplace")
mode = get_mode(None).including("inplace")
def f_rnn_shared(u0_t, u1_t, u1_tp1, u2_tm1, u2_t, u2_tp1, x0_tm1, x1_tm1):
return [
......@@ -1057,7 +1057,7 @@ class TestScan:
x0 = tt.constant(x0)
to_replace = outputs[0].owner.inputs[0].owner.inputs[1]
outputs = theano.clone(outputs, replace=[(to_replace, x0)])
mode = theano.compile.mode.get_mode(None).including("inplace")
mode = get_mode(None).including("inplace")
f9 = theano.function([], outputs, updates=updates, mode=mode)
scan_node = [x for x in f9.maker.fgraph.toposort() if isinstance(x.op, Scan)]
assert 0 not in scan_node[0].op.destroy_map.keys()
......@@ -2783,7 +2783,7 @@ class TestScan:
x,
tt.constant(np.asarray(0.0, dtype=config.floatX)),
)
mode = theano.compile.mode.FAST_RUN
mode = FAST_RUN
mode = mode.excluding("inplace")
f1 = theano.function([], o, mode=mode)
inputs, outputs = clone_optimized_graph(f1)
......@@ -2817,7 +2817,7 @@ class TestScan:
tt.constant(np.asarray(0.0, dtype=config.floatX)),
)
mode = theano.compile.mode.FAST_RUN
mode = FAST_RUN
mode = mode.excluding("inplace")
f0 = theano.function([], o, mode=mode)
inputs, outputs = clone_optimized_graph(f0)
......@@ -2852,7 +2852,7 @@ class TestScan:
tt.constant(np.asarray(0.0, dtype=config.floatX)),
)
mode = theano.compile.mode.FAST_RUN
mode = FAST_RUN
mode = mode.excluding("inplace")
f1 = theano.function([], o, mode=mode)
inputs, outputs = clone_optimized_graph(f1)
......@@ -4186,7 +4186,7 @@ class TestScan:
[U, x1, x2],
[X1, X2, X3],
updates=updates,
mode=theano.Mode(linker="py"),
mode=Mode(linker="py"),
allow_input_downcast=True,
)
rng = np.random.RandomState(utt.fetch_seed())
......@@ -4223,7 +4223,7 @@ class TestScan:
[W, x1, x2],
[X1, X2, X3],
updates=updates,
mode=theano.Mode(linker="py"),
mode=Mode(linker="py"),
allow_input_downcast=True,
)
rng = np.random.RandomState(utt.fetch_seed())
......@@ -4594,7 +4594,7 @@ def test_speed():
# fn=lambda ri, rii: ri + rii,
# sequences=[s_r[1:]],
# outputs_info=tt.constant(r[0]),
# mode=theano.Mode(linker="cvm"),
# mode=Mode(linker="cvm"),
# )
# assert not updates
#
......@@ -4616,7 +4616,7 @@ def test_speed():
[],
[],
updates=OrderedDict([(s_i, s_i + 1), (shared_r, s_rinc)]),
mode=theano.Mode(linker="cvm"),
mode=Mode(linker="cvm"),
)
f_cvm_shared._check_for_aliased_inputs = False
......@@ -4652,11 +4652,11 @@ def test_speed_rnn():
# fn=lambda ri, rii: tt.tanh(tt.dot(rii, w)),
# sequences=[s_r[1:]],
# outputs_info=tt.constant(r[0]),
# mode=theano.Mode(linker="cvm"),
# mode=Mode(linker="cvm"),
# )
# assert not updates
#
# f_cvm = theano.function([s_r], s_y, mode=theano.Mode(linker="cvm"))
# f_cvm = theano.function([s_r], s_y, mode=Mode(linker="cvm"))
#
# cvm_duration = timeit(lambda: f_cvm(r), number=n_timeit)
......@@ -4675,7 +4675,7 @@ def test_speed_rnn():
[],
[],
updates=OrderedDict([(s_i, s_i + 1), (shared_r, s_rinc)]),
mode=theano.Mode(linker="cvm"),
mode=Mode(linker="cvm"),
)
cvm_shared_duration = timeit(lambda: f_cvm_shared(), number=n_timeit)
......@@ -4691,7 +4691,7 @@ def test_speed_batchrnn():
This function prints out the speed of recurrent neural network
calculations implemented in various ways.
We force the mode to theano.Mode(linker='cvm'). If you manually
We force the mode to Mode(linker='cvm'). If you manually
change this code to use DebugMode this will test the correctness
of the optimizations applied, but generally correctness-testing
is not the goal of this test.
......@@ -4725,7 +4725,7 @@ def test_speed_batchrnn():
[],
[],
updates=[(s_i, s_i + 1), (shared_r, s_rinc)],
mode=theano.Mode(linker="cvm"),
mode=Mode(linker="cvm"),
)
f_fn = f.fn
t2 = time.time()
......
......@@ -11,6 +11,7 @@ from tests import unittest_tools as utt
from tests.tensor.test_sharedvar import makeSharedTester
from theano import sparse
from theano.compile.function import function
from theano.compile.io import In, Out
from theano.configdefaults import config
from theano.gradient import GradientError
from theano.graph.basic import Apply, Constant
......@@ -1357,7 +1358,7 @@ class TestStructuredDot:
a = SparseType(sparse_format_a, dtype=sparse_dtype)()
b = SparseType(sparse_format_b, dtype=sparse_dtype)()
d = tt.dot(a, b)
f = theano.function([a, b], theano.Out(d, borrow=True))
f = theano.function([a, b], Out(d, borrow=True))
for M, N, K, nnz in [
(4, 3, 2, 3),
(40, 30, 20, 3),
......@@ -1379,7 +1380,7 @@ class TestStructuredDot:
a = SparseType("csc", dtype=sparse_dtype)()
b = matrix(dtype=dense_dtype)
d = tt.dot(a, b)
f = theano.function([a, b], theano.Out(d, borrow=True))
f = theano.function([a, b], Out(d, borrow=True))
for M, N, K, nnz in [
(4, 3, 2, 3),
......@@ -1928,9 +1929,7 @@ def test_sparse_shared_memory():
sdot = sparse.structured_dot
z = sdot(x * 3, m1) + sdot(y * 2, m2)
f = theano.function(
[theano.In(x, mutable=True), theano.In(y, mutable=True)], z, mode="FAST_RUN"
)
f = theano.function([In(x, mutable=True), In(y, mutable=True)], z, mode="FAST_RUN")
def f_(x, y, m1=m1, m2=m2):
return ((x * 3) * m1) + ((y * 2) * m2)
......@@ -2243,9 +2242,7 @@ class TestRemove0(utt.InferShapeTester):
# the In wrapper has to be there because theano has a rule not
# to optimize inputs
f = theano.function(
[theano.In(x, borrow=True, mutable=True)], Remove0()(x)
)
f = theano.function([In(x, borrow=True, mutable=True)], Remove0()(x))
# assert optimization local_inplace_remove0 is applied in
# modes with optimization
......
......@@ -10,6 +10,7 @@ import theano.tensor as tt
from tests import unittest_tools as utt
from tests.sparse.test_basic import random_lil
from theano import sparse
from theano.compile.mode import Mode, get_default_mode
from theano.configdefaults import config
from theano.tensor.type import ivector, matrix, vector
......@@ -17,7 +18,7 @@ from theano.tensor.type import ivector, matrix, vector
def test_local_csm_properties_csm():
data = vector()
indices, indptr, shape = (ivector(), ivector(), ivector())
mode = theano.compile.mode.get_default_mode()
mode = get_default_mode()
mode = mode.including("specialize", "local_csm_properties_csm")
for CS, cast in [
(sparse.CSC, sp.sparse.csc_matrix),
......@@ -43,10 +44,10 @@ def test_local_csm_properties_csm():
def test_local_csm_grad_c():
data = vector()
indices, indptr, shape = (ivector(), ivector(), ivector())
mode = theano.compile.mode.get_default_mode()
mode = get_default_mode()
if theano.config.mode == "FAST_COMPILE":
mode = theano.compile.Mode(linker="c|py", optimizer="fast_compile")
mode = Mode(linker="c|py", optimizer="fast_compile")
mode = mode.including("specialize", "local_csm_grad_c")
for CS, cast in [
......@@ -68,7 +69,7 @@ def test_local_csm_grad_c():
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_local_mul_s_d():
mode = theano.compile.mode.get_default_mode()
mode = get_default_mode()
mode = mode.including("specialize", "local_mul_s_d")
for sp_format in sparse.sparse_formats:
......@@ -85,7 +86,7 @@ def test_local_mul_s_d():
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_local_mul_s_v():
mode = theano.compile.mode.get_default_mode()
mode = get_default_mode()
mode = mode.including("specialize", "local_mul_s_v")
for sp_format in ["csr"]: # Not implemented for other format
......@@ -102,7 +103,7 @@ def test_local_mul_s_v():
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_local_structured_add_s_v():
mode = theano.compile.mode.get_default_mode()
mode = get_default_mode()
mode = mode.including("specialize", "local_structured_add_s_v")
for sp_format in ["csr"]: # Not implemented for other format
......@@ -120,7 +121,7 @@ def test_local_structured_add_s_v():
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_local_sampling_dot_csr():
mode = theano.compile.mode.get_default_mode()
mode = get_default_mode()
mode = mode.including("specialize", "local_sampling_dot_csr")
for sp_format in ["csr"]: # Not implemented for other format
......@@ -147,7 +148,7 @@ def test_local_sampling_dot_csr():
def test_local_dense_from_sparse_sparse_from_dense():
mode = theano.compile.mode.get_default_mode()
mode = get_default_mode()
mode = mode.including("local_dense_from_sparse_sparse_from_dense")
m = matrix()
......
......@@ -4,6 +4,7 @@ import pytest
import theano
import theano.tensor as tt
from tests import unittest_tools as utt
from theano.compile.mode import Mode
from theano.configdefaults import config
from theano.graph.opt import check_stack_trace
from theano.tensor.nnet import abstract_conv as conv
......@@ -1047,7 +1048,7 @@ class TestAbstractConvNoOptim(BaseTestConv2d):
def run_test_case(self, i, f, s, b, flip, provide_shape, fd=(1, 1)):
o = self.get_output_shape(i, f, s, b, fd)
mode = theano.Mode(optimizer=None)
mode = Mode(optimizer=None)
self.run_fwd(
inputs_shape=i,
filters_shape=f,
......@@ -1093,7 +1094,7 @@ class TestAbstractConvNoOptim(BaseTestConv2d):
def run_test_case_gi(
self, i, f, o, s, b, flip, provide_shape, fd=(1, 1), expect_error=False
):
mode = theano.Mode(optimizer=None)
mode = Mode(optimizer=None)
if not expect_error:
self.run_gradinput(
inputs_shape=i,
......@@ -2059,7 +2060,7 @@ class TestGroupedConvNoOptim:
conv_op = theano.tensor.nnet.abstract_conv.AbstractConv2d
conv_gradw_op = theano.tensor.nnet.abstract_conv.AbstractConv2d_gradWeights
conv_gradi_op = theano.tensor.nnet.abstract_conv.AbstractConv2d_gradInputs
mode = theano.Mode(optimizer=None)
mode = Mode(optimizer=None)
is_dnn = False
def setup_method(self):
......@@ -2269,7 +2270,7 @@ class TestGroupedConv3dNoOptim(TestGroupedConvNoOptim):
conv_op = theano.tensor.nnet.abstract_conv.AbstractConv3d
conv_gradw_op = theano.tensor.nnet.abstract_conv.AbstractConv3d_gradWeights
conv_gradi_op = theano.tensor.nnet.abstract_conv.AbstractConv3d_gradInputs
mode = theano.Mode(optimizer=None)
mode = Mode(optimizer=None)
def setup_method(self):
self.num_groups = [3, 2, 4, 4]
......@@ -2509,7 +2510,7 @@ class TestUnsharedConv:
conv2d_gradw_op = theano.tensor.nnet.abstract_conv.AbstractConv2d_gradWeights
conv2d_gradi_op = theano.tensor.nnet.abstract_conv.AbstractConv2d_gradInputs
mode = theano.compile.mode.Mode(optimizer="None")
mode = Mode(optimizer="None")
def setup_method(self):
self.img_shape = [(2, 2, 4, 4), (3, 2, 4, 2), (3, 3, 5, 3), (3, 4, 4, 4)]
......@@ -2741,7 +2742,7 @@ class TestAsymmetricPadding:
conv2d_gradw_op = theano.tensor.nnet.abstract_conv.AbstractConv2d_gradWeights
conv2d_gradi_op = theano.tensor.nnet.abstract_conv.AbstractConv2d_gradInputs
mode = theano.compile.mode.Mode(optimizer="None")
mode = Mode(optimizer="None")
img_shape = [(2, 2, 4, 4), (3, 2, 4, 2), (3, 3, 5, 3)]
kern_shape = [(4, 2, 2, 2), (2, 2, 4, 2), (2, 3, 3, 3)]
......@@ -2911,7 +2912,7 @@ class TestAsymmetricPadding:
class TestCausalConv:
mode = theano.compile.mode.Mode(optimizer="None")
mode = Mode(optimizer="None")
img = np.array(
[
......
......@@ -6,6 +6,7 @@ import pytest
import theano
import theano.tensor as tt
from tests import unittest_tools as utt
from theano.compile.mode import Mode
from theano.tensor.basic import NotScalarConstantError, _allclose
from theano.tensor.nnet import conv, conv2d
from theano.tensor.type import dmatrix, dtensor3, dtensor4, dvector, scalar, tensor4
......@@ -607,7 +608,7 @@ class TestConv2D(utt.InferShapeTester):
unroll_patch=True,
openmp=openmp,
)
mode = theano.Mode(
mode = Mode(
linker=theano.link.vm.VMLinker(
allow_gc=False, use_cloop=True
)
......
......@@ -13,7 +13,6 @@ from numpy.testing import assert_allclose, assert_almost_equal, assert_array_equ
import theano
import theano.scalar as ts
import theano.tensor as tt
import theano.tensor.var as var
from tests import unittest_tools as utt
from tests.tensor.utils import (
ALL_DTYPES,
......@@ -73,6 +72,7 @@ from tests.tensor.utils import (
from theano import compile, config, function, shared
from theano.assert_op import Assert
from theano.compile import DeepCopyOp
from theano.compile.io import In, Out
from theano.compile.mode import get_default_mode
from theano.gradient import grad, hessian, numeric_grad
from theano.graph.basic import Apply, Variable
......@@ -170,6 +170,7 @@ from theano.tensor import (
triu,
true_div,
unbroadcast,
var,
vertical_stack,
)
from theano.tensor.elemwise import DimShuffle, Elemwise
......@@ -3224,7 +3225,7 @@ def test_join_inplace():
join = Join(view=0)
c = join(0, x, z, z)
f = theano.function([theano.In(x, borrow=True), s], theano.Out(c, borrow=True))
f = theano.function([In(x, borrow=True), s], Out(c, borrow=True))
data = np.array([3, 4, 5], dtype=config.floatX)
print(f(data, 0))
......
......@@ -25,6 +25,7 @@ from tests import unittest_tools
from tests.tensor.utils import inplace_func
from theano import shared
from theano.compile.io import In
from theano.compile.mode import Mode
from theano.configdefaults import config
from theano.graph.fg import FunctionGraph
from theano.misc.safe_asarray import _asarray
......@@ -139,7 +140,7 @@ class TestGemm:
f = inplace_func(
[tz, ta, tx, ty, tb],
gemm_inplace(tz, ta, tx, ty, tb),
mode=theano.compile.Mode(optimizer=None, linker=l),
mode=Mode(optimizer=None, linker=l),
)
f(z, a, x, y, b)
z_after = self._gemm(z_orig, a, x, y, b)
......@@ -304,12 +305,12 @@ class TestGemm:
tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)]
# f = inplace_func([tz,ta,tx,ty,tb], gemm_inplace(tz,ta,tx,ty,tb),
# mode = theano.compile.Mode(optimizer = None, linker=l))
# mode = Mode(optimizer = None, linker=l))
# f(z, a, x, y, b)
f = inplace_func(
[],
gemm_inplace(tz, ta, tx, ty, tb),
mode=theano.compile.Mode(optimizer=None, linker=l),
mode=Mode(optimizer=None, linker=l),
)
f()
unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True))
......@@ -366,7 +367,7 @@ class TestGemm:
f_i = inplace_func(
[],
gemm_inplace(tz[:, :, i], ta, tx[:, :, i], ty[:, :, i], tb),
mode=theano.compile.Mode(optimizer=None, linker=l),
mode=Mode(optimizer=None, linker=l),
)
for j in range(3):
# tz will not _always_ be overwritten,
......@@ -385,7 +386,7 @@ class TestGemm:
[],
tz_i,
updates=[(tz, tt.set_subtensor(tz[:, :, i], tz_i))],
mode=theano.compile.Mode(optimizer=None, linker=l),
mode=Mode(optimizer=None, linker=l),
)
for j in range(3):
g_i()
......@@ -645,7 +646,7 @@ def just_gemm(i, o, ishapes=None, max_graphlen=0, expected_nb_gemm=1):
g = inplace_func(
i,
o,
mode=theano.compile.Mode(linker="py", optimizer=None),
mode=Mode(linker="py", optimizer=None),
allow_input_downcast=True,
on_unused_input="ignore",
)
......@@ -734,7 +735,7 @@ def test_gemm_opt_double_gemm():
g = inplace_func(
i,
o,
mode=theano.compile.Mode(linker="py", optimizer=None),
mode=Mode(linker="py", optimizer=None),
on_unused_input="ignore",
)
......
......@@ -3,7 +3,7 @@ import numpy as np
import theano
import theano.tensor.basic as basic
from theano import function
from theano.compile import In
from theano.compile.io import In
from theano.misc.safe_asarray import _asarray
from theano.tensor.basic import (
_convert_to_float32,
......
......@@ -1284,9 +1284,7 @@ class TestElemwise(unittest_tools.InferShapeTester):
# it overflowed in this case.
a, b, c, d, e, f = vectors("abcdef")
s = a + b + c + d + e + f
g = theano.function(
[a, b, c, d, e, f], s, mode=theano.compile.Mode(linker="py")
)
g = theano.function([a, b, c, d, e, f], s, mode=Mode(linker="py"))
g(*[np.zeros(2 ** 11, config.floatX) for i in range(6)])
......
......@@ -4,6 +4,7 @@ import time
import numpy as np
import theano
from theano.compile.mode import Mode
from theano.link.basic import PerformLinker
from theano.link.c.basic import OpWiseCLinker
from theano.tensor.type import dvector, lvector
......@@ -42,12 +43,8 @@ def test_gc_never_pickles_temporaries():
# g_linker has no garbage collection
f = theano.function(
[x], r, mode=theano.Mode(optimizer=optimizer, linker=f_linker)
)
g = theano.function(
[x], r, mode=theano.Mode(optimizer=optimizer, linker=g_linker)
)
f = theano.function([x], r, mode=Mode(optimizer=optimizer, linker=f_linker))
g = theano.function([x], r, mode=Mode(optimizer=optimizer, linker=g_linker))
pre_f = pickle.dumps(f)
# pre_g = pickle.dumps(g)
......
......@@ -4,6 +4,7 @@ import pytest
import theano
import theano.tensor as tt
from theano import function
from theano.compile.mode import Mode
from theano.tensor.elemwise import DimShuffle
from theano.tensor.type import dtensor3
......@@ -43,7 +44,7 @@ class TestKeepDims:
a = np.random.rand(3, 2, 4)
# We don't need to test all opt and C code, as this is tested
# by the ops tests.
mode = theano.compile.Mode(optimizer="fast_compile", linker="py")
mode = Mode(optimizer="fast_compile", linker="py")
# 'max_and_argmax' has two outputs and can be specified with either
# a single or every axis:
......@@ -177,7 +178,7 @@ class TestKeepDims:
x = dtensor3()
a = np.random.rand(3, 2, 4).astype(theano.config.floatX)
mode = theano.compile.Mode(optimizer="fast_compile", linker="py")
mode = Mode(optimizer="fast_compile", linker="py")
for axis in [
0,
......
......@@ -4,6 +4,7 @@ import subprocess
import pytest
import theano
from theano.compile.mode import Mode
from theano.configdefaults import config
from theano.graph.sched import sort_schedule_fn
from theano.link.c.basic import OpWiseCLinker
......@@ -21,7 +22,7 @@ from theano.tensor.type import matrix
mpi_scheduler = sort_schedule_fn(*mpi_cmps)
mpi_linker = OpWiseCLinker(schedule=mpi_scheduler)
mpi_mode = theano.Mode(linker=mpi_linker)
mpi_mode = Mode(linker=mpi_linker)
@config.change_flags(compute_test_value="off")
......
......@@ -11,10 +11,13 @@ import theano.scalar as ts
import theano.tensor as tt
import theano.tensor.opt as opt
from tests import unittest_tools as utt
from theano import compile, pprint, shared
from theano import pprint, shared
from theano.assert_op import Assert
from theano.compile import DeepCopyOp, deep_copy_op, get_mode
from theano.compile import optdb
from theano.compile.debugmode import DebugMode
from theano.compile.function import function
from theano.compile.mode import Mode, get_default_mode, get_mode
from theano.compile.ops import DeepCopyOp, SpecifyShape, deep_copy_op, specify_shape
from theano.configdefaults import config
from theano.graph.basic import Apply, Constant
from theano.graph.fg import FunctionGraph
......@@ -102,20 +105,20 @@ from theano.tensor.var import TensorConstant
mode_opt = config.mode
if mode_opt == "FAST_COMPILE":
mode_opt = "FAST_RUN"
mode_opt = theano.compile.mode.get_mode(mode_opt)
mode_opt = get_mode(mode_opt)
dimshuffle_lift = out2in(local_dimshuffle_lift)
_optimizer_stabilize = Query(include=["fast_run"])
_optimizer_stabilize.position_cutoff = 1.51
_optimizer_stabilize = compile.optdb.query(_optimizer_stabilize)
_optimizer_stabilize = optdb.query(_optimizer_stabilize)
_optimizer_specialize = Query(include=["fast_run"])
_optimizer_specialize.position_cutoff = 2.01
_optimizer_specialize = compile.optdb.query(_optimizer_specialize)
_optimizer_specialize = optdb.query(_optimizer_specialize)
_optimizer_fast_run = Query(include=["fast_run"])
_optimizer_fast_run = compile.optdb.query(_optimizer_fast_run)
_optimizer_fast_run = optdb.query(_optimizer_fast_run)
def ds(x, y):
......@@ -513,7 +516,7 @@ class TestCanonize:
# We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
mode = compile.mode.get_default_mode()
mode = get_default_mode()
opt = Query(["canonicalize"])
opt = opt.excluding("local_elemwise_fusion")
mode = mode.__class__(linker=mode.linker, optimizer=opt)
......@@ -648,7 +651,7 @@ class TestCanonize:
# We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
mode = compile.mode.get_default_mode()
mode = get_default_mode()
mode._optimizer = Query(["canonicalize"])
mode._optimizer = mode._optimizer.excluding("local_elemwise_fusion")
for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):
......@@ -695,7 +698,7 @@ class TestCanonize:
# We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
mode = compile.mode.get_default_mode()
mode = get_default_mode()
opt = Query(["canonicalize"])
opt = opt.including("ShapeOpt", "local_fill_to_alloc")
......@@ -1015,13 +1018,9 @@ class TestCanonize:
# a = T.abs_(x)
if config.mode == "FAST_COMPILE":
mode = theano.compile.mode.get_mode("FAST_RUN").excluding(
"local_elemwise_fusion"
)
mode = get_mode("FAST_RUN").excluding("local_elemwise_fusion")
else:
mode = theano.compile.mode.get_default_mode().excluding(
"local_elemwise_fusion"
)
mode = get_default_mode().excluding("local_elemwise_fusion")
f = function([x], [(4 * x) / abs(2 * x)], mode=mode)
print(f.maker.fgraph.toposort())
......@@ -1030,7 +1029,7 @@ class TestCanonize:
f(-1)
# some stabilization optimization make the output be finite instead of nan
# debug_mode will raise an error when he see nan
if not isinstance(mode, theano.compile.debugmode.DebugMode):
if not isinstance(mode, DebugMode):
assert np.isfinite(f(0))
assert len(f.maker.fgraph.toposort()) == 2
......@@ -1043,7 +1042,7 @@ class TestCanonize:
f(-1)
# some stabilization optimization make the output be finite instead of nan
# debug_mode will raise an error when he see nan
if not isinstance(mode, theano.compile.debugmode.DebugMode):
if not isinstance(mode, DebugMode):
assert np.isfinite(f(0))
assert len(f.maker.fgraph.toposort()) == 2
......@@ -1066,7 +1065,7 @@ class TestCanonize:
# fvv = _asarray(np.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])
# We must be sure that the Canonizer is working, but that we don't have other
# optimisation that could hide bug in the Canonizer as local_elemwise_fusion
mode = compile.mode.get_default_mode()
mode = get_default_mode()
opt = Query(["canonicalize"])
opt = opt.excluding("local_elemwise_fusion")
......@@ -1131,7 +1130,7 @@ def test_local_merge_abs():
mode = config.mode
if mode == "FAST_COMPILE":
mode = "FAST_RUN"
mode = theano.compile.mode.get_mode(mode).excluding("local_elemwise_fusion")
mode = get_mode(mode).excluding("local_elemwise_fusion")
f = function([y, z], (abs(y * z * -2)), mode=mode)
f(y_val, z_val)
......@@ -1204,7 +1203,7 @@ def test_cast_in_mul_canonizer():
e = tt.eq(go, x)
o1 = (1 - e) * go
o2 = e * go
mode = theano.compile.get_default_mode().excluding("fusion").including("fast_run")
mode = get_default_mode().excluding("fusion").including("fast_run")
f = function([x, y], [o1, o2], mode=mode)
theano.printing.debugprint(f, print_type=True)
nodes = f.maker.fgraph.apply_nodes
......@@ -1232,7 +1231,7 @@ class TestFusion:
],
exclude=["cxx_only", "BlasOpt"],
)
mode = theano.compile.Mode(compile.mode.get_default_mode().linker, opts)
mode = Mode(get_default_mode().linker, opts)
_shared = staticmethod(shared)
topo_exclude = ()
......@@ -1986,7 +1985,7 @@ class TestFusion:
vars = [sd, means]
# Make sure that C compilation is used
mode = theano.compile.Mode("cvm", self.opts)
mode = Mode("cvm", self.opts)
dlogp = function(vars, [theano.grad(logp, v) for v in vars], mode=mode)
# Make sure something was fused
......@@ -2007,7 +2006,7 @@ class TestFusion:
exclude=["cxx_only", "BlasOpt"],
)
mode = theano.compile.mode.Mode(self.mode.linker, opts)
mode = Mode(self.mode.linker, opts)
x, y, z = dmatrices("xyz")
out = tt.dot(x, y) + x + y + z
......@@ -2109,7 +2108,7 @@ class TestFusion:
no_c_code_op = Elemwise(NoCCodeOp(ts.basic.upgrade_to_float))
mode = theano.Mode(linker="cvm")
mode = Mode(linker="cvm")
mode._optimizer = mode._optimizer.including(
"local_elemwise_fusion",
"composite_elemwise_fusion",
......@@ -2194,9 +2193,7 @@ class TestCompositeCodegen:
c = ts.Composite([x], [x + 1, x - 1])
X = matrix()
o = Elemwise(scalar_op=c)(X)
mode = theano.compile.mode.get_default_mode().including(
"local_useless_composite"
)
mode = get_default_mode().including("local_useless_composite")
f = function([X], o[0], mode=mode)
topo = f.maker.fgraph.toposort()
......@@ -2216,7 +2213,7 @@ def test_log1p():
m = config.mode
if m == "FAST_COMPILE":
m = "FAST_RUN"
m = compile.mode.get_mode(m)
m = get_mode(m)
m = m.excluding("fusion")
# check some basic cases
x = dvector()
......@@ -2267,7 +2264,7 @@ def test_log_add():
m = config.mode
if m == "FAST_COMPILE":
m = "FAST_RUN"
m = compile.mode.get_mode(m)
m = get_mode(m)
m = m.excluding("fusion")
m = copy.copy(m)
# No need to put them back as we have a new object
......@@ -2304,11 +2301,11 @@ def test_log_add():
def test_local_useless_slice():
# test a simple matrix
x = matrix("x")
mode_unopt = compile.get_default_mode().excluding(
mode_unopt = get_default_mode().excluding(
"local_useless_slice", "local_mul_canonizer"
)
mode_opt = (
compile.get_default_mode()
get_default_mode()
.including("local_useless_slice")
.excluding("local_mul_canonizer")
)
......@@ -2358,13 +2355,11 @@ def test_local_useless_slice():
def test_local_useless_inc_subtensor():
x = matrix("x")
y = matrix("y")
mode = compile.get_default_mode().including("local_useless_inc_subtensor")
mode = get_default_mode().including("local_useless_inc_subtensor")
for sub in [slice(None), slice(None, None, -1)]:
o = set_subtensor(x[::, sub], y)
f = function([x, y], o, mode=mode)
o_shape = set_subtensor(
x[::, sub], theano.compile.ops.specify_shape(y, x.shape)
)
o_shape = set_subtensor(x[::, sub], specify_shape(y, x.shape))
f_shape = function([x, y], o_shape, mode=mode)
# Test with shape info
......@@ -2391,7 +2386,7 @@ def test_local_useless_inc_subtensor():
# Test that we do not optimize others strides even when sub and y
# have same shapes
sub = x[::, ::2]
o_shape = set_subtensor(sub, theano.compile.ops.specify_shape(y, sub.shape))
o_shape = set_subtensor(sub, specify_shape(y, sub.shape))
f_shape = function([x, y], o_shape)
topo = f_shape.maker.fgraph.toposort()
# theano.printing.debugprint(f_shape)
......@@ -2415,7 +2410,7 @@ def test_local_useless_subtensor():
assert len(prog) == 1
f([[0, 1, 2], [3, 4, 5]]) # let debugmode test something
x_c = theano.compile.ops.specify_shape(x, (2, 3))
x_c = specify_shape(x, (2, 3))
# Test constant
for dims, res in [
((slice(0, 2),), True),
......@@ -2431,7 +2426,7 @@ def test_local_useless_subtensor():
# theano.printing.debugprint(f)
prog = f.maker.fgraph.toposort()
if res:
assert isinstance(prog[0].op, theano.compile.ops.SpecifyShape), dims
assert isinstance(prog[0].op, SpecifyShape), dims
assert prog[1].op == tt.exp, (dims, prog)
assert len(prog) == 2, dims
else:
......@@ -2544,7 +2539,7 @@ def test_local_useless_subtensor():
# theano.printing.debugprint(f)
prog = f.maker.fgraph.toposort()
if res:
assert isinstance(prog[0].op, theano.compile.ops.SpecifyShape), dims
assert isinstance(prog[0].op, SpecifyShape), dims
assert prog[1].op == tt.exp, dims
assert len(prog) == 2, dims
else:
......@@ -2558,7 +2553,7 @@ def test_local_subtensor_remove_broadcastable_index():
# tests removing broadcastable dimensions with index 0 or -1,
# otherwise the optimzation should not be applied
mode = theano.compile.mode.get_default_mode()
mode = get_default_mode()
mode = mode.including("local_subtensor_remove_broadcastable_index")
x = dmatrix("x")
y1 = x.dimshuffle(0, "x", 1)
......@@ -2643,9 +2638,7 @@ def test_local_subtensor_remove_broadcastable_index():
class TestSubtensorIncSubtensor:
@classmethod
def setup_class(cls):
cls.mode = theano.compile.mode.get_default_mode().including(
"local_subtensor_inc_subtensor"
)
cls.mode = get_default_mode().including("local_subtensor_inc_subtensor")
def test_basic(self):
# basic test
......@@ -2781,7 +2774,7 @@ class TestLocalSubtensorMakeVector:
prog = f.maker.fgraph.toposort()
assert len(prog) == 1
assert isinstance(prog[0].op, theano.compile.ops.DeepCopyOp)
assert isinstance(prog[0].op, DeepCopyOp)
assert f(0, 1, 2) == 0
def test_slice_idx_stop(self):
......@@ -2840,9 +2833,7 @@ class TestLocalSubtensorMakeVector:
x, y, z = lscalars("xyz")
v = make_vector(x, y, z)
mode = theano.compile.mode.get_default_mode().including(
"local_subtensor_make_vector"
)
mode = get_default_mode().including("local_subtensor_make_vector")
# list of subtensor cases, where local_subtensor_make_vector
# inserts a new MakeVector node
......@@ -3595,7 +3586,7 @@ class TestLocalSubtensorMerge:
class TestLocalAdvSub1AdvIncSub1:
def setup_method(self):
utt.seed_rng()
mode = theano.compile.mode.get_default_mode()
mode = get_default_mode()
self.mode = mode.including("local_adv_sub1_adv_inc_sub1").excluding("fusion")
self.mode_no_assert = self.mode.including("local_remove_all_assert")
......@@ -3628,7 +3619,7 @@ class TestLocalAdvSub1AdvIncSub1:
utt.assert_allclose(dy, res)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, (compile.DeepCopyOp, Elemwise))
assert isinstance(topo[0].op, (DeepCopyOp, Elemwise))
# inc_subtensor(data[idx], y)
inc = inc_subtensor(x[idx], y)
......@@ -3690,7 +3681,7 @@ class TestLocalAdvSub1AdvIncSub1:
class TestAllocZero:
def setup_method(self):
mode = theano.compile.mode.get_default_mode()
mode = get_default_mode()
self.mode = mode.including(
"local_incsubtensor_of_zeros",
"local_setsubtensor_of_constants",
......@@ -3923,7 +3914,7 @@ def test_local_IncSubtensor_serialize():
y = (W[i] + W[j] + W[1] + W[i, j]).sum()
cost = tt.sqr(t - y)
dW = theano.grad(cost, W)
mode = theano.compile.mode.get_default_mode().excluding("fusion")
mode = get_default_mode().excluding("fusion")
mode = mode.including("local_IncSubtensor_serialize")
f = function([i, j, t], updates=[(W, W - 0.01 * dW)], mode=mode)
topo = f.maker.fgraph.toposort()
......@@ -3966,8 +3957,8 @@ def test_local_set_to_inc_subtensor():
s = v[[2, 1]]
g = s + 3
r = set_subtensor(s, g)
moder = compile.get_default_mode().excluding("local_set_to_inc_subtensor")
modet = compile.get_default_mode().including("local_set_to_inc_subtensor")
moder = get_default_mode().excluding("local_set_to_inc_subtensor")
modet = get_default_mode().including("local_set_to_inc_subtensor")
f1 = function([v], r, mode=moder)
f2 = function([v], r, mode=modet)
......@@ -4002,7 +3993,7 @@ def test_local_subtensor_of_dot():
m2 = matrix()
d1 = np.arange(6).reshape((3, 2)).astype(config.floatX)
d2 = np.arange(8).reshape((2, 4)).astype(config.floatX) + 10
mode = compile.get_default_mode().including("local_subtensor_of_dot")
mode = get_default_mode().including("local_subtensor_of_dot")
def test_equality(a, b):
return a.shape == b.shape and np.allclose(a, b)
......@@ -4364,7 +4355,7 @@ def test_local_elemwise_sub_zeros():
mat_val = rng.rand(3, 2).astype(config.floatX)
mode = (
theano.compile.get_default_mode()
get_default_mode()
.excluding(
"canonicalize",
"uncanonicalize",
......@@ -4450,7 +4441,7 @@ class TestLocalUselessElemwiseComparison:
> |X[t] [id M] -> [id I]
"""
mode = theano.compile.get_default_mode().excluding("fusion")
mode = get_default_mode().excluding("fusion")
f = function([X, Y], Z, mode=mode)
f(
self.rng.rand(2, 3).astype(config.floatX),
......@@ -4512,9 +4503,7 @@ class TestLocalUselessElemwiseComparison:
def test_inequality_with_self(self):
x = scalar("x", dtype=config.floatX)
mode = theano.compile.get_default_mode().including(
"local_useless_elemwise_comparison"
)
mode = get_default_mode().including("local_useless_elemwise_comparison")
f = function([x], tt.lt(x, x), mode=mode)
self.assert_eqs_const(f, 0)
......@@ -4536,7 +4525,7 @@ class TestLocalUselessElemwiseComparison:
def test_shape_inequality_with_self(self):
x = vector("x", dtype=config.floatX)
mode = theano.compile.get_default_mode().including(
mode = get_default_mode().including(
"local_useless_elemwise_comparison",
"local_shape_to_shape_i",
"local_track_shape_i",
......@@ -4576,7 +4565,7 @@ class TestLocalUselessElemwiseComparison:
def test_shape_add_inequality(self):
x = vector("x", dtype=config.floatX)
mode = theano.compile.get_default_mode().including(
mode = get_default_mode().including(
"local_useless_elemwise_comparison",
"local_shape_to_shape_i",
"local_track_shape_i",
......@@ -4620,7 +4609,7 @@ class TestLocalUselessElemwiseComparison:
def test_and(self):
# bitwise "and" with 0 should give 0 for both bool and int
# bitwise "and" with 1 should only simplify for bool
mode = theano.compile.get_default_mode().including("canonicalize")
mode = get_default_mode().including("canonicalize")
for dtype, zero, one in [
("bool", np.array(False), np.array(True)),
("int8", np.int8(0), np.int8(1)),
......@@ -4650,7 +4639,7 @@ class TestLocalUselessElemwiseComparison:
def test_or(self):
# bitwise "or" with 0 should simplify for both bool and int
# bitwise "or" with 1 should only give 1 for bool
mode = theano.compile.get_default_mode().including("canonicalize")
mode = get_default_mode().including("canonicalize")
for dtype, zero, one in [
("bool", np.array(False), np.array(True)),
("int8", np.int8(0), np.int8(1)),
......@@ -4679,7 +4668,7 @@ class TestLocalUselessElemwiseComparison:
def test_xor(self):
# bitwise "xor" with itself should always give 0 for both bool and int.
mode = theano.compile.get_default_mode().including("canonicalize")
mode = get_default_mode().including("canonicalize")
for dtype in ("bool", "int8"):
x = scalar("x", dtype=dtype)
......@@ -4687,9 +4676,7 @@ class TestLocalUselessElemwiseComparison:
self.assert_eqs_const(f, 0)
def test_stacktrace(self):
mode = theano.compile.get_default_mode().including(
"local_useless_elemwise_comparison"
)
mode = get_default_mode().including("local_useless_elemwise_comparison")
x = vector("x", dtype=config.floatX)
f = function([x], tt.gt(x, x), mode=mode)
......@@ -4717,7 +4704,7 @@ class TestLocalCanonicalizeAlloc:
assert [node.op for node in f.maker.fgraph.toposort()] == [deep_copy_op]
# In DebugMode, the shape mismatch should be detected
if isinstance(mode_opt, compile.debugmode.DebugMode):
if isinstance(mode_opt, DebugMode):
with pytest.raises(ValueError):
f
......@@ -4822,7 +4809,7 @@ class TestLocalUselessIncSubtensorAlloc:
mode = config.mode
if mode == "FAST_COMPILE":
mode = "FAST_RUN"
self.mode = compile.mode.get_mode(mode)
self.mode = get_mode(mode)
def test_advanced_inc_subtensor(self):
x = vector("x")
......@@ -5009,7 +4996,7 @@ class TestShapeOptimizer:
out = self.max_pool_c01b(a, 1, 1, 1)
# max_pool_c01b use -inf and this will trigger DebugMode error.
mode = copy.copy(theano.compile.get_default_mode())
mode = copy.copy(get_default_mode())
mode.check_isfinite = False
f = function([], out, mode=mode)
f()
......@@ -5066,7 +5053,7 @@ class TestShapeOptimizer:
if isinstance(node.op, IdentityNoShape):
return [identity_shape(node.inputs[0])]
mode = theano.compile.get_default_mode().including("ShapeOpt", "specialize")
mode = get_default_mode().including("ShapeOpt", "specialize")
rng = np.random.RandomState(utt.fetch_seed())
x = tensor3("x")
ins_x = identity_noshape(x)
......@@ -5083,7 +5070,7 @@ class TestShapeOptimizer:
# Register the optimization
opt.register_specialize(local_identity_noshape_to_identity_shape)
mode = theano.compile.get_default_mode().including("ShapeOpt", "specialize")
mode = get_default_mode().including("ShapeOpt", "specialize")
# With the optimization
# The identity_shape op should not be needed anymore to compute
# the shape
......@@ -5110,7 +5097,7 @@ class TestShapeOptimizer:
X = matrix()
expr = X.shape[0]
mode = theano.compile.get_default_mode().excluding("ShapeOpt")
mode = get_default_mode().excluding("ShapeOpt")
f = function([X], expr, mode=mode)
print(f([[1, 2], [2, 3]]))
......@@ -5132,7 +5119,7 @@ class TestAssert(utt.InferShapeTester):
mode = config.mode
if mode == "FAST_COMPILE":
mode = "FAST_RUN"
mode = compile.mode.get_mode(mode)
mode = get_mode(mode)
x = scalar()
f = function([x], assert_op(x, 1), mode=mode)
......@@ -5147,7 +5134,7 @@ class TestAssert(utt.InferShapeTester):
mode = config.mode
if mode == "FAST_COMPILE":
mode = "FAST_RUN"
mode = compile.mode.get_mode(mode)
mode = get_mode(mode)
x = scalar()
y = scalar()
......@@ -5164,7 +5151,7 @@ class TestAssert(utt.InferShapeTester):
mode = config.mode
if mode == "FAST_COMPILE":
mode = "FAST_RUN"
mode = compile.mode.get_mode(mode)
mode = get_mode(mode)
x = scalar()
y = scalar()
......@@ -5181,12 +5168,12 @@ class TestAssert(utt.InferShapeTester):
mode = config.mode
if mode == "FAST_COMPILE":
mode = "FAST_RUN"
mode = compile.mode.get_mode(mode).including("local_remove_all_assert")
mode = get_mode(mode).including("local_remove_all_assert")
x = scalar()
y = scalar()
f = function([x, y], assert_op(x, y), mode=mode)
if isinstance(mode, theano.compile.debugmode.DebugMode):
if isinstance(mode, DebugMode):
# DebugMode will run the original version with the Assert
with pytest.raises(AssertionError):
f(1, 0)
......@@ -5196,7 +5183,7 @@ class TestAssert(utt.InferShapeTester):
assert len(topo) == 1, topo
assert topo[0].op == deep_copy_op, topo
mode = compile.mode.get_default_mode()
mode = get_default_mode()
a = assert_op(x, tt.eq(x, 0).any())
f = function([x], a, mode=mode.excluding("unsafe"))
topo = f.maker.fgraph.toposort()
......@@ -5227,7 +5214,7 @@ def test_local_mul_specialize():
mode = config.mode
if mode == "FAST_COMPILE":
mode = "FAST_RUN"
mode = compile.mode.get_mode(mode)
mode = get_mode(mode)
mode = mode.excluding("fusion")
v = vector()
......@@ -5276,7 +5263,7 @@ class TestTile:
f = function([var], tile(var, (1,) * ndim), mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, compile.DeepCopyOp)
assert isinstance(topo[0].op, DeepCopyOp)
f(data)
# In this case the opt only removes nodes,
# no need to check_stack_trace
......@@ -5294,7 +5281,7 @@ class TestTile:
def speed_local_pow_specialize_range():
val = np.random.rand(1e7)
v = vector()
mode = compile.mode.get_default_mode()
mode = get_default_mode()
mode_without_pow_opt = mode.excluding("local_pow_specialize")
for i in range(500, 513):
f1 = function([v], v ** i, mode=mode)
......@@ -5326,7 +5313,7 @@ def test_local_pow_specialize():
mode = config.mode
if mode == "FAST_COMPILE":
mode = "FAST_RUN"
mode = compile.mode.get_mode(mode)
mode = get_mode(mode)
mode = mode.excluding("fusion")
v = vector()
......@@ -5379,7 +5366,7 @@ def test_local_pow_specialize_device_more_aggressive_on_cpu():
mode = config.mode
if mode == "FAST_COMPILE":
mode = "FAST_RUN"
mode = compile.mode.get_mode(mode)
mode = get_mode(mode)
mode = mode.excluding("fusion").excluding("gpu")
v = vector()
......@@ -5418,7 +5405,7 @@ def test_local_pow_specialize_device_more_aggressive_on_cpu():
class TestRebroadcast:
def test_local_useless_rebroadcast(self):
mode = theano.compile.get_default_mode().including("canonicalize")
mode = get_default_mode().including("canonicalize")
v1 = vector()
v2 = vector()
j = tt.join(0, v1, v2)
......@@ -5430,7 +5417,7 @@ class TestRebroadcast:
assert check_stack_trace(f, ops_to_check="all")
def test_rebroadcast_rebroadcast(self):
mode = theano.compile.get_default_mode().including("canonicalize")
mode = get_default_mode().including("canonicalize")
m = matrix()
s = tt.addbroadcast(m, 0, 1)
v = tt.unbroadcast(s, 1)
......@@ -5444,9 +5431,7 @@ class TestRebroadcast:
class TestUselessElemwise:
def setup_method(self):
self.mode = theano.compile.get_default_mode().including(
"canonicalize", "local_fill_to_alloc"
)
self.mode = get_default_mode().including("canonicalize", "local_fill_to_alloc")
def test_eq(self):
x = dmatrix()
......@@ -5533,7 +5518,7 @@ class TestUselessElemwise:
class TestCastCast:
def setup_method(self):
mode = theano.compile.get_default_mode()
mode = get_default_mode()
self.mode = mode.including("local_cast_cast")
def test_consecutive(self):
......@@ -5591,7 +5576,7 @@ class TestCastCast:
class TestFuncInverse:
def setup_method(self):
mode = theano.compile.get_default_mode()
mode = get_default_mode()
self.mode = mode.including("local_func_inv")
def assert_func_pair_optimized(
......@@ -5654,7 +5639,7 @@ def test_constant_folding():
# An error removed that registration during the registration.
x = dvector()
mode = theano.compile.get_mode("FAST_COMPILE").excluding("fusion")
mode = get_mode("FAST_COMPILE").excluding("fusion")
f = function([x], [x * 2, x + x], mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
......@@ -5664,7 +5649,7 @@ def test_constant_folding():
x = tt.constant(3)
assert x.ndim == 0
mode = theano.compile.get_mode("FAST_COMPILE").excluding("fusion")
mode = get_mode("FAST_COMPILE").excluding("fusion")
f = function([], [x * 2, x + x], mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
......@@ -5686,7 +5671,7 @@ def test_constant_get_stabilized():
x2 = scalar()
y2 = tt.log(1 + tt.exp(x2))
mode = theano.compile.get_default_mode()
mode = get_default_mode()
mode.check_isfinite = False
f2 = function([x2], y2, mode=mode)
......@@ -5727,7 +5712,7 @@ class TestLocalSwitchSink:
)
self.mode = (
theano.compile.mode.get_default_mode()
get_default_mode()
.including("canonicalize", "fast_run")
.excluding("gpu", "fusion")
)
......@@ -5837,7 +5822,7 @@ class TestLocalSwitchSink:
class TestLocalErf:
def setup_method(self):
self.mode = (
theano.compile.mode.get_default_mode()
get_default_mode()
.including("canonicalize", "fast_run")
.excluding("gpu", "fusion")
)
......@@ -5935,7 +5920,7 @@ class TestLocalErf:
class TestLocalErfc:
def setup_method(self):
self.mode_fusion = (
theano.compile.mode.get_default_mode()
get_default_mode()
.including("canonicalize")
.including("fast_run")
.excluding("gpu")
......@@ -6164,7 +6149,7 @@ class TestLocalErfc:
val = np.random.rand(1e6)
x = vector()
mode = theano.compile.mode.get_mode("FAST_RUN")
mode = get_mode("FAST_RUN")
f1 = function([x], tt.log(tt.erfc(x)), mode=mode.excluding("local_log_erfc"))
f2 = function([x], tt.log(tt.erfc(x)), mode=mode)
print(f1.maker.fgraph.toposort())
......@@ -6407,9 +6392,7 @@ class TestLocalSumProd:
"""
def setup_method(self):
self.mode = theano.compile.get_default_mode().including(
"canonicalize", "specialize"
)
self.mode = get_default_mode().including("canonicalize", "specialize")
def test_local_sum_prod_mul_by_scalar(self):
# Test the optimization local_sum_prod_mul_by_scalar for both Sum and
......@@ -6755,7 +6738,7 @@ class TestLocalSumProd:
def test_local_sum_prod_mul_by_scalar_stack_trace(self):
# Test that stack trace is copied over correctly for local_sum_prod_mul_by_scalar.
m0 = (
theano.compile.get_default_mode()
get_default_mode()
.excluding("inplace_elemwise_opt")
.including("canonicalize", "specialize")
)
......@@ -6821,7 +6804,7 @@ class TestLocalOptAllocF16(TestLocalOptAlloc):
class TestLocalReduce:
def setup_method(self):
self.mode = theano.compile.get_default_mode().including(
self.mode = get_default_mode().including(
"canonicalize", "specialize", "uncanonicalize", "local_max_and_argmax"
)
......@@ -6957,7 +6940,7 @@ class TestLocalReduce:
class TestLocalSumProdDimshuffle:
def setup_method(self):
self.mode = theano.compile.get_default_mode().including("canonicalize")
self.mode = get_default_mode().including("canonicalize")
def test_local_sum_div_dimshuffle(self):
a = matrix("a")
......@@ -7065,7 +7048,7 @@ class TestLocalSumProdDimshuffle:
c_val = rng.randn(2, 2, 2).astype(config.floatX)
d_val = np.asarray(rng.randn(), config.floatX)
default_mode = theano.compile.mode.get_default_mode()
default_mode = get_default_mode()
# FusionOptimizer is included to make sure that expected_outer_operator
# remains the same for all optimization modes.
mode_with_opt = default_mode.including(
......@@ -7484,7 +7467,7 @@ def test_local_useless_split():
opt = tt.split(x, splits, n_splits=1)
nonopt = tt.split(x, splits, n_splits=3)
mode = compile.get_default_mode().including("local_useless_split")
mode = get_default_mode().including("local_useless_split")
f_opt = function([x, splits], opt, mode=mode)
f_nonopt = function([x, splits], nonopt, mode=mode)
......@@ -7506,7 +7489,7 @@ def test_local_flatten_lift():
x = tensor4()
out = tt.flatten(tt.exp(x), i)
assert out.ndim == i
mode = compile.mode.get_default_mode()
mode = get_default_mode()
mode = mode.including("local_reshape_lift")
f = function([x], out, mode=mode)
x_np = np.random.rand(5, 4, 3, 2).astype(config.floatX)
......@@ -7544,7 +7527,7 @@ class TestLocalUselessReshape:
self.rng = np.random.RandomState(utt.fetch_seed())
def test_0(self):
mode = theano.compile.get_default_mode().including("local_useless_reshape")
mode = get_default_mode().including("local_useless_reshape")
i = iscalar("i")
m = tt.mgrid[
0:i,
......@@ -7557,7 +7540,7 @@ class TestLocalUselessReshape:
x = matrix("x")
r = x.reshape(x.shape)
m0 = theano.compile.get_default_mode()
m0 = get_default_mode()
m1 = m0.including("local_useless_reshape")
f1 = function([x], r, mode=m1)
topo = f1.maker.fgraph.toposort()
......@@ -7575,7 +7558,7 @@ class TestLocalUselessReshape:
x = matrix("x")
r = x.reshape([Shape_i(i)(x) for i in range(x.ndim)])
m0 = theano.compile.get_default_mode()
m0 = get_default_mode()
m1 = m0.including("local_useless_reshape")
f1 = function([x], r, mode=m1)
topo = f1.maker.fgraph.toposort()
......@@ -7590,7 +7573,7 @@ class TestLocalUselessReshape:
x = matrix("x")
r = x.reshape((x.shape[0], -1))
m0 = theano.compile.get_default_mode()
m0 = get_default_mode()
m1 = m0.including("local_useless_reshape")
f1 = function([x], r, mode=m1)
topo = f1.maker.fgraph.toposort()
......@@ -7646,7 +7629,7 @@ def test_local_reshape_lift():
x = tensor4()
out = tt.exp(x).reshape([x.size])
assert out.ndim == 1
mode = compile.mode.get_default_mode()
mode = get_default_mode()
mode = mode.including("local_reshape_lift")
f = function([x], out, mode=mode)
f(np.random.rand(5, 4, 3, 2).astype(config.floatX))
......@@ -7803,7 +7786,7 @@ def test_assert_op_gradient():
class TestIntDivByOne:
def setup_method(self):
self.mode = theano.compile.mode.get_default_mode()
self.mode = get_default_mode()
self.mode = self.mode.including("local_intdiv_by_one")
def test1(self):
......@@ -7881,7 +7864,7 @@ def test_local_sumsqr2dot():
W = matrix("W")
y = tt.sqr(W.dimshuffle("x", 0, 1) * G.dimshuffle(0, "x", 1)).sum(axis=(1, 2))
MODE = theano.compile.get_default_mode().including("local_sumsqr2dot")
MODE = get_default_mode().including("local_sumsqr2dot")
f = function([W, G], y, mode=MODE)
......@@ -7914,7 +7897,7 @@ def test_local_expm1():
z = tt.exp(x) - 2.0
t = tt.exp(x) - x
s = tt.exp(u) - np.ones((4, 3)).astype(config.floatX)
MODE = theano.compile.get_default_mode().including("local_expm1")
MODE = get_default_mode().including("local_expm1")
f = function([x], y, mode=MODE)
g = function([x], z, mode=MODE)
h = function([x], t, mode=MODE)
......@@ -7949,7 +7932,7 @@ def test_local_expm1():
def test_local_merge_alloc():
# Add this opt to the default mode,
# otherwise, FAST_COMPILE fails.
default_mode = theano.compile.mode.get_default_mode()
default_mode = get_default_mode()
opt_mode = default_mode.including("local_merge_alloc")
x = iscalar("x")
......@@ -8052,7 +8035,7 @@ def compile_graph_log_sum_exp(x, axis, dimshuffle_op=None):
if dimshuffle_op:
sum_exp = dimshuffle_op(sum_exp)
y = tt.log(sum_exp)
MODE = theano.compile.get_default_mode().including("local_log_sum_exp")
MODE = get_default_mode().including("local_log_sum_exp")
return function([x], y, mode=MODE)
......
......@@ -6,6 +6,7 @@ import pytest
import theano
from tests import unittest_tools as utt
from theano.compile.mode import Mode
from theano.tensor.sort import (
ArgSortOp,
SortOp,
......@@ -428,7 +429,7 @@ class TestTopK:
# So don't use DebugMode here.
mode = self.mode
if isinstance(self.mode, theano.compile.debugmode.DebugMode):
mode = theano.Mode(optimizer=mode.optimizer)
mode = Mode(optimizer=mode.optimizer)
fn = theano.function([x], y, mode=mode)
assert any(
[isinstance(n.op, self.op_class) for n in fn.maker.fgraph.apply_nodes]
......
......@@ -12,6 +12,7 @@ import theano.tensor as tt
from tests import unittest_tools as utt
from tests.tensor.utils import inplace_func, rand, randint_ranged
from theano.compile import DeepCopyOp, shared
from theano.compile.io import In
from theano.configdefaults import config
from theano.graph.op import get_test_value
from theano.graph.toolbox import is_same_graph
......@@ -1265,7 +1266,7 @@ class TestSubtensor(utt.OptimizationTestMixin):
data_copy[idx] = inc_num
else:
data_copy[idx] += inc_num
data_var = theano.In(data_var, mutable=True)
data_var = In(data_var, mutable=True)
# Remember data for the Theano function (see below).
all_inputs_var += [data_var, idx_var, inc_var]
......
......@@ -249,7 +249,7 @@ def test_mode_apply():
with pytest.raises(ValueError, match="Expected one of"):
configdefaults._filter_mode("not_a_mode")
# test with theano.Mode instance
# test with Mode instance
import theano.compile.mode
assert (
......
......@@ -282,8 +282,7 @@ class Param(In):
borrow=None,
):
warnings.warn(
"The Param class is deprecated. Replace Param(default=N)"
" by theano.In(value=N)",
"The Param class is deprecated. Replace Param(default=N)" " by In(value=N)",
stacklevel=2,
)
super().__init__(
......@@ -322,7 +321,7 @@ def pfunc(
Function parameters, these are not allowed to be shared variables.
outputs : list of Variables or Out instances
Expressions to compute.
mode : string or `theano.compile.Mode` instance
mode : string or `theano.compile.mode.Mode` instance
Compilation mode.
updates : iterable over pairs (shared_variable, new_expression). List, tuple or dict.
Update the values for SharedVariable inputs according to these
......
......@@ -111,14 +111,14 @@ def _filter_mode(val):
if val in str_options:
return val
# This can be executed before Theano is completly imported, so
# theano.Mode is not always available.
# Instead of isinstance(val, theano.Mode),
# theano.compile.mode.Mode is not always available.
# Instead of isinstance(val, theano.compile.mode.Mode),
# we can inspect the __mro__ of the object!
for type_ in type(val).__mro__:
if "theano.compile.mode.Mode" in str(type_):
return val
raise ValueError(
f"Expected one of {str_options}, or an instance of theano.Mode. "
f"Expected one of {str_options}, or an instance of theano.compile.mode.Mode. "
f"Instead got: {val}."
)
......
......@@ -10,6 +10,8 @@ import theano
import theano.pathparse
import theano.tensor as tt
from theano.assert_op import Assert
from theano.compile.io import Out
from theano.compile.mode import Mode
from theano.compile.ops import shape_i, shape_i_op
from theano.configdefaults import SUPPORTED_DNN_CONV_ALGO_RUNTIME, config
from theano.gpuarray import cudnn_defs, pygpu
......@@ -285,7 +287,7 @@ class MakerCDataType(CDataType):
self._fn = theano.function(
[v],
CDataMaker(self)(v),
mode=theano.Mode(optimizer=None),
mode=Mode(optimizer=None),
profile=False,
)
return self._fn
......@@ -421,9 +423,7 @@ def version(raises=True):
return -1
if version.v is None:
f = theano.function(
[], DnnVersion()(), theano.Mode(optimizer=None), profile=False
)
f = theano.function([], DnnVersion()(), Mode(optimizer=None), profile=False)
v = f()
if v[0] != v[1]:
raise RuntimeError(
......@@ -2632,7 +2632,7 @@ def _make_dropout_desc(dropout, seed, context_name):
desc, states = theano.function(
[],
_DropoutDescriptor(context_name)(dropout, seed, context_name),
theano.Mode(optimizer=None),
Mode(optimizer=None),
profile=False,
)()
return desc, states
......@@ -2740,7 +2740,7 @@ def _make_rnn_desc(
_RNNDescriptor(context_name)(
hidden_size, num_layers, ddesc, input_mode, direction_mode, rnn_mode, dtype
),
theano.Mode(optimizer=None),
Mode(optimizer=None),
profile=False,
)()
return desc
......@@ -2774,7 +2774,7 @@ def _get_param_size(desc, input_size, dtype, context_name):
return theano.function(
[],
_RNNParamSize(context_name)(desc, input_size, typecode),
theano.Mode(optimizer=None),
Mode(optimizer=None),
profile=False,
)()
......@@ -3016,8 +3016,8 @@ class _RNNSplitParams(DnnBase):
def _split_rnn_params(w, desc, layer, input_size, dtype, rnn_mode):
typecode = gpuarray.dtype_to_typecode(dtype)
outs = _RNNSplitParams(rnn_mode)(w, desc, layer, input_size, typecode)
outs = [theano.Out(o, borrow=True) for o in outs]
return theano.function([], outs, theano.Mode(optimizer=None), profile=False)()
outs = [Out(o, borrow=True) for o in outs]
return theano.function([], outs, Mode(optimizer=None), profile=False)()
class GpuDnnRNNOp(DnnBase):
......
......@@ -14,8 +14,8 @@ from io import StringIO
import numpy as np
import theano
from theano.compile import Function, SharedVariable, debugmode
from theano.compile.io import In, Out
from theano.configdefaults import config
from theano.graph.basic import (
Apply,
......@@ -176,7 +176,7 @@ def debugprint(
order.extend([topo for item in obj.outputs])
elif isinstance(obj, (int, float, np.ndarray)):
print(obj, file=_file)
elif isinstance(obj, (theano.In, theano.Out)):
elif isinstance(obj, (In, Out)):
results_to_print.append(obj.variable)
profile_list.append(None)
smap.append(None)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论