Commit a1388f92 authored by CloudChaoszero, committed by Thomas Wiecki

🎉 Start removal of circular deps

🔥 Remove circular aesara imports in the tests/compile/ section; 🎨 Replace self-import dependency changes in tests/d3viz; 📌 Continue dependency-configuration changes for `import aesara` cycles; 🎨 Re-configure module import specification; 🎨 Redefine imports as `aesara.XXX`; 🔥 Remove duplicate config import in test_pfunc; Fix pre-commit formatting on tests; Revert the `value == 0` change due to numeric data-type considerations during pytest runs; Change import order after rebase.
Parent commit: ea528820
......@@ -3,15 +3,20 @@
from collections import OrderedDict
from functools import partial, reduce
import aesara
from aesara import tensor as aet
import aesara.tensor as aet
from aesara.compile.function.pfunc import rebuild_collect_shared
from aesara.compile.function.types import orig_function
from aesara.compile.mode import optdb
from aesara.compile.sharedvalue import SharedVariable
from aesara.configdefaults import config
from aesara.gradient import DisconnectedType
from aesara.graph.basic import Apply, Variable, graph_inputs, io_connection_pattern
from aesara.gradient import DisconnectedType, Rop, grad
from aesara.graph.basic import (
Apply,
Variable,
clone_replace,
graph_inputs,
io_connection_pattern,
)
from aesara.graph.fg import FunctionGraph
from aesara.graph.null_type import NullType
from aesara.graph.op import Op, ops_with_inner_function
......@@ -441,7 +446,7 @@ class OpFromGraph(Op):
output_grads = [out_t() for out_t in self.output_types]
fn_grad = partial(
aesara.gradient.grad,
grad,
cost=None,
disconnected_inputs="ignore",
return_disconnected="Disconnected",
......@@ -559,7 +564,7 @@ class OpFromGraph(Op):
return
eval_points = [inp_t() for inp_t in self.input_types]
fn_rop = partial(aesara.gradient.Rop, wrt=local_inputs, eval_points=eval_points)
fn_rop = partial(Rop, wrt=local_inputs, eval_points=eval_points)
TYPE_ERR_MSG = (
"R_op overrides should be (single or list of)"
"OpFromGraph | 'default' | None | 0 | callable, got %s"
......@@ -786,7 +791,7 @@ class OpFromGraph(Op):
# each shape call. Aesara optimizer will clean this up later, but this
# will ask extra work to the optimizer.
repl = dict(zip(self.local_inputs, node.inputs))
cloned = aesara.clone_replace(reduce(tuple.__add__, out_shp), replace=repl)
cloned = clone_replace(reduce(tuple.__add__, out_shp), replace=repl)
ret = []
used = 0
for i in range(len(out_shp)):
......@@ -824,7 +829,7 @@ def inline_ofg_expansion(fgraph, node):
return False
if not op.is_inline:
return False
return aesara.clone_replace(
return clone_replace(
op.local_outputs, {u: v for u, v in zip(node.op.local_inputs, node.inputs)}
)
......
......@@ -9,8 +9,9 @@ import shutil
import numpy as np
import aesara
from aesara.configdefaults import config
from aesara.graph.op import Op
from aesara.graph.type import CType
from aesara.utils import flatten
......@@ -54,9 +55,9 @@ def cleanup():
have_npy_abi_version = True
elif obj.startswith("c_compiler_str="):
have_c_compiler = True
elif isinstance(
obj, (aesara.graph.op.Op, aesara.graph.type.CType)
) and hasattr(obj, "c_code_cache_version"):
elif isinstance(obj, (Op, CType)) and hasattr(
obj, "c_code_cache_version"
):
v = obj.c_code_cache_version()
if v not in [(), None] and v not in key[0]:
# Reuse have_npy_abi_version to
......@@ -125,13 +126,7 @@ def print_compiledir_content():
with open(filename, "rb") as file:
try:
keydata = pickle.load(file)
ops = list(
{
x
for x in flatten(keydata.keys)
if isinstance(x, aesara.graph.op.Op)
}
)
ops = list({x for x in flatten(keydata.keys) if isinstance(x, Op)})
# Whatever the case, we count compilations for OP classes.
for op_class in {op.__class__ for op in ops}:
table_op_class.setdefault(op_class, 0)
......@@ -140,11 +135,7 @@ def print_compiledir_content():
zeros_op += 1
else:
types = list(
{
x
for x in flatten(keydata.keys)
if isinstance(x, aesara.graph.type.CType)
}
{x for x in flatten(keydata.keys) if isinstance(x, CType)}
)
compile_start = compile_end = float("nan")
for fn in os.listdir(os.path.join(compiledir, dir)):
......
......@@ -23,16 +23,17 @@ from typing import Dict, List, Set
import numpy.distutils
import aesara
# we will abuse the lockfile mechanism when reading and writing the registry
from aesara.compile.compilelock import lock_ctx
from aesara.configdefaults import config, gcc_version_str
from aesara.configparser import BoolParam, StrParam
from aesara.graph.op import Op
from aesara.link.c.exceptions import CompileError, MissingGXX
from aesara.utils import (
LOCAL_BITWIDTH,
flatten,
hash_from_code,
maybe_add_to_os_environ_pathlist,
output_subprocess_Popen,
subprocess_Popen,
)
......@@ -1126,7 +1127,7 @@ class ModuleCache:
self.loaded_key_pkl.add(key_pkl)
elif config.cmodule__warn_no_version:
key_flat = flatten(key)
ops = [k for k in key_flat if isinstance(k, aesara.graph.op.Op)]
ops = [k for k in key_flat if isinstance(k, Op)]
_logger.warning(
"not all the"
" following op(s) implement"
......@@ -1993,7 +1994,7 @@ def try_march_flag(flags):
"""
)
cflags = flags + ["-L" + d for d in aesara.link.c.cmodule.std_lib_dirs()]
cflags = flags + ["-L" + d for d in std_lib_dirs()]
compilation_result, execution_result = GCC_compiler.try_compile_tmp(
test_code, tmp_prefix="try_march_", flags=cflags, try_run=True
)
......@@ -2773,7 +2774,7 @@ def default_blas_ldflags():
res = try_blas_flag(flags)
if res:
check_mkl_openmp()
aesara.utils.maybe_add_to_os_environ_pathlist("PATH", lib_path[0])
maybe_add_to_os_environ_pathlist("PATH", lib_path[0])
return res
# to support path that includes spaces, we need to wrap it with double quotes on Windows
......@@ -2844,7 +2845,7 @@ def add_blas_configvars():
config.add(
"blas__ldflags",
"lib[s] to include for [Fortran] level-3 blas implementation",
aesara.configparser.StrParam(default_blas_ldflags),
StrParam(default_blas_ldflags),
# Added elsewhere in the c key only when needed.
in_c_key=False,
)
......@@ -2852,7 +2853,7 @@ def add_blas_configvars():
config.add(
"blas__check_openmp",
"Check for openmp library conflict.\nWARNING: Setting this to False leaves you open to wrong results in blas-related operations.",
aesara.configparser.BoolParam(True),
BoolParam(True),
in_c_key=False,
)
......
......@@ -8,7 +8,7 @@ from collections import OrderedDict
import numpy as np
import pytest
import aesara
from aesara.compile import shared
from aesara.compile.function import function, function_dump
from aesara.compile.io import In
from aesara.configdefaults import config
......@@ -60,7 +60,7 @@ class TestFunctionIn:
def test_in_strict(self):
a = dvector()
b = aesara.shared(7)
b = shared(7)
out = a + b
f = function([In(a, strict=False)], out)
......@@ -79,14 +79,14 @@ class TestFunctionIn:
def test_explicit_shared_input(self):
# This is not a test of the In class per se, but the In class relies
# on the fact that shared variables cannot be explicit inputs
a = aesara.shared(1.0)
a = shared(1.0)
with pytest.raises(TypeError):
function([a], a + 1)
def test_in_shared_variable(self):
# Ensure that an error is raised if the In wrapped is used to wrap
# a shared variable
a = aesara.shared(1.0)
a = shared(1.0)
a_wrapped = In(a, update=a + 1)
with pytest.raises(TypeError):
function([a_wrapped])
......@@ -130,7 +130,7 @@ class TestFunctionIn:
def test_in_update_shared(self):
# Test that using both In() with updates and shared variables with
# updates in the same function behaves as expected
shared_var = aesara.shared(1.0)
shared_var = shared(1.0)
a = dscalar("a")
a_wrapped = In(a, value=0.0, update=shared_var)
f = function([a_wrapped], [], updates={shared_var: a}, mode="FAST_RUN")
......@@ -244,14 +244,14 @@ def test_pickle_unpickle_with_reoptimization():
mode = "FAST_RUN"
x1 = fmatrix("x1")
x2 = fmatrix("x2")
x3 = aesara.shared(np.ones((10, 10), dtype=floatX))
x4 = aesara.shared(np.ones((10, 10), dtype=floatX))
x3 = shared(np.ones((10, 10), dtype=floatX))
x4 = shared(np.ones((10, 10), dtype=floatX))
y = aet_sum(aet_sum(aet_sum(x1 ** 2 + x2) + x3) + x4)
updates = OrderedDict()
updates[x3] = x3 + 1
updates[x4] = x4 + 1
f = aesara.function([x1, x2], y, updates=updates, mode=mode)
f = function([x1, x2], y, updates=updates, mode=mode)
# now pickle the compiled aesara fn
string_pkl = pickle.dumps(f, -1)
......@@ -276,14 +276,14 @@ def test_pickle_unpickle_without_reoptimization():
mode = "FAST_RUN"
x1 = fmatrix("x1")
x2 = fmatrix("x2")
x3 = aesara.shared(np.ones((10, 10), dtype=floatX))
x4 = aesara.shared(np.ones((10, 10), dtype=floatX))
x3 = shared(np.ones((10, 10), dtype=floatX))
x4 = shared(np.ones((10, 10), dtype=floatX))
y = aet_sum(aet_sum(aet_sum(x1 ** 2 + x2) + x3) + x4)
updates = OrderedDict()
updates[x3] = x3 + 1
updates[x4] = x4 + 1
f = aesara.function([x1, x2], y, updates=updates, mode=mode)
f = function([x1, x2], y, updates=updates, mode=mode)
# now pickle the compiled aesara fn
string_pkl = pickle.dumps(f, -1)
......
import numpy as np
import pytest
import aesara
from aesara import tensor as aet
from aesara.compile.function import pfunc
import aesara.tensor as aet
from aesara.compile import UnusedInputError
from aesara.compile.function import function, pfunc
from aesara.compile.io import In
from aesara.compile.sharedvalue import shared
from aesara.configdefaults import config
from aesara.graph.fg import MissingInputError
from aesara.misc.safe_asarray import _asarray
from aesara.tensor.math import sum as aet_sum
from aesara.tensor.type import (
......@@ -380,7 +381,7 @@ class TestPfunc:
# the update_var has type matrix, and the update expression
# is a broadcasted scalar, and that should be allowed.
with pytest.raises(TypeError):
aesara.function(
function(
inputs=[],
outputs=[],
updates={output_var: output_var.sum().dimshuffle("x", "x")},
......@@ -390,7 +391,7 @@ class TestPfunc:
x, y = dmatrices("x", "y")
z = shared(np.ones((2, 3)))
with pytest.raises(ValueError):
aesara.function([x, y], [z], updates=[(z, (z + x + y)), (z, (z - x))])
function([x, y], [z], updates=[(z, (z + x + y)), (z, (z - x))])
def test_givens(self):
x = shared(0)
......@@ -623,7 +624,7 @@ class TestPfunc:
assert y.get_value() == 2
# a is needed as input if y.default_update is used
with pytest.raises(aesara.graph.fg.MissingInputError):
with pytest.raises(MissingInputError):
pfunc([], x)
def test_default_updates_partial_graph(self):
......@@ -657,8 +658,8 @@ class TestPfunc:
def test_duplicate_inputs(self):
x = lscalar("x")
with pytest.raises(aesara.compile.UnusedInputError):
aesara.function([x, x, x], x)
with pytest.raises(UnusedInputError):
function([x, x, x], x)
def test_update_same(self):
# There was a bug in CVM, triggered when a shared variable
......@@ -675,8 +676,8 @@ class TestPfunc:
# Is that all the comment above meant, or is the CVM intended
# to add extra non-determinism? Or is the CVM meant to
# deterministically but arbitrarily pick an order for the updates?
f = aesara.function([], [], updates=[(a, a), (b, (2 * b))])
g = aesara.function([], [], updates=[(a, (a * 2)), (b, b)])
f = function([], [], updates=[(a, a), (b, (2 * b))])
g = function([], [], updates=[(a, (a * 2)), (b, b)])
f()
assert a.get_value(borrow=True).shape == (), a.get_value()
......@@ -693,8 +694,8 @@ class TestPfunc:
# See comment in test_update_same about why we try both
# shared variables.
f = aesara.function([], [], updates=[(a, a), (b, (2 * b - b))])
g = aesara.function([], [], updates=[(a, (a * 2 - a)), (b, b)])
f = function([], [], updates=[(a, a), (b, (2 * b - b))])
g = function([], [], updates=[(a, (a * 2 - a)), (b, b)])
f()
assert a.get_value(borrow=True).shape == (), a.get_value()
......@@ -727,7 +728,7 @@ class TestAliasingRules:
# library code.
def shared(self, x):
return aesara.shared(x)
return shared(x)
def test_shared_constructor_copies(self):
# shared constructor makes copy
......@@ -751,9 +752,7 @@ class TestAliasingRules:
x = sparse.SparseType("csc", dtype="float64")()
y = sparse.SparseType("csc", dtype="float64")()
f = aesara.function(
[In(x, mutable=True), In(y, mutable=True)], (x + y) + (x + y)
)
f = function([In(x, mutable=True), In(y, mutable=True)], (x + y) + (x + y))
# Test 1. If the same variable is given twice
# Compute bogus values
......@@ -800,7 +799,7 @@ class TestAliasingRules:
y = dvector()
m1 = dmatrix()
m2 = dmatrix()
f = aesara.function(
f = function(
[
In(x, mutable=True),
In(y, mutable=True),
......@@ -862,7 +861,7 @@ class TestAliasingRules:
# and a shares memory with b, b shares memory with c, but
# c does not share memory with a
f = aesara.function(
f = function(
[
In(x, mutable=True),
In(y, mutable=True),
......@@ -1011,7 +1010,7 @@ class TestAliasingRules:
assert not np.may_share_memory(data_of(A), data_of(B))
# aesara should have been smart enough to not make copies
if aesara.config.mode not in ["DebugMode", "DEBUG_MODE", "FAST_COMPILE"]:
if config.mode not in ["DebugMode", "DEBUG_MODE", "FAST_COMPILE"]:
# We don't ask DebugMode and FAST_COMPILE not to make copy.
# We have the right to do so.
assert np.all(data_of(A) < 5)
......@@ -1042,7 +1041,7 @@ class TestRebuildStrict:
w = imatrix()
x, y = ivectors("x", "y")
z = x * y
f = aesara.function([w, y], z, givens=[(x, w)], rebuild_strict=False)
f = function([w, y], z, givens=[(x, w)], rebuild_strict=False)
z_val = f(np.ones((3, 5), dtype="int32"), np.arange(5, dtype="int32"))
assert z_val.ndim == 2
assert np.all(z_val == np.ones((3, 5)) * np.arange(5))
......@@ -6,14 +6,17 @@ import time
import numpy as np
import pytest
import aesara
import aesara.gpuarray
import aesara.tensor as aet
from aesara.compile import shared
from aesara.compile.debugmode import DebugMode, InvalidValueError
from aesara.compile.function import function
from aesara.compile.function.types import UnusedInputError
from aesara.compile.io import In, Out
from aesara.compile.mode import Mode
from aesara.compile.mode import Mode, get_default_mode
from aesara.configdefaults import config
from aesara.gpuarray import gpuarray_shared_constructor
from aesara.gpuarray.blas import GpuGemm
from aesara.graph.basic import Constant
from aesara.graph.fg import MissingInputError
from aesara.graph.opt import OpKeyOptimizer, PatternSub
......@@ -321,13 +324,13 @@ class TestFunction:
def test_copy_share_memory(self):
x = fscalar("x")
# SharedVariable for tests, one of them has update
y = aesara.shared(value=1)
z = aesara.shared(value=2)
y = shared(value=1)
z = shared(value=2)
out = tanh((x + y + 2) / (x + z - 0.2) ** 2)
# Test for different linkers
for mode in ["FAST_RUN", "FAST_COMPILE"]:
ori = aesara.function([x], [out], mode=mode, updates={z: z + 1})
ori = function([x], [out], mode=mode, updates={z: z + 1})
cpy = ori.copy(share_memory=True)
# Test if memories shared
......@@ -355,17 +358,17 @@ class TestFunction:
def test_swap_SharedVariable(self):
i = iscalar()
x_list = aesara.shared(value=np.random.rand(10).astype(config.floatX))
x_list = shared(value=np.random.rand(10).astype(config.floatX))
x = scalar("x")
# SharedVariable for tests, one of them has update
y = aesara.shared(value=1, name="y")
z = aesara.shared(value=2, name="z")
m = aesara.shared(value=0, name="m")
y = shared(value=1, name="y")
z = shared(value=2, name="z")
m = shared(value=0, name="m")
# SharedVariable to replace
y_rpl = aesara.shared(value=3, name="y_rpl")
z_rpl = aesara.shared(value=4, name="z_rpl")
y_rpl = shared(value=3, name="y_rpl")
z_rpl = shared(value=4, name="z_rpl")
swap = {y: y_rpl, z: z_rpl}
map_SV = {"y_rpl": y_rpl, "z_rpl": z_rpl}
......@@ -375,7 +378,7 @@ class TestFunction:
# for mode in ["FAST_RUN","FAST_COMPILE"]:
second_time = False
for mode in ["FAST_RUN", "FAST_COMPILE"]:
ori = aesara.function(
ori = function(
[i],
[out],
mode=mode,
......@@ -422,25 +425,25 @@ class TestFunction:
# A special testcase for logistic_sgd.py in Deep Learning Tutorial
# This test assert that SharedVariable in different function have same storage
train_x = aesara.shared(value=np.random.rand(10, 10).astype(config.floatX))
test_x = aesara.shared(value=np.random.rand(10, 10).astype(config.floatX))
train_x = shared(value=np.random.rand(10, 10).astype(config.floatX))
test_x = shared(value=np.random.rand(10, 10).astype(config.floatX))
train_y = aesara.shared(value=np.random.rand(10, 1).astype(config.floatX))
test_y = aesara.shared(value=np.random.rand(10, 1).astype(config.floatX))
train_y = shared(value=np.random.rand(10, 1).astype(config.floatX))
test_y = shared(value=np.random.rand(10, 1).astype(config.floatX))
i = iscalar("index")
x = vector("x")
y = vector("y")
# this formular has no sense but for a test
out = (aet_sum(x) - y) ** 2
train = aesara.function(
train = function(
[i],
out,
givens={x: train_x[i], y: train_y[i]},
updates={train_x: train_x + 0.1},
)
test_def = aesara.function([i], out, givens={x: test_x[i], y: test_y[i]})
test_def = function([i], out, givens={x: test_x[i], y: test_y[i]})
test_cpy = train.copy(
swap={train_x: test_x, train_y: test_y}, delete_updates=True
)
......@@ -452,15 +455,15 @@ class TestFunction:
w = iscalar("w")
x = fscalar("x")
# SharedVariable for tests, one of them has update
y = aesara.shared(value=1, name="y")
z = aesara.shared(value=2, name="z")
y = shared(value=1, name="y")
z = shared(value=2, name="z")
out = x + y + z
# Test for different linkers
# for mode in ["FAST_RUN","FAST_COMPILE"]:
# second_time = False
for mode in ["FAST_RUN", "FAST_COMPILE"]:
ori = aesara.function([x], out, mode=mode, updates={z: z * 2})
ori = function([x], out, mode=mode, updates={z: z * 2})
cpy = ori.copy(delete_updates=True)
assert cpy(1)[0] == 4
......@@ -470,10 +473,10 @@ class TestFunction:
# Test if unused implicit and explicit inputs from delete_updates
# are ignored as intended.
for mode in ["FAST_RUN", "FAST_COMPILE"]:
ori = aesara.function([x], x, mode=mode, updates={z: z * 2})
ori = function([x], x, mode=mode, updates={z: z * 2})
cpy = ori.copy(delete_updates=True)
ori = aesara.function([x, w], x, mode=mode, updates={z: z + w})
ori = function([x, w], x, mode=mode, updates={z: z + w})
cpy = ori.copy(delete_updates=True)
def test_shared_state0(self):
......@@ -575,7 +578,7 @@ class TestFunction:
def test_constant_output(self):
# Test that if the output is a constant, we respect the aesara memory interface
f = aesara.function([], aet.constant([4]))
f = function([], aet.constant([4]))
# print f.maker.fgraph.toposort()
out = f()
assert (out == 4).all()
......@@ -586,16 +589,14 @@ class TestFunction:
assert (out2 == 4).all()
# Test that if the output is a constant and borrow, we respect the aesara memory interface
f = aesara.function([], Out(aet.constant([4]), borrow=True))
f = function([], Out(aet.constant([4]), borrow=True))
# print f.maker.fgraph.toposort()
out = f()
assert (out == 4).all()
out[0] = 3
out2 = f()
if isinstance(
aesara.compile.mode.get_default_mode(), aesara.compile.debugmode.DebugMode
):
if isinstance(get_default_mode(), DebugMode):
# In DebugMode, we don't implement optimization based on borrow on the output.
assert (out2 == 4).all()
else:
......@@ -612,12 +613,12 @@ class TestFunction:
aval = np.random.rand(3, 3)
# when borrow=False, test that a destroy map cannot alias output to input
f = aesara.function([In(a, borrow=False)], Out(a + 1, borrow=True))
f = function([In(a, borrow=False)], Out(a + 1, borrow=True))
assert np.all(f(aval) == aval + 1)
assert not np.may_share_memory(aval, f(aval))
# when borrow=False, test that a viewmap cannot alias output to input
f = aesara.function([In(a, borrow=False)], Out(a[0, :], borrow=True))
f = function([In(a, borrow=False)], Out(a[0, :], borrow=True))
assert np.all(f(aval) == aval[0, :])
assert not np.may_share_memory(aval, f(aval))
......@@ -696,17 +697,17 @@ class TestFunction:
a, b = dscalars("a", "b")
c = a + b
func = aesara.function([In(a, name="first"), In(b, value=1, name="second")], c)
x = func(first=1)
funct = function([In(a, name="first"), In(b, value=1, name="second")], c)
x = funct(first=1)
try:
func(second=2)
funct(second=2)
except TypeError:
assert func(first=1) == x
assert funct(first=1) == x
def test_check_for_aliased_inputs(self):
b = np.random.rand(5, 4)
s1 = aesara.shared(b)
s2 = aesara.shared(b)
s1 = shared(b)
s2 = shared(b)
x1 = vector()
# Assert cases we should not check for aliased inputs
......@@ -718,7 +719,7 @@ class TestFunction:
]:
if "inputs" not in d:
d["inputs"] = []
f = aesara.function(**d)
f = function(**d)
assert not f._check_for_aliased_inputs, d
# Assert cases we should check for aliased inputs
......@@ -741,7 +742,7 @@ class TestFunction:
]:
if "inputs" not in d:
d["inputs"] = []
f = aesara.function(**d)
f = function(**d)
assert f._check_for_aliased_inputs, d
......@@ -825,19 +826,15 @@ class TestPicklefunction:
raise
assert f.trust_input is g.trust_input
f(np.asarray(2.0))
with pytest.raises(
(ValueError, AttributeError, aesara.compile.debugmode.InvalidValueError)
):
with pytest.raises((ValueError, AttributeError, InvalidValueError)):
f(2.0)
g(np.asarray(2.0))
with pytest.raises(
(ValueError, AttributeError, aesara.compile.debugmode.InvalidValueError)
):
with pytest.raises((ValueError, AttributeError, InvalidValueError)):
g(2.0)
def test_output_keys(self):
x = vector()
f = aesara.function([x], {"vec": x ** 2})
f = function([x], {"vec": x ** 2})
o = f([2, 3, 4])
assert isinstance(o, dict)
assert np.allclose(o["vec"], [4, 9, 16])
......@@ -1065,9 +1062,9 @@ class TestPicklefunction:
b = np.random.rand(5, 4)
x = matrix()
y = aesara.shared(b)
y = shared(b)
f = aesara.function([x], dot(x, y))
f = function([x], dot(x, y))
from io import BytesIO
......@@ -1170,12 +1167,12 @@ def test_sync_update():
sizes = [100, 500, 1000, 2000, 5000, 10000, 20000, 40000]
size = sizes[0]
w = aesara.gpuarray.gpuarray_shared_constructor(
w = gpuarray_shared_constructor(
np.random.rand(size, size).astype("float32"),
"w",
target=tests.gpuarray.config.test_ctx_name,
)
x = aesara.gpuarray.gpuarray_shared_constructor(
x = gpuarray_shared_constructor(
np.random.rand(size, size).astype("float32"),
"x",
target=tests.gpuarray.config.test_ctx_name,
......@@ -1183,12 +1180,9 @@ def test_sync_update():
updates = [(w, w + np.asarray(0.001, "float32") * dot(x, x))]
f = aesara.function([], updates=updates, mode=tests.gpuarray.config.mode_with_gpu)
f = function([], updates=updates, mode=tests.gpuarray.config.mode_with_gpu)
assert len(f.maker.fgraph.apply_nodes) == 1
assert any(
isinstance(n.op, aesara.gpuarray.blas.GpuGemm)
for n in f.maker.fgraph.apply_nodes
)
assert any(isinstance(n.op, GpuGemm) for n in f.maker.fgraph.apply_nodes)
# Make sure libgpuarray have compile all kernels
f()
f.sync_shared()
......@@ -1246,20 +1240,20 @@ def test_FunctionMaker_cache_optimizations():
with config.change_flags(cache_optimizations=True):
a = fmatrix("a")
b = fmatrix("b")
c = aesara.shared(np.ones((10, 10), dtype=floatX))
d = aesara.shared(np.ones((10, 10), dtype=floatX))
c = shared(np.ones((10, 10), dtype=floatX))
d = shared(np.ones((10, 10), dtype=floatX))
e = aet_sum(aet_sum(aet_sum(a ** 2 + b) + c) + d)
f1 = aesara.function([a, b], e, mode=mode)
f1 = function([a, b], e, mode=mode)
# FIXME: We can do much better about testing this.
assert os.path.exists(graph_db_file)
m = fmatrix("x1")
n = fmatrix("x2")
p = aesara.shared(np.ones((10, 10), dtype=floatX))
q = aesara.shared(np.ones((10, 10), dtype=floatX))
p = shared(np.ones((10, 10), dtype=floatX))
q = shared(np.ones((10, 10), dtype=floatX))
j = aet_sum(aet_sum(aet_sum(m ** 2 + n) + p) + q)
f2 = aesara.function([m, n], j, mode=mode)
f2 = function([m, n], j, mode=mode)
in1 = np.ones((10, 10), dtype=floatX)
in2 = np.ones((10, 10), dtype=floatX)
......
......@@ -3,12 +3,11 @@ from functools import partial
import numpy as np
import pytest
import aesara
from aesara import shared
from aesara.compile import shared
from aesara.compile.builders import OpFromGraph
from aesara.compile.function import function
from aesara.configdefaults import config
from aesara.gradient import DisconnectedType, Rop, grad
from aesara.gradient import DisconnectedType, Rop, disconnected_type, grad
from aesara.graph.null_type import NullType
from aesara.tensor.math import dot, exp
from aesara.tensor.math import round as aet_round
......@@ -300,7 +299,7 @@ class TestOpFromGraph(unittest_tools.InferShapeTester):
return y + aet_round(y)
def f1_back(inputs, output_gradients):
return [output_gradients[0], aesara.gradient.disconnected_type()]
return [output_gradients[0], disconnected_type()]
op = cls_ofg(
inputs=[x, y],
......@@ -312,7 +311,7 @@ class TestOpFromGraph(unittest_tools.InferShapeTester):
c = op(x, y)
g1 = aesara.grad(c.sum(), x)
g1 = grad(c.sum(), x)
out = g1.eval(
{x: np.ones((5,), dtype=np.float32), y: np.ones((5,), dtype=np.float32)}
......
......@@ -3,7 +3,6 @@ import sys
import numpy as np
import pytest
import aesara
import aesara.tensor as aet
from aesara.compile.debugmode import (
BadDestroyMap,
......@@ -13,6 +12,8 @@ from aesara.compile.debugmode import (
InvalidValueError,
StochasticOrder,
)
from aesara.compile.function import function
from aesara.compile.mode import predefined_modes
from aesara.configdefaults import config
from aesara.graph.basic import Apply, Variable
from aesara.graph.features import BadOptimization
......@@ -26,7 +27,7 @@ from tests import unittest_tools as utt
def test_debugmode_basic():
x = dvector()
f = aesara.function([x], ((2.0 * x) + 7) / 2.0, mode=DebugMode())
f = function([x], ((2.0 * x) + 7) / 2.0, mode=DebugMode())
f([1, 2])
......@@ -214,12 +215,12 @@ def test_badthunkoutput():
a = dvector()
b = dvector()
f_good = aesara.function(
f_good = function(
[a, b],
off_by_half(a, b),
mode=DebugMode(check_c_code=config.cxx),
)
f_inconsistent = aesara.function(
f_inconsistent = function(
[a, b],
inconsistent(a, b),
mode=DebugMode(check_c_code=config.cxx),
......@@ -248,7 +249,7 @@ def test_badoptimization():
a = dvector()
b = dvector()
f = aesara.function([a, b], a + b, mode=DebugMode(optimizer=opt))
f = function([a, b], a + b, mode=DebugMode(optimizer=opt))
with pytest.raises(BadOptimization) as einfo:
f(
......@@ -289,7 +290,7 @@ def test_badoptimization_opt_err():
a = dvector()
b = dvector()
f = aesara.function([a, b], a + b, mode=DebugMode(optimizer=opt))
f = function([a, b], a + b, mode=DebugMode(optimizer=opt))
with pytest.raises(ValueError, match=r"insert_bigger_b_add"):
f(
[1.0, 2.0, 3.0],
......@@ -299,7 +300,7 @@ def test_badoptimization_opt_err():
# Test that opt that do an illegal change still get the error from graph.
with pytest.raises(TypeError) as einfo:
with config.change_flags(on_opt_error="raise"):
f2 = aesara.function(
f2 = function(
[a, b],
a + b,
mode=DebugMode(optimizer=opt2, stability_patience=1),
......@@ -340,7 +341,7 @@ def test_stochasticoptimization():
b = dvector()
with pytest.raises(StochasticOrder):
aesara.function(
function(
[a, b],
add(a, b),
mode=DebugMode(
......@@ -356,7 +357,7 @@ def test_stochasticoptimization():
)
def test_just_c_code():
x = dvector()
f = aesara.function([x], wb2(x), mode=DebugMode(check_py_code=False))
f = function([x], wb2(x), mode=DebugMode(check_py_code=False))
assert np.all(f([1, 2]) == [2, 4])
......@@ -374,7 +375,7 @@ def test_baddestroymap():
x = dvector()
y = dvector()
f = aesara.function([x, y], BadAdd()(x, y), mode="DEBUG_MODE")
f = function([x, y], BadAdd()(x, y), mode="DEBUG_MODE")
with pytest.raises(BadDestroyMap):
f([1, 2], [3, 4])
......@@ -385,7 +386,7 @@ def test_baddestroymap():
)
def test_baddestroymap_c():
x = dvector()
f = aesara.function([x], wb2i(x), mode=DebugMode(check_py_code=False))
f = function([x], wb2i(x), mode=DebugMode(check_py_code=False))
with pytest.raises(BadDestroyMap):
assert np.all(f([1, 2]) == [2, 4])
......@@ -414,14 +415,14 @@ class TestViewMap:
def test_badviewmap_ref(self):
x = dvector()
y = dvector()
f = aesara.function([x, y], self.BadAddRef()(x, y), mode="DEBUG_MODE")
f = function([x, y], self.BadAddRef()(x, y), mode="DEBUG_MODE")
with pytest.raises(BadViewMap):
f([1, 2], [3, 4])
def test_badviewmap_slice(self):
x = dvector()
y = dvector()
f = aesara.function([x, y], self.BadAddSlice()(x, y), mode="DEBUG_MODE")
f = function([x, y], self.BadAddSlice()(x, y), mode="DEBUG_MODE")
with pytest.raises(BadViewMap):
f([1, 2], [3, 4])
......@@ -430,7 +431,7 @@ class TestViewMap:
goodop.view_map = {0: [1]}
x = dvector()
y = dvector()
f = aesara.function([x, y], goodop(x, y), mode="DEBUG_MODE")
f = function([x, y], goodop(x, y), mode="DEBUG_MODE")
# Shouldn't raise an error
f([1, 5, 1], [3, 4, 2, 1, 4])
......@@ -439,7 +440,7 @@ class TestViewMap:
)
def test_badviewmap_c(self):
x = dvector()
f = aesara.function([x], wb1i(x), mode=DebugMode(check_py_code=False))
f = function([x], wb1i(x), mode=DebugMode(check_py_code=False))
with pytest.raises(BadViewMap):
f([1, 2])
......@@ -462,7 +463,7 @@ class TestViewMap:
x = dvector("x")
y = dvector("y")
f = aesara.function([x, y], CustomOp()(x, y), mode="DEBUG_MODE")
f = function([x, y], CustomOp()(x, y), mode="DEBUG_MODE")
r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])
......@@ -487,7 +488,7 @@ class TestViewMap:
x = dvector()
y = dvector()
f = aesara.function([x, y], CustomOp()(x, y), mode="DEBUG_MODE")
f = function([x, y], CustomOp()(x, y), mode="DEBUG_MODE")
r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])
......@@ -513,7 +514,7 @@ class TestViewMap:
x = dvector("x")
y = dvector("y")
f = aesara.function([x, y], CustomOp()(x, y)[0] * 2, mode="DEBUG_MODE")
f = function([x, y], CustomOp()(x, y)[0] * 2, mode="DEBUG_MODE")
r0 = f([1, 2, 3, 4], [5, 6, 7, 8])
......@@ -542,7 +543,7 @@ class TestViewMap:
y = dvector()
bad_xy0, bad_xy1 = custom_op(x, y)
out = bad_xy0 * 2 + bad_xy1 * 2
f = aesara.function([x, y], out, mode="DEBUG_MODE")
f = function([x, y], out, mode="DEBUG_MODE")
with pytest.raises(BadViewMap):
f([1, 2, 3, 4], [5, 6, 7, 8])
......@@ -558,16 +559,16 @@ class TestViewMap:
class TestCheckIsfinite:
def setup_method(self):
self.old_ts = TensorType.filter_checks_isfinite
self.old_dm = aesara.compile.mode.predefined_modes["DEBUG_MODE"].check_isfinite
self.old_dm = predefined_modes["DEBUG_MODE"].check_isfinite
def teardown_method(self):
TensorType.filter_checks_isfinite = self.old_ts
aesara.compile.mode.predefined_modes["DEBUG_MODE"].check_isfinite = self.old_dm
predefined_modes["DEBUG_MODE"].check_isfinite = self.old_dm
def test_check_isfinite(self):
x = vector()
f = aesara.function([x], (x + 2) * 5, mode="DEBUG_MODE")
g = aesara.function([x], log(x), mode="DEBUG_MODE")
f = function([x], (x + 2) * 5, mode="DEBUG_MODE")
g = function([x], log(x), mode="DEBUG_MODE")
# this should work
f(np.log([3, 4, 5]).astype(config.floatX))
......@@ -590,13 +591,13 @@ class TestCheckIsfinite:
# this should disable the exception
TensorType.filter_checks_isfinite = False
aesara.compile.mode.predefined_modes["DEBUG_MODE"].check_isfinite = False
predefined_modes["DEBUG_MODE"].check_isfinite = False
# insert several Inf
f(np.asarray(np.asarray([1.0, 1.0, 1.0]) / 0, dtype=config.floatX))
def test_check_isfinite_disabled(self):
x = dvector()
f = aesara.function([x], (x + 2) * 5, mode=DebugMode(check_isfinite=False))
f = function([x], (x + 2) * 5, mode=DebugMode(check_isfinite=False))
# nan should go through
f(np.log([3, -4, 5]))
......@@ -750,7 +751,7 @@ class TestPreallocatedOutput:
# Should work
mode = DebugMode(check_preallocated_output=["c_contiguous"])
f = aesara.function([a, b], out, mode=mode)
f = function([a, b], out, mode=mode)
f(a_val, b_val)
# print 'out_val =', out_val
# print out_val.strides
......@@ -759,7 +760,7 @@ class TestPreallocatedOutput:
# used incorrectly.
mode = DebugMode(check_preallocated_output=["f_contiguous"])
f = aesara.function([a, b], out, mode=mode)
f = function([a, b], out, mode=mode)
if config.cxx:
with pytest.raises(BadThunkOutput):
......@@ -781,7 +782,7 @@ class TestPreallocatedOutput:
# Should work
mode = DebugMode(check_preallocated_output=["c_contiguous"])
f = aesara.function([a, b], out, mode=mode)
f = function([a, b], out, mode=mode)
f(a_val, b_val)
# print 'out_val =', out_val
# print out_val.strides
......@@ -790,7 +791,7 @@ class TestPreallocatedOutput:
# used incorrectly.
mode = DebugMode(check_preallocated_output=["f_contiguous"])
f = aesara.function([a, b], out, mode=mode)
f = function([a, b], out, mode=mode)
if config.cxx:
with pytest.raises(BadThunkOutput):
......@@ -802,7 +803,7 @@ class TestPreallocatedOutput:
def test_output_broadcast_tensor(self):
    """Compile and run an op with multiple (row/col) outputs under DebugMode.

    Removes the stale duplicate ``aesara.function`` call left by the diff;
    only the refactored direct ``function`` call is kept.
    """
    v = fvector("v")
    # VecAsRowAndCol returns the vector reshaped as a row and as a column.
    c, r = VecAsRowAndCol()(v)
    f = function([v], [c, r])
    v_val = self.rng.randn(5).astype("float32")
    # Only checks that evaluation does not raise; no value assertion here.
    f(v_val)
import pytest
import aesara
from aesara.compile.function import function
from aesara.compile.mode import AddFeatureOptimizer, Mode
from aesara.configdefaults import config
from aesara.graph.features import NoOutputFromInplace
from aesara.tensor.math import dot, tanh
from aesara.tensor.type import matrix
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_no_output_from_implace():
x = matrix()
......@@ -18,7 +19,7 @@ def test_no_output_from_implace():
# Ensure that the elemwise op that produces the output is inplace when
# using a mode that does not include the optimization
fct_no_opt = aesara.function([x, y], b, mode="FAST_RUN")
fct_no_opt = function([x, y], b, mode="FAST_RUN")
op = fct_no_opt.maker.fgraph.outputs[0].owner.op
assert op.destroy_map and 0 in op.destroy_map
......@@ -27,7 +28,7 @@ def test_no_output_from_implace():
opt = AddFeatureOptimizer(NoOutputFromInplace())
mode_opt = Mode(linker="cvm", optimizer="fast_run").register((opt, 49.9))
fct_opt = aesara.function([x, y], b, mode=mode_opt)
fct_opt = function([x, y], b, mode=mode_opt)
op = fct_opt.maker.fgraph.outputs[0].owner.op
assert not op.destroy_map or 0 not in op.destroy_map
......
......@@ -4,8 +4,9 @@ Test compilation modes
import copy
import aesara
from aesara.compile.mode import Mode
from aesara.compile.function import function
from aesara.compile.mode import Mode, get_default_mode
from aesara.configdefaults import config
from aesara.tensor.type import matrix, vector
......@@ -18,7 +19,7 @@ class TestBunchOfModes:
predef_modes = ["FAST_COMPILE", "FAST_RUN", "DEBUG_MODE"]
# Linkers to use with regular Mode
if aesara.config.cxx:
if config.cxx:
linkers = ["py", "c|py", "c|py_nogc", "vm", "vm_nogc", "cvm", "cvm_nogc"]
else:
linkers = ["py", "c|py", "c|py_nogc", "vm", "vm_nogc"]
......@@ -27,7 +28,7 @@ class TestBunchOfModes:
for mode in modes:
x = matrix()
y = vector()
f = aesara.function([x, y], x + y, mode=mode)
f = function([x, y], x + y, mode=mode)
# test that it runs something
f([[1, 2], [3, 4]], [5, 6])
linker_classes_involved.append(f.maker.mode.linker.__class__)
......@@ -45,7 +46,7 @@ class TestBunchOfModes:
class TestOldModesProblem:
def test_modes(self):
# Then, build a mode with the same linker, and a modified optimizer
default_mode = aesara.compile.mode.get_default_mode()
default_mode = get_default_mode()
modified_mode = default_mode.including("specialize")
# The following line used to fail, with Python 2.4, in July 2012,
......@@ -53,5 +54,5 @@ class TestOldModesProblem:
copy.deepcopy(modified_mode)
# More straightforward test
linker = aesara.compile.mode.get_default_mode().linker
linker = get_default_mode().linker
assert not hasattr(linker, "fgraph") or linker.fgraph is None
......@@ -3,7 +3,10 @@ from io import StringIO
import numpy as np
import aesara
from aesara.compile import MonitorMode
from aesara.compile.function import function
from aesara.printing import debugprint
from aesara.tensor import log, outer
from aesara.tensor.type import dscalar, vector
......@@ -16,17 +19,17 @@ def test_detect_nan():
for output in fn.outputs:
if np.isnan(output[0]).any():
print("*** NaN detected ***")
aesara.printing.debugprint(node)
debugprint(node)
print("Inputs : %s" % [input[0] for input in fn.inputs])
print("Outputs: %s" % [output[0] for output in fn.outputs])
nan_detected[0] = True
break
x = dscalar("x")
f = aesara.function(
f = function(
[x],
[aesara.tensor.log(x) * x],
mode=aesara.compile.MonitorMode(post_func=detect_nan),
[log(x) * x],
mode=MonitorMode(post_func=detect_nan),
)
try:
old_stdout = sys.stdout
......@@ -46,16 +49,16 @@ def test_optimizer():
for output in fn.outputs:
if np.isnan(output[0]).any():
print("*** NaN detected ***")
aesara.printing.debugprint(node)
debugprint(node)
print("Inputs : %s" % [input[0] for input in fn.inputs])
print("Outputs: %s" % [output[0] for output in fn.outputs])
nan_detected[0] = True
break
x = dscalar("x")
mode = aesara.compile.MonitorMode(post_func=detect_nan)
mode = MonitorMode(post_func=detect_nan)
mode = mode.excluding("fusion")
f = aesara.function([x], [aesara.tensor.log(x) * x], mode=mode)
f = function([x], [log(x) * x], mode=mode)
# Test that the fusion wasn't done
assert len(f.maker.fgraph.apply_nodes) == 2
try:
......@@ -78,19 +81,19 @@ def test_not_inplace():
for output in fn.outputs:
if np.isnan(output[0]).any():
print("*** NaN detected ***")
aesara.printing.debugprint(node)
debugprint(node)
print("Inputs : %s" % [input[0] for input in fn.inputs])
print("Outputs: %s" % [output[0] for output in fn.outputs])
nan_detected[0] = True
break
x = vector("x")
mode = aesara.compile.MonitorMode(post_func=detect_nan)
mode = MonitorMode(post_func=detect_nan)
# mode = mode.excluding('fusion', 'inplace')
mode = mode.excluding("local_elemwise_fusion", "inplace_elemwise_optimizer")
o = aesara.tensor.outer(x, x)
out = aesara.tensor.log(o) * o
f = aesara.function([x], [out], mode=mode)
o = outer(x, x)
out = log(o) * o
f = function([x], [out], mode=mode)
# Test that the fusion wasn't done
assert len(f.maker.fgraph.apply_nodes) == 5
......
......@@ -7,9 +7,11 @@ import logging
import numpy as np
import pytest
import aesara
import aesara.tensor as aet
from aesara.compile import shared
from aesara.compile.function import function
from aesara.compile.nanguardmode import NanGuardMode
from aesara.configdefaults import config
from aesara.tensor.math import dot
from aesara.tensor.type import matrix, tensor3
......@@ -19,16 +21,14 @@ def test_NanGuardMode():
# intentionally. A working implementation should be able to capture all
# the abnormalties.
x = matrix()
w = aesara.shared(np.random.randn(5, 7).astype(aesara.config.floatX))
w = shared(np.random.randn(5, 7).astype(config.floatX))
y = dot(x, w)
fun = aesara.function(
[x], y, mode=NanGuardMode(nan_is_error=True, inf_is_error=True)
)
a = np.random.randn(3, 5).astype(aesara.config.floatX)
infa = np.tile((np.asarray(100.0) ** 1000000).astype(aesara.config.floatX), (3, 5))
nana = np.tile(np.asarray(np.nan).astype(aesara.config.floatX), (3, 5))
biga = np.tile(np.asarray(1e20).astype(aesara.config.floatX), (3, 5))
fun = function([x], y, mode=NanGuardMode(nan_is_error=True, inf_is_error=True))
a = np.random.randn(3, 5).astype(config.floatX)
infa = np.tile((np.asarray(100.0) ** 1000000).astype(config.floatX), (3, 5))
nana = np.tile(np.asarray(np.nan).astype(config.floatX), (3, 5))
biga = np.tile(np.asarray(1e20).astype(config.floatX), (3, 5))
fun(a) # normal values
......@@ -46,18 +46,14 @@ def test_NanGuardMode():
_logger.propagate = True
# slices
a = np.random.randn(3, 4, 5).astype(aesara.config.floatX)
infa = np.tile(
(np.asarray(100.0) ** 1000000).astype(aesara.config.floatX), (3, 4, 5)
)
nana = np.tile(np.asarray(np.nan).astype(aesara.config.floatX), (3, 4, 5))
biga = np.tile(np.asarray(1e20).astype(aesara.config.floatX), (3, 4, 5))
a = np.random.randn(3, 4, 5).astype(config.floatX)
infa = np.tile((np.asarray(100.0) ** 1000000).astype(config.floatX), (3, 4, 5))
nana = np.tile(np.asarray(np.nan).astype(config.floatX), (3, 4, 5))
biga = np.tile(np.asarray(1e20).astype(config.floatX), (3, 4, 5))
x = tensor3()
y = x[:, aet.arange(2), aet.arange(2), None]
fun = aesara.function(
[x], y, mode=NanGuardMode(nan_is_error=True, inf_is_error=True)
)
fun = function([x], y, mode=NanGuardMode(nan_is_error=True, inf_is_error=True))
fun(a) # normal values
try:
_logger.propagate = False
......
......@@ -5,8 +5,10 @@ from io import StringIO
import numpy as np
import aesara
import aesara.tensor as aet
from aesara.compile import ProfileStats
from aesara.compile.function import function
from aesara.configdefaults import config
from aesara.ifelse import ifelse
from aesara.tensor.type import fvector, scalars
......@@ -16,13 +18,13 @@ class TestProfiling:
def test_profiling(self):
config1 = aesara.config.profile
config2 = aesara.config.profile_memory
config3 = aesara.config.profiling__min_peak_memory
config1 = config.profile
config2 = config.profile_memory
config3 = config.profiling__min_peak_memory
try:
aesara.config.profile = True
aesara.config.profile_memory = True
aesara.config.profiling__min_peak_memory = True
config.profile = True
config.profile_memory = True
config.profiling__min_peak_memory = True
x = [fvector("val%i" % i) for i in range(3)]
......@@ -30,14 +32,14 @@ class TestProfiling:
z += [aet.outer(x[i], x[i + 1]).sum(axis=1) for i in range(len(x) - 1)]
z += [x[i] + x[i + 1] for i in range(len(x) - 1)]
p = aesara.ProfileStats(False, gpu_checks=False)
p = ProfileStats(False, gpu_checks=False)
if aesara.config.mode in ["DebugMode", "DEBUG_MODE", "FAST_COMPILE"]:
if config.mode in ["DebugMode", "DEBUG_MODE", "FAST_COMPILE"]:
m = "FAST_RUN"
else:
m = None
f = aesara.function(x, z, profile=p, name="test_profiling", mode=m)
f = function(x, z, profile=p, name="test_profiling", mode=m)
inp = [np.arange(1024, dtype="float32") + 1 for i in range(len(x))]
f(*inp)
......@@ -49,7 +51,7 @@ class TestProfiling:
the_string = buf.getvalue()
lines1 = [l for l in the_string.split("\n") if "Max if linker" in l]
lines2 = [l for l in the_string.split("\n") if "Minimum peak" in l]
if aesara.config.device == "cpu":
if config.device == "cpu":
assert "CPU: 4112KB (4104KB)" in the_string, (lines1, lines2)
assert "CPU: 8204KB (8196KB)" in the_string, (lines1, lines2)
assert "CPU: 8208KB" in the_string, (lines1, lines2)
......@@ -68,33 +70,31 @@ class TestProfiling:
), (lines1, lines2)
finally:
aesara.config.profile = config1
aesara.config.profile_memory = config2
aesara.config.profiling__min_peak_memory = config3
config.profile = config1
config.profile_memory = config2
config.profiling__min_peak_memory = config3
def test_ifelse(self):
config1 = aesara.config.profile
config2 = aesara.config.profile_memory
config1 = config.profile
config2 = config.profile_memory
try:
aesara.config.profile = True
aesara.config.profile_memory = True
config.profile = True
config.profile_memory = True
a, b = scalars("a", "b")
x, y = scalars("x", "y")
z = ifelse(aet.lt(a, b), x * 2, y * 2)
p = aesara.ProfileStats(False, gpu_checks=False)
p = ProfileStats(False, gpu_checks=False)
if aesara.config.mode in ["DebugMode", "DEBUG_MODE", "FAST_COMPILE"]:
if config.mode in ["DebugMode", "DEBUG_MODE", "FAST_COMPILE"]:
m = "FAST_RUN"
else:
m = None
f_ifelse = aesara.function(
[a, b, x, y], z, profile=p, name="test_ifelse", mode=m
)
f_ifelse = function([a, b, x, y], z, profile=p, name="test_ifelse", mode=m)
val1 = 0.0
val2 = 1.0
......@@ -104,5 +104,5 @@ class TestProfiling:
f_ifelse(val1, val2, big_mat1, big_mat2)
finally:
aesara.config.profile = config1
aesara.config.profile_memory = config2
config.profile = config1
config.profile_memory = config2
import numpy as np
import pytest
import aesara
import aesara.tensor
from aesara.compile.sharedvalue import SharedVariable, generic, shared
from aesara.configdefaults import config
from aesara.misc.safe_asarray import _asarray
from aesara.tensor.type import (
TensorType,
......@@ -329,8 +330,8 @@ class TestSharedVariable:
# assert b.type == dvector
# f(b,[8])
b = shared(np.asarray([7.234], dtype=aesara.config.floatX), allow_downcast=True)
assert b.dtype == aesara.config.floatX
b = shared(np.asarray([7.234], dtype=config.floatX), allow_downcast=True)
assert b.dtype == config.floatX
f(b, [8])
assert b.get_value() == 8
......
import numpy as np
import aesara
import aesara.tensor as aet
from aesara import shared
from aesara.compile.builders import OpFromGraph
from aesara.tensor.type import dmatrix, scalars
......@@ -17,13 +18,13 @@ class Mlp:
self.nhiddens = nhiddens
x = dmatrix("x")
wh = aesara.shared(self.rng.normal(0, 1, (nfeatures, nhiddens)), borrow=True)
bh = aesara.shared(np.zeros(nhiddens), borrow=True)
h = aesara.tensor.sigmoid(aet.dot(x, wh) + bh)
wh = shared(self.rng.normal(0, 1, (nfeatures, nhiddens)), borrow=True)
bh = shared(np.zeros(nhiddens), borrow=True)
h = aet.nnet.sigmoid(aet.dot(x, wh) + bh)
wy = aesara.shared(self.rng.normal(0, 1, (nhiddens, noutputs)))
by = aesara.shared(np.zeros(noutputs), borrow=True)
y = aesara.tensor.nnet.softmax(aet.dot(h, wy) + by)
wy = shared(self.rng.normal(0, 1, (nhiddens, noutputs)))
by = shared(np.zeros(noutputs), borrow=True)
y = aet.nnet.softmax(aet.dot(h, wy) + by)
self.inputs = [x]
self.outputs = [y]
......@@ -33,9 +34,9 @@ class OfgNested:
def __init__(self):
x, y, z = scalars("xyz")
e = x * y
op = aesara.compile.builders.OpFromGraph([x, y], [e])
op = OpFromGraph([x, y], [e])
e2 = op(x, y) + z
op2 = aesara.compile.builders.OpFromGraph([x, y, z], [e2])
op2 = OpFromGraph([x, y, z], [e2])
e3 = op2(x, y, z) + z
self.inputs = [x, y, z]
......@@ -45,8 +46,8 @@ class OfgNested:
class Ofg:
def __init__(self):
x, y, z = scalars("xyz")
e = aesara.tensor.sigmoid((x + y + z) ** 2)
op = aesara.compile.builders.OpFromGraph([x, y, z], [e])
e = aet.nnet.sigmoid((x + y + z) ** 2)
op = OpFromGraph([x, y, z], [e])
e2 = op(x, y, z) + op(z, y, x)
self.inputs = [x, y, z]
......@@ -56,8 +57,8 @@ class Ofg:
class OfgSimple:
def __init__(self):
x, y, z = scalars("xyz")
e = aesara.tensor.sigmoid((x + y + z) ** 2)
op = aesara.compile.builders.OpFromGraph([x, y, z], [e])
e = aet.nnet.sigmoid((x + y + z) ** 2)
op = OpFromGraph([x, y, z], [e])
e2 = op(x, y, z)
self.inputs = [x, y, z]
......
......@@ -5,8 +5,10 @@ import tempfile
import numpy as np
import pytest
import aesara as th
import aesara.d3viz as d3v
from aesara import compile
from aesara.compile.function import function
from aesara.configdefaults import config
from aesara.d3viz.formatting import pydot_imported, pydot_imported_msg
from tests.d3viz import models
......@@ -32,30 +34,30 @@ class TestD3Viz:
def test_mlp(self):
    """Compile the MLP model and validate its d3viz rendering via self.check."""
    m = models.Mlp()
    # Use the directly imported `function` (post-refactor form); the stale
    # `th.function` duplicate from the diff is dropped.
    f = function(m.inputs, m.outputs)
    self.check(f)
def test_mlp_profiled(self):
    """Render a profiled MLP graph; skipped under DebugMode (no profiling there)."""
    if config.mode in ("DebugMode", "DEBUG_MODE"):
        pytest.skip("Can't profile in DebugMode")
    m = models.Mlp()
    # Post-refactor spellings: module-level `compile` and direct `function`
    # (the stale `th.`-prefixed duplicates from the diff are removed).
    profile = compile.profiling.ProfileStats(False)
    f = function(m.inputs, m.outputs, profile=profile)
    # Run once so the profile object actually collects timing data.
    x_val = self.rng.normal(0, 1, (1000, m.nfeatures))
    f(x_val)
    self.check(f)
def test_ofg(self):
    """Compile the OpFromGraph model and validate its d3viz rendering."""
    m = models.Ofg()
    # Keep only the refactored direct `function` call (stale duplicate removed).
    f = function(m.inputs, m.outputs)
    self.check(f)
def test_ofg_nested(self):
    """Compile the nested OpFromGraph model and validate its d3viz rendering."""
    m = models.OfgNested()
    # Keep only the refactored direct `function` call (stale duplicate removed).
    f = function(m.inputs, m.outputs)
    self.check(f)
def test_ofg_simple(self):
    """Compile the simple OpFromGraph model and validate its d3viz rendering."""
    m = models.OfgSimple()
    # Keep only the refactored direct `function` call (stale duplicate removed).
    f = function(m.inputs, m.outputs)
    self.check(f)
import numpy as np
import pytest
import aesara as th
from aesara import config, function
from aesara.d3viz.formatting import PyDotFormatter, pydot_imported, pydot_imported_msg
......@@ -23,16 +23,16 @@ class TestPyDotFormatter:
def test_mlp(self):
m = models.Mlp()
f = th.function(m.inputs, m.outputs)
f = function(m.inputs, m.outputs)
pdf = PyDotFormatter()
graph = pdf(f)
expected = 11
if th.config.mode == "FAST_COMPILE":
if config.mode == "FAST_COMPILE":
expected = 12
assert len(graph.get_nodes()) == expected
nc = self.node_counts(graph)
if th.config.mode == "FAST_COMPILE":
if config.mode == "FAST_COMPILE":
assert nc["apply"] == 6
else:
assert nc["apply"] == 5
......@@ -40,14 +40,14 @@ class TestPyDotFormatter:
def test_ofg(self):
m = models.Ofg()
f = th.function(m.inputs, m.outputs)
f = function(m.inputs, m.outputs)
pdf = PyDotFormatter()
graph = pdf(f)
assert len(graph.get_nodes()) == 10
sub_graphs = graph.get_subgraph_list()
assert len(sub_graphs) == 2
ofg1, ofg2 = sub_graphs
if th.config.mode == "FAST_COMPILE":
if config.mode == "FAST_COMPILE":
assert len(ofg1.get_nodes()) == 9
else:
assert len(ofg1.get_nodes()) == 5
......@@ -55,7 +55,7 @@ class TestPyDotFormatter:
def test_ofg_nested(self):
m = models.OfgNested()
f = th.function(m.inputs, m.outputs)
f = function(m.inputs, m.outputs)
pdf = PyDotFormatter()
graph = pdf(f)
assert len(graph.get_nodes()) == 7
......
import numpy as np
import pytest
import aesara
from aesara.compile import shared
from aesara.compile.function import function
from aesara.compile.mode import Mode
from aesara.configdefaults import config
from aesara.graph.basic import Apply, Constant, Variable
from aesara.graph.fg import FunctionGraph
from aesara.graph.op import COp
......@@ -181,7 +183,7 @@ def inputs():
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_clinker_straightforward():
x, y, z = inputs()
......@@ -192,7 +194,7 @@ def test_clinker_straightforward():
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_cthunk_str():
x = double("x")
......@@ -205,7 +207,7 @@ def test_cthunk_str():
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_clinker_literal_inlining():
x, y, z = inputs()
......@@ -221,7 +223,7 @@ def test_clinker_literal_inlining():
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_clinker_literal_cache():
mode = Mode(linker="c")
......@@ -235,15 +237,13 @@ def test_clinker_literal_cache():
[-4.664007e-07, 9.468691e-01, -3.18862e-02],
[-2.562651e-06, -3.188625e-02, 1.05226e00],
],
dtype=aesara.config.floatX,
dtype=config.floatX,
)
orientationi = np.array(
[59.36276866, 1.06116353, 0.93797339], dtype=aesara.config.floatX
)
orientationi = np.array([59.36276866, 1.06116353, 0.93797339], dtype=config.floatX)
for out1 in [A - input1[0] * np.identity(3), input1[0] * np.identity(3)]:
benchmark = aesara.function(
benchmark = function(
inputs=[A, input1], outputs=[out1], on_unused_input="ignore", mode=mode
)
......@@ -251,7 +251,7 @@ def test_clinker_literal_cache():
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_clinker_single_node():
x, y, z = inputs()
......@@ -262,7 +262,7 @@ def test_clinker_single_node():
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_clinker_dups():
# Testing that duplicate inputs are allowed.
......@@ -275,7 +275,7 @@ def test_clinker_dups():
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_clinker_not_used_inputs():
# Testing that unused inputs are allowed.
......@@ -287,7 +287,7 @@ def test_clinker_not_used_inputs():
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_clinker_dups_inner():
# Testing that duplicates are allowed inside the graph
......@@ -304,7 +304,7 @@ def test_opwiseclinker_straightforward():
e = add(mul(add(x, y), div(x, y)), bad_sub(bad_sub(x, y), z))
lnk = OpWiseCLinker().accept(FunctionGraph([x, y, z], [e]))
fn = lnk.make_function()
if aesara.config.cxx:
if config.cxx:
assert fn(2.0, 2.0, 2.0) == 2.0
else:
# The python version of bad_sub always return -10.
......@@ -340,7 +340,7 @@ def test_duallinker_straightforward():
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_duallinker_mismatch():
x, y, z = inputs()
......@@ -382,7 +382,7 @@ add_fail = AddFail()
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_c_fail_error():
x, y, z = inputs()
......@@ -395,19 +395,19 @@ def test_c_fail_error():
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
not config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_shared_input_output():
# Test bug reported on the mailing list by Alberto Orlandi
# https://groups.google.com/d/topic/theano-users/6dLaEqc2R6g/discussion
# The shared variable is both an input and an output of the function.
inc = iscalar("inc")
state = aesara.shared(0)
state = shared(0)
state.name = "state"
linker = CLinker()
mode = Mode(linker=linker)
f = aesara.function([inc], state, updates=[(state, state + inc)], mode=mode)
g = aesara.function([inc], state, updates=[(state, state + inc)])
f = function([inc], state, updates=[(state, state + inc)], mode=mode)
g = function([inc], state, updates=[(state, state + inc)])
# Initial value
f0 = f(0)
......@@ -428,10 +428,10 @@ def test_shared_input_output():
g0 = g(0)
assert f0 == g0 == 5, (f0, g0)
vstate = aesara.shared(np.zeros(3, dtype="int32"))
vstate = shared(np.zeros(3, dtype="int32"))
vstate.name = "vstate"
fv = aesara.function([inc], vstate, updates=[(vstate, vstate + inc)], mode=mode)
gv = aesara.function([inc], vstate, updates=[(vstate, vstate + inc)])
fv = function([inc], vstate, updates=[(vstate, vstate + inc)], mode=mode)
gv = function([inc], vstate, updates=[(vstate, vstate + inc)])
# Initial value
fv0 = fv(0)
......
......@@ -11,8 +11,9 @@ from unittest.mock import patch
import numpy as np
import pytest
import aesara
from aesara.compile.function import function
from aesara.compile.ops import DeepCopyOp
from aesara.configdefaults import config
from aesara.link.c.cmodule import GCC_compiler, default_blas_ldflags
from aesara.link.c.exceptions import CompileError
from aesara.tensor.type import dvectors
......@@ -35,9 +36,7 @@ class MyOp(DeepCopyOp):
rand = np.random.rand()
return ('printf("%(rand)s\\n");' + code) % locals()
# Else, no C code
return super(aesara.compile.ops.DeepCopyOp, self).c_code(
node, name, inames, onames, sub
)
return super(DeepCopyOp, self).c_code(node, name, inames, onames, sub)
def test_compiler_error():
......@@ -56,18 +55,18 @@ def test_inter_process_cache():
# node.inputs[*].owner like the name of the variable.
x, y = dvectors("xy")
f = aesara.function([x, y], [MyOp()(x), MyOp()(y)])
f = function([x, y], [MyOp()(x), MyOp()(y)])
f(np.arange(60), np.arange(60))
if aesara.config.mode == "FAST_COMPILE" or aesara.config.cxx == "":
if config.mode == "FAST_COMPILE" or config.cxx == "":
assert MyOp.nb_called == 0
else:
assert MyOp.nb_called == 1
# What if we compile a new function with new variables?
x, y = dvectors("xy")
f = aesara.function([x, y], [MyOp()(x), MyOp()(y)])
f = function([x, y], [MyOp()(x), MyOp()(y)])
f(np.arange(60), np.arange(60))
if aesara.config.mode == "FAST_COMPILE" or aesara.config.cxx == "":
if config.mode == "FAST_COMPILE" or config.cxx == "":
assert MyOp.nb_called == 0
else:
assert MyOp.nb_called == 1
......
import numpy as np
import aesara
from aesara.breakpoint import PdbBreakpoint
from aesara.compile.function import function
from aesara.gradient import grad
from aesara.tensor.math import dot, gt
from aesara.tensor.type import fmatrix, fscalar
from tests import unittest_tools as utt
......@@ -46,16 +47,14 @@ class TestPdbBreakpoint(utt.InferShapeTester):
input2_value = 10.0
grads = [
aesara.grad(self.monitored_input1.sum(), self.input1),
aesara.grad(self.monitored_input2.sum(), self.input2),
grad(self.monitored_input1.sum(), self.input1),
grad(self.monitored_input2.sum(), self.input2),
]
# Add self.monitored_input1 as an output to the Aesara function to
# prevent Aesara from optimizing the PdbBreakpoint op out of the
# function graph
fct = aesara.function(
[self.input1, self.input2], grads + [self.monitored_input1]
)
fct = function([self.input1, self.input2], grads + [self.monitored_input1])
gradients = fct(input1_value, input2_value)[:-1]
......@@ -71,7 +70,7 @@ class TestPdbBreakpoint(utt.InferShapeTester):
input1_value = np.arange(9).reshape(3, 3).astype("float32")
input2_value = 10.0
fct = aesara.function(
fct = function(
[self.input1, self.input2], [self.monitored_input1, self.monitored_input2]
)
......
......@@ -4,9 +4,10 @@ from io import StringIO
import numpy as np
import aesara
from aesara import shared
from aesara.compile import shared
from aesara.compile.function import function
from aesara.configdefaults import config
from aesara.graph import basic
from aesara.printing import var_descriptor
from tests.record import Record, RecordMode
......@@ -78,16 +79,14 @@ def test_determinism_1():
s = sharedX(0.0, name="s_" + str(i))
updates.append((s, val))
for var in aesara.graph.basic.ancestors(update for _, update in updates):
for var in basic.ancestors(update for _, update in updates):
if var.name is not None and var.name != "b":
if var.name[0] != "s" or len(var.name) != 2:
var.name = None
for key in channels:
updates.append((s, channels[key]))
f = aesara.function(
[], mode=mode, updates=updates, on_unused_input="ignore", name="f"
)
f = function([], mode=mode, updates=updates, on_unused_input="ignore", name="f")
for output in f.maker.fgraph.outputs:
mode.record.handle_line(var_descriptor(output) + "\n")
disturb_mem()
......
import pytest
import aesara
from aesara import function
from aesara.tensor.type import scalar
class TestDictionaryOutput:
def test_output_dictionary(self):
# Tests that aesara.function works when outputs is a dictionary
# Tests that function works when outputs is a dictionary
x = scalar()
f = aesara.function([x], outputs={"a": x, "c": x * 2, "b": x * 3, "1": x * 4})
f = function([x], outputs={"a": x, "c": x * 2, "b": x * 3, "1": x * 4})
outputs = f(10.0)
......@@ -24,7 +24,7 @@ class TestDictionaryOutput:
x = scalar("x")
y = scalar("y")
f = aesara.function([x, y], outputs={"a": x + y, "b": x * y})
f = function([x, y], outputs={"a": x + y, "b": x * y})
assert f(2, 4) == {"a": 6, "b": 8}
assert f(2, y=4) == f(2, 4)
......@@ -39,7 +39,7 @@ class TestDictionaryOutput:
e1 = scalar("1")
e2 = scalar("2")
f = aesara.function(
f = function(
[x, y, z, e1, e2], outputs={"x": x, "y": y, "z": z, "1": e1, "2": e2}
)
......@@ -59,7 +59,7 @@ class TestDictionaryOutput:
a = x + y
b = x * y
f = aesara.function([x, y], outputs={"a": a, "b": b})
f = function([x, y], outputs={"a": a, "b": b})
a = scalar("a")
b = scalar("b")
......@@ -67,7 +67,7 @@ class TestDictionaryOutput:
l = a + b
r = a * b
g = aesara.function([a, b], outputs=[l, r])
g = function([a, b], outputs=[l, r])
result = g(**f(5, 7))
......@@ -75,11 +75,11 @@ class TestDictionaryOutput:
assert result[1] == 420.0
def test_output_list_still_works(self):
# Test that aesara.function works if outputs is a list.
# Test that function works if outputs is a list.
x = scalar("x")
f = aesara.function([x], outputs=[x * 3, x * 2, x * 4, x])
f = function([x], outputs=[x * 3, x * 2, x * 4, x])
result = f(5.0)
......@@ -93,9 +93,7 @@ class TestDictionaryOutput:
x = scalar("x")
f = aesara.function(
[x], outputs={"1": x, "2": 2 * x, "3": 3 * x}, mode="DEBUG_MODE"
)
f = function([x], outputs={"1": x, "2": 2 * x, "3": 3 * x}, mode="DEBUG_MODE")
result = f(3.0)
......@@ -108,7 +106,7 @@ class TestDictionaryOutput:
x = scalar("x")
f = aesara.function([x], outputs=[x, 2 * x, 3 * x], mode="DEBUG_MODE")
f = function([x], outputs=[x, 2 * x, 3 * x], mode="DEBUG_MODE")
result = f(5.0)
......@@ -122,10 +120,10 @@ class TestDictionaryOutput:
x = scalar("x")
with pytest.raises(AssertionError):
aesara.function([x], outputs={1.0: x})
function([x], outputs={1.0: x})
with pytest.raises(AssertionError):
aesara.function([x], outputs={1.0: x, "a": x ** 2})
function([x], outputs={1.0: x, "a": x ** 2})
with pytest.raises(AssertionError):
aesara.function([x], outputs={(1, "b"): x, 1.0: x ** 2})
function([x], outputs={(1, "b"): x, 1.0: x ** 2})
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论