提交 0e3a8128 authored 作者: Michael Osthege's avatar Michael Osthege 提交者: Brandon T. Willard

Move C-related linking to theano.link.c module

Closes #223
上级 0a24d376
......@@ -4,20 +4,22 @@ import logging
import os
import sys
if sys.platform == 'win32':
config_for_theano_cache_script = 'cxx=,device=cpu'
theano_flags = os.environ['THEANO_FLAGS'] if 'THEANO_FLAGS' in os.environ else ''
if sys.platform == "win32":
config_for_theano_cache_script = "cxx=,device=cpu"
theano_flags = os.environ["THEANO_FLAGS"] if "THEANO_FLAGS" in os.environ else ""
if theano_flags:
theano_flags += ','
theano_flags += ","
theano_flags += config_for_theano_cache_script
os.environ['THEANO_FLAGS'] = theano_flags
os.environ["THEANO_FLAGS"] = theano_flags
import theano
from theano import config
import theano.gof.compiledir
from theano.gof.cc import get_module_cache
from theano import config
from theano.link.c.cc import get_module_cache
_logger = logging.getLogger('theano.bin.theano-cache')
_logger = logging.getLogger("theano.bin.theano-cache")
def print_help(exit_status):
......@@ -28,16 +30,23 @@ def print_help(exit_status):
print('Type "theano-cache clear" to erase the cache')
print('Type "theano-cache list" to print the cache content')
print('Type "theano-cache unlock" to unlock the cache directory')
print('Type "theano-cache cleanup" to delete keys in the old '
'format/code version')
print(
'Type "theano-cache cleanup" to delete keys in the old ' "format/code version"
)
print('Type "theano-cache purge" to force deletion of the cache directory')
print('Type "theano-cache basecompiledir" '
'to print the parent of the cache directory')
print('Type "theano-cache basecompiledir list" '
'to print the content of the base compile dir')
print('Type "theano-cache basecompiledir purge" '
'to remove everything in the base compile dir, '
'that is, erase ALL cache directories')
print(
'Type "theano-cache basecompiledir" '
"to print the parent of the cache directory"
)
print(
'Type "theano-cache basecompiledir list" '
"to print the content of the base compile dir"
)
print(
'Type "theano-cache basecompiledir purge" '
"to remove everything in the base compile dir, "
"that is, erase ALL cache directories"
)
sys.exit(exit_status)
......@@ -45,48 +54,52 @@ def main():
if len(sys.argv) == 1:
print(config.compiledir)
elif len(sys.argv) == 2:
if sys.argv[1] == 'help':
if sys.argv[1] == "help":
print_help(exit_status=0)
if sys.argv[1] == 'clear':
if sys.argv[1] == "clear":
# We skip the refresh on module cache creation because the refresh will
# be done when calling clear afterwards.
cache = get_module_cache(init_args=dict(do_refresh=False))
cache.clear(unversioned_min_age=-1, clear_base_files=True,
delete_if_problem=True)
cache.clear(
unversioned_min_age=-1, clear_base_files=True, delete_if_problem=True
)
# Print a warning if some cached modules were not removed, so that the
# user knows he should manually delete them, or call
# theano-cache purge, # to properly clear the cache.
items = [item for item in sorted(os.listdir(cache.dirname))
if item.startswith('tmp')]
items = [
item
for item in sorted(os.listdir(cache.dirname))
if item.startswith("tmp")
]
if items:
_logger.warning(
'There remain elements in the cache dir that you may '
'need to erase manually. The cache dir is:\n %s\n'
"There remain elements in the cache dir that you may "
"need to erase manually. The cache dir is:\n %s\n"
'You can also call "theano-cache purge" to '
'remove everything from that directory.' %
config.compiledir)
"remove everything from that directory." % config.compiledir
)
_logger.debug(f"Remaining elements ({len(items)}): {', '.join(items)}")
elif sys.argv[1] == 'list':
elif sys.argv[1] == "list":
theano.gof.compiledir.print_compiledir_content()
elif sys.argv[1] == 'cleanup':
elif sys.argv[1] == "cleanup":
theano.gof.compiledir.cleanup()
cache = get_module_cache(init_args=dict(do_refresh=False))
cache.clear_old()
elif sys.argv[1] == 'unlock':
elif sys.argv[1] == "unlock":
theano.gof.compilelock.force_unlock()
print('Lock successfully removed!')
elif sys.argv[1] == 'purge':
print("Lock successfully removed!")
elif sys.argv[1] == "purge":
theano.gof.compiledir.compiledir_purge()
elif sys.argv[1] == 'basecompiledir':
elif sys.argv[1] == "basecompiledir":
# Simply print the base_compiledir
print(theano.config.base_compiledir)
else:
print_help(exit_status=1)
elif len(sys.argv) == 3 and sys.argv[1] == 'basecompiledir':
if sys.argv[2] == 'list':
elif len(sys.argv) == 3 and sys.argv[1] == "basecompiledir":
if sys.argv[2] == "list":
theano.gof.compiledir.basecompiledir_ls()
elif sys.argv[2] == 'purge':
elif sys.argv[2] == "purge":
theano.gof.compiledir.basecompiledir_purge()
else:
print_help(exit_status=1)
......@@ -94,5 +107,5 @@ def main():
print_help(exit_status=1)
if __name__ == '__main__':
if __name__ == "__main__":
main()
......@@ -73,7 +73,7 @@ extracted from the Mode. It is then called with the FunctionGraph as
argument to
produce a ``thunk``, which is a function with no arguments that
returns nothing. Along with the thunk, one list of input containers (a
theano.gof.Container is a sort of object that wraps another and does
theano.link.Container is a sort of object that wraps another and does
type casting) and one list of output containers are produced,
corresponding to the input and output Variables as well as the updates
defined for the inputs when applicable. To perform the computations,
......
......@@ -13,7 +13,7 @@ from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.gpuarray.test_basic_ops import rand_gpuarray
from tests.tensor import test_elemwise
from tests.unittest_tools import assert_allclose
from theano import gof, scalar, tensor
from theano import scalar, tensor
from theano.compile import DebugMode, Mode
from theano.gpuarray.dnn import GpuDnnReduction
from theano.gpuarray.elemwise import (
......@@ -25,6 +25,8 @@ from theano.gpuarray.elemwise import (
GpuErfinv,
)
from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor
from theano.link.basic import PerformLinker
from theano.link.c.cc import CLinker
# This is actually a test for GpuElemwise
......@@ -32,7 +34,7 @@ class TestGpuBroadcast(test_elemwise.TestBroadcast):
cop = GpuElemwise
ctype = GpuArrayType
# The order is important
linkers = [gof.PerformLinker, gof.CLinker]
linkers = [PerformLinker, CLinker]
def rand_cval(self, shp):
return rand_gpuarray(*shp, **dict(cls=gpuarray))
......
......@@ -3,11 +3,11 @@ import pytest
import theano
from theano.gof import fg
from theano.gof.cc import CLinker, DualLinker, OpWiseCLinker
from theano.gof.graph import Apply, Constant, Variable
from theano.gof.op import Op
from theano.gof.type import Type
from theano.link import PerformLinker
from theano.link.c.cc import CLinker, DualLinker, OpWiseCLinker
def as_variable(x):
......@@ -413,7 +413,7 @@ def test_shared_input_output():
inc = theano.tensor.iscalar("inc")
state = theano.shared(0)
state.name = "state"
linker = theano.gof.CLinker()
linker = CLinker()
mode = theano.Mode(linker=linker)
f = theano.function([inc], state, updates=[(state, state + inc)], mode=mode)
g = theano.function([inc], state, updates=[(state, state + inc)])
......
......@@ -11,7 +11,7 @@ from unittest.mock import patch
import numpy as np
import theano
from theano.gof.cmodule import GCC_compiler, default_blas_ldflags
from theano.link.c.cmodule import GCC_compiler, default_blas_ldflags
class MyOp(theano.compile.ops.DeepCopyOp):
......@@ -72,8 +72,8 @@ def test_flag_detection():
GCC_compiler.try_flags(["-lblas"])
@patch("theano.gof.cmodule.try_blas_flag", return_value=None)
@patch("theano.gof.cmodule.sys")
@patch("theano.link.c.cmodule.try_blas_flag", return_value=None)
@patch("theano.link.c.cmodule.sys")
def test_default_blas_ldflags(sys_mock, try_blas_flag_mock, caplog):
sys_mock.version = "3.8.0 | packaged by conda-forge | (default, Nov 22 2019, 19:11:38) \n[GCC 7.3.0]"
......
......@@ -124,7 +124,7 @@ def test_ifelse():
for cloop in cloops:
for lazy in lazys:
linker = theano.gof.vm.VM_Linker(use_cloop=cloop, lazy=lazy)
linker = theano.link.c.vm.VM_Linker(use_cloop=cloop, lazy=lazy)
f = function(
[a, b, c],
ifelse(a, notimpl(b), c),
......@@ -154,11 +154,11 @@ def test_nested():
t4 = ifelseifelseif(tt.eq(x1, x2), x1, tt.eq(x1, 5), x2, c2, t3, t3 + 0.5)
t4.name = "t4"
linker = theano.gof.vm.VM_Linker(lazy=False)
linker = theano.link.c.vm.VM_Linker(lazy=False)
f = function([c1, c2, x1, x2], t4, mode=Mode(linker=linker, optimizer="fast_run"))
with pytest.raises(NotImplementedOpException):
f(1, 0, np.array(10, dtype=x1.dtype), 0)
linker = theano.gof.vm.VM_Linker(lazy=True)
linker = theano.link.c.vm.VM_Linker(lazy=True)
f = function([c1, c2, x1, x2], t4, mode=Mode(linker=linker, optimizer="fast_run"))
assert f(1, 0, np.array(10, dtype=x1.dtype), 0) == 20.5
......@@ -8,8 +8,9 @@ import pytest
import theano
from theano import function, tensor
from theano.compile import Mode
from theano.gof import OpWiseCLinker, vm
from theano.ifelse import ifelse
from theano.link.c.cc import OpWiseCLinker
from theano.link.c.vm import VM_Linker
class TestCallbacks:
......@@ -28,7 +29,7 @@ class TestCallbacks:
f = function(
[a, b, c],
(a + b) + c,
mode=Mode(optimizer=None, linker=vm.VM_Linker(callback=self.callback)),
mode=Mode(optimizer=None, linker=VM_Linker(callback=self.callback)),
)
f(1, 2, 3)
......@@ -41,7 +42,7 @@ class TestCallbacks:
f = function(
[a, b, c],
ifelse(a, 2 * b, 2 * c),
mode=Mode(optimizer=None, linker=vm.VM_Linker(callback=self.callback)),
mode=Mode(optimizer=None, linker=VM_Linker(callback=self.callback)),
)
f(1, 2, 3)
......@@ -59,7 +60,7 @@ def test_c_thunks():
[a, b, c],
ifelse(a, a * b, b * c),
mode=Mode(
optimizer=None, linker=vm.VM_Linker(c_thunks=c_thunks, use_cloop=False)
optimizer=None, linker=VM_Linker(c_thunks=c_thunks, use_cloop=False)
),
)
f(1, [2], [3, 2])
......@@ -128,12 +129,10 @@ def test_speed():
print(f"{name} takes {1000 * (t_b - t_a) / (steps_b - steps_a):f} s/Kop")
time_linker("c|py", OpWiseCLinker)
time_linker("vmLinker", vm.VM_Linker)
time_linker("vmLinker_nogc", lambda: vm.VM_Linker(allow_gc=False))
time_linker("vmLinker", VM_Linker)
time_linker("vmLinker_nogc", lambda: VM_Linker(allow_gc=False))
if theano.config.cxx:
time_linker(
"vmLinker_CLOOP", lambda: vm.VM_Linker(allow_gc=False, use_cloop=True)
)
time_linker("vmLinker_CLOOP", lambda: VM_Linker(allow_gc=False, use_cloop=True))
time_numpy()
......@@ -170,10 +169,10 @@ def test_speed_lazy():
print(f"{name} takes {1000 * (t_b - t_a) / (steps_b - steps_a):f} s/Kop")
time_linker("vmLinker", vm.VM_Linker)
time_linker("vmLinker_nogc", lambda: vm.VM_Linker(allow_gc=False))
time_linker("vmLinker", VM_Linker)
time_linker("vmLinker_nogc", lambda: VM_Linker(allow_gc=False))
if theano.config.cxx:
time_linker("vmLinker_C", lambda: vm.VM_Linker(allow_gc=False, use_cloop=True))
time_linker("vmLinker_C", lambda: VM_Linker(allow_gc=False, use_cloop=True))
def test_partial_function():
......@@ -190,7 +189,7 @@ def test_partial_function():
assert f(4, output_subset=[0, 2]) == [f(4)[0], f(4)[2]]
utt.assert_allclose(f(5), np.array([32.0, 16.0, 1.7857142857142858]))
check_partial_function(vm.VM_Linker(allow_partial_eval=True, use_cloop=False))
check_partial_function(VM_Linker(allow_partial_eval=True, use_cloop=False))
if not theano.config.cxx:
pytest.skip("Need cxx for this test")
check_partial_function("cvm")
......@@ -210,7 +209,7 @@ def test_partial_function_with_output_keys():
assert f(5, output_subset=["a"])["a"] == f(5)["a"]
check_partial_function_output_keys(
vm.VM_Linker(allow_partial_eval=True, use_cloop=False)
VM_Linker(allow_partial_eval=True, use_cloop=False)
)
check_partial_function_output_keys("cvm")
......@@ -241,7 +240,7 @@ def test_partial_function_with_updates():
assert g(40, output_subset=[]) == []
assert y.get_value() == 10
check_updates(vm.VM_Linker(allow_partial_eval=True, use_cloop=False))
check_updates(VM_Linker(allow_partial_eval=True, use_cloop=False))
check_updates("cvm")
......@@ -327,9 +326,9 @@ if run_memory_usage_tests:
# print(pre.ru_maxrss, post.ru_maxrss)
print(1)
time_linker("vmLinker_C", lambda: vm.VM_Linker(allow_gc=False, use_cloop=True))
time_linker("vmLinker_C", lambda: VM_Linker(allow_gc=False, use_cloop=True))
print(2)
time_linker("vmLinker", lambda: vm.VM_Linker(allow_gc=False, use_cloop=False))
time_linker("vmLinker", lambda: VM_Linker(allow_gc=False, use_cloop=False))
def test_no_leak_many_call_nonlazy():
# Verify no memory leaks when calling a function a lot of times
......@@ -354,9 +353,9 @@ if run_memory_usage_tests:
f_a(inp)
print(1)
time_linker("vmLinker_C", lambda: vm.VM_Linker(allow_gc=False, use_cloop=True))
time_linker("vmLinker_C", lambda: VM_Linker(allow_gc=False, use_cloop=True))
print(2)
time_linker("vmLinker", lambda: vm.VM_Linker(allow_gc=False, use_cloop=False))
time_linker("vmLinker", lambda: VM_Linker(allow_gc=False, use_cloop=False))
class RunOnce(theano.Op):
......@@ -383,7 +382,7 @@ def test_vm_gc():
x = theano.tensor.vector()
p = RunOnce()(x)
mode = theano.Mode(linker=theano.gof.vm.VM_Linker(lazy=True))
mode = theano.Mode(linker=VM_Linker(lazy=True))
f = theano.function([theano.In(x, mutable=True)], [p + 1, p + 2], mode=mode)
f([1, 2, 3])
......@@ -399,8 +398,8 @@ def test_reallocation():
z = tensor.tanh(3 * x + y) + tensor.cosh(x + 5 * y)
# The functinality is currently implement for non lazy and non c VM only.
for linker in [
vm.VM_Linker(allow_gc=False, lazy=False, use_cloop=False),
vm.VM_Linker(allow_gc=True, lazy=False, use_cloop=False),
VM_Linker(allow_gc=False, lazy=False, use_cloop=False),
VM_Linker(allow_gc=True, lazy=False, use_cloop=False),
]:
m = theano.compile.get_mode(theano.Mode(linker=linker))
m = m.excluding("fusion", "inplace")
......@@ -432,10 +431,10 @@ def test_reallocation():
def test_no_recycling():
x = theano.tensor.vector()
for lnk in [
vm.VM_Linker(use_cloop=True),
vm.VM_Linker(use_cloop=False, lazy=True),
vm.VM_Linker(use_cloop=False, lazy=False, allow_gc=True),
vm.VM_Linker(use_cloop=False, lazy=False, allow_gc=False),
VM_Linker(use_cloop=True),
VM_Linker(use_cloop=False, lazy=True),
VM_Linker(use_cloop=False, lazy=False, allow_gc=True),
VM_Linker(use_cloop=False, lazy=False, allow_gc=False),
]:
mode = theano.Mode(optimizer="fast_compile", linker=lnk)
......
......@@ -247,8 +247,8 @@ class RecordMode(Mode):
line = f"Outputs: {outputs_digest}\n"
handle_line(fgraph, line, i, node, fn)
# linker = theano.gof.OpWiseCLinker()
linker = theano.gof.vm.VM_Linker(use_cloop=bool(theano.config.cxx))
# linker = theano.link.c.cc.OpWiseCLinker()
linker = theano.link.c.vm.VM_Linker(use_cloop=bool(theano.config.cxx))
wrap_linker = theano.gof.WrapLinkerMany([linker], [callback])
wrap_linker = theano.link.WrapLinkerMany([linker], [callback])
super().__init__(wrap_linker, optimizer="fast_run")
......@@ -13,8 +13,8 @@ import pytest
import tests.unittest_tools as utt
import theano
from theano import gof
from theano.gof import FunctionGraph
from theano.link.c.cc import DualLinker
from theano.scalar.basic import (
ComplexError,
Composite,
......@@ -69,7 +69,7 @@ def test_mul_add_true():
x, y, z = floats("xyz")
e = mul(add(x, y), true_div(x, y))
g = FunctionGraph([x, y], [e])
fn = gof.DualLinker().accept(g).make_function()
fn = DualLinker().accept(g).make_function()
assert fn(1.0, 2.0) == 1.5
......@@ -118,7 +118,7 @@ class TestComposite:
c = C.make_node(x, y)
# print c.c_code(['x', 'y'], ['z'], dict(id = 0))
g = FunctionGraph([x, y], [c.out])
fn = gof.DualLinker().accept(g).make_function()
fn = DualLinker().accept(g).make_function()
assert fn(1.0, 2.0) == 1.5
def test_flatten(self):
......@@ -141,7 +141,7 @@ class TestComposite:
assert "70.0" in c.op.c_code(c, "dummy", ["x", "y"], ["z"], dict(id=0))
# print c.c_code(['x', 'y'], ['z'], dict(id = 0))
g = FunctionGraph([x, y], [c.out])
fn = gof.DualLinker().accept(g).make_function()
fn = DualLinker().accept(g).make_function()
assert fn(1.0, 2.0) == 36.0
def test_many_outputs(self):
......@@ -153,7 +153,7 @@ class TestComposite:
c = C.make_node(x, y, z)
# print c.c_code(['x', 'y', 'z'], ['out0', 'out1', 'out2'], dict(id = 0))
g = FunctionGraph([x, y, z], c.outputs)
fn = gof.DualLinker().accept(g).make_function()
fn = DualLinker().accept(g).make_function()
assert fn(1.0, 2.0, 3.0) == [6.0, 7.0, 0.5]
def test_composite_printing(self):
......@@ -169,7 +169,7 @@ class TestComposite:
C = Composite([x, y, z], [e0, e1, e2, e3, e4, e5, e6, e7])
c = C.make_node(x, y, z)
g = FunctionGraph([x, y, z], c.outputs)
gof.DualLinker().accept(g).make_function()
DualLinker().accept(g).make_function()
assert str(g) == (
"FunctionGraph(*1 -> Composite{((i0 + i1) + i2),"
......@@ -203,73 +203,71 @@ class TestComposite:
class TestLogical:
def test_gt(self):
x, y, z = floats("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x > y])).make_function()
fn = DualLinker().accept(FunctionGraph([x, y], [x > y])).make_function()
for a, b in ((3.0, 9), (3, 0.9), (3, 3)):
assert fn(a, b) == (a > b)
def test_lt(self):
x, y, z = floats("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x < y])).make_function()
fn = DualLinker().accept(FunctionGraph([x, y], [x < y])).make_function()
for a, b in ((3.0, 9), (3, 0.9), (3, 3)):
assert fn(a, b) == (a < b)
def test_le(self):
x, y, z = floats("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x <= y])).make_function()
fn = DualLinker().accept(FunctionGraph([x, y], [x <= y])).make_function()
for a, b in ((3.0, 9), (3, 0.9), (3, 3)):
assert fn(a, b) == (a <= b)
def test_ge(self):
x, y, z = floats("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x >= y])).make_function()
fn = DualLinker().accept(FunctionGraph([x, y], [x >= y])).make_function()
for a, b in ((3.0, 9), (3, 0.9), (3, 3)):
assert fn(a, b) == (a >= b)
def test_eq(self):
x, y, z = floats("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [eq(x, y)])).make_function()
fn = DualLinker().accept(FunctionGraph([x, y], [eq(x, y)])).make_function()
for a, b in ((3.0, 9), (3, 0.9), (3, 3)):
assert fn(a, b) == (a == b)
def test_neq(self):
x, y, z = floats("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [neq(x, y)])).make_function()
fn = DualLinker().accept(FunctionGraph([x, y], [neq(x, y)])).make_function()
for a, b in ((3.0, 9), (3, 0.9), (3, 3)):
assert fn(a, b) == (a != b)
def test_or(self):
x, y, z = ints("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x | y])).make_function()
fn = DualLinker().accept(FunctionGraph([x, y], [x | y])).make_function()
for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
assert fn(a, b) == (a | b), (a, b)
def test_xor(self):
x, y, z = ints("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x ^ y])).make_function()
fn = DualLinker().accept(FunctionGraph([x, y], [x ^ y])).make_function()
for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
assert fn(a, b) == (a ^ b), (a, b)
def test_and(self):
x, y, z = ints("xyz")
fn = (
gof.DualLinker().accept(FunctionGraph([x, y], [and_(x, y)])).make_function()
)
fn = DualLinker().accept(FunctionGraph([x, y], [and_(x, y)])).make_function()
for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
assert fn(a, b) == (a & b), (a, b)
x, y, z = ints("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x & y])).make_function()
fn = DualLinker().accept(FunctionGraph([x, y], [x & y])).make_function()
for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
assert fn(a, b) == (a & b), (a, b)
def test_not(self):
x, y, z = ints("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [invert(x)])).make_function()
fn = DualLinker().accept(FunctionGraph([x, y], [invert(x)])).make_function()
for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
assert fn(a, b) == ~a, (a,)
x, y, z = ints("xyz")
fn = gof.DualLinker().accept(FunctionGraph([x, y], [~x])).make_function()
fn = DualLinker().accept(FunctionGraph([x, y], [~x])).make_function()
for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
assert fn(a, b) == ~a, (a,)
......
......@@ -20,7 +20,7 @@ def test_SymPyCCode():
op = SymPyCCode([xs, ys], xs + ys)
e = op(xt, yt)
g = theano.gof.FunctionGraph([xt, yt], [e])
fn = theano.gof.CLinker().accept(g).make_function()
fn = theano.link.c.cc.CLinker().accept(g).make_function()
assert fn(1.0, 2.0) == 3.0
......
......@@ -607,7 +607,7 @@ class TestConv2D(utt.InferShapeTester):
openmp=openmp,
)
mode = theano.Mode(
linker=theano.gof.vm.VM_Linker(
linker=theano.link.c.vm.VM_Linker(
allow_gc=False, use_cloop=True
)
)
......
......@@ -16,7 +16,7 @@ def test_view_op_c_code():
# TODO: It might be good to make sure that the registered C code works
# (even though it's basically copy-paste from other registered `Op`s).
# from theano.compile.ops import view_op
# from theano.gof.cc import CLinker
# from theano.link.c.cc import CLinker
# rng_var = random_state_type()
# rng_view = view_op(rng_var)
# function(
......
......@@ -72,6 +72,7 @@ from theano import compile, config, function, gof, shared
from theano.compile import DeepCopyOp
from theano.compile.mode import get_default_mode
from theano.gof.graph import Variable
from theano.link.c.cc import DualLinker
from theano.scalar import autocast_float, autocast_float_as
from theano.tensor import (
Alloc,
......@@ -5731,7 +5732,7 @@ def test_divmod():
# Confirm that divmod is equivalent to the python version.
x, y = fscalars("xy")
d, r = divmod(x, y)
fn = gof.DualLinker().accept(gof.FunctionGraph([x, y], [d, r])).make_function()
fn = DualLinker().accept(gof.FunctionGraph([x, y], [d, r])).make_function()
for a, b in (
(0, 1),
(1, 1),
......
......@@ -11,6 +11,8 @@ import theano.tensor as tt
from tests import unittest_tools
from theano import config, gof, scalar
from theano.compile.mode import Mode, get_default_mode
from theano.link.basic import PerformLinker
from theano.link.c.cc import CLinker, OpWiseCLinker
from theano.tensor import TensorType, as_tensor_variable
from theano.tensor.elemwise import (
CAReduce,
......@@ -79,12 +81,12 @@ class TestDimShuffle(unittest_tools.InferShapeTester):
DimShuffle(ib, shuffle)
def test_perform(self):
self.with_linker(gof.PerformLinker())
self.with_linker(PerformLinker())
def test_c_or_py(self):
# Shape op don't have C code.
# But This will test DimShuffle c code
self.with_linker(gof.OpWiseCLinker())
self.with_linker(OpWiseCLinker())
def test_infer_shape(self):
......@@ -167,7 +169,7 @@ class TestBroadcast:
openmp_minsize_sqrt = int(math.ceil(math.sqrt(openmp_minsize)))
# The order is important if you change them.
linkers = [gof.PerformLinker, gof.CLinker]
linkers = [PerformLinker, CLinker]
def rand_val(self, shp):
return np.asarray(np.random.rand(*shp), dtype=theano.config.floatX)
......@@ -207,7 +209,7 @@ class TestBroadcast:
# test Elemwise.infer_shape
# the Shape op don't implement c_code!
if isinstance(linker, gof.PerformLinker):
if isinstance(linker, PerformLinker):
x = type(theano.config.floatX, [(entry == 1) for entry in xsh])("x")
y = type(theano.config.floatX, [(entry == 1) for entry in ysh])("y")
e = op(scalar.add)(x, y)
......@@ -242,7 +244,7 @@ class TestBroadcast:
assert (xv == zv).all()
# test Elemwise.infer_shape
# the Shape op don't implement c_code!
if isinstance(linker, gof.PerformLinker):
if isinstance(linker, PerformLinker):
x = type(theano.config.floatX, [(entry == 1) for entry in xsh])("x")
y = type(theano.config.floatX, [(entry == 1) for entry in ysh])("y")
e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
......@@ -260,22 +262,22 @@ class TestBroadcast:
assert xv.shape == zv.shape
def test_perform(self):
self.with_linker(gof.PerformLinker(), self.op, self.type, self.rand_val)
self.with_linker(PerformLinker(), self.op, self.type, self.rand_val)
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_c(self):
self.with_linker(gof.CLinker(), self.cop, self.ctype, self.rand_cval)
self.with_linker(CLinker(), self.cop, self.ctype, self.rand_cval)
def test_perform_inplace(self):
self.with_linker_inplace(gof.PerformLinker(), self.op, self.type, self.rand_val)
self.with_linker_inplace(PerformLinker(), self.op, self.type, self.rand_val)
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_c_inplace(self):
self.with_linker_inplace(gof.CLinker(), self.cop, self.ctype, self.rand_cval)
self.with_linker_inplace(CLinker(), self.cop, self.ctype, self.rand_cval)
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
......
......@@ -99,19 +99,12 @@ from theano.compile.function import function, function_dump
from theano.compile.function.types import FunctionMaker
from theano.gof import (
Apply,
CLinker,
Constant,
Container,
DualLinker,
FunctionGraph,
Generic,
InconsistencyError,
Linker,
LocalLinker,
Op,
OpenMPOp,
OpWiseCLinker,
PerformLinker,
Type,
Variable,
generic,
......@@ -121,6 +114,8 @@ from theano.gof import (
utils,
)
from theano.gradient import Lop, Rop, grad, subgraph_grad
from theano.link import Container, Linker, LocalLinker, PerformLinker
from theano.link.c import CLinker, DualLinker, OpWiseCLinker
from theano.misc.safe_asarray import _asarray
from theano.printing import pp, pprint
from theano.updates import OrderedUpdates
......
......@@ -2566,7 +2566,7 @@ class _Maker(FunctionMaker): # inheritance buys a few helper functions
self.refeed = [
(
i.value is not None
and not isinstance(i.value, gof.Container)
and not isinstance(i.value, link.Container)
and i.update is None
)
for i in self.inputs
......
......@@ -440,7 +440,7 @@ class Function:
if value is not None:
# Always initialize the storage.
if isinstance(value, gof.Container):
if isinstance(value, link.Container):
# There is no point in obtaining the current value
# stored in the container, since the container is
# shared.
......@@ -485,7 +485,7 @@ class Function:
"names of the inputs of your function "
"for duplicates."
)
if isinstance(s, gof.Container):
if isinstance(s, link.Container):
return s.value
else:
raise NotImplementedError
......@@ -503,7 +503,7 @@ class Function:
"names of the inputs of your function "
"for duplicates."
)
if isinstance(s, gof.Container):
if isinstance(s, link.Container):
s.value = value
s.provided += 1
else:
......@@ -812,7 +812,7 @@ class Function:
def restore_defaults():
for i, (required, refeed, value) in enumerate(self.defaults):
if refeed:
if isinstance(value, gof.Container):
if isinstance(value, link.Container):
value = value.storage[0]
self[i] = value
......@@ -1534,7 +1534,7 @@ class FunctionMaker:
# too much execution time during testing as we compile
# much more functions then the number of compile c
# module.
theano.gof.cc.get_module_cache().refresh()
theano.link.c.cc.get_module_cache().refresh()
# Handle the case where inputs and/or outputs is a single
# Variable (not in a list)
unpack_single = False
......@@ -1679,7 +1679,7 @@ class FunctionMaker:
self.refeed = [
(
i.value is not None
and not isinstance(i.value, gof.Container)
and not isinstance(i.value, link.Container)
and i.update is None
)
for i in self.inputs
......@@ -1772,8 +1772,8 @@ class FunctionMaker:
if isinstance(input_storage_i, gof.Variable):
input_storage_i = input_storage_i.container
if isinstance(input_storage_i, gof.Container):
# If the default is a gof.Container, this means we want to
if isinstance(input_storage_i, link.Container):
# If the default is a link.Container, this means we want to
# share the same storage. This is done by appending
# input_storage_i.storage to input_storage_lists.
if indices is not None:
......@@ -1815,7 +1815,7 @@ class FunctionMaker:
# Get a function instance
start_linker = time.time()
start_import_time = theano.gof.cmodule.import_time
start_import_time = theano.link.c.cmodule.import_time
with config.change_flags(traceback__limit=config.traceback__compile_limit):
_fn, _i, _o = self.linker.make_thunk(
......@@ -1830,7 +1830,7 @@ class FunctionMaker:
if self.profile:
self.profile.linker_time += linker_time
_fn.time_thunks = self.profile.flag_time_thunks
import_time = theano.gof.cmodule.import_time - start_import_time
import_time = theano.link.c.cmodule.import_time - start_import_time
self.profile.import_time += import_time
fn = self.function_builder(
......
......@@ -6,7 +6,7 @@ Define `SymbolicInput`, `SymbolicOutput`, `In`, `Out`.
import logging
from theano import gof
from theano import link
_logger = logging.getLogger("theano.compile.io")
......@@ -208,7 +208,7 @@ class In(SymbolicInput):
if implicit is None:
from theano.compile.sharedvalue import SharedVariable
implicit = isinstance(value, gof.Container) or isinstance(
implicit = isinstance(value, link.Container) or isinstance(
value, SharedVariable
)
super().__init__(
......
......@@ -7,9 +7,11 @@ import logging
import warnings
import theano
import theano.gof.vm
from theano import config, gof
from theano.compile.function.types import Supervisor
from theano.link.basic import PerformLinker
from theano.link.c.cc import CLinker, OpWiseCLinker
from theano.link.c.vm import VM_Linker
from theano.link.jax import JAXLinker
......@@ -20,14 +22,14 @@ _logger = logging.getLogger("theano.compile.mode")
# Mode, it will be used as the key to retrieve the real linker in this
# dictionary
predefined_linkers = {
"py": gof.PerformLinker(), # Use allow_gc Theano flag
"c": gof.CLinker(), # Don't support gc. so don't check allow_gc
"c|py": gof.OpWiseCLinker(), # Use allow_gc Theano flag
"c|py_nogc": gof.OpWiseCLinker(allow_gc=False),
"vm": gof.vm.VM_Linker(use_cloop=False), # Use allow_gc Theano flag
"cvm": gof.vm.VM_Linker(use_cloop=True), # Use allow_gc Theano flag
"vm_nogc": gof.vm.VM_Linker(allow_gc=False, use_cloop=False),
"cvm_nogc": gof.vm.VM_Linker(allow_gc=False, use_cloop=True),
"py": PerformLinker(), # Use allow_gc Theano flag
"c": CLinker(), # Don't support gc. so don't check allow_gc
"c|py": OpWiseCLinker(), # Use allow_gc Theano flag
"c|py_nogc": OpWiseCLinker(allow_gc=False),
"vm": VM_Linker(use_cloop=False), # Use allow_gc Theano flag
"cvm": VM_Linker(use_cloop=True), # Use allow_gc Theano flag
"vm_nogc": VM_Linker(allow_gc=False, use_cloop=False),
"cvm_nogc": VM_Linker(allow_gc=False, use_cloop=True),
"jax": JAXLinker(),
}
......@@ -407,7 +409,7 @@ class Mode:
# string as the key
# Use VM_linker to allow lazy evaluation by default.
FAST_COMPILE = Mode(
theano.gof.vm.VM_Linker(use_cloop=False, c_thunks=False), "fast_compile"
theano.link.c.vm.VM_Linker(use_cloop=False, c_thunks=False), "fast_compile"
)
if theano.config.cxx:
FAST_RUN = Mode("cvm", "fast_run")
......
......@@ -40,8 +40,8 @@ class MonitorMode(Mode):
def __init__(self, pre_func=None, post_func=None, optimizer="default", linker=None):
self.pre_func = pre_func
self.post_func = post_func
wrap_linker = theano.gof.WrapLinkerMany(
[theano.gof.OpWiseCLinker()], [self.eval]
wrap_linker = theano.link.WrapLinkerMany(
[theano.link.c.cc.OpWiseCLinker()], [self.eval]
)
if optimizer == "default":
optimizer = theano.config.optimizer
......
......@@ -292,7 +292,7 @@ class NanGuardMode(Mode):
if getattr(var.tag, "nan_guard_mode_check", True):
do_check_on(value, None, var=var)
wrap_linker = theano.gof.vm.VM_Linker(
wrap_linker = theano.link.c.vm.VM_Linker(
callback=nan_check, callback_input=nan_check_input
)
super().__init__(wrap_linker, optimizer=self.provided_optimizer)
"""Graph optimization framework"""
import theano
from theano.gof.cc import CLinker, DualLinker, HideC, OpWiseCLinker
from theano.gof.destroyhandler import DestroyHandler
from theano.gof.fg import FunctionGraph, InconsistencyError, MissingInputError
from theano.gof.graph import Apply, Constant, Variable, view_roots
......@@ -46,17 +45,3 @@ from theano.gof.toolbox import (
)
from theano.gof.type import CEnumType, EnumList, EnumType, Generic, Type, generic
from theano.gof.utils import MethodNotDefined, hashtype, object2
from theano.link import (
Container,
Linker,
LocalLinker,
PerformLinker,
WrapLinker,
WrapLinkerMany,
)
if theano.config.cmodule__preload_cache:
from theano.gof.cc import get_module_cache
get_module_cache()
......@@ -16,10 +16,8 @@ import warnings
import numpy as np
import theano
import theano.gof.cc
from theano import config
from theano.gof import graph
from theano.gof.cmodule import GCC_compiler
from theano.gof.fg import FunctionGraph
from theano.gof.utils import (
MethodNotDefined,
......@@ -842,6 +840,11 @@ class Op(object2, PureOp, CLinkerOp):
def make_c_thunk(self, node, storage_map, compute_map, no_recycling):
"""Like make_thunk, but will only try to make a C thunk."""
# FIXME: Putting the following import on the module level causes an import cycle.
# The conclusion should be that the entire "make_c_thunk" method should be defined
# in theano.link.c and dispatched onto the Op!
import theano.link.c.cc
node_input_storage = [storage_map[r] for r in node.inputs]
node_output_storage = [storage_map[r] for r in node.outputs]
......@@ -851,7 +854,7 @@ class Op(object2, PureOp, CLinkerOp):
for (new_o, old_o) in zip(e.outputs, node.outputs)
if old_o in no_recycling
]
cl = theano.gof.cc.CLinker().accept(e, no_recycling=e_no_recycling)
cl = theano.link.c.cc.CLinker().accept(e, no_recycling=e_no_recycling)
# float16 gets special treatment since running
# unprepared C code will get bad results.
if not getattr(self, "_f16_ok", False):
......@@ -1173,6 +1176,8 @@ class OpenMPOp(Op):
"""
Check if openMP is supported
"""
from theano.link.c.cmodule import GCC_compiler
code = """
#include <omp.h>
int main( int argc, const char* argv[] )
......
......@@ -7,10 +7,11 @@ import numpy as np
import theano
from theano import Apply, Op, Type, Variable, config, tensor
from theano.gof import COp, HideC, ParamsType
from theano.gof import COp, ParamsType
from theano.gof.opt import copy_stack_trace
from theano.gof.utils import MethodNotDefined
from theano.gradient import grad_undefined
from theano.link.c.cc import HideC
from theano.scalar import bool as bool_t
from theano.scalar import int32 as int32_t
from theano.tensor.basic import Alloc, AllocEmpty, Join, Split, alloc_validate_shape
......
......@@ -12,7 +12,6 @@ from theano import Apply, Op, Variable, config, tensor
from theano.compile.ops import shape_i, shape_i_op
from theano.configdefaults import SUPPORTED_DNN_CONV_ALGO_RUNTIME
from theano.gof import COp, EnumList, ParamsType
from theano.gof.cmodule import GCC_compiler
from theano.gof.type import CDataType, Generic
from theano.gpuarray import cudnn_defs, pygpu
from theano.gpuarray.basic_ops import (
......@@ -27,6 +26,7 @@ from theano.gpuarray.basic_ops import (
)
from theano.gpuarray.type import GpuArraySharedVariable, get_context, gpu_context_type
from theano.gradient import DisconnectedType, grad_not_implemented
from theano.link.c.cmodule import GCC_compiler
from theano.scalar import as_scalar
from theano.scalar import bool as bool_t
from theano.scalar import constant, get_scalar_type
......
......@@ -150,6 +150,7 @@ from theano.gpuarray.type import (
move_to_gpu,
)
from theano.ifelse import IfElse
from theano.link.c.cc import CLinker
from theano.misc.ordered_set import OrderedSet
from theano.scalar.basic import Cast, Pow, Scalar, log, neg, true_div
from theano.scalar.basic_scipy import Erfcinv, Erfinv
......@@ -2641,7 +2642,7 @@ def local_gpua_scan_to_gpua(fgraph, op, context_name, inputs, outputs):
# handle graphs with inputs being on the gpu
tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True)
_cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
_cmodule_key = CLinker().cmodule_key_(local_fgraph, [])
info["gpu_hash"] = hash(_cmodule_key)
def typebuild(dtype, broadcastable, context_name=context_name):
......
from theano.link.c.cc import CLinker, DualLinker, OpWiseCLinker
......@@ -11,13 +11,23 @@ from io import StringIO
import numpy as np
from theano import config, link
from theano.gof import cmodule, graph, utils
from theano import config
from theano.gof import graph, utils
from theano.gof.callcache import CallCache
from theano.gof.compilelock import get_lock, release_lock
from theano.link.basic import Container, Linker, LocalLinker, PerformLinker
from theano.link.c.cmodule import (
METH_VARARGS,
DynamicModule,
ExtFunction,
GCC_compiler,
dlimport_workdir,
)
from theano.link.c.cmodule import get_module_cache as _get_module_cache
from theano.link.utils import gc_helper, map_storage, raise_with_op, streamline
_logger = logging.getLogger("theano.gof.cc")
_logger = logging.getLogger("theano.link.c.cc")
run_cthunk = None # Will be imported only when needed.
......@@ -33,7 +43,7 @@ def get_module_cache(init_args=None):
the ModuleCache constructor as keyword arguments.
"""
return cmodule.get_module_cache(config.compiledir, init_args=init_args)
return _get_module_cache(config.compiledir, init_args=init_args)
_persistent_module_cache = None
......@@ -577,7 +587,7 @@ def struct_variable_codeblocks(fgraph, variable, policies, id, symbol_table, sub
return struct_builder, block
class CLinker(link.Linker):
class CLinker(Linker):
"""
Creates C code for an fgraph, compiles it and returns callables
through make_thunk and make_function that make use of the compiled
......@@ -1094,7 +1104,7 @@ class CLinker(link.Linker):
(c_compiler, x_compiler),
)
if c_compiler is None:
return cmodule.GCC_compiler
return GCC_compiler
else:
return c_compiler
......@@ -1209,11 +1219,11 @@ class CLinker(link.Linker):
thunk,
module,
[
link.Container(input, storage)
Container(input, storage)
for input, storage in zip(self.fgraph.inputs, input_storage)
],
[
link.Container(output, storage, readonly=True)
Container(output, storage, readonly=True)
for output, storage in zip(self.fgraph.outputs, output_storage)
],
error_storage,
......@@ -1597,7 +1607,7 @@ class CLinker(link.Linker):
"""
if location is None:
location = cmodule.dlimport_workdir(config.compiledir)
location = dlimport_workdir(config.compiledir)
mod = self.get_dynamic_module()
c_compiler = self.c_compiler()
libs = self.libraries()
......@@ -1634,14 +1644,12 @@ class CLinker(link.Linker):
if not hasattr(self, "_mod"):
self.code_gen()
mod = cmodule.DynamicModule()
mod = DynamicModule()
# The code of instantiate
# the 1 is for error_storage
code = self.instantiate_code(1 + len(self.args))
instantiate = cmodule.ExtFunction(
"instantiate", code, method=cmodule.METH_VARARGS
)
instantiate = ExtFunction("instantiate", code, method=METH_VARARGS)
# ['error_storage'] + argnames,
# local_dict = d,
# global_dict = {})
......@@ -1797,7 +1805,7 @@ class _CThunk:
global run_cthunk
if run_cthunk is None:
# Lazy import to avoid compilation when importing theano.
from theano.gof.cutils import run_cthunk # noqa
from theano.link.c.cutils import run_cthunk # noqa
self.cthunk = cthunk
self.init_tasks = init_tasks
self.tasks = tasks
......@@ -1846,7 +1854,7 @@ class _CThunk:
raise exc_value.with_traceback(exc_trace)
class OpWiseCLinker(link.LocalLinker):
class OpWiseCLinker(LocalLinker):
"""
Uses CLinker on the individual Ops that comprise an fgraph and loops
over them in Python. The variable is slower than a compiled version of
......@@ -1915,11 +1923,11 @@ class OpWiseCLinker(link.LocalLinker):
order = self.schedule(fgraph)
no_recycling = self.no_recycling
input_storage, output_storage, storage_map = link.map_storage(
input_storage, output_storage, storage_map = map_storage(
fgraph, order, input_storage, output_storage, storage_map
)
if self.allow_gc:
computed, last_user = link.gc_helper(order)
computed, last_user = gc_helper(order)
post_thunk_old_storage = []
else:
post_thunk_old_storage = None
......@@ -1960,7 +1968,7 @@ class OpWiseCLinker(link.LocalLinker):
storage_map[r] for r in no_recycling if r not in fgraph.inputs
]
f = link.streamline(
f = streamline(
fgraph,
thunks,
order,
......@@ -1980,11 +1988,11 @@ class OpWiseCLinker(link.LocalLinker):
return (
f,
[
link.Container(input, storage)
Container(input, storage)
for input, storage in zip(fgraph.inputs, input_storage)
],
[
link.Container(output, storage, readonly=True)
Container(output, storage, readonly=True)
for output, storage in zip(fgraph.outputs, output_storage)
],
thunks,
......@@ -2007,7 +2015,7 @@ def _default_checker(x, y):
raise Exception("Output mismatch.", {"performlinker": x[0], "clinker": y[0]})
class DualLinker(link.Linker):
class DualLinker(Linker):
"""
Runs the fgraph in parallel using PerformLinker and CLinker.
......@@ -2071,7 +2079,7 @@ class DualLinker(link.Linker):
no_recycling = self.no_recycling
_f, i1, o1, thunks1, order1 = (
link.PerformLinker(schedule=self.schedule)
PerformLinker(schedule=self.schedule)
.accept(fgraph, no_recycling=no_recycling)
.make_all(**kwargs)
)
......@@ -2101,7 +2109,7 @@ class DualLinker(link.Linker):
for output1, output2 in zip(thunk1.outputs, thunk2.outputs):
self.checker(output1, output2)
except Exception:
link.raise_with_op(fgraph, node1)
raise_with_op(fgraph, node1)
return f, i1, o1
......@@ -2135,3 +2143,7 @@ class HideC:
def c_code_cache_version_apply(self, node):
return self.c_code_cache_version()
if config.cmodule__preload_cache:
get_module_cache()
......@@ -37,7 +37,7 @@ try:
except ImportError:
pass
_logger = logging.getLogger("theano.gof.cmodule")
_logger = logging.getLogger("theano.link.c.cmodule")
METH_VARARGS = "METH_VARARGS"
METH_NOARGS = "METH_NOARGS"
......@@ -1999,7 +1999,7 @@ def try_march_flag(flags):
"""
)
cflags = flags + ["-L" + d for d in theano.gof.cmodule.std_lib_dirs()]
cflags = flags + ["-L" + d for d in theano.link.c.cmodule.std_lib_dirs()]
compilation_result, execution_result = GCC_compiler.try_compile_tmp(
test_code, tmp_prefix="try_march_", flags=cflags, try_run=True
)
......
......@@ -4,8 +4,7 @@ import sys
from theano import config
from theano.gof.compilelock import get_lock, release_lock
from . import cmodule
from theano.link.c import cmodule
# TODO These two lines may be removed in the future, when we are 100% sure
......
......@@ -7,11 +7,11 @@ from importlib import reload
import theano
from theano import config
from theano.gof import cmodule
from theano.gof.compilelock import get_lock, release_lock
from theano.link.c.cmodule import GCC_compiler
_logger = logging.getLogger("theano.gof.lazylinker_c")
_logger = logging.getLogger(__file__)
force_compile = False
version = 0.211 # must match constant returned in function get_version()
......@@ -108,7 +108,9 @@ except ImportError:
raise
_logger.info("Compiling new CVM")
dirname = "lazylinker_ext"
cfile = os.path.join(theano.__path__[0], "gof", "c_code", "lazylinker_c.c")
cfile = os.path.join(
theano.__path__[0], "link", "c", "c_code", "lazylinker_c.c"
)
if not os.path.exists(cfile):
# This can happen in not normal case. We just
# disable the c clinker. If we are here the user
......@@ -132,8 +134,8 @@ except ImportError:
assert e.errno == errno.EEXIST
assert os.path.exists(loc)
args = cmodule.GCC_compiler.compile_args()
cmodule.GCC_compiler.compile_str(dirname, code, location=loc, preargs=args)
args = GCC_compiler.compile_args()
GCC_compiler.compile_str(dirname, code, location=loc, preargs=args)
# Save version into the __init__.py file.
init_py = os.path.join(loc, "__init__.py")
with open(init_py, "w") as f:
......
......@@ -12,7 +12,7 @@ import time
import warnings
from collections import defaultdict
import theano.gof.cmodule
import theano.link.c.cmodule
from theano import config, link
......@@ -694,10 +694,10 @@ try:
# If cxx is explicitly set to an empty string, we want to import neither the lazylinker C code
# nor the lazylinker compiled C code from the cache.
if not theano.config.cxx:
raise theano.gof.cmodule.MissingGXX(
raise theano.link.c.cmodule.MissingGXX(
"lazylinker will not be imported if theano.config.cxx is not set."
)
from . import lazylinker_c
from theano.link.c import lazylinker_c
class CVM(lazylinker_c.CLazyLinker, VM):
def __init__(self, fgraph, *args, **kwargs):
......@@ -708,7 +708,7 @@ try:
except ImportError:
pass
except (OSError, theano.gof.cmodule.MissingGXX) as e:
except (OSError, theano.link.c.cmodule.MissingGXX) as e:
# OSError happens when g++ is not installed. In that case, we
# already changed the default linker to something else then CVM.
# Currently this is the py linker.
......
......@@ -53,7 +53,7 @@ from collections import OrderedDict
import numpy as np
import theano
from theano import compile, config, gof, gradient, link, tensor
from theano import compile, config, gof, gradient, tensor
from theano.compile.builders import infer_shape
from theano.compile.function import function
from theano.compile.io import In, Out
......@@ -63,6 +63,8 @@ from theano.gof import Apply, PureOp
from theano.gof.graph import equal_computations, io_connection_pattern
from theano.gof.toolbox import NoOutputFromInplace
from theano.gradient import DisconnectedType, NullType, grad_undefined
from theano.link.c.cc import CLinker
from theano.link.utils import raise_with_op
from theano.scan.utils import Validator, forced_replace, hash_listsDictsTuples, safe_new
from theano.tensor import TensorType, as_tensor_variable
from theano.tensor.opt import Shape_i
......@@ -203,7 +205,7 @@ class Scan(PureOp):
raise theano.gof.MissingInputError(
f"ScanOp is missing an input: {repr(var)}"
)
self._cmodule_key = gof.CLinker().cmodule_key_variables(
self._cmodule_key = CLinker().cmodule_key_variables(
self.inputs, self.outputs, []
)
self._hash_inner_graph = hash(self._cmodule_key)
......@@ -972,7 +974,7 @@ class Scan(PureOp):
try:
if impl == "py":
raise theano.gof.cmodule.MissingGXX
raise theano.link.c.cmodule.MissingGXX
cython_mintaps = np.asarray(self.mintaps, dtype="int32")
cython_tap_array_len = np.asarray(
[len(x) for x in self.tap_array], dtype="int32"
......@@ -1049,7 +1051,7 @@ class Scan(PureOp):
node,
)
except (ImportError, theano.gof.cmodule.MissingGXX):
except (ImportError, theano.link.c.cmodule.MissingGXX):
p = self.execute
# default arguments are stored in the closure of `rval`
......@@ -1465,7 +1467,7 @@ class Scan(PureOp):
# done by raise_with_op is not implemented in C.
if hasattr(fn, "thunks"):
# For the CVM
link.raise_with_op(
raise_with_op(
self.fn.maker.fgraph,
fn.nodes[fn.position_of_error],
fn.thunks[fn.position_of_error],
......@@ -1475,7 +1477,7 @@ class Scan(PureOp):
# We don't have access from python to all the
# temps values So for now, we just don't print
# the extra shapes/strides info
link.raise_with_op(
raise_with_op(
self.fn.maker.fgraph, fn.nodes[fn.position_of_error]
)
else:
......
......@@ -18,8 +18,8 @@ import numpy as np
import theano
from theano import config
from theano.gof import cmodule
from theano.gof.compilelock import get_lock, release_lock
from theano.link.c import cmodule
_logger = logging.getLogger("theano.scan.scan_perform")
......
......@@ -645,7 +645,7 @@ cgemv_no_inplace = CGemv(inplace=False)
def check_force_gemv_init():
if check_force_gemv_init._force_init_beta is None:
from theano.gof.cmodule import GCC_compiler
from theano.link.c.cmodule import GCC_compiler
"""
Test issue 1569.
......
......@@ -12,7 +12,7 @@ import textwrap
from os.path import dirname
from theano import config
from theano.gof.cmodule import GCC_compiler
from theano.link.c.cmodule import GCC_compiler
_logger = logging.getLogger("theano.tensor.blas")
......
......@@ -1037,7 +1037,7 @@ second dimension
if self.openmp:
# If we are using openmp, we need to get rid of the "goto"
# statement in sub['fail']. For now we recreate it here.
fail = gof.cc.failure_code(sub, use_goto=False)
fail = theano.link.c.cc.failure_code(sub, use_goto=False)
else:
fail = sub["fail"]
task_code = self.scalar_op.c_code(
......
......@@ -1184,7 +1184,7 @@ using namespace std;
def c_no_compile_args(self):
# when the ksph==(1,1) gcc 4.3.0 segfault during the
# compilation with -O3. This don't happen at -O2
if theano.gof.cmodule.gcc_version() in ["4.3.0"] and self.kshp == (1, 1):
if theano.link.c.cmodule.gcc_version() in ["4.3.0"] and self.kshp == (1, 1):
return ["-O3"]
else:
return []
......@@ -1194,7 +1194,7 @@ using namespace std;
if self.use_blas():
ret = blas.ldflags(libs=False, flags=True)
if theano.gof.cmodule.gcc_version() in ["4.3.0"] and self.kshp == (1, 1):
if theano.link.c.cmodule.gcc_version() in ["4.3.0"] and self.kshp == (1, 1):
ret += ["-O2"]
# Add the -fopenmp flags
ret += super().c_compile_args()
......
......@@ -4,8 +4,8 @@ import sys
import theano.tensor as tt
from theano import config, gof
from theano.gof import local_optimizer
from theano.gof.cmodule import GCC_compiler
from theano.gradient import grad_undefined
from theano.link.c.cmodule import GCC_compiler
from theano.tensor.extra_ops import cpu_contiguous
from theano.tensor.opt import register_canonicalize
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论