* mode.py now defines default_mode, which is read from the THEANO_DEFAULT_MODE
  environment variable (or defaults to FAST_RUN if the variable is absent).
* Added registration of the DEBUG_MODE string for DebugMode (the old OptCheck).
* All unittests now run with the default mode. To change the mode, run the
  unittests like so:
      THEANO_DEFAULT_MODE=[FAST_COMPILE|FAST_RUN|DEBUG_MODE] nosetests

Note: unittests specific to one mode (i.e. gemm and blas) were kept in
FAST_RUN only.

TODO: once DEBUG_MODE is changed so that it falls back to FAST_RUN after the
first successful execution of the graph, make DEBUG_MODE the default.
Parent: 5438f37b
...@@ -57,6 +57,7 @@ import tensor ...@@ -57,6 +57,7 @@ import tensor
import scalar import scalar
import sparse import sparse
import gradient import gradient
import gof
## import scalar_opt ## import scalar_opt
......
...@@ -14,5 +14,7 @@ from builders import * ...@@ -14,5 +14,7 @@ from builders import *
import module import module
from module import * from module import *
import debugmode
from profilemode import ProfileMode from profilemode import ProfileMode
...@@ -11,13 +11,9 @@ from ..gof import Env, graph, utils, link ...@@ -11,13 +11,9 @@ from ..gof import Env, graph, utils, link
from ..gof.link import WrapLinkerMany, raise_with_op from ..gof.link import WrapLinkerMany, raise_with_op
from ..gof.cutils import run_cthunk from ..gof.cutils import run_cthunk
from ..gof.cc import OpWiseCLinker, CLinker from ..gof.cc import OpWiseCLinker, CLinker
from ..compile.mode import Mode
import numpy import numpy
from ..compile.function_module import (FunctionMaker,
from ..compile.function_module import (convert_function_input,
FunctionMaker,
predefined_modes,
Function, Function,
infer_reuse_pattern, infer_reuse_pattern,
SymbolicInput, SymbolicInput,
...@@ -622,6 +618,9 @@ class DebugModeFunctionMaker(FunctionMaker): #inheritance buys a few helper func ...@@ -622,6 +618,9 @@ class DebugModeFunctionMaker(FunctionMaker): #inheritance buys a few helper func
fn = self.function_builder(_fn, _i, _o, self.indices, self.outputs, defaults, self.unpack_single, self) fn = self.function_builder(_fn, _i, _o, self.indices, self.outputs, defaults, self.unpack_single, self)
return fn return fn
from ..compile.mode import Mode, register_mode
class DebugMode(Mode): class DebugMode(Mode):
"""Evaluation Mode that detects optimization errors. """Evaluation Mode that detects optimization errors.
...@@ -648,6 +647,4 @@ class DebugMode(Mode): ...@@ -648,6 +647,4 @@ class DebugMode(Mode):
linker=DebugModeLinker) linker=DebugModeLinker)
self.stability_patience = stability_patience self.stability_patience = stability_patience
self.check_c_code = check_c_code self.check_c_code = check_c_code
register_mode('DEBUG_MODE',DebugMode(optimizer='fast_run'))
...@@ -15,10 +15,6 @@ from copy import copy ...@@ -15,10 +15,6 @@ from copy import copy
from mode import * from mode import *
from io import * from io import *
# used by function and module as the default compilation mode
mode_default = 'FAST_COMPILE'
def infer_reuse_pattern(env, outputs_to_disown): def infer_reuse_pattern(env, outputs_to_disown):
""" """
Given an env and a list of results, returns the list of all Given an env and a list of results, returns the list of all
...@@ -454,7 +450,7 @@ class FunctionMaker(object): ...@@ -454,7 +450,7 @@ class FunctionMaker(object):
raise TypeError("Unknown output type: %s (%s)", type(output), output) raise TypeError("Unknown output type: %s (%s)", type(output), output)
def __init__(self, inputs, outputs, def __init__(self, inputs, outputs,
mode = mode_default, accept_inplace = False, function_builder = Function): mode = default_mode, accept_inplace = False, function_builder = Function):
""" """
:type inputs: a list of SymbolicInput instances :type inputs: a list of SymbolicInput instances
...@@ -644,7 +640,7 @@ def register_checker(checker): ...@@ -644,7 +640,7 @@ def register_checker(checker):
def function(inputs, outputs, mode=mode_default, accept_inplace = False): def function(inputs, outputs, mode=default_mode, accept_inplace = False):
""" """
Return a function calculating the outputs from the inputs. Return a function calculating the outputs from the inputs.
......
import numpy import numpy
import os
import scipy.sparse as sp import scipy.sparse as sp
from .. import gof from .. import gof
...@@ -144,16 +144,21 @@ class Mode(object): ...@@ -144,16 +144,21 @@ class Mode(object):
# If a string is passed as the mode argument in function or # If a string is passed as the mode argument in function or
# FunctionMaker, the Mode will be taken from this dictionary using the # FunctionMaker, the Mode will be taken from this dictionary using the
# string as the key # string as the key
FAST_COMPILE = Mode('py', 'fast_compile') FAST_COMPILE = Mode('py', 'fast_compile')
FAST_RUN = Mode('c|py', 'fast_run') FAST_RUN = Mode('c|py', 'fast_run')
SANITY_CHECK = [Mode('c|py', None), SANITY_CHECK = [Mode('c|py', None),
Mode('c|py', 'fast_run')] Mode('c|py', 'fast_run')]
predefined_modes = {'FAST_COMPILE': FAST_COMPILE, predefined_modes = {'FAST_COMPILE': FAST_COMPILE,
'FAST_RUN': FAST_RUN, 'FAST_RUN': FAST_RUN,
'SANITY_CHECK': SANITY_CHECK} 'SANITY_CHECK': SANITY_CHECK}
default_mode = 'FAST_COMPILE'
##
# The default mode used by functions and modules is read from the environment
# variable THEANO_DEFAULT_MODE. Unit tests will run using this value. If the env. var.
# is not set, it will default to 'FAST_RUN'
##
default_mode = os.getenv('THEANO_DEFAULT_MODE','FAST_RUN')
def register_mode(name, mode): def register_mode(name, mode):
"""Add a `Mode` which can be referred to by `name` in `function`.""" """Add a `Mode` which can be referred to by `name` in `function`."""
......
...@@ -7,6 +7,7 @@ from functools import partial ...@@ -7,6 +7,7 @@ from functools import partial
from copy import copy from copy import copy
import io import io
import function_module as F import function_module as F
from mode import default_mode
def join(*args): def join(*args):
...@@ -125,7 +126,7 @@ class Component(object): ...@@ -125,7 +126,7 @@ class Component(object):
""" """
raise NotImplementedError raise NotImplementedError
def make_no_init(self, mode=F.mode_default): def make_no_init(self, mode=default_mode):
""" """
Allocates the necessary containers using allocate() and uses Allocates the necessary containers using allocate() and uses
build() with the provided mode to make an instance which will build() with the provided mode to make an instance which will
...@@ -145,7 +146,7 @@ class Component(object): ...@@ -145,7 +146,7 @@ class Component(object):
arguments and the keyword arguments. If 'mode' is in the arguments and the keyword arguments. If 'mode' is in the
keyword arguments it will be passed to build(). keyword arguments it will be passed to build().
""" """
mode = kwargs.pop('mode', F.mode_default) mode = kwargs.pop('mode', default_mode)
rval = self.make_no_init(mode) rval = self.make_no_init(mode)
if hasattr(rval, 'initialize'): if hasattr(rval, 'initialize'):
rval.initialize(*args, **kwargs) rval.initialize(*args, **kwargs)
...@@ -958,7 +959,7 @@ class Module(ComponentDict): ...@@ -958,7 +959,7 @@ class Module(ComponentDict):
""" """
self.make_mi(args,kwargs) self.make_mi(args,kwargs)
mode = kwargs.pop('mode', F.mode_default) mode = kwargs.pop('mode', default_mode)
rval = self.make_no_init(mode) rval = self.make_no_init(mode)
if hasattr(rval, 'initialize'): if hasattr(rval, 'initialize'):
rval.initialize(*args, **kwargs) rval.initialize(*args, **kwargs)
...@@ -1011,10 +1012,3 @@ class KitComponent(Component): ...@@ -1011,10 +1012,3 @@ class KitComponent(Component):
def build(self, mode, memo): def build(self, mode, memo):
return [memo[i.result].value for i in self.kit.sinputs] return [memo[i.result].value for i in self.kit.sinputs]
...@@ -37,6 +37,13 @@ class Print(Op): ...@@ -37,6 +37,13 @@ class Print(Op):
def grad(self,input,output_gradients): def grad(self,input,output_gradients):
return output_gradients return output_gradients
def __eq__(self, other):
return type(self)==type(other) and self.message==other.message and self.attrs==other.attrs
def __hash__(self):
return hash(self.message) ^ hash(self.attrs)
class PrinterState(gof.utils.scratchpad): class PrinterState(gof.utils.scratchpad):
def __init__(self, props = {}, **more_props): def __init__(self, props = {}, **more_props):
......
...@@ -335,22 +335,21 @@ class test_structureddot(unittest.TestCase): ...@@ -335,22 +335,21 @@ class test_structureddot(unittest.TestCase):
return structured_dot(csc, images.T) return structured_dot(csc, images.T)
out = buildgraphCSC(kerns,images) out = buildgraphCSC(kerns,images)
for mode in 'FAST_COMPILE','FAST_RUN': f = theano.function([kerns,images], out)
f = theano.function([kerns,images], out, mode=mode) kernvals = spmat.data[:spmat.size]
kernvals = spmat.data[:spmat.size] imvals = 1.0 * numpy.arange(bsize*spmat.shape[1]).reshape(bsize,spmat.shape[1])
imvals = 1.0 * numpy.arange(bsize*spmat.shape[1]).reshape(bsize,spmat.shape[1]) outvals = f(kernvals,imvals)
outvals = f(kernvals,imvals) print type(spmat.dot(imvals.T))
print type(spmat.dot(imvals.T)) print spmat.dot(imvals.T)
print spmat.dot(imvals.T) print dir(spmat.dot(imvals.T))
print dir(spmat.dot(imvals.T))
# scipy 0.7.0 should already make the output dense
# scipy 0.7.0 should already make the output dense # assert numpy.all(outvals == spmat.dot(imvals.T).todense())
# assert numpy.all(outvals == spmat.dot(imvals.T).todense()) c = spmat.dot(imvals.T)
c = spmat.dot(imvals.T) assert _is_dense(c)
assert _is_dense(c) assert numpy.all(outvals == c)
assert numpy.all(outvals == c)
tensor.verify_grad(None, buildgraphCSC, [kernvals,imvals])
tensor.verify_grad(None, buildgraphCSC, [kernvals,imvals], mode=mode)
spmat = spmat.tocsr() spmat = spmat.tocsr()
def buildgraphCSR(kerns,images): def buildgraphCSR(kerns,images):
...@@ -358,19 +357,18 @@ class test_structureddot(unittest.TestCase): ...@@ -358,19 +357,18 @@ class test_structureddot(unittest.TestCase):
return structured_dot(csr, images.T) return structured_dot(csr, images.T)
out = buildgraphCSR(kerns,images) out = buildgraphCSR(kerns,images)
for mode in 'FAST_COMPILE','FAST_RUN': f = theano.function([kerns,images], out)
f = theano.function([kerns,images], out, mode=mode) kernvals = spmat.data[:spmat.size]
kernvals = spmat.data[:spmat.size] imvals = 1.0 * numpy.arange(bsize*spmat.shape[1]).reshape(bsize,spmat.shape[1])
imvals = 1.0 * numpy.arange(bsize*spmat.shape[1]).reshape(bsize,spmat.shape[1]) outvals = f(kernvals,imvals)
outvals = f(kernvals,imvals)
# scipy 0.7.0 should already make the output dense # scipy 0.7.0 should already make the output dense
# assert numpy.all(outvals == spmat.dot(imvals.T).todense()) # assert numpy.all(outvals == spmat.dot(imvals.T).todense())
c = spmat.dot(imvals.T) c = spmat.dot(imvals.T)
assert _is_dense(c) assert _is_dense(c)
assert numpy.all(outvals == c) assert numpy.all(outvals == c)
tensor.verify_grad(None, buildgraphCSR, [kernvals,imvals], mode=mode) tensor.verify_grad(None, buildgraphCSR, [kernvals,imvals])
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -15,16 +15,16 @@ from theano import gof ...@@ -15,16 +15,16 @@ from theano import gof
from theano.gof.utils import AbstractFunctionError from theano.gof.utils import AbstractFunctionError
from theano.tensor.elemwise import DimShuffle from theano.tensor.elemwise import DimShuffle
from theano.compile.mode import default_mode
from theano import function
default_mode = compile.Mode(optimizer = None,
linker = 'c&py')
def function(inputs, outputs, mode = default_mode): def inplace_func(inputs, outputs, mode=default_mode):
return compile.function(inputs, outputs, mode = mode, accept_inplace = True) return function(inputs, outputs, mode=mode, accept_inplace=True)
def eval_outputs(outputs, mode = default_mode): def eval_outputs(outputs):
results = function([], outputs, mode = mode)() results = inplace_func([], outputs)()
if len(results) == 1: if len(results) == 1:
return results[0] return results[0]
return results return results
...@@ -85,9 +85,7 @@ def make_restet(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_ ...@@ -85,9 +85,7 @@ def make_restet(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_
raise type, exc_value, traceback raise type, exc_value, traceback
try: try:
f = function(inputrs, node.outputs, f = inplace_func(inputrs, node.outputs)
mode = default_mode, ##lambda env, **kwargs: gof.DualLinker(env, checker = _numpy_checker, **kwargs),
)
except: except:
type, exc_value, traceback = sys.exc_info() type, exc_value, traceback = sys.exc_info()
err_msg = "Test %s::%s: Error occurred while trying to make a Function" \ err_msg = "Test %s::%s: Error occurred while trying to make a Function" \
...@@ -144,9 +142,7 @@ def make_restet(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_ ...@@ -144,9 +142,7 @@ def make_restet(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_
raise type, exc_value, traceback raise type, exc_value, traceback
try: try:
f = function(inputrs, node.outputs, f = inplace_func(inputrs, node.outputs)
mode = default_mode, #lambda env, **kwargs: gof.DualLinker(env, checker = _numpy_checker, **kwargs),
)
except: except:
type, exc_value, traceback = sys.exc_info() type, exc_value, traceback = sys.exc_info()
err_msg = "Test %s::%s: Error occurred while trying to make a Function" \ err_msg = "Test %s::%s: Error occurred while trying to make a Function" \
...@@ -599,7 +595,7 @@ class T_Cast(unittest.TestCase): ...@@ -599,7 +595,7 @@ class T_Cast(unittest.TestCase):
[convert_to_int8, convert_to_int16, convert_to_int32, convert_to_int64, [convert_to_int8, convert_to_int16, convert_to_int32, convert_to_int64,
convert_to_float32, convert_to_float64]): convert_to_float32, convert_to_float64]):
y = converter(x) y = converter(x)
f = function([compile.In(x, strict = True)], y, mode = default_mode) f = inplace_func([compile.In(x, strict = True)], y)
a = numpy.arange(10, dtype = type1) a = numpy.arange(10, dtype = type1)
b = f(a) b = f(a)
self.failUnless(numpy.all(b == numpy.arange(10, dtype = type2))) self.failUnless(numpy.all(b == numpy.arange(10, dtype = type2)))
...@@ -959,7 +955,7 @@ class T_Join_and_Split(unittest.TestCase): ...@@ -959,7 +955,7 @@ class T_Join_and_Split(unittest.TestCase):
ax = lscalar() ax = lscalar()
s = join(ax, a, b) s = join(ax, a, b)
f = function([ax], [s]) f = inplace_func([ax], [s])
want = numpy.array([[1, 2, 3], [4, 5, 6] ,[1, 2, 3], [4, 5, 6]]) want = numpy.array([[1, 2, 3], [4, 5, 6] ,[1, 2, 3], [4, 5, 6]])
got = f(0) got = f(0)
...@@ -976,7 +972,7 @@ class T_Join_and_Split(unittest.TestCase): ...@@ -976,7 +972,7 @@ class T_Join_and_Split(unittest.TestCase):
class test_comparison(unittest.TestCase): class test_comparison(unittest.TestCase):
def test_gt(self): def test_gt(self):
x, y = fvector(), fvector() x, y = fvector(), fvector()
fn = function([x,y], x > y) fn = inplace_func([x,y], x > y)
l = numpy.asarray([0.,-1.,1.]) l = numpy.asarray([0.,-1.,1.])
r = numpy.asarray([0.,1.,-1.]) r = numpy.asarray([0.,1.,-1.])
v = fn(l, r) v = fn(l, r)
...@@ -984,7 +980,7 @@ class test_comparison(unittest.TestCase): ...@@ -984,7 +980,7 @@ class test_comparison(unittest.TestCase):
def test_lt(self): def test_lt(self):
x, y = fvector(), fvector() x, y = fvector(), fvector()
fn = function([x,y], x < y) fn = inplace_func([x,y], x < y)
l = numpy.asarray([0.,-1.,1.]) l = numpy.asarray([0.,-1.,1.])
r = numpy.asarray([0.,1.,-1.]) r = numpy.asarray([0.,1.,-1.])
v = fn(l, r) v = fn(l, r)
...@@ -992,7 +988,7 @@ class test_comparison(unittest.TestCase): ...@@ -992,7 +988,7 @@ class test_comparison(unittest.TestCase):
def test_le(self): def test_le(self):
x, y = fvector(), fvector() x, y = fvector(), fvector()
fn = function([x,y], x <= y) fn = inplace_func([x,y], x <= y)
l = numpy.asarray([0.,-1.,1.]) l = numpy.asarray([0.,-1.,1.])
r = numpy.asarray([0.,1.,-1.]) r = numpy.asarray([0.,1.,-1.])
v = fn(l, r) v = fn(l, r)
...@@ -1000,7 +996,7 @@ class test_comparison(unittest.TestCase): ...@@ -1000,7 +996,7 @@ class test_comparison(unittest.TestCase):
def test_ge(self): def test_ge(self):
x, y = fvector(), fvector() x, y = fvector(), fvector()
fn = function([x,y], x >= y) fn = inplace_func([x,y], x >= y)
l = numpy.asarray([0.,-1.,1.]) l = numpy.asarray([0.,-1.,1.])
r = numpy.asarray([0.,1.,-1.]) r = numpy.asarray([0.,1.,-1.])
v = fn(l, r) v = fn(l, r)
...@@ -1008,7 +1004,7 @@ class test_comparison(unittest.TestCase): ...@@ -1008,7 +1004,7 @@ class test_comparison(unittest.TestCase):
def test_eq(self): def test_eq(self):
x, y = fvector(), fvector() x, y = fvector(), fvector()
fn = function([x,y], eq(x,y)) fn = inplace_func([x,y], eq(x,y))
l = numpy.asarray([0.,-1.,1.]) l = numpy.asarray([0.,-1.,1.])
r = numpy.asarray([0.,1.,-1.]) r = numpy.asarray([0.,1.,-1.])
v = fn(l, r) v = fn(l, r)
...@@ -1016,7 +1012,7 @@ class test_comparison(unittest.TestCase): ...@@ -1016,7 +1012,7 @@ class test_comparison(unittest.TestCase):
def test_neq(self): def test_neq(self):
x, y = fvector(), fvector() x, y = fvector(), fvector()
fn = function([x,y], neq(x, y)) fn = inplace_func([x,y], neq(x, y))
l = numpy.asarray([0.,-1.,1.]) l = numpy.asarray([0.,-1.,1.])
r = numpy.asarray([0.,1.,-1.]) r = numpy.asarray([0.,1.,-1.])
v = fn(l, r) v = fn(l, r)
...@@ -1025,7 +1021,7 @@ class test_comparison(unittest.TestCase): ...@@ -1025,7 +1021,7 @@ class test_comparison(unittest.TestCase):
class test_bitwise(unittest.TestCase): class test_bitwise(unittest.TestCase):
def test_or(self): def test_or(self):
x, y = bvector(), bvector() x, y = bvector(), bvector()
fn = function([x,y], x|y) fn = inplace_func([x,y], x|y)
l = numpy.asarray([0,0,1,1], dtype = 'int8') l = numpy.asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8') r = numpy.asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r) v = fn(l, r)
...@@ -1033,10 +1029,10 @@ class test_bitwise(unittest.TestCase): ...@@ -1033,10 +1029,10 @@ class test_bitwise(unittest.TestCase):
def test_xor(self): def test_xor(self):
x, y = bvector(), bvector() x, y = bvector(), bvector()
fn = function([x,y], x^y) fn = inplace_func([x,y], x^y)
ix = x ix = x
ix = inplace.xor_inplace(ix, y) ix = inplace.xor_inplace(ix, y)
gn = function([x,y], ix) gn = inplace_func([x,y], ix)
l = numpy.asarray([0,0,1,1], dtype = 'int8') l = numpy.asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8') r = numpy.asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r) v = fn(l, r)
...@@ -1047,7 +1043,7 @@ class test_bitwise(unittest.TestCase): ...@@ -1047,7 +1043,7 @@ class test_bitwise(unittest.TestCase):
def test_and(self): def test_and(self):
x, y = bvector(), bvector() x, y = bvector(), bvector()
fn = function([x,y], x&y) fn = inplace_func([x,y], x&y)
l = numpy.asarray([0,0,1,1], dtype = 'int8') l = numpy.asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8') r = numpy.asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r) v = fn(l, r)
...@@ -1055,7 +1051,7 @@ class test_bitwise(unittest.TestCase): ...@@ -1055,7 +1051,7 @@ class test_bitwise(unittest.TestCase):
def test_inv(self): def test_inv(self):
x, y = bvector(), bvector() x, y = bvector(), bvector()
fn = function([x,y], ~x) fn = inplace_func([x,y], ~x)
l = numpy.asarray([0,0,1,1], dtype = 'int8') l = numpy.asarray([0,0,1,1], dtype = 'int8')
r = numpy.asarray([0,1,0,1], dtype = 'int8') r = numpy.asarray([0,1,0,1], dtype = 'int8')
v = fn(l, r) v = fn(l, r)
...@@ -1074,7 +1070,9 @@ class T_add(unittest.TestCase): ...@@ -1074,7 +1070,9 @@ class T_add(unittest.TestCase):
("*", lambda x,y: x*y), ("*", lambda x,y: x*y),
("/", lambda x,y: x/y)) ("/", lambda x,y: x/y))
for s, fn in tests: for s, fn in tests:
f = function([a,b], fn(a, b), mode = compile.Mode(optimizer = None, linker = 'c')) f = inplace_func([a,b], fn(a, b))
print 'valid output:', fn(a.data, b.data)
print 'theano output:', f(a.data, b.data)
self.failUnless(numpy.all(fn(a.data, b.data) == f(a.data, b.data))) self.failUnless(numpy.all(fn(a.data, b.data) == f(a.data, b.data)))
def test_grad_scalar_l(self): def test_grad_scalar_l(self):
...@@ -1283,7 +1281,7 @@ class test_matinv(unittest.TestCase): ...@@ -1283,7 +1281,7 @@ class test_matinv(unittest.TestCase):
# compilation to function # compilation to function
# [a,b] are the inputs, [ssdiff,g_b] are the outputs # [a,b] are the inputs, [ssdiff,g_b] are the outputs
fn = function([a,b], [ssdiff,g_b]) fn = inplace_func([a,b], [ssdiff,g_b])
# use the function # use the function
x = numpy.random.rand(dim,dim)+0.1 # Initialized s.t. x is not too tiny x = numpy.random.rand(dim,dim)+0.1 # Initialized s.t. x is not too tiny
...@@ -1340,11 +1338,11 @@ class t_dot(unittest.TestCase): ...@@ -1340,11 +1338,11 @@ class t_dot(unittest.TestCase):
def not_aligned(self, x, y): def not_aligned(self, x, y):
z = dot(x,y) z = dot(x,y)
try: try:
tz = eval_outputs([z], mode = compile.Mode(optimizer = None, linker = 'py')) tz = eval_outputs([z])
except ValueError, e: except ValueError, e:
self.failUnless(e[0].split()[1:4] == ['are', 'not', 'aligned'], e) self.failUnless(
return e[0].split()[1:4] == ['are', 'not', 'aligned'] or # reported by numpy
self.fail() e[0].split()[2:5] == ['do', 'not', 'agree'], e) # reported by blas return self.fail()
def test_align_1_1(self): self.not_aligned(self.rand(5), self.rand(6)) def test_align_1_1(self): self.not_aligned(self.rand(5), self.rand(6))
def test_align_1_2(self): self.not_aligned(self.rand(5), self.rand(6,4)) def test_align_1_2(self): self.not_aligned(self.rand(5), self.rand(6,4))
...@@ -1585,8 +1583,8 @@ class T_op_cache(unittest.TestCase): ...@@ -1585,8 +1583,8 @@ class T_op_cache(unittest.TestCase):
v = matrix() v = matrix()
v.name = 'v' v.name = 'v'
gv = fill(v/v, 1.0)/v - (fill(v/v, 1.0) * v) / (v*v) gv = fill(v/v, 1.0)/v - (fill(v/v, 1.0) * v) / (v*v)
fn_py = function([v], gv, mode = compile.Mode(optimizer = None, linker = 'py')) fn_py = inplace_func([v], gv)
fn_c_or_py = function([v], gv, compile.Mode(optimizer = None, linker = 'c|py')) fn_c_or_py = inplace_func([v], gv)
a = numpy.random.rand(5,2) a = numpy.random.rand(5,2)
self.failUnless(numpy.all(fn_py(a) == fn_c_or_py(a))) self.failUnless(numpy.all(fn_py(a) == fn_c_or_py(a)))
...@@ -1600,7 +1598,7 @@ def test_reshape(): ...@@ -1600,7 +1598,7 @@ def test_reshape():
c = reshape(a, [2,3]) c = reshape(a, [2,3])
#basic #basic
f = function([a], c, mode='FAST_COMPILE') f = inplace_func([a], c)
assert numpy.all(f(numpy.asarray([0,1,2,3,4,5])) == numpy.asarray([[0,1,2], [3,4,5]])) assert numpy.all(f(numpy.asarray([0,1,2,3,4,5])) == numpy.asarray([[0,1,2], [3,4,5]]))
#test that it works without inplace operations #test that it works without inplace operations
...@@ -1608,7 +1606,7 @@ def test_reshape(): ...@@ -1608,7 +1606,7 @@ def test_reshape():
a_val_copy = numpy.asarray([0,1,2,3,4,5]) a_val_copy = numpy.asarray([0,1,2,3,4,5])
b_val = numpy.asarray([[0,1,2],[3,4,5]]) b_val = numpy.asarray([[0,1,2],[3,4,5]])
f_sub = function([a,b], c-b, mode='FAST_COMPILE') f_sub = inplace_func([a,b], c-b)
assert numpy.all(f_sub(a_val, b_val) == 0.0) assert numpy.all(f_sub(a_val, b_val) == 0.0)
assert numpy.all(a_val == a_val_copy) assert numpy.all(a_val == a_val_copy)
...@@ -1617,7 +1615,7 @@ def test_reshape(): ...@@ -1617,7 +1615,7 @@ def test_reshape():
a_val_copy = numpy.asarray([0,1,2,3,4,5], dtype='float64') a_val_copy = numpy.asarray([0,1,2,3,4,5], dtype='float64')
b_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64') b_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64')
f_sub = function([a,b], c-b, mode=compile.Mode(optimizer='fast_run', linker='c|py')) f_sub = inplace_func([a,b], c-b)
assert numpy.all(f_sub(a_val, b_val) == 0.0) assert numpy.all(f_sub(a_val, b_val) == 0.0)
assert numpy.all(a_val == a_val_copy) assert numpy.all(a_val == a_val_copy)
...@@ -1631,11 +1629,11 @@ def test_flatten_outdimNone(): ...@@ -1631,11 +1629,11 @@ def test_flatten_outdimNone():
a = dmatrix() a = dmatrix()
c = flatten(a) c = flatten(a)
f = function([a], c, mode='FAST_COMPILE') f = inplace_func([a], c)
a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64') a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64')
c_val = numpy.asarray([0,1,2,3,4,5], dtype='float64') c_val = numpy.asarray([0,1,2,3,4,5], dtype='float64')
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
f = function([a], c, mode='FAST_RUN') f = inplace_func([a], c)
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
tensor.verify_grad(None, Flatten(), [a_val]) tensor.verify_grad(None, Flatten(), [a_val])
...@@ -1643,11 +1641,11 @@ def test_flatten_outdimNone(): ...@@ -1643,11 +1641,11 @@ def test_flatten_outdimNone():
def test_flatten_scalar(): def test_flatten_scalar():
a = dscalar() a = dscalar()
c = flatten(a) c = flatten(a)
f = function([a], c, mode='FAST_COMPILE') f = inplace_func([a], c)
a_val = numpy.asarray(3.0, dtype='float64') a_val = numpy.asarray(3.0, dtype='float64')
c_val = numpy.asarray([3.0], dtype='float64') c_val = numpy.asarray([3.0], dtype='float64')
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
f = function([a], c, mode='FAST_RUN') f = inplace_func([a], c)
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
#tensor.verify_grad(None, Flatten(), [a_val]) #TODO: fix verify_grd to work on scalars #tensor.verify_grad(None, Flatten(), [a_val]) #TODO: fix verify_grd to work on scalars
...@@ -1655,11 +1653,11 @@ def test_flatten_scalar(): ...@@ -1655,11 +1653,11 @@ def test_flatten_scalar():
def test_flatten_outdim1(): def test_flatten_outdim1():
a = dmatrix() a = dmatrix()
c = flatten(a, 1) c = flatten(a, 1)
f = function([a], c, mode='FAST_COMPILE') f = inplace_func([a], c)
a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64') a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64')
c_val = numpy.asarray([0,1,2,3,4,5], dtype='float64') c_val = numpy.asarray([0,1,2,3,4,5], dtype='float64')
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
f = function([a], c, mode='FAST_RUN') f = inplace_func([a], c)
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
tensor.verify_grad(None, Flatten(1), [a_val]) tensor.verify_grad(None, Flatten(1), [a_val])
...@@ -1667,10 +1665,10 @@ def test_flatten_outdim1(): ...@@ -1667,10 +1665,10 @@ def test_flatten_outdim1():
def test_flatten_outdim2(): def test_flatten_outdim2():
a = dmatrix() a = dmatrix()
c = flatten(a, 2) c = flatten(a, 2)
f = function([a], c, mode='FAST_COMPILE') f = inplace_func([a], c)
a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64') a_val = numpy.asarray([[0,1,2],[3,4,5]], dtype='float64')
assert numpy.all(f(a_val)==a_val) assert numpy.all(f(a_val)==a_val)
f = function([a], c, mode='FAST_RUN') f = inplace_func([a], c)
assert numpy.all(f(a_val)==a_val) assert numpy.all(f(a_val)==a_val)
tensor.verify_grad(None, Flatten(2), [a_val]) tensor.verify_grad(None, Flatten(2), [a_val])
...@@ -1678,11 +1676,11 @@ def test_flatten_outdim2(): ...@@ -1678,11 +1676,11 @@ def test_flatten_outdim2():
def test_flatten_outdim2_of_3(): def test_flatten_outdim2_of_3():
a = Tensor('float64', (False, False, False))() a = Tensor('float64', (False, False, False))()
c = flatten(a, 2) c = flatten(a, 2)
f = function([a], c, mode='FAST_COMPILE') f = inplace_func([a], c)
a_val = numpy.asarray([[[0,1],[2,3]], [[4,5],[6,7]]], dtype='float64') a_val = numpy.asarray([[[0,1],[2,3]], [[4,5],[6,7]]], dtype='float64')
c_val = numpy.asarray([[0,1,2,3], [4,5,6,7]], dtype='float64') c_val = numpy.asarray([[0,1,2,3], [4,5,6,7]], dtype='float64')
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
f = function([a], c, mode='FAST_RUN') f = inplace_func([a], c)
assert numpy.all(f(a_val)==c_val) assert numpy.all(f(a_val)==c_val)
tensor.verify_grad(None, Flatten(2), [a_val]) tensor.verify_grad(None, Flatten(2), [a_val])
...@@ -1710,76 +1708,74 @@ class test_tensordot(unittest.TestCase): ...@@ -1710,76 +1708,74 @@ class test_tensordot(unittest.TestCase):
def test0(self): def test0(self):
for mod in 'FAST_COMPILE', 'FAST_RUN', default_mode: # test vector-vector
avec = dvector()
# test vector-vector bvec = dvector()
avec = dvector() axes = ((0,),(0,))
bvec = dvector() c = tensordot(axes)(avec, bvec)
axes = ((0,),(0,)) f1 = inplace_func([avec,bvec],c)
c = tensordot(axes)(avec, bvec) aval = numpy.random.rand(5);
f1 = function([avec,bvec],c, mode=mod) bval = numpy.random.rand(5);
aval = numpy.random.rand(5); self.failUnless(numpy.tensordot(aval,bval,axes) == \
bval = numpy.random.rand(5); f1(aval,bval))
self.failUnless(numpy.tensordot(aval,bval,axes) == \ tensor.verify_grad(None, TensorDot(axes), [aval,bval])
f1(aval,bval))
tensor.verify_grad(None, TensorDot(axes), [aval,bval]) # test matrix-vector
bmat = dmatrix()
# test matrix-vector axes = ((0,),(1,))
bmat = dmatrix() c = tensordot(axes)(avec, bmat)
axes = ((0,),(1,)) f2 = inplace_func([avec,bmat],c)
c = tensordot(axes)(avec, bmat) aval = numpy.random.rand(5);
f2 = function([avec,bmat],c, mode=mod) bval = numpy.random.rand(8,5);
aval = numpy.random.rand(5); self.failUnless(numpy.all(numpy.tensordot(aval,bval,axes) == \
bval = numpy.random.rand(8,5); f2(aval,bval)))
self.failUnless(numpy.all(numpy.tensordot(aval,bval,axes) == \ tensor.verify_grad(None, TensorDot(axes), [aval,bval])
f2(aval,bval)))
tensor.verify_grad(None, TensorDot(axes), [aval,bval]) # test matrix-matrix
amat = dmatrix()
# test matrix-matrix axes = ((1,),(0,))
amat = dmatrix() c = tensordot(axes)(amat, bmat)
axes = ((1,),(0,)) f3 = inplace_func([amat,bmat],c)
c = tensordot(axes)(amat, bmat) aval = numpy.random.rand(4,7);
f3 = function([amat,bmat],c, mode=mod) bval = numpy.random.rand(7,9);
aval = numpy.random.rand(4,7); self.failUnless(numpy.all(numpy.tensordot(aval,bval,axes) == \
bval = numpy.random.rand(7,9); f3(aval,bval)))
self.failUnless(numpy.all(numpy.tensordot(aval,bval,axes) == \ tensor.verify_grad(None, TensorDot(axes), [aval,bval])
f3(aval,bval)))
tensor.verify_grad(None, TensorDot(axes), [aval,bval]) # test ndarray-matrix, sum over one dim of matrix
atens = Tensor('float64', broadcastable=(False,)*4)()
# test ndarray-matrix, sum over one dim of matrix axes = ((2,),(1,))
atens = Tensor('float64', broadcastable=(False,)*4)() c = tensordot(axes)(atens, bmat)
axes = ((2,),(1,)) f4 = inplace_func([atens,bmat],c)
c = tensordot(axes)(atens, bmat) aval = numpy.random.rand(1,2,3,4);
f4 = function([atens,bmat],c, mode=mod) bval = numpy.random.rand(2,3);
aval = numpy.random.rand(1,2,3,4); self.failUnless(numpy.all(numpy.tensordot(aval,bval,axes) == \
bval = numpy.random.rand(2,3); f4(aval,bval)))
self.failUnless(numpy.all(numpy.tensordot(aval,bval,axes) == \ tensor.verify_grad(None, TensorDot(axes), [aval,bval])
f4(aval,bval)))
tensor.verify_grad(None, TensorDot(axes), [aval,bval]) # test ndarray-ndarray
atens = Tensor('float64', broadcastable=(False,)*4)()
# test ndarray-ndarray btens = Tensor('float64', broadcastable=(False,)*3)()
atens = Tensor('float64', broadcastable=(False,)*4)() axes = ((1,3),(0,2))
btens = Tensor('float64', broadcastable=(False,)*3)() c = tensordot(axes)(atens, btens)
axes = ((1,3),(0,2)) f5 = inplace_func([atens,btens],c)
c = tensordot(axes)(atens, btens) aval = numpy.random.rand(4,3,5,2);
f5 = function([atens,btens],c, mode=mod) bval = numpy.random.rand(3,4,2);
aval = numpy.random.rand(4,3,5,2); self.failUnless(numpy.all(numpy.tensordot(aval,bval,axes) == \
bval = numpy.random.rand(3,4,2); f5(aval,bval)))
self.failUnless(numpy.all(numpy.tensordot(aval,bval,axes) == \ tensor.verify_grad(None, TensorDot(axes), [aval,bval])
f5(aval,bval)))
tensor.verify_grad(None, TensorDot(axes), [aval,bval]) axes = (axes[1],axes[0])
c = tensordot(axes)(btens, atens)
axes = (axes[1],axes[0]) f6 = inplace_func([btens,atens],c)
c = tensordot(axes)(btens, atens) self.failUnless(numpy.all(numpy.tensordot(bval,aval,axes) == \
f6 = function([btens,atens],c, mode=mod) f6(bval,aval)))
self.failUnless(numpy.all(numpy.tensordot(bval,aval,axes) == \ tensor.verify_grad(None, TensorDot(axes), [bval,aval])
f6(bval,aval)))
tensor.verify_grad(None, TensorDot(axes), [bval,aval])
def test_smallest_stack(): def test_smallest_stack():
sx, sy = dscalar(), dscalar() sx, sy = dscalar(), dscalar()
rval = function([sx,sy], stack(sx,sy))(-4.0, -2.0) rval = inplace_func([sx,sy], stack(sx,sy))(-4.0, -2.0)
assert type(rval) == numpy.ndarray assert type(rval) == numpy.ndarray
assert [-4, -2] == list(rval) assert [-4, -2] == list(rval)
...@@ -1788,14 +1784,14 @@ def test_smallest(): ...@@ -1788,14 +1784,14 @@ def test_smallest():
x = dvector() x = dvector()
y = dvector() y = dvector()
z = dvector() z = dvector()
f1 = function([x], smallest(x)) f1 = inplace_func([x], smallest(x))
assert numpy.all([1,2,3] == f1([1,2,3])) assert numpy.all([1,2,3] == f1([1,2,3]))
f3 = function([x,y,z], smallest(x,y,z)) f3 = inplace_func([x,y,z], smallest(x,y,z))
assert numpy.all([1,2,3] == f3([1,3,9], [7,7,7], [8,2,3])) assert numpy.all([1,2,3] == f3([1,3,9], [7,7,7], [8,2,3]))
sx, sy = dscalar(), dscalar() sx, sy = dscalar(), dscalar()
assert -4 == function([sx,sy], smallest(sx,sy))(-4.0, -2.0) assert -4 == inplace_func([sx,sy], smallest(sx,sy))(-4.0, -2.0)
......
...@@ -11,7 +11,7 @@ _as_scalar = GemmLocalOptimizer._as_scalar ...@@ -11,7 +11,7 @@ _as_scalar = GemmLocalOptimizer._as_scalar
_is_real_matrix = GemmLocalOptimizer._is_real_matrix _is_real_matrix = GemmLocalOptimizer._is_real_matrix
from theano import In, Out from theano import In, Out
from .test_basic import (_approx_eq, as_tensor, function, from .test_basic import (_approx_eq, as_tensor, inplace_func,
compile, value, constant, inplace, eval_outputs) compile, value, constant, inplace, eval_outputs)
class t_gemm(TestCase): class t_gemm(TestCase):
...@@ -36,7 +36,7 @@ class t_gemm(TestCase): ...@@ -36,7 +36,7 @@ class t_gemm(TestCase):
z_orig = z.copy() z_orig = z.copy()
tz,ta,tx,ty,tb = [as_tensor(p).type() for p in z,a,x,y,b] tz,ta,tx,ty,tb = [as_tensor(p).type() for p in z,a,x,y,b]
f = function([tz,ta,tx,ty,tb], gemm(tz,ta,tx,ty,tb), mode=compile.Mode(optimizer = None, linker = l)) f = inplace_func([tz,ta,tx,ty,tb], gemm(tz,ta,tx,ty,tb), mode=compile.Mode(optimizer = None, linker = l))
new_z = f(z,a,x,y,b) new_z = f(z,a,x,y,b)
z_after = self._gemm(z_orig, a, x, y, b) z_after = self._gemm(z_orig, a, x, y, b)
...@@ -158,7 +158,7 @@ class t_gemm(TestCase): ...@@ -158,7 +158,7 @@ class t_gemm(TestCase):
tz,ta,tx,ty,tb = [value(p) for p in z,a,x,y,b] tz,ta,tx,ty,tb = [value(p) for p in z,a,x,y,b]
f = function([tz,ta,tx,ty,tb], gemm(tz,ta,tx,ty,tb), mode = compile.Mode(optimizer = None, linker=l)) f = inplace_func([tz,ta,tx,ty,tb], gemm(tz,ta,tx,ty,tb), mode = compile.Mode(optimizer = None, linker=l))
f(z, a, x, y, b) f(z, a, x, y, b)
self.failUnless(_approx_eq(z_after, z), (z_orig, z_after, z, z_after - z)) self.failUnless(_approx_eq(z_after, z), (z_orig, z_after, z, z_after - z))
...@@ -256,11 +256,11 @@ class Warning(Exception): ...@@ -256,11 +256,11 @@ class Warning(Exception):
def just_gemm(i, o, ishapes = [(4,3), (3,5), (4,5), (), ()]): def just_gemm(i, o, ishapes = [(4,3), (3,5), (4,5), (), ()]):
try: try:
f = function([In(ii, mutable=True) for ii in i],o, mode='FAST_RUN') f = inplace_func([In(ii, mutable=True) for ii in i],o, mode='FAST_RUN')
for node in f.maker.env.nodes: for node in f.maker.env.nodes:
if node.op == T.dot: raise Warning('dot not changed to gemm in graph') if node.op == T.dot: raise Warning('dot not changed to gemm in graph')
if node.op == _dot22: raise Warning('_dot22 not changed to gemm in graph') if node.op == _dot22: raise Warning('_dot22 not changed to gemm in graph')
g = function(i, o, mode=compile.Mode(linker='py', optimizer=None)) g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None))
for node in g.maker.env.nodes: for node in g.maker.env.nodes:
if node.op == gemm: raise Exception('gemm in original graph') if node.op == gemm: raise Exception('gemm in original graph')
...@@ -320,11 +320,11 @@ def test_gemm_opt_double_gemm(): ...@@ -320,11 +320,11 @@ def test_gemm_opt_double_gemm():
i = [X,Y,Z,a,b, R, S, c] i = [X,Y,Z,a,b, R, S, c]
o = [a * T.dot(X,Y) + gemm(Z, b, S.T, R.T, 1.0)] o = [a * T.dot(X,Y) + gemm(Z, b, S.T, R.T, 1.0)]
try: try:
f = function([In(ii, mutable=True) for ii in i],o, mode='FAST_RUN') f = inplace_func([In(ii, mutable=True) for ii in i],o, mode='FAST_RUN')
for node in f.maker.env.nodes: for node in f.maker.env.nodes:
if node.op == T.dot: raise Failure('dot in graph') if node.op == T.dot: raise Failure('dot in graph')
if node.op == _dot22: raise Failure('_dot22 in graph') if node.op == _dot22: raise Failure('_dot22 in graph')
g = function(i, o, mode=compile.Mode(linker='py', optimizer=None)) g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None))
#for node in g.maker.env.nodes: #for node in g.maker.env.nodes:
# if node.op == gemm: raise Failure('gemm in graph') # if node.op == gemm: raise Failure('gemm in graph')
...@@ -379,11 +379,11 @@ def test_gemm_opt_vector_stuff(): ...@@ -379,11 +379,11 @@ def test_gemm_opt_vector_stuff():
X,Y,Z,a,b = T.dmatrix(), T.dmatrix(), T.dmatrix(), T.dscalar(), T.dscalar() X,Y,Z,a,b = T.dmatrix(), T.dmatrix(), T.dmatrix(), T.dscalar(), T.dscalar()
u,v = T.dvector(), T.dvector() u,v = T.dvector(), T.dvector()
f = function([a, u, v], a + T.dot(u,v), mode='FAST_RUN') f = inplace_func([a, u, v], a + T.dot(u,v), mode='FAST_RUN')
if gemm in [n.op for n in f.maker.env.nodes]: if gemm in [n.op for n in f.maker.env.nodes]:
raise Failure('gemm in graph') raise Failure('gemm in graph')
f = function([a, u, X,Y], a * u + T.dot(X,Y), mode='FAST_RUN') f = inplace_func([a, u, X,Y], a * u + T.dot(X,Y), mode='FAST_RUN')
if (gemm in [n.op for n in f.maker.env.nodes]): if (gemm in [n.op for n in f.maker.env.nodes]):
raise Failure('gemm in graph') raise Failure('gemm in graph')
...@@ -392,7 +392,7 @@ def test_inplace0(): ...@@ -392,7 +392,7 @@ def test_inplace0():
X,Y,Z,a,b = T.dmatrix(), T.dmatrix(), T.dmatrix(), T.dscalar(), T.dscalar() X,Y,Z,a,b = T.dmatrix(), T.dmatrix(), T.dmatrix(), T.dscalar(), T.dscalar()
R, S, c = T.dmatrix(), T.dmatrix(), T.dscalar() R, S, c = T.dmatrix(), T.dmatrix(), T.dscalar()
f = function([X,Y,Z,a,b, R, S, c], f = inplace_func([X,Y,Z,a,b, R, S, c],
[Z * (Z *c + a * T.dot(X,Y) + b * T.dot(R,S).T)], mode='FAST_RUN') [Z * (Z *c + a * T.dot(X,Y) + b * T.dot(R,S).T)], mode='FAST_RUN')
if (gemm in [n.op for n in f.maker.env.nodes]): if (gemm in [n.op for n in f.maker.env.nodes]):
raise Failure('gemm in graph') raise Failure('gemm in graph')
...@@ -400,7 +400,7 @@ def test_inplace0(): ...@@ -400,7 +400,7 @@ def test_inplace0():
def test_inplace1(): def test_inplace1():
X,Y,Z,a,b = XYZab() X,Y,Z,a,b = XYZab()
# with > 2 terms in the overall addition # with > 2 terms in the overall addition
f = function([X,Y,Z,a,b], f = inplace_func([X,Y,Z,a,b],
[Z + Z + T.dot(X,Y)], mode='FAST_RUN') [Z + Z + T.dot(X,Y)], mode='FAST_RUN')
if (gemm in [n.op for n in f.maker.env.nodes]): if (gemm in [n.op for n in f.maker.env.nodes]):
raise Failure('gemm in graph') raise Failure('gemm in graph')
......
...@@ -73,7 +73,7 @@ def test_merge_opt_runtime(): ...@@ -73,7 +73,7 @@ def test_merge_opt_runtime():
else: else:
r = x r = x
t = time.time() t = time.time()
f = theano.function([x], r,mode='FAST_COMPILE') f = theano.function([x], r)
dt = time.time() - t dt = time.time() - t
assert dt < 5.0 #it should never take longer than 5 seconds to compile this graph assert dt < 5.0 #it should never take longer than 5 seconds to compile this graph
...@@ -5,6 +5,7 @@ from theano import tensor as T ...@@ -5,6 +5,7 @@ from theano import tensor as T
from theano.tensor import nnet as NN from theano.tensor import nnet as NN
import numpy as N import numpy as N
from theano.compile import module from theano.compile import module
from theano.compile.mode import default_mode
from theano import tensor as T, sparse as S from theano import tensor as T, sparse as S
import numpy as N import numpy as N
import sys import sys
...@@ -482,15 +483,17 @@ def create_realistic(window_size=3,#7, ...@@ -482,15 +483,17 @@ def create_realistic(window_size=3,#7,
model = architecture.make(input_size=input_dimension, input_representation_size=token_representation_size, hidden_representation_size=concatenated_representation_size, output_size=output_vocabsize, lr=lr, seed=seed, noise_level=noise_level, qfilter_relscale=qfilter_relscale, mode=compile_mode) model = architecture.make(input_size=input_dimension, input_representation_size=token_representation_size, hidden_representation_size=concatenated_representation_size, output_size=output_vocabsize, lr=lr, seed=seed, noise_level=noise_level, qfilter_relscale=qfilter_relscale, mode=compile_mode)
return model return model
def test_naacl_model(optimizer='fast_run', iters_per_unsup=10, iters_per_sup=10, def test_naacl_model(iters_per_unsup=10, iters_per_sup=10,
realistic=False): optimizer=None, realistic=False):
print "BUILDING MODEL" print "BUILDING MODEL"
import time import time
t = time.time() t = time.time()
mode = theano.Mode(linker='c|py', optimizer=optimizer) if optimizer else default_mode
if realistic: if realistic:
m = create_realistic(compile_mode = theano.Mode(linker='c|py', optimizer=optimizer)) m = create_realistic(compile_mode=mode)
else: else:
m = create(compile_mode = theano.Mode(linker='c|py', optimizer=optimizer)) m = create(compile_mode=mode)
print 'BUILD took', time.time() - t print 'BUILD took', time.time() - t
prog_str = [] prog_str = []
......
...@@ -136,7 +136,7 @@ class test_greedy_distribute(unittest.TestCase): ...@@ -136,7 +136,7 @@ class test_greedy_distribute(unittest.TestCase):
, eps + y/s , eps + y/s
, s) , s)
f = function([s, eps, x,y], r**2, mode=DebugMode()) f = function([s, eps, x,y], r**2)
r0 = f(4,1.e-6, [1.5,2], [2.3,3.1]) r0 = f(4,1.e-6, [1.5,2], [2.3,3.1])
r1 = f(4,1.e-6, [1.5,2], [2.3,3.1]) r1 = f(4,1.e-6, [1.5,2], [2.3,3.1])
......
...@@ -13,7 +13,7 @@ class T_test_module(unittest.TestCase): ...@@ -13,7 +13,7 @@ class T_test_module(unittest.TestCase):
def test_state_propagation(self): def test_state_propagation(self):
x = tensor.vector() x = tensor.vector()
rk = RandomKit('rk', 1000) rk = RandomKit('rk', 1000)
f = compile.function([x, (rk, [gof.Container(r = gof.generic, storage = [123], name='bla')])], rk.binomial(tensor.shape(x)), mode='FAST_COMPILE') f = compile.function([x, (rk, [gof.Container(r = gof.generic, storage = [123], name='bla')])], rk.binomial(tensor.shape(x)))
f['rk'] = 9873456 f['rk'] = 9873456
rvals = [f([1,2,3,4,6, 7, 8]) for i in xrange(5)] rvals = [f([1,2,3,4,6, 7, 8]) for i in xrange(5)]
...@@ -45,7 +45,7 @@ class T_test_module(unittest.TestCase): ...@@ -45,7 +45,7 @@ class T_test_module(unittest.TestCase):
self.f = compile.Method([self.b.x], self.b.r) self.f = compile.Method([self.b.x], self.b.r)
b = E() b = E()
m = b.make(mode='FAST_COMPILE') m = b.make()
m.seed(1000) m.seed(1000)
#print m.f(N.ones(5)) #print m.f(N.ones(5))
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论