提交 0f798670 authored 作者: James Bergstra's avatar James Bergstra

merge

......@@ -24,6 +24,11 @@ To learn more, check out:
"""
__docformat__ = "restructuredtext en"
import configparser, configdefaults
config = configparser.TheanoConfigParser()
import gof
from gof import \
......@@ -60,17 +65,8 @@ import scalar
import sparse
import gradient
import gof
import floatX
floatX.set_floatX()
import config
#If THEANO_GPU is not defined: don't automatically import cuda.
#If THEANO_GPU is defined to something other than "": automatically import cuda,
# and initialize it automatically unless THEANO_GPU is -1 or GPU.
#If cuda.use() is called and THEANO_GPU is not defined or defined to "": init device 0.
#If THEANO_GPU is defined to "-1" or "CPU": automatically import cuda, but don't init it.
if config.THEANO_GPU not in [None,""]:
if config.device.startswith('gpu'):
import theano.sandbox.cuda
## import scalar_opt
......
......@@ -11,7 +11,7 @@ from theano.gof import Env, graph, utils, link
from theano.gof.link import WrapLinkerMany, raise_with_op
#from theano.gof.cutils import run_cthunk
from theano.gof.cc import OpWiseCLinker, CLinker
import theano.config as config
from ..configparser import config
from theano.compile.function_module import (FunctionMaker,
Function,
infer_reuse_pattern,
......@@ -1375,27 +1375,27 @@ class DebugMode(Mode):
"""
stability_patience = config.THEANO_DEBUGMODE_PATIENCE
stability_patience = config.DebugMode.patience
"""
When checking for the stability of optimization, recompile the graph this many times.
"""
check_c_code = config.THEANO_DEBUGMODE_CHECK_C
check_c_code = config.DebugMode.check_c
"""
Should we evaluate (and check) the `c_code` implementations?
"""
check_py_code = config.THEANO_DEBUGMODE_CHECK_PY
check_py_code = config.DebugMode.check_py
"""
Should we evaluate (and check) the `perform` implementations?
"""
check_isfinite = config.THEANO_DEBUGMODE_CHECK_FINITE
check_isfinite = config.DebugMode.check_finite
"""
Should we check for (and complain about) NaN/Inf ndarray elements?
"""
require_matching_strides = config.THEANO_DEBUGMODE_CHECK_STRIDES
require_matching_strides = config.DebugMode.check_strides
"""
Should we check for (and complain about) Ops whose python and C outputs are ndarrays with
different strides? (This can catch bugs, but is generally overly strict.) 0 no check, 1 warn, 2 err.
......
......@@ -4,7 +4,7 @@ import os, logging
import numpy
from theano import gof
import theano.config as config
from ..configparser import config
_logger = logging.getLogger('theano.compile.mode')
......@@ -225,7 +225,7 @@ predefined_modes = {'FAST_COMPILE': FAST_COMPILE,
# is not set, it will default to 'FAST_RUN'
# keep default_mode.optimizer==default_optimizer and default_mode.linker==default_linker!
##
default_mode = config.THEANO_DEFAULT_MODE
default_mode = config.mode
def get_mode(string):
if string is None: string = default_mode
......
......@@ -6,7 +6,7 @@ from theano.compile.mode import Mode, register_mode, predefined_modes, predefine
from theano.gof.cc import OpWiseCLinker
from theano.gof.python25 import any
from theano import gof
import theano.config as config
from ..configparser import config
import_time = time.time()
......@@ -83,8 +83,8 @@ class ProfileMode(Mode):
self._optimizer = optimizer
def print_summary(self,
n_apply_to_print=config.config.getint("ProfileMode.n_apply_to_print", None),
n_ops_to_print=config.config.getint("ProfileMode.n_ops_to_print", None)):
n_apply_to_print=config.ProfileMode.n_apply_to_print,
n_ops_to_print=config.ProfileMode.n_ops_to_print):
""" Print 3 summary that show where the time is spend. The first show an Apply-wise summary, the second show an Op-wise summary, the third show an type-Op-wise summary.
The Apply-wise summary print the timing information for the worst offending Apply nodes. This corresponds to individual Op applications within your graph which take the longest to execute (so if you use dot twice, you will see two entries there).
......
import os
import ConfigParser
# Path of a user-level config file (currently unused).
userconf_filename=""
# Hard-coded default values, keyed as 'section.option'.
default_={
    'ProfileMode.n_apply_to_print':15,
    'ProfileMode.n_ops_to_print':20,
    'tensor_opt.local_elemwise_fusion':False,
    'lib.amdlibm':False,
    'op.set_flops':False,#currently used only in ConvOp. The profile mode will print the flops/s for the op.
    'nvcc.fastmath':False,
    'gpuelemwise.sync':True, #when True, wait until the gpu fct finishes and check its error code.
    }
# Default values taken from environment variables.
THEANO_UNITTEST_SEED = os.getenv('THEANO_UNITTEST_SEED', 666)
THEANO_NOCLEANUP = os.getenv('THEANO_NOCLEANUP', 0)
THEANO_COMPILEDIR = os.getenv('THEANO_COMPILEDIR', None)
THEANO_BASE_COMPILEDIR = os.getenv('THEANO_BASE_COMPILEDIR', None)
HOME = os.getenv('HOME')
# 0: compare with default precision, 1: less precision, 2: even less.
THEANO_CMP_SLOPPY = int(os.getenv('THEANO_CMP_SLOPPY', 0))
# Flag for compiling with an optimized blas library; used for gemm operations.
# If THEANO_BLAS_LDFLAGS exists but is empty, numpy.dot() is used instead.
THEANO_BLAS_LDFLAGS = os.getenv('THEANO_BLAS_LDFLAGS','-lblas')
# For gpu.
CUDA_ROOT = os.getenv('CUDA_ROOT')
THEANO_GPU = os.getenv("THEANO_GPU")
THEANO_DEFAULT_MODE = os.getenv('THEANO_DEFAULT_MODE','FAST_RUN')
# Debug mode.
THEANO_DEBUGMODE_PATIENCE = int(os.getenv('THEANO_DEBUGMODE_PATIENCE', 10))
THEANO_DEBUGMODE_CHECK_C = bool(int(os.getenv('THEANO_DEBUGMODE_CHECK_C', 1)))
THEANO_DEBUGMODE_CHECK_PY = bool(int(os.getenv('THEANO_DEBUGMODE_CHECK_PY', 1)))
THEANO_DEBUGMODE_CHECK_FINITE = bool(int(os.getenv('THEANO_DEBUGMODE_CHECK_FINITE', 1)))
THEANO_DEBUGMODE_CHECK_STRIDES = bool(int(os.getenv('THEANO_DEBUGMODE_CHECK_STRIDES', 1)))
THEANO_FLAGS=os.getenv("THEANO_FLAGS","")
def parse_env_flags(flags, name, default_value=None):
    """Scan a comma-separated ``option[=value]`` string for `name`.

    Later occurrences override earlier ones.  A bare option (no '=') is
    treated as True.  A matched value is always returned as a str; when
    `name` never appears, `default_value` is returned unchanged.
    """
    result = default_value
    for entry in flags.split(','):
        if not entry:
            continue
        key, eq, raw = entry.partition('=')
        if key == name:
            # bare flag -> "True"; otherwise keep the text after '='
            result = str(raw if eq else True)
    return result
floatX=parse_env_flags(THEANO_FLAGS,'floatX','float64')
class TheanoConfig(object):
    """Return the value for a key after parsing ~/.theano.cfg and
    the THEANO_FLAGS environment variable.

    We parse in that order the value to have:
    1) the pair 'section.option':value in default_
    2) The ~/.theano.cfg file
    3) The value provided in the get*() fct.
    The last value found is the value returned.

    The THEANO_FLAGS environment variable should be a comma-separated list
    of [section.]option[=value] entries.  If the section part is omitted,
    there should be only one section that contains the given option.
    """
    def __init__(self):
        # Options without a section become parser-wide defaults.
        d={} # no section
        for k,v in default_.items():
            if len(k.split('.'))==1:
                d[k]=v
        #set default value common for all section
        self.config = ConfigParser.SafeConfigParser(d)
        #set default value specific for each section
        for k, v in default_.items():
            sp = k.split('.',1)
            if len(sp)==2:
                if not self.config.has_section(sp[0]):
                    self.config.add_section(sp[0])
                self.config.set(sp[0], sp[1], str(v))
        #user config file overrides the default value
        self.config.read(['theano.cfg', os.path.expanduser('~/.theano.cfg')])
        self.env_flags=THEANO_FLAGS
        #The value in the env variable THEANO_FLAGS overrides the previous value
        for flag in self.env_flags.split(','):
            if not flag:
                continue
            sp=flag.split('=',1)
            if len(sp)==1:
                # bare flag with no '=value' means True
                val=True
            else:
                val=sp[1]
            val=str(val)
            sp=sp[0].split('.',1)#option or section.option
            if len(sp)==2:
                self.config.set(sp[0],sp[1],val)
            else:
                # Section omitted: search every section for a unique match.
                found=0
                sp=sp[0].lower()#ConfigParser normalizes option names to lower case
                for sec in self.config.sections():
                    for opt in self.config.options(sec):
                        if opt == sp:
                            found+=1
                            section=sec
                            option=opt
                if found==1:
                    self.config.set(section,option,val)
                elif found>1:
                    raise Exception("Ambiguous option (%s) in THEANO_FLAGS"%(sp))

    def __getitem__(self, key):
        """:returns: a str with the value associated to the key"""
        return self.get(key)

    def get(self, key, val=None):
        """
        :param key: the 'section.option' key that we want the value of
        :type key: str
        :param val: when not None, it is returned directly (caller override)
        :returns: a str with the value associated to the key
        """
        #self.config.get(section, option, raw, vars)
        if val is not None:
            return val
        sp = key.split('.',1)
        if len(sp)!=2:
            raise Exception("When we get a key, their must be a section and an option")
        return self.config.get(sp[0],sp[1], False)

    def getfloat(self, key, val=None):
        """ :return: cast the output of self.get to a float"""
        if val is not None:
            return float(val)
        return float(self.get(key))

    def getboolean(self, key, val=None):
        """ :return: cast the output of self.get to a boolean

        The strings "False" and "0" (and any falsy value) map to False;
        everything else maps to True.
        """
        if val is None:
            val=self.get(key)
        if val == "False" or val == "0" or not val:
            val = False
        else:
            val = True
        return val

    def getint(self, key, val=None):
        """ :return: cast the output of self.get to an int"""
        if val is not None:
            return int(val)
        return int(self.get(key))
# Module-level singleton used throughout theano.
config = TheanoConfig()
# Sanity-check the floatX value parsed from THEANO_FLAGS above.
if floatX not in ['float32', 'float64']:
    raise Exception("the configuration scalar.floatX must have value float32 or float64 not", floatX)
import os
from .configparser import TheanoConfigParser, AddConfigVar, EnumStr, StrParam, IntParam, FloatParam, BoolParam
# Singleton exposing every configuration variable registered below as a
# (possibly nested) attribute; shared by all importers of this module.
config = TheanoConfigParser()

AddConfigVar('floatX',
        "Default floating-point precision for python casts",
        EnumStr('float64', 'float32'),
        )

AddConfigVar('device',
        "Default device for computations",
        EnumStr('cpu', *['gpu%i'%i for i in range(16)])
        )

AddConfigVar('mode',
        "Default compilation mode",
        EnumStr('FAST_RUN', 'FAST_COMPILE', 'PROFILE_MODE', 'DEBUG_MODE'))

AddConfigVar('home',
        "User home directory",
        EnumStr(os.getenv("HOME")))

AddConfigVar('base_compiledir',
        "arch-independent cache directory for compiled modules",
        # reads config.home at import time, so 'home' must be registered first
        StrParam(os.path.join(config.home, '.theano')))

AddConfigVar('compiledir',
        "arch-dependent cache directory for compiled modules",
        # NOTE(review): empty default -- presumably derived from
        # base_compiledir elsewhere when unset; TODO confirm.
        StrParam(""))

AddConfigVar('nocleanup',
        "suppress the deletion of code files that did not compile cleanly",
        BoolParam(False))

AddConfigVar('blas.ldflags',
        "lib[s] to include for level-3 blas implementation",
        StrParam("-lblas"))

AddConfigVar('DebugMode.patience',
        "Optimize graph this many times",
        IntParam(10, lambda i: i > 0))

AddConfigVar('DebugMode.check_c',
        "Run C implementations where possible",
        BoolParam(True))

AddConfigVar('DebugMode.check_py',
        "Run Python implementations where possible",
        BoolParam(True))

AddConfigVar('DebugMode.check_finite',
        "True -> complain about NaN/Inf results",
        BoolParam(True))

AddConfigVar('DebugMode.check_strides',
        ("Check that Python- and C-produced ndarrays have same strides. "
            "On difference: (0) - ignore, (1) warn, or (2) raise error"),
        IntParam(1, lambda i: i in (0,1,2)))

AddConfigVar('ProfileMode.n_apply_to_print',
        "",
        IntParam(15, lambda i: i > 0))

AddConfigVar('ProfileMode.n_ops_to_print',
        "",
        IntParam(20, lambda i: i > 0))

AddConfigVar('tensor.cmp_sloppy',
        "Relax tensor._allclose (0) not at all, (1) a bit, (2) more",
        IntParam(0, lambda i: i in (0,1,2)))

AddConfigVar('tensor.local_elemwise_fusion',
        "",
        BoolParam(False))

AddConfigVar('lib.amdlibm',
        "Use amd's amdlibm numerical library",
        BoolParam(False))

AddConfigVar('op.set_flops',
        "currently used only in ConvOp. The profile mode will print the flops/s for the op.",
        BoolParam(False))

AddConfigVar('nvcc.fastmath',
        "",
        BoolParam(False))

AddConfigVar('cuda.root',
        "directory with bin/, lib/, include/ for cuda utilities",
        StrParam("/usr/local/cuda"))

AddConfigVar('gpuelemwise.sync',
        "when true, wait that the gpu fct finished and check it error code.",
        BoolParam(True))
import os, StringIO
import ConfigParser
import logging
_logger = logging.getLogger('theano.config')
THEANO_FLAGS=os.getenv("THEANO_FLAGS","")
# The THEANO_FLAGS environment variable should be a list of comma-separated
# [section.]option[=value] entries.  If the section part is omitted, there
# should be only one section that contains the given option.

# Parser over the on-disk config files; consulted by fetch_val_for_key().
theano_cfg = ConfigParser.SafeConfigParser()
theano_cfg.read(['theano.cfg', os.path.expanduser('~/.theano.cfg')])
def parse_env_flags(flags, name , default_value=None):
    """Scan a comma-separated ``option[=value]`` `flags` string for `name`.

    Later occurrences override earlier ones.  A bare option (no '=') maps
    to the string 'True'.  Matched values are returned as str; when `name`
    never appears, `default_value` is returned unchanged.
    """
    #The value in the env variable THEANO_FLAGS overrides the previous value
    val = default_value
    for flag in flags.split(','):
        if not flag:
            continue
        sp=flag.split('=',1)
        if sp[0]==name:
            if len(sp)==1:
                # bare flag with no '=value' means True
                val=True
            else:
                val=sp[1]
            val=str(val)
    return val
def fetch_val_for_key(key):
    """Return the overriding config value for a key.

    A successful search returns a string value.
    An unsuccessful search raises a KeyError.

    The priority order is:
    1. THEANO_FLAGS (environment variable)
    2. ~/.theano.cfg (via the module-level `theano_cfg` parser)

    :param key: 'option' or 'section.option' name
    :raises KeyError: when the key is found nowhere
    """
    # first try to find it in the FLAGS
    matches = []
    for name_val in THEANO_FLAGS.split(','):
        if not name_val:
            continue
        name_val_tuple = name_val.split('=', 1)
        if len(name_val_tuple) == 1:
            # BUG FIX: the old code did `name, val = name_val_tuple, str(True)`,
            # binding `name` to the one-element *list* itself, so
            # name.endswith() below raised AttributeError for any flag given
            # without '=value'.
            name, val = name_val_tuple[0], str(True)
        else:
            name, val = name_val_tuple
        if name.endswith(key): #we found it in FLAGS
            matches.append((name, val))
    if matches:
        if len(matches) > 1:
            # BUG FIX: this line used the undefined name `_logging`; the
            # module-level logger is `_logger`.  Ambiguous flags are ignored
            # and the search falls through to the config file.
            _logger.error('ambiguous THEANO_FLAGS flag %s matches %s (ignoring it)' % (key, [name for name, val in matches]))
        else:
            return matches[0][1]
    # next try to find it in the config file
    # config file keys can be of form option, or section.option
    key_tokens = key.split('.')
    if len(key_tokens) > 2:
        raise KeyError(key)
    if len(key_tokens) == 2:
        section, option = key_tokens
    else:
        section, option = 'global', key
    try:
        return theano_cfg.get(section, option)
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        # ROBUSTNESS: also map a missing section (e.g. no [global] section
        # in the file at all) to KeyError instead of letting
        # NoSectionError escape.
        raise KeyError(key)
class TheanoConfigParser(object):
    """Namespace object exposing every configuration variable as a property.

    The properties themselves are installed onto this *class* by
    AddConfigVar, so every instance gives access to the same variables.
    """

    def __str__(self):
        # Dump every registered variable (name, doc, value) into a string.
        out = StringIO.StringIO()
        _config_print(self.__class__, out)
        return out.getvalue()
# N.B. all instances of TheanoConfigParser give access to the same properties.
# Shared singleton; attribute access goes through the descriptors that
# AddConfigVar installs on the class, so all instances see the same values.
config = TheanoConfigParser()
# Every ConfigParam instance ever registered, in registration order.
_config_var_list = []
def _config_print(thing, buf):
    """Write a human-readable report of every registered config variable to `buf`.

    `thing` is accepted for interface compatibility but is not used; the
    report always covers the global _config_var_list.
    """
    for cv in _config_var_list:
        # Equivalent to the ``print >> buf`` statements this replaces:
        # each write emits the same text plus a trailing newline.
        buf.write('%s\n' % (cv,))
        buf.write(' Doc:  %s\n' % (cv.doc,))
        buf.write(' Value:  %s\n' % (cv.val,))
        buf.write('\n')
def AddConfigVar(name, doc, thing, cls=TheanoConfigParser):
    """Install configuration variable `thing` as a property on the config class.

    :param name: dotted name, e.g. 'DebugMode.patience'
    :param doc: human-readable description, stored on `thing`
    :param thing: a ConfigParam descriptor instance
    :param cls: internal -- class to attach to; used when recursing into
        sub-objects for dotted names.  Callers should not pass it.
    """
    if cls == TheanoConfigParser:
        # Only the outermost call records the fully qualified name.
        thing.fullname = name
        # NOTE(review): for dotted names this hasattr() can never be true
        # ('a.b' is not a legal attribute name), so collisions of
        # sub-options are not detected -- TODO confirm intended.
        if hasattr(TheanoConfigParser, name):
            raise ValueError('This name is already taken')
    parts = name.split('.')
    if len(parts) > 1:
        # set up a subobject to hold the nested option
        if not hasattr(cls, parts[0]):
            class SubObj(object):
                pass
            setattr(cls, parts[0], SubObj)
        # recurse with the remainder of the dotted name
        AddConfigVar('.'.join(parts[1:]), doc, thing, cls=getattr(cls, parts[0]))
    else:
        thing.name = name
        thing.doc = doc
        thing.__get__() # trigger a read of the value
        setattr(cls, parts[0], thing)
        # NOTE(review): append placed inside the else-branch so each
        # variable is registered exactly once despite the recursion --
        # original indentation was ambiguous; TODO confirm.
        _config_var_list.append(thing)
class ConfigParam(object):
    """Descriptor holding a single configuration value.

    The value is resolved lazily on first access: an override from
    THEANO_FLAGS or the config file (via fetch_val_for_key) wins, otherwise
    the supplied default is used.  An optional `filter` callable
    casts/validates raw values on every assignment.
    """

    def __init__(self, default, filter=None):
        self.default = default
        self.filter = filter
        # a `name` attribute is set later by AddConfigVar

    def __get__(self, *args):
        # Resolve the value the first time it is asked for, then cache it.
        if not hasattr(self, 'val'):
            try:
                raw = fetch_val_for_key(self.name)
            except KeyError:
                raw = self.default
            self.__set__(None, raw)
        return self.val

    def __set__(self, cls, val):
        # Run the value through the filter (cast/validate) when one is given.
        self.val = self.filter(val) if self.filter else val

    # deletion of config values is not supported
    deleter = None
class EnumStr(ConfigParam):
    """A string ConfigParam restricted to a fixed set of legal values.

    The first constructor argument is the default; any further positional
    arguments are the other allowed spellings.
    """

    def __init__(self, default, *options):
        self.default = default
        self.all = (default,) + options

        def filter(val):
            # reject anything outside the declared set of spellings
            if val not in self.all:
                raise ValueError('Invalid value (%s) for configuration variable "%s". Legal options are %s'
                        % (val, self.name, self.all), val)
            return val

        super(EnumStr, self).__init__(default, filter)

    def __str__(self):
        return '%s (%s) ' % (self.fullname, self.all)
class TypedParam(ConfigParam):
    """A ConfigParam whose raw value is cast to `mytype`, with an optional
    `is_valid` predicate checked on the casted value."""

    def __init__(self, default, mytype, is_valid=None):
        self.mytype = mytype

        def filter(val):
            casted_val = mytype(val)
            # only consult is_valid when a callable predicate was supplied
            if callable(is_valid) and not is_valid(casted_val):
                raise ValueError('Invalid value (%s) for configuration variable "%s".'
                        % (val, self.name), val)
            return casted_val

        super(TypedParam, self).__init__(default, filter)

    def __str__(self):
        return '%s (%s) ' % (self.fullname, self.mytype)
def StrParam(default, is_valid=None):
    """Return a config param holding a string value."""
    return TypedParam(default, str, is_valid)

def IntParam(default, is_valid=None):
    """Return a config param holding an int value."""
    return TypedParam(default, int, is_valid)

def FloatParam(default, is_valid=None):
    """Return a config param holding a float value."""
    return TypedParam(default, float, is_valid)

def BoolParam(default, is_valid=None):
    """Return a config param holding a boolean value.

    BUG FIX: the previous implementation cast with ``bool``, but values
    coming from THEANO_FLAGS or the config file are strings and
    ``bool('False')`` is True -- so 'False'/'0' settings were silently
    read as True.  Parse the usual textual spellings explicitly instead.
    """
    def _bool(s):
        # values assigned programmatically may already be bool/int;
        # strings come from the environment or the config file
        if s in (False, 0, 'False', 'false', '0', ''):
            return False
        if s in (True, 1, 'True', 'true', '1'):
            return True
        raise ValueError('Invalid boolean value: %r' % (s,))
    return TypedParam(default, _bool, is_valid)
"""Provide xscalar, xvector, xmatrix, etc. pseudo-types
"""
import theano.config as config
from theano.scalar import float64, float32
from theano.tensor import (fscalar, fvector, fmatrix, frow, fcol, ftensor3, ftensor4, dscalar,
dvector, dmatrix, drow, dcol, dtensor3, dtensor4)
#
# !!! set_floatX adds symbols directly to the module's symbol table !!!
#
def set_floatX(dtype=None):
    """Add the xscalar, xvector, xmatrix, ... aliases to this module,
    bound to the f* or d* constructors of the chosen precision.

    :param dtype: 'float32' or 'float64'.  None (the default) means use the
        current value of config.floatX.
    :raises Exception: for any other dtype value.

    BUG FIX: the old signature was ``dtype = config.floatX``, which froze
    the default at *import* time; using a None sentinel re-reads the config
    on every call while staying backward-compatible for explicit callers.

    !!! this function adds symbols directly to the module's symbol table !!!
    """
    if dtype is None:
        dtype = config.floatX
    config.floatX = dtype
    if dtype == 'float32':
        prefix = 'f'
    elif dtype == 'float64':
        prefix = 'd'
    else:
        raise Exception("Bad param in set_floatX(%s). Only float32 and float64 are supported"%config.floatX)
    #tensor.scalar stuff: alias the scalar dtype object itself
    globals()['floatX'] = globals()[dtype]
#    convert_to_floatX = Cast(floatX, name='convert_to_floatX')
    #tensor.tensor stuff: alias each x* constructor to the f*/d* variant
    for symbol in ('scalar', 'vector', 'matrix', 'row', 'col','tensor3','tensor4'):
        globals()['x'+symbol] = globals()[prefix+symbol]
......@@ -2,7 +2,7 @@
"""
import os, tempfile, StringIO, sys, logging, subprocess, cPickle, atexit, time, shutil, stat
import distutils.sysconfig
import theano.config as config
from theano.configparser import config
import numpy.distutils #TODO: TensorType should handle this
import compilelock # we will abuse the lockfile mechanism when reading and writing the registry
......@@ -515,7 +515,7 @@ class ModuleCache(object):
def _rmtree(parent):
try:
if not config.THEANO_NOCLEANUP:
if not config.nocleanup:
shutil.rmtree(parent)
except Exception, e:
try:
......
......@@ -4,7 +4,7 @@ import os
import platform
import re
import theano.config as config
from ..configparser import config
def set_compiledir(path=None):
"""Set the directory into which theano will compile code objects
......@@ -27,15 +27,15 @@ def set_compiledir(path=None):
if path is None:
# we need to set the default, which can come from one of two places
if config.THEANO_COMPILEDIR:
path = config.THEANO_COMPILEDIR
if config.compiledir:
path = config.compiledir
else:
platform_id = platform.platform() + '-' + platform.processor()
platform_id = re.sub("[\(\)\s]+", "_", platform_id)
if config.THEANO_BASE_COMPILEDIR:
base = config.THEANO_BASE_COMPILEDIR
if config.base_compiledir:
base = config.base_compiledir
else:
base = os.path.join(config.HOME,'.theano')
base = os.path.join(config.home,'.theano')
path = os.path.join(base, 'compiledir_'+platform_id)
if not os.access(path, os.R_OK | os.W_OK):
......
......@@ -210,7 +210,7 @@ class ConvOp(Op):
"'valid' mode)")%(self.imshp_logical,self.kshp_logical))
self._rehash()
if config.config.getboolean('op.set_flops'):
if config.op.set_flops:
self.set_flops()
def __eq__(self, other):
......
import os, sys, stat
from theano.gof.compiledir import get_compiledir
from theano.compile import optdb
import theano.config as config
from theano import config
import logging, copy
_logger_name = 'theano.sandbox.cuda'
......@@ -96,7 +96,13 @@ if enable_cuda:
import cuda_ndarray
def use(device=config.THEANO_GPU):
def use(device=config.device):
if device.startswith('gpu'):
device = int(device[3:])
elif device == 'cpu':
device = -1
else:
raise ValueError("Invalid device identifier", device)
if use.device_number is None:
# No successful call to use() has been made yet
if device=="-1" or device=="CPU":
......@@ -109,7 +115,7 @@ def use(device=config.THEANO_GPU):
handle_shared_float32(True)
use.device_number = device
except RuntimeError, e:
logging.getLogger('theano.sandbox.cuda').warning("WARNING: Won't use the GPU as the initialisation of device %i failed. %s" %(device, e))
_logger.warning("ERROR: Not using GPU. Initialisation of device %i failed. %s" %(device, e))
elif use.device_number != device:
logging.getLogger('theano.sandbox.cuda').warning("WARNING: ignoring call to use(%s), GPU number %i is already in use." %(str(device), use.device_number))
optdb.add_tags('gpu',
......@@ -131,5 +137,5 @@ def handle_shared_float32(tf):
raise NotImplementedError('removing our handler')
if enable_cuda and config.THEANO_GPU not in [None, ""]:
if enable_cuda and config.device.startswith('gpu'):
use()
import sys, os, subprocess, logging
from theano.gof.cmodule import (std_libs, std_lib_dirs, std_include_dirs, dlimport,
get_lib_extension)
import theano.config as config
from theano import config
_logger=logging.getLogger("theano.sandbox.cuda.nvcc_compiler")
_logger.setLevel(logging.WARN)
......@@ -46,7 +46,7 @@ def nvcc_module_compile_str(module_name, src_code, location=None, include_dirs=[
else: preargs = list(preargs)
preargs.append('-fPIC')
no_opt = False
cuda_root = config.CUDA_ROOT
cuda_root = config.cuda.root
include_dirs = std_include_dirs() + include_dirs + [os.path.split(__file__)[0]]
libs = std_libs() + ['cudart'] + libs
lib_dirs = std_lib_dirs() + lib_dirs
......
......@@ -7,7 +7,7 @@ import numpy
from theano import gof
from theano.gof import Op, utils, Variable, Constant, Type, Apply, Env
from theano.gof.python25 import partial, all, any
import theano.config as config
from ..configparser import config
def upcast(dtype, *dtypes):
z = numpy.zeros((), dtype = dtype)
......@@ -77,18 +77,18 @@ class Scalar(Type):
def c_headers(self):
l=['<math.h>']
if config.config.getboolean('lib.amdlibm'):
if config.lib.amdlibm:
l+=['<amdlibm.h>']
return l
def c_libraries(self):
l=[]
if config.config.getboolean('lib.amdlibm'):
if config.lib.amdlibm:
l+=['amdlibm']
return l
def c_compile_args(self):
if config.config.getboolean('lib.amdlibm'):
if config.lib.amdlibm:
return ['-DREPLACE_WITH_AMDLIBM']
else: return []
......
......@@ -4,7 +4,7 @@ __docformat__ = "restructuredtext en"
import __builtin__
import sys # for sys.maxint
import theano.config as config # for THEANO_CMP_SLOPPY
from ..configparser import config
import traceback #for overriding Op.__call__
if sys.version_info >= (2,5):
import functools
......@@ -260,7 +260,7 @@ def _wrap_tensor_into_member(x):
return compile.module.Member(constant(x))
compile.module.register_wrapper(_obj_is_wrappable_as_tensor, _wrap_tensor_into_member)
if int(config.THEANO_CMP_SLOPPY)>1:
if int(config.tensor.cmp_sloppy)>1:
# This environment variable is a quick-and-dirty way to get low-precision comparisons.
# For a more precise setting of these tolerances set them explicitly in your user code by
# assigning, for example, "theano.tensor.basic.float32_atol = ..."
......@@ -270,7 +270,7 @@ if int(config.THEANO_CMP_SLOPPY)>1:
float32_rtol = 1e-3
float64_rtol = 1e-4
float64_atol = 1e-3
elif int(config.THEANO_CMP_SLOPPY):
elif int(config.tensor.cmp_sloppy):
float32_atol = 1e-4
float32_rtol = 1e-3
float64_rtol = 1e-4
......@@ -657,7 +657,13 @@ bscalar = TensorType('int8', ())
wscalar = TensorType('int16', ())
iscalar = TensorType('int32', ())
lscalar = TensorType('int64', ())
def scalar(name = None, dtype = 'float64'):
def scalar(name=None, dtype=None):
    """Return a new symbolic scalar (0-d) variable.

    :param name: optional name for the variable
    :param dtype: numeric type; None means use theano.config.floatX
    """
    chosen = config.floatX if dtype is None else dtype
    return TensorType(chosen, ())(name)
scalars, fscalars, dscalars, iscalars, lscalars = _multi(scalar, fscalar, dscalar, iscalar, lscalar)
......@@ -677,7 +683,13 @@ bvector = TensorType('int8', (False,))
wvector = TensorType('int16', (False,))
ivector = TensorType('int32', (False, ))
lvector = TensorType('int64', (False, ))
def vector(name = None, dtype = 'float64'):
def vector(name=None, dtype=None):
    """Return a new symbolic vector (1-d) variable.

    :param name: optional name for the variable
    :param dtype: numeric type; None means use theano.config.floatX
    """
    chosen = config.floatX if dtype is None else dtype
    return TensorType(chosen, (False,))(name)
vectors, fvectors, dvectors, ivectors, lvectors = _multi(vector, fvector, dvector, ivector, lvector)
......@@ -694,7 +706,13 @@ bmatrix = TensorType('int8', (False, False))
wmatrix = TensorType('int16', (False, False))
imatrix = TensorType('int32', (False, False))
lmatrix = TensorType('int64', (False, False))
def matrix(name = None, dtype = 'float64'):
def matrix(name=None, dtype=None):
    """Return a new symbolic matrix (2-d) variable.

    :param name: optional name for the variable
    :param dtype: numeric type; None means use theano.config.floatX
    """
    chosen = config.floatX if dtype is None else dtype
    return TensorType(chosen, (False, False))(name)
matrices, fmatrices, dmatrices, imatrices, lmatrices = _multi(matrix, fmatrix, dmatrix, imatrix, lmatrix)
......@@ -711,7 +729,13 @@ brow = TensorType('int8', (True, False))
wrow = TensorType('int16', (True, False))
irow = TensorType('int32', (True, False))
lrow = TensorType('int64', (True, False))
def row(name = None, dtype = 'float64'):
def row(name=None, dtype=None):
    """Return a new symbolic row variable (ndim=2, broadcastable=[True, False]).

    :param name: optional name for the variable
    :param dtype: numeric type; None means use theano.config.floatX
    """
    chosen = config.floatX if dtype is None else dtype
    return TensorType(chosen, (True, False))(name)
rows, frows, drows, irows, lrows = _multi(row, frow, drow, irow, lrow)
......@@ -724,7 +748,13 @@ bcol = TensorType('int8', (False, True))
wcol = TensorType('int16', (False, True))
icol = TensorType('int32', (False, True))
lcol = TensorType('int64', (False, True))
def col(name = None, dtype = 'float64'):
def col(name=None, dtype=None):
    """Return a new symbolic column variable (ndim=2, broadcastable=[False, True]).

    :param name: optional name for the variable
    :param dtype: numeric type; None means use theano.config.floatX
    """
    chosen = config.floatX if dtype is None else dtype
    return TensorType(chosen, (False, True))(name)
cols, fcols, dcols, icols, lcols = _multi(col, fcol, dcol, icol, lcol)
......@@ -737,7 +767,13 @@ btensor3 = TensorType('int8', (False,)*3)
wtensor3 = TensorType('int16', (False,)*3)
itensor3 = TensorType('int32', (False,)*3)
ltensor3 = TensorType('int64', (False,)*3)
def tensor3(name=None, dtype='float64'):
def tensor3(name=None, dtype=None):
    """Return a new symbolic 3-d variable.

    :param name: optional name for the variable
    :param dtype: numeric type; None means use theano.config.floatX
    """
    chosen = config.floatX if dtype is None else dtype
    return TensorType(chosen, (False,) * 3)(name)
tensor3s, ftensor3s, dtensor3s, itensor3s, ltensor3s = _multi(tensor3, ftensor3, dtensor3,
......@@ -751,7 +787,13 @@ btensor4 = TensorType('int8', (False,)*4)
wtensor4 = TensorType('int16', (False,)*4)
itensor4 = TensorType('int32', (False,)*4)
ltensor4 = TensorType('int64', (False,)*4)
def tensor4(name=None, dtype='float64'):
def tensor4(name=None, dtype=None):
    """Return a new symbolic 4-d variable.

    :param name: optional name for the variable
    :param dtype: numeric type; None means use theano.config.floatX
    """
    chosen = config.floatX if dtype is None else dtype
    return TensorType(chosen, (False,) * 4)(name)
tensor4s, ftensor4s, dtensor4s, itensor4s, ltensor4s = _multi(tensor4, ftensor4, dtensor4,
......
......@@ -2,7 +2,7 @@
import sys, traceback, logging
import numpy
import theano.config as config
from ..configparser import config
from theano.gof import (utils, Op, Apply, view_roots, PatternSub, DestroyHandler,
SeqOptimizer, local_optimizer, Optimizer, LocalOptimizer, OpKeyOptimizer,
InconsistencyError, toolbox)
......@@ -33,7 +33,7 @@ def ldflags(libs=True, flags=False):
Default: ['blas'], but environment variable THEANO_BLAS_LDFLAGS overrides this.
"""
rval = []
for t in config.THEANO_BLAS_LDFLAGS.split():
for t in config.blas.ldflags.split():
try:
t0, t1, t2 = t[0:3]
assert t0 == '-'
......
......@@ -9,7 +9,7 @@ _logger = logging.getLogger('theano.tensor.opt')
from theano import gof
from theano.gof import opt, InconsistencyError, TopoOptimizer, graph
from theano.gof.utils import MethodNotDefined
import theano.config as config
from ..configparser import config
from elemwise import Elemwise, DimShuffle
from theano import scalar
import basic as T
......@@ -1467,7 +1467,7 @@ class FusionOptimizer(Optimizer):
pass
if config.config.getboolean('tensor_opt.local_elemwise_fusion'):
if config.tensor.local_elemwise_fusion:
_logger.debug("enabling optimization fusion elemwise in fast_run")
compile.optdb.register('elemwise_fusion', FusionOptimizer(), 71.00, 'fast_run', 'fusion', 'local_elemwise_fusion')
else:
......
......@@ -871,7 +871,7 @@ class T_subtensor(unittest.TestCase):
print gval
good = numpy.zeros_like(data)
good[1,0] = numpy.exp(data[1,0])
self.failUnless(numpy.all(gval == good), (gval, good))
self.failUnless(numpy.allclose(gval, good), (gval, good))
class T_Join_and_Split(unittest.TestCase):
......@@ -1992,7 +1992,7 @@ class TestPermuteRowElements(unittest.TestCase):
def test_1_1(self):
"""Test PermuteRowElements(vector, vector)"""
input = vector()
input = dvector()
p = ivector()
out = permute_row_elements(input, p)
permute = function([input, p], out)
......@@ -2014,7 +2014,7 @@ class TestPermuteRowElements(unittest.TestCase):
def test_2_1(self):
"""Test broadcasting in PermuteRowElements(matrix, vector)"""
input = matrix()
input = dmatrix()
p = ivector()
out = permute_row_elements(input, p)
permute = function([input, p], out)
......@@ -2036,7 +2036,7 @@ class TestPermuteRowElements(unittest.TestCase):
def test_2_2(self):
"""Test PermuteRowElements(matrix, matrix)"""
input = matrix()
input = dmatrix()
p = imatrix()
out = permute_row_elements(input, p)
permute = function([input, p], out)
......@@ -2060,7 +2060,7 @@ class TestPermuteRowElements(unittest.TestCase):
def test_1_2(self):
"""Test PermuteRowElements(vector, matrix)
Different permutations will be applied to the same input vector"""
input = vector()
input = dvector()
p = imatrix()
out = permute_row_elements(input, p)
permute = function([input, p], out)
......@@ -2228,7 +2228,7 @@ def test_sum_overflow():
assert f([1]*300) == 300
def test_default():
x, y = dscalars('xy')
x, y = scalars('xy')
z = default(x, y)
f = function([x, y], z)
assert f(1, 2) == 1
......@@ -2236,14 +2236,17 @@ def test_default():
assert f(1, None) == 1
def test_default_state():
x, y = dscalars('xy')
x, y = scalars('xy')
print config.floatX
print x.type
print y.type
z = default(x, 3.8)
new_x = y + z
f = function([y, compile.In(x, update = new_x, value = 12.0)], new_x)
assert f(3) == 15
f['x'] = None
assert f(1) == 4.8
assert f(2.2) == 7
assert numpy.allclose(f(1), 4.8)
assert numpy.allclose(f(2.2), 7)
def test_autocast():
orig_autocast = autocast_float.dtypes
......
......@@ -104,7 +104,6 @@ class T_prepend(unittest.TestCase):
f=theano.function([x],y)
m=numpy.ones((3,5),dtype="float32")
my = f(m)
self.failUnless(str(my.dtype) == 'float64')
self.failUnless(my.shape == (3, 6))
self.failUnless(numpy.all(my[:,0] == 5.0))
......
from theano.tensor import *
import theano.config as config
from theano import function
#from theano.floatx import set_floatX, xscalar, xmatrix, xrow, xcol, xvector, xtensor3, xtensor4
import theano.floatX as FX
def test_floatX():
    """Check that cast() to/from 'floatX' inserts an elemwise cast op only
    when the configured floatX differs from the target dtype, under both
    the float32 and float64 settings."""
    def test():
        # run the checks against whatever floatX is currently configured
        floatx=config.floatX
        #TODO test other fct then ?vector
        #float64 cast to float64 should not generate an op
        x = dvector('x')
        f = function([x],[cast(x,'float64')])
#        print f.maker.env.toposort()
        assert len(f.maker.env.toposort())==0
        #float32 cast to float32 should not generate an op
        x = fvector('x')
        f = function([x],[cast(x,'float32')])
#        print f.maker.env.toposort()
        assert len(f.maker.env.toposort())==0
        #floatX cast to float64: op needed only when floatX is float32
        x = FX.xvector('x')
        f = function([x],[cast(x,'float64')])
#        print f.maker.env.toposort()
        if floatx=='float64':
            assert len(f.maker.env.toposort()) == 0
        else:
            assert len(f.maker.env.toposort()) == 1
        #floatX cast to float32: op needed only when floatX is float64
        x = FX.xvector('x')
        f = function([x],[cast(x,'float32')])
#        print f.maker.env.toposort()
        if floatx=='float32':
            assert len(f.maker.env.toposort()) == 0
        else:
            assert len(f.maker.env.toposort()) == 1
        #float64 cast to floatX
        x = dvector('x')
        f = function([x],[cast(x,'floatX')])
#        print f.maker.env.toposort()
        if floatx=='float64':
            assert len(f.maker.env.toposort()) == 0
        else:
            assert len(f.maker.env.toposort()) == 1
        #float32 cast to floatX
        x = fvector('x')
        f = function([x],[cast(x,'floatX')])
#        print f.maker.env.toposort()
        if floatx=='float32':
            assert len(f.maker.env.toposort()) == 0
        else:
            assert len(f.maker.env.toposort()) == 1
        #floatX cast to floatX: never needs an op
        x = FX.xvector('x')
        f = function([x],[cast(x,'floatX')])
#        print f.maker.env.toposort()
        assert len(f.maker.env.toposort()) == 0

    # run under both precisions, then restore the original setting
    orig_floatx = config.floatX
    try:
        print 'float32'
        FX.set_floatX('float32')
        test()
        print 'float64'
        FX.set_floatX('float64')
        test()
    finally:
        pass
        FX.set_floatX(orig_floatx)
import unittest
import numpy
import theano.tensor as T
import theano.config as config
from ..configparser import config, AddConfigVar, IntParam
import os, sys
# Register the seed used by randomized unit tests; can be overridden via
# THEANO_FLAGS=unittests.rseed=... or the config file.
AddConfigVar('unittests.rseed',
        "Seed to use for randomized unit tests",
        IntParam(666))
def fetch_seed(pseed=None):
"""
Returns the seed to use for running the unit tests.
......@@ -17,7 +21,7 @@ def fetch_seed(pseed=None):
>>> rng = numpy.random.RandomState(unittest_tools.fetch_seed())
"""
seed = pseed or config.THEANO_UNITTEST_SEED
seed = pseed or config.unittests.rseed
if seed=='random':
seed = None
#backport
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论