Commit 6cf8a013, authored by abergeron

Merge pull request #2926 from nouiz/mixed

Mixed
......@@ -11,6 +11,7 @@ Here are some quick guides to NumPy:
* `Numpy User Guide <http://docs.scipy.org/doc/numpy/user/index.html>`__
* `More detailed Numpy tutorial <http://www.scipy.org/Tentative_NumPy_Tutorial>`__
* `100 NumPy exercises <https://github.com/rougier/numpy-100>`__
* `Numpy tutorial <http://www.labri.fr/perso/nrougier/teaching/numpy/numpy.html>`__
.. [TODO: More doc, e.g. see _test_tensor.py]
......
......@@ -89,6 +89,37 @@ theano_raw_cfg = ConfigParser.RawConfigParser()
theano_raw_cfg.read(config_files)
def change_flags(**kwargs):
    """
    Decorator factory that temporarily overrides Theano config variables.

    Each keyword argument names a config variable (by its ``fullname``)
    and gives the value it should take while the decorated function runs.
    The previous values are restored afterwards, even if the decorated
    function raises.  Useful during tests.

    Raises
    ------
    AssertionError
        If a keyword does not match exactly one registered config
        variable.
    """
    def change_flags_exec(f):
        # Local import keeps the decorator self-contained; the module
        # does not otherwise need functools.
        from functools import wraps

        def find_config_var(name):
            # Return the unique config variable whose fullname matches
            # `name`; fail loudly on unknown or ambiguous names.
            matches = [v for v in theano.configparser._config_var_list
                       if v.fullname == name]
            assert len(matches) == 1, name
            return matches[0]

        @wraps(f)
        def inner(*args, **kwargs_):
            # Snapshot the current values *before* overriding anything,
            # so the finally block can always restore a consistent state.
            old_val = {}
            for k in kwargs:
                old_val[k] = find_config_var(k).__get__()
            try:
                for k in kwargs:
                    find_config_var(k).__set__(None, kwargs[k])
                return f(*args, **kwargs_)
            finally:
                # Restore the original values even if f() raised.
                for k in kwargs:
                    find_config_var(k).__set__(None, old_val[k])
        return inner
    return change_flags_exec
def fetch_val_for_key(key):
"""Return the overriding config value for a key.
A successful search returns a string value.
......
......@@ -4,7 +4,6 @@ from __future__ import print_function
import atexit
import cPickle
import logging
import operator
import os
import re
import shutil
......@@ -25,12 +24,12 @@ except ImportError:
import numpy.distutils # TODO: TensorType should handle this
import theano
from theano.compat import PY3, next, decode, decode_iter
from theano.compat import PY3, decode, decode_iter
from theano.compat.six import b, BytesIO, StringIO
from theano.gof.utils import flatten
from theano.configparser import config
from theano.gof.cc import hash_from_code
from theano.misc.windows import (subprocess_Popen, call_subprocess_Popen,
from theano.misc.windows import (subprocess_Popen,
output_subprocess_Popen)
# we will abuse the lockfile mechanism when reading and writing the registry
......@@ -846,14 +845,15 @@ class ModuleCache(object):
get_safe_part(key),
[]).append(key)
else:
dir1 = os.path.dirname(self.entry_from_key[key])
dir2 = os.path.dirname(entry)
_logger.warning(
"The same cache key is associated to "
"different modules (%s and %s). This "
"is not supposed to happen! You may "
"need to manually delete your cache "
"directory to fix this.",
self.entry_from_key[key],
entry)
dir1, dir2)
# Clean up the name space to prevent bug.
if key_data.keys:
del key
......@@ -1050,8 +1050,6 @@ class ModuleCache(object):
if module is not None:
return module
lock_taken = False
src_code = lnk.get_src_code()
# Is the source code already in the cache?
module_hash = get_module_hash(src_code, key)
......@@ -1498,8 +1496,8 @@ def std_lib_dirs_and_libs():
r'EGG-INFO\mingw\usr\x86_64-w64-mingw32\lib')]
for f, lib in [('libmsvcr90.a',
'mingw 4.5.2 or 4.8.1-2 (newer could work)')]:
if not any([os.path.exists(os.path.join(libdir, f))
for libdir in libdirs]):
if not any([os.path.exists(os.path.join(tmp_libdir, f))
for tmp_libdir in libdirs]):
print(("Your Python version is from Canopy. " +
"You need to install the package '" + lib +
"' from Canopy package manager."
......
......@@ -110,13 +110,17 @@ def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
trace = ()
exc_value.__thunk_trace__ = trace
exc_value.__op_instance__ = node
if node in node.fgraph.toposort():
exc_value.__applynode_index__ = node.fgraph.toposort().index(node)
topo = node.fgraph.toposort()
if node in topo:
node_index = topo.index(node)
else:
exc_value.__applynode_index__ = None
node_index = None
exc_value.__applynode_index__ = node_index
hints = []
detailed_err_msg = "\nApply node that caused the error: " + str(node)
if exc_value.__applynode_index__ is not None:
detailed_err_msg += "\nToposort index: %d" % node_index
types = [getattr(ipt, 'type', 'No type') for ipt in node.inputs]
detailed_err_msg += "\nInputs types: %s\n" % types
......@@ -137,10 +141,11 @@ def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
shapes = "The thunk don't have an inputs attributes."
strides = "So we can't access the strides of inputs values"
scalar_values = "And can't print its inputs scalar value"
clients = [[c[0] for c in var.clients] for var in node.outputs]
detailed_err_msg += ("Inputs shapes: %s" % shapes +
"\nInputs strides: %s" % strides +
"\nInputs values: %s\n" % scalar_values)
"\nInputs values: %s" % scalar_values +
"\nOutputs clients: %s\n" % clients)
else:
hints.append(
"HINT: Use another linker then the c linker to"
......
......@@ -436,13 +436,13 @@ def use(device,
cuda_enabled = True
if default_to_move_computation_to_gpu:
# Do not add inplace tag here. We do not want to
# enable/disable gpu opt based on the inplace tag.
optdb.add_tags('gpu_opt',
'fast_compile',
'fast_run',
'inplace')
'fast_run')
optdb.add_tags('gpu_after_fusion',
'fast_run',
'inplace')
'fast_run')
if force:
try:
......
......@@ -1672,6 +1672,8 @@ if True:
if not dnn_available():
return
if isinstance(node.op, DownsampleFactorMaxGrad):
if not node.op.ignore_border:
return
inp, out, inp_grad = node.inputs
ds = node.op.ds
st = node.op.st
......@@ -1683,8 +1685,6 @@ if True:
(inp_grad.owner and isinstance(inp_grad.owner.op,
HostFromGpu))):
desc = GpuDnnPoolDesc(ws=ds, stride=st, mode=mode, pad=pad)()
if not node.op.ignore_border:
return
ret = GpuDnnPoolGrad()(gpu_contiguous(inp),
gpu_contiguous(out),
gpu_contiguous(inp_grad),
......
......@@ -90,7 +90,7 @@ def test_dot22scalar():
[a, b],
tensor.dot(a, b) * numpy.asarray(4, 'float32'))
t = f.maker.fgraph.toposort()
assert len(t) == 4
assert len(t) == 4, t
assert isinstance(t[0].op, tcn.GpuFromHost)
assert isinstance(t[1].op, tcn.GpuFromHost)
assert isinstance(t[2].op, tcn.blas.GpuDot22Scalar)
......
......@@ -122,6 +122,7 @@ def test_memory():
assert mem1 == freemem(), (mem1, freemem())
@theano.configparser.change_flags(**{'vm.lazy': True})
def test_memory_lazy():
"""As test_memory, but with the ifelse op.
......
......@@ -56,7 +56,7 @@ if pygpu:
init_dev(config.device)
import theano.compile
theano.compile.shared_constructor(gpuarray_shared_constructor)
optdb.add_tags('gpuarray_opt', 'fast_run', 'fast_compile', 'inplace')
optdb.add_tags('gpuarray_opt', 'fast_run', 'fast_compile')
elif config.gpuarray.init_device != '':
init_dev(config.gpuarray.init_device)
......
......@@ -727,8 +727,9 @@ class GpuAllocEmpty(HideC, Alloc):
def make_node(self, *shape):
sh, bcast = self.validate_shape(shape)
otype = GpuArrayType(dtype=self.dtype, broadcastable=bcast)
return Apply(self, sh, [otype()])
output = GpuArrayType(dtype=self.dtype, broadcastable=bcast)()
output.tag.values_eq_approx = tensor.type.values_eq_approx_always_true
return Apply(self, sh, [output])
def perform(self, node, inputs, out_):
out = out_[0]
......
......@@ -16,7 +16,7 @@ import theano
from theano import gof
from theano.tensor import basic as tensor
from theano.tensor import subtensor
from theano.tensor import elemwise, dmatrix, fmatrix, dvector, fvector
from theano.tensor import elemwise
from theano.tensor import opt
from theano.compile import optdb
from theano.gof import Apply
......@@ -1426,9 +1426,11 @@ optdb.register('crossentropy_to_crossentropy_with_softmax',
'fast_run', 'xent', 'fast_compile_gpu')
@opt.register_specialize('fast_compile_gpu')
@opt.register_specialize(
'fast_compile_gpu',
'local_crossentropy_to_crossentropy_with_softmax_grad') # old name
@gof.local_optimizer([softmax_grad])
def local_crossentropy_to_crossentropy_with_softmax_grad(node):
def local_softmax_grad_to_crossentropy_with_softmax_grad(node):
if node.op == softmax_grad:
g_coding_dist, coding_dist = node.inputs
if (g_coding_dist.owner and
......
......@@ -35,5 +35,9 @@ def test_none_Constant():
x = tensor.vector('x')
y = tensor.argmax(x)
f = theano.function([x], [y])
kwargs = {}
# We can't pickle DebugMode
if theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
kwargs = {'mode': 'FAST_RUN'}
f = theano.function([x], [y], **kwargs)
cPickle.loads(cPickle.dumps(f))
Markdown 格式
0%
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论