提交 e09272f5 authored 作者: abergeron

Merge pull request #4161 from nouiz/opt_failure

[OPT FAILURE] Fix gh-4131
......@@ -65,7 +65,7 @@ script:
- cd $(python -c 'import theano; import os; print(os.path.split(theano.__file__)[0])')
- echo "$PART"
- cd -; cd Theano
- python -c 'import theano; print(theano.config)'
- python -c 'import theano; print(theano.config.__str__(print_doc=False))'
- python -c 'import theano; assert(theano.config.blas.ldflags != "")'
- theano-nose -v $PART
- if [[ $DOC == "1" ]]; then python doc/scripts/docgen.py --nopdf --check; fi
......
......@@ -165,10 +165,11 @@ def fetch_val_for_key(key, delete_key=False):
_config_var_list = []
def _config_print(thing, buf):
def _config_print(thing, buf, print_doc=True):
for cv in _config_var_list:
print(cv, file=buf)
print(" Doc: ", cv.doc, file=buf)
if print_doc:
print(" Doc: ", cv.doc, file=buf)
print(" Value: ", cv.__get__(True, None), file=buf)
print("", file=buf)
......@@ -191,9 +192,9 @@ class TheanoConfigParser(object):
# properties are installed by AddConfigVar
_i_am_a_config_class = True
def __str__(self):
def __str__(self, print_doc=True):
sio = StringIO()
_config_print(self.__class__, sio)
_config_print(self.__class__, sio, print_doc=print_doc)
return sio.getvalue()
# N.B. all instances of TheanoConfigParser give access to the same properties.
......
......@@ -219,16 +219,11 @@ class FunctionGraph(utils.object2):
def disown(self):
"""
WRITEME
Cleans up all of this FunctionGraph's nodes and variables so they are
not associated with this FunctionGraph anymore.
The FunctionGraph should not be used anymore after disown is called.
This may not clean everything this FunctionGraph's features set in the
nodes and variables. If there are no features, this should set
them back to what they were originally.
"""
for f in self._features:
self.remove_feature(f)
......
......@@ -165,7 +165,6 @@ if compile_cuda_ndarray and cuda_available:
os.makedirs(tmpdir)
compiler = nvcc_compiler.NVCC_compiler()
preargs = ['-O3'] + compiler.compile_args()
preargs += [f for f in config.nvcc.flags.split(' ') if f]
compiler.compile_str(
'cuda_ndarray',
code,
......
......@@ -548,7 +548,7 @@ def local_gpu_lazy_ifelse(node):
for i in range(len(outs)):
if (not isinstance(outs[i].type, CudaNdarrayType) and
outs[i].dtype == 'float32'):
getattr(outs[i], 'dtype', None) == 'float32'):
outs[i] = as_cuda_ndarray_variable(outs[i])
outs = gpu_ifelse(c, *outs, return_list=True)
for i in range(len(outs)):
......@@ -1023,11 +1023,11 @@ def local_gpu_flatten(node):
return [gpu_flatten(host_input.owner.inputs[0], outdim)(
as_cuda_ndarray_variable(host_input.owner.inputs[0]))]
if isinstance(node.op, tensor.Flatten):
x, shp= node.inputs
outdim = node.op.outdim
x, = node.inputs
if x.owner and isinstance(x.owner.op, HostFromGpu):
outdim = node.op.outdim
gpu_x, = x.owner.inputs
return [host_from_gpu(gpu_flatten(host_input.owner.inputs[0], outdim)(gpu_x))]
return [host_from_gpu(gpu_flatten(gpu_x, outdim))]
return False
......
......@@ -546,7 +546,12 @@ def local_gpua_lazy_ifelse(node, context_name):
if node.op.gpu:
return
c = node.inputs[0]
inps = [as_gpuarray_variable(v, context_name) for v in node.inputs[1:]]
inps = []
for v in node.inputs[1:]:
if isinstance(v.type, (tensor.TensorType, GpuArrayType)):
inps.append(as_gpuarray_variable(v, context_name))
else:
inps.append(v)
return IfElse(node.op.n_outs, gpu=True)(c, *inps, return_list=True)
......
......@@ -2,7 +2,8 @@ from __future__ import print_function
from . import pool
import warnings
warnings.warn("downsample module has been moved to the pool module.")
warnings.warn(
"downsample module has been moved to the theano.tensor.signal.pool module.")
max_pool_2d_same_size = pool.max_pool_2d_same_size
max_pool_2d = pool.pool_2d
DownsampleFactorMax = pool.Pool
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论