Commit 167df2c4 authored by Frédéric Bastien

Merge pull request #3801 from nouiz/doc

Doc clarification and skip flake8 tests on auto saved file
......@@ -14,7 +14,6 @@ if not cuda_ndarray.cuda_available:
from nose.plugins.skip import SkipTest
raise SkipTest('Optional package cuda disabled')
import theano
import theano.tensor as T
from theano.misc.pycuda_example import (PycudaElemwiseSourceModuleOp,
# PycudaElemwiseKernelOp,
......
......@@ -368,8 +368,8 @@ def local_gpu_split(node):
outs_clients = reduce(list.__add__,
[out.clients for out in node.outputs])
if (input.owner and isinstance(input.owner.op, HostFromGpu) or
any([c != 'output' and isinstance(c.op, GpuFromHost) for c, idx
in outs_clients])):
any(c != 'output' and isinstance(c.op, GpuFromHost) for c, idx
in outs_clients)):
new_op = GpuSplit(node.op.len_splits)
split_res = new_op(as_cuda_ndarray_variable(input),
*node.inputs[1:], return_list=True)
......@@ -1253,9 +1253,9 @@ def local_gpu_pdbbreakpoint_op(node):
input_is_from_gpu = (inp.owner and
isinstance(inp.owner.op, HostFromGpu))
output_goes_to_gpu = any([c[0] != "output" and
isinstance(c[0].op, GpuFromHost)
for c in out.clients])
output_goes_to_gpu = any(c[0] != "output" and
isinstance(c[0].op, GpuFromHost)
for c in out.clients)
if input_is_from_gpu:
# The op should be applied on the GPU version of the input
......@@ -2154,9 +2154,9 @@ def local_gpualloc(node):
replace = True
elif all([c != 'output' and
c.op == tensor.join and
all([i.owner and
i.owner.op in [host_from_gpu, tensor.alloc]
for i in c.inputs[1:]])
all(i.owner and
i.owner.op in [host_from_gpu, tensor.alloc]
for i in c.inputs[1:])
for c, idx in node.outputs[0].clients]):
# if the client is on gpu or alloc
replace = True
......
......@@ -35,7 +35,8 @@ class TestConv2d(unittest.TestCase):
self.border_modes = ["valid", "full", (0, 0), (1, 1), (5, 5), (5, 2)]
self.filter_flip = [True, False]
def get_output_shape(self, inputs_shape, filters_shape, subsample, border_mode):
def get_output_shape(self, inputs_shape, filters_shape,
subsample, border_mode):
if border_mode == "valid":
border_mode = (0, 0)
if border_mode == "full":
......@@ -139,8 +140,10 @@ class TestConv2d(unittest.TestCase):
utt.verify_grad(abstract_conv2d_gradweight, [inputs_val, output_val],
mode=mode, eps=1)
def run_gradinput(self, inputs_shape, filters_shape, output_shape, ref=dnn_gradinput,
subsample=(1, 1), filter_flip=True, verify_grad=True, mode=mode_without_gpu,
def run_gradinput(self, inputs_shape, filters_shape,
output_shape, ref=dnn_gradinput,
subsample=(1, 1), filter_flip=True,
verify_grad=True, mode=mode_without_gpu,
border_mode='valid', device='cpu', provide_shape=False):
output_val = numpy.random.random(output_shape).astype('float32')
......@@ -188,7 +191,6 @@ class TestConv2d(unittest.TestCase):
mode = mode_with_gpu
        # provide_shape is not used by the CuDNN implementation
provide_shape = False
for (i, f), s, b, flip in itertools.product(
zip(self.inputs_shapes, self.filters_shapes),
self.subsamples,
......@@ -210,7 +212,7 @@ class TestConv2d(unittest.TestCase):
provide_shape=provide_shape, border_mode=b,
filter_flip=flip)
def test_gpucormm_conv(self):
def test_gpucorrmm_conv(self):
if not dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
......
......@@ -873,8 +873,6 @@ def test_hostfromgpu_shape_i():
# -----------------------------------------------------------------------
import theano.sandbox.cuda as cuda_ndarray
def test_gpujoin_assert_cndas():
# this will end up being an ndarray, as it's float64
......
import errno
import logging
import os
from six.moves import reload_module as reload
import sys
import warnings
......
......@@ -179,13 +179,13 @@ class BaseCorrMM(gof.Op):
raise ValueError("height must be given for backprop with vertical sampling or border_mode='half'")
height = '(*(npy_int*)(PyArray_DATA(%s)))' % height
else:
height = 'NULL'
height = '-1'
if ((direction != 0) and (dW != 1)) or ((direction == 1) and (padW == -1)):
if not width:
raise ValueError("width must be given for backprop with horizontal sampling or border_mode='half'")
width = '(*(npy_int*)(PyArray_DATA(%s)))' % width
else:
width = 'NULL'
width = '-1'
sub = sub.copy()
sub.update(locals())
......
......@@ -63,7 +63,7 @@ def max_pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0),
ignore_border : bool (default None, will print a warning and set to False)
When True, (5,5) input with ds=(2,2) will generate a (2,2) output.
(3,3) otherwise.
st : tuple of lenght 2
st : tuple of two ints
Stride size, which is the number of shifts over rows/cols to get the
next pool region. If st is None, it is considered equal to ds
(no overlap on pooling regions).
......@@ -80,13 +80,17 @@ def max_pool_2d(input, ds, ignore_border=None, st=None, padding=(0, 0),
if input.ndim < 2:
raise NotImplementedError('max_pool_2d requires a dimension >= 2')
if ignore_border is None:
warnings.warn("max_pool_2d() will have the parameter ignore_border"
" default value changed to True (currently"
" False). To have consistent behavior with all Theano"
" version, explicitly add the parameter"
" ignore_border=True. (this is also faster than"
" ignore_border=False)",
stacklevel=2)
warnings.warn(
"max_pool_2d() will have the parameter ignore_border"
" default value changed to True (currently"
" False). To have consistent behavior with all Theano"
" version, explicitly add the parameter ignore_border=True."
" On the GPU, using ignore_border=False is needed to use CuDNN."
" When using ignore_border=False and not using CuDNN, the only"
" GPU combination supported is when"
" `ds == st and padding == (0, 0) and mode == 'max'`."
" Otherwise, the convolution will be executed on CPU.",
stacklevel=2)
ignore_border = False
if input.ndim == 4:
op = DownsampleFactorMax(ds, ignore_border, st=st, padding=padding,
......
......@@ -5020,7 +5020,7 @@ class T_reshape(utt.InferShapeTester, utt.TestOptimizationMixin):
# The tag canonicalize is needed for the shape test in FAST_COMPILE
self.mode = mode
self.ignore_topo = ignore_topo
return super(T_reshape, self).__init__(name)
super(T_reshape, self).__init__(name)
def function(self, inputs, outputs):
f = function(inputs, outputs, mode=self.mode)
......
......@@ -12,7 +12,6 @@ from theano.tensor.blas_scipy import ScipyGer
from theano.tensor.blas import Ger
from theano.tensor.blas_c import CGemv
from theano.tensor.blas_scipy import ScipyGer
from theano.tensor.blas import Gemv
from theano.tensor.blas_c import check_force_gemv_init
......
......@@ -1264,7 +1264,7 @@ class TestAdvancedSubtensor(unittest.TestCase):
self.mode = mode
self.dtype = dtype
self.ignore_topo = ignore_topo
return super(TestAdvancedSubtensor, self).__init__(name)
super(TestAdvancedSubtensor, self).__init__(name)
def setUp(self):
self.s = iscalar()
......
......@@ -183,7 +183,7 @@ whitelist_flake8 = [
]
def list_files(dir_path=theano.__path__[0], pattern='*.py'):
def list_files(dir_path=theano.__path__[0], pattern='*.py', no_match=".#"):
"""
List all files under theano's path.
"""
......@@ -192,7 +192,8 @@ def list_files(dir_path=theano.__path__[0], pattern='*.py'):
for f in files:
if fnmatch(f, pattern):
path = os.path.join(dir, f)
files_list.append(path)
if not f.startswith(no_match):
files_list.append(path)
return files_list
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论