Commit 980faeee authored by Taesup (TS) Kim

fix flake8 errors

Parent 842da1f2
...@@ -2,4 +2,3 @@ from __future__ import print_function ...@@ -2,4 +2,3 @@ from __future__ import print_function
import sys import sys
print("DEPRECATION: theano.sandbox.conv no longer provides conv. " print("DEPRECATION: theano.sandbox.conv no longer provides conv. "
"They have been moved to theano.tensor.nnet.conv", file=sys.stderr) "They have been moved to theano.tensor.nnet.conv", file=sys.stderr)
from theano.tensor.nnet.conv import *
from __future__ import print_function

import sys
from copy import copy

from six import reraise

from theano import gof
class DebugException(Exception):
    """Exception raised by DebugLinker's hooks and wrapper.

    Debugging context is attached as attributes at raise time (e.g.
    'debugger', 'step', 'node', 'original_exception').
    """
    pass
class DebugLinker(gof.WrapLinker):
    """WrapLinker that runs several linkers in lockstep and cross-checks them.

    For every node, the thunk of each wrapped linker is executed in turn.
    Hooks in `debug_pre` run before the thunks, hooks in `debug_post` after.
    Two built-in post hooks are available: `check_types` verifies that every
    output value passes its variable's type filter, and `compare_variables`
    checks that all linkers computed equivalent outputs.

    Parameters
    ----------
    linkers
        Sequence of linkers whose thunks are executed side by side.
    debug_pre
        Callable or list of callables ``f(i, node, *thunks)`` run before
        each node's thunks.
    debug_post
        Callable or list of callables ``f(i, node, *thunks)`` run after
        each node's thunks.
    copy_originals : bool
        If True, a copy of each freshly computed value is stored in
        ``variable.original_value`` for later inspection.
    check_types
        True to enable the built-in type check, None to disable it, or a
        custom callable with the hook signature to replace it.
    compare_variables
        True to enable the built-in cross-linker comparison, None to
        disable it, or a custom callable to replace it.
    compare_fn
        Binary predicate used by the built-in comparison (default ``==``).
    """

    def __init__(self,
                 linkers,
                 debug_pre=None,
                 debug_post=None,
                 copy_originals=False,
                 check_types=True,
                 compare_variables=True,
                 compare_fn=(lambda x, y: x == y)):
        # Fresh lists per instance: a mutable default would be shared.
        if debug_pre is None:
            debug_pre = []
        if debug_post is None:
            debug_post = []
        gof.WrapLinker.__init__(self,
                                linkers=linkers,
                                wrapper=self.wrapper)
        self.fgraph = None
        self.compare_fn = compare_fn
        self.copy_originals = copy_originals
        # A custom callable shadows the bound method of the same name, so
        # the appends below register the user-supplied hook instead.
        if check_types not in [None, True]:
            self.check_types = check_types
        if compare_variables not in [None, True]:
            self.compare_variables = compare_variables
        if not isinstance(debug_pre, (list, tuple)):
            debug_pre = [debug_pre]
        self.debug_pre = debug_pre
        if not isinstance(debug_post, (list, tuple)):
            debug_post = [debug_post]
        self.debug_post = debug_post
        if check_types is not None:
            self.debug_post.append(self.check_types)
        if compare_variables is not None:
            self.debug_post.append(self.compare_variables)

    def accept(self, fgraph, no_recycling=None):
        # Fresh list per call to avoid a shared mutable default.
        if no_recycling is None:
            no_recycling = []
        return gof.WrapLinker.accept(self,
                                     fgraph=fgraph,
                                     no_recycling=no_recycling)

    def store_value(self, i, node, *thunks):
        """Record step index and computed value on each output variable.

        Only the first linker's thunk is consulted; it serves as the
        reference implementation.
        """
        th1 = thunks[0]
        for r, oval in zip(node.outputs, th1.outputs):
            r.step = i
            r.value = oval[0]
            if self.copy_originals:
                r.original_value = copy(oval[0])

    def check_types(self, i, node, *thunks):
        """Post hook: verify every output passes its type's filter.

        Raises
        ------
        DebugException
            Wraps the original TypeError; carries 'original_exception',
            'debugger', 'output_at_fault', 'step', 'node', 'thunk' and
            'linker' fields.
        """
        for thunk, linker in zip(thunks, self.linkers):
            for r in node.outputs:
                try:
                    r.type.filter(r.value, strict=True)
                except TypeError as e:
                    exc_type, exc_value, exc_trace = sys.exc_info()
                    # BUG FIX: the original concatenated a literal "%s"
                    # with a %-format whose tuple had the wrong arity
                    # (3 args for 2 specifiers), raising a second
                    # TypeError while reporting the first. Format all
                    # three values in one expression.
                    exc = DebugException(
                        e,
                        ("The output %s was filled with data with the "
                         "wrong type using linker %s. This happened at "
                         "step %i of the program. " % (r, linker, i)) +
                        "For more info, inspect this exception's "
                        "'original_exception', 'debugger', 'output_at_fault', "
                        "'step', 'node', 'thunk' and 'linker' fields.")
                    exc.debugger = self
                    exc.original_exception = e
                    exc.output_at_fault = r
                    exc.step = i
                    exc.node = node
                    exc.thunk = thunk
                    exc.linker = linker
                    reraise(DebugException, exc, exc_trace)

    def compare_variables(self, i, node, *thunks):
        """Post hook: check all linkers produced equivalent outputs.

        The first linker is the reference; every other linker's outputs
        are compared against it with ``self.compare_fn``.

        Raises
        ------
        DebugException
            With 'debugger', 'output', 'output_value1', 'output_value2',
            'step', 'node', 'thunk1', 'thunk2', 'linker1' and 'linker2'
            fields describing the mismatch.
        """
        thunk0 = thunks[0]
        linker0 = self.linkers[0]
        for thunk, linker in zip(thunks[1:], self.linkers[1:]):
            for o, output0, output in zip(node.outputs,
                                          thunk0.outputs,
                                          thunk.outputs):
                if not self.compare_fn(output0[0], output[0]):
                    # BUG FIX: the original referenced an undefined name
                    # `step` here (NameError); the step index is `i`.
                    exc = DebugException(
                        ("The variables from %s and %s for output %s are "
                         "not the same. This happened at step %i. "
                         % (linker0, linker, o, i)) +
                        "For more info, inspect this exception's 'debugger', "
                        "'output', 'output_value1', 'output_value2', 'step', "
                        "'node', 'thunk1', 'thunk2', 'linker1' "
                        "and 'linker2' fields.")
                    exc.debugger = self
                    exc.output = o
                    exc.output_value1 = output0
                    exc.output_value2 = output
                    exc.step = i
                    exc.node = node
                    exc.thunk1 = thunk0
                    exc.thunk2 = thunk
                    exc.linker1 = linker0
                    exc.linker2 = linker
                    raise exc

    def pre(self, f, inputs, order, thunk_groups):
        """Initialize per-variable debug fields before the program runs.

        Tags every graph variable with `step`, `value` and
        `original_value`, then seeds constants and the function inputs
        with their initial values.
        """
        fgraph = f.fgraph
        for r in fgraph.variables:
            if r.owner is None:
                r.step = "value"  # this will be overwritten if r is an input
            else:
                r.step = None
            r.value = None
            r.original_value = None
            # Graph constants (ownerless, not inputs) carry their data.
            if r.owner is None and r not in fgraph.inputs:
                r.value = r.data
                if self.copy_originals:
                    r.original_value = copy(r.data)
        for idx, (i, r) in enumerate(zip(inputs, fgraph.inputs)):
            r.step = "input %i" % idx
            r.value = i
            if self.copy_originals:
                r.original_value = copy(i)
        for node, thunk_group in zip(order, thunk_groups):
            node.step = None

    def wrapper(self, i, node, *thunks):
        """Execute a node: pre hooks, each linker's thunk, post hooks.

        Any non-DebugException error is wrapped in a DebugException
        carrying 'original_exception', 'debugger', 'step', 'node' and
        'thunks' fields, re-raised with the original traceback.
        """
        try:
            node.step = i
            for f in self.debug_pre:
                f(i, node, *thunks)
            for thunk in thunks:
                thunk()
            self.store_value(i, node, *thunks)
            for f in self.debug_post:
                f(i, node, *thunks)
        except Exception as e:
            exc_type, exc_value, exc_trace = sys.exc_info()
            # DebugExceptions from hooks are already fully annotated.
            if isinstance(e, DebugException):
                raise
            exc = DebugException(
                e,
                ("An exception occurred while processing node %s at step %i "
                 "of the program. " % (node, i)) +
                "For more info, inspect this exception's 'original_exception',"
                " 'debugger', 'step', 'node' and 'thunks' fields.")
            exc.debugger = self
            exc.original_exception = e
            exc.step = i
            exc.node = node
            exc.thunks = thunks
            reraise(DebugException, exc, exc_trace)
def print_info(i, node, *thunks):
    """Debug hook: report the current step index and the node being run."""
    message = "step %i, node %s" % (i, node)
    print(message)
def print_from(i, node, *thunks):
    """Debug hook: print the `step` tags of the node's inputs."""
    parent_steps = [str(inp.step) for inp in node.inputs]
    print("parents:", ", ".join(parent_steps))
def print_input_shapes(i, node, *thunks):
    """Debug hook: print each input value's shape, or N/A when it has none."""
    def _describe(value):
        # Non-array values (e.g. scalars) have no shape attribute.
        return str(value.shape) if hasattr(value, 'shape') else 'N/A'
    print("input shapes:",
          ", ".join(_describe(inp.value) for inp in node.inputs))
def print_input_types(i, node, *thunks):
    """Debug hook: print the Python type of each input's value."""
    type_names = [str(type(inp.value)) for inp in node.inputs]
    print("input types:", ", ".join(type_names))
def print_sep(i, node, *thunks):
    """Debug hook: print a horizontal separator between per-node reports."""
    print("===================================")
import numpy
def numpy_compare(a, b, tolerance=1e-6):
    """Equality predicate for DebugLinker.

    ndarrays are compared elementwise within `tolerance`; anything else
    falls back to plain ``==``.
    """
    if not isinstance(a, numpy.ndarray):
        return a == b
    return (abs(a - b) <= tolerance).all()
def numpy_debug_linker(pre, post=None):
    """Build a DebugLinker over OpWiseCLinker using `numpy_compare`."""
    post_hooks = [] if post is None else post
    return DebugLinker([gof.OpWiseCLinker],
                       pre,
                       post_hooks,
                       compare_fn=numpy_compare)
...@@ -11,11 +11,11 @@ import warnings ...@@ -11,11 +11,11 @@ import warnings
import numpy import numpy
from six.moves import xrange from six.moves import xrange
from theano import Op, Apply, shared, config, Variable, Out from theano import Op, Apply, shared, config, Variable
from theano import gradient, function from theano import gradient, function
from theano import tensor from theano import tensor
from theano.tensor import (raw_random, TensorType, as_tensor_variable, from theano.tensor import (TensorType, as_tensor_variable, get_vector_length,
get_vector_length, cast, opt, scal) cast, opt, scal)
from theano.tensor import sqrt, log, sin, cos, join, prod from theano.tensor import sqrt, log, sin, cos, join, prod
from theano.compile import optdb from theano.compile import optdb
from theano.gof import local_optimizer from theano.gof import local_optimizer
...@@ -23,19 +23,20 @@ from . import multinomial ...@@ -23,19 +23,20 @@ from . import multinomial
import theano.sandbox.cuda import theano.sandbox.cuda
from theano.sandbox.cuda import GpuOp from theano.sandbox.cuda import GpuOp
if theano.sandbox.cuda.cuda_available:
from theano.sandbox.cuda import (CudaNdarrayType,
float32_shared_constructor)
from theano.sandbox.gpuarray.basic_ops import GpuKernelBase, Kernel from theano.sandbox.gpuarray.basic_ops import GpuKernelBase, Kernel
from theano.sandbox.gpuarray.type import GpuArrayType from theano.sandbox.gpuarray.type import GpuArrayType
from theano.sandbox.gpuarray.fp16_help import write_w from theano.sandbox.gpuarray.fp16_help import write_w
from theano.sandbox.gpuarray.opt import (register_opt as register_gpua,
host_from_gpu as host_from_gpua)
if theano.sandbox.cuda.cuda_available:
from theano.sandbox.cuda import (CudaNdarrayType,
float32_shared_constructor)
def matVecModM(A, s, m): def matVecModM(A, s, m):
# TODO : need description for method, parameter and return # TODO : need description for method, parameter and return
assert A.dtype == 'int64' assert A.dtype == 'int64'
return numpy.int32(numpy.sum((A*s) % m, 1) % m) return numpy.int32(numpy.sum((A * s) % m, 1) % m)
def multMatVect(v, A, m1, B, m2): def multMatVect(v, A, m1, B, m2):
...@@ -336,7 +337,7 @@ class mrg_uniform(mrg_uniform_base): ...@@ -336,7 +337,7 @@ class mrg_uniform(mrg_uniform_base):
v_size = as_tensor_variable(size) v_size = as_tensor_variable(size)
if ndim is None: if ndim is None:
ndim = get_vector_length(v_size) ndim = get_vector_length(v_size)
op = cls(TensorType(dtype, (False,)*ndim)) op = cls(TensorType(dtype, (False,) * ndim))
return op(rstate, cast(v_size, 'int32')) return op(rstate, cast(v_size, 'int32'))
def perform(self, node, inp, out): def perform(self, node, inp, out):
...@@ -547,7 +548,7 @@ class GPU_mrg_uniform(mrg_uniform_base, GpuOp): ...@@ -547,7 +548,7 @@ class GPU_mrg_uniform(mrg_uniform_base, GpuOp):
v_size = as_tensor_variable(size) v_size = as_tensor_variable(size)
if ndim is None: if ndim is None:
ndim = get_vector_length(v_size) ndim = get_vector_length(v_size)
op = cls(CudaNdarrayType((False,)*ndim)) op = cls(CudaNdarrayType((False,) * ndim))
return op(rstate, cast(v_size, 'int32')) return op(rstate, cast(v_size, 'int32'))
def c_support_code_apply(self, node, nodename): def c_support_code_apply(self, node, nodename):
...@@ -789,7 +790,7 @@ class GPUA_mrg_uniform(GpuKernelBase, mrg_uniform_base): ...@@ -789,7 +790,7 @@ class GPUA_mrg_uniform(GpuKernelBase, mrg_uniform_base):
v_size = as_tensor_variable(size) v_size = as_tensor_variable(size)
if ndim is None: if ndim is None:
ndim = get_vector_length(v_size) ndim = get_vector_length(v_size)
op = cls(GpuArrayType(dtype, (False,)*ndim)) op = cls(GpuArrayType(dtype, (False,) * ndim))
return op(rstate, cast(v_size, 'int32')) return op(rstate, cast(v_size, 'int32'))
def c_headers(self): def c_headers(self):
...@@ -1073,7 +1074,7 @@ def guess_n_streams(size, warn=False): ...@@ -1073,7 +1074,7 @@ def guess_n_streams(size, warn=False):
class MRG_RandomStreams(object): class MRG_RandomStreams(object):
# TODO : need description for parameter 'use_cuda' # TODO : need description for parameter 'use_cuda'
""" """
Module component with similar interface to numpy.random Module component with similar interface to numpy.random
(numpy.random.RandomState). (numpy.random.RandomState).
Parameters Parameters
...@@ -1105,7 +1106,7 @@ class MRG_RandomStreams(object): ...@@ -1105,7 +1106,7 @@ class MRG_RandomStreams(object):
self.set_rstate(seed) self.set_rstate(seed)
if use_cuda is None: if use_cuda is None:
self.use_cuda = theano.sandbox.cuda.cuda_enabled self.use_cuda = theano.sandbox.cuda.cuda_enabled
else: else:
self.use_cuda = use_cuda self.use_cuda = use_cuda
...@@ -1247,7 +1248,7 @@ class MRG_RandomStreams(object): ...@@ -1247,7 +1248,7 @@ class MRG_RandomStreams(object):
Parameters Parameters
---------- ----------
low low
Lower bound of the interval on which values are sampled. Lower bound of the interval on which values are sampled.
If the ``dtype`` arg is provided, ``low`` will be cast into If the ``dtype`` arg is provided, ``low`` will be cast into
dtype. This bound is excluded. dtype. This bound is excluded.
high high
...@@ -1393,11 +1394,11 @@ class MRG_RandomStreams(object): ...@@ -1393,11 +1394,11 @@ class MRG_RandomStreams(object):
elements. elements.
`n` needs to be in [1, m], where m is the number of elements to select `n` needs to be in [1, m], where m is the number of elements to select
from, i.e. m == pvals.shape[1]. By default n = 1. from, i.e. m == pvals.shape[1]. By default n = 1.
Example : pvals = [[.98, .01, .01], [.01, .49, .50]] and n=1 will Example : pvals = [[.98, .01, .01], [.01, .49, .50]] and n=1 will
probably result in [[0],[2]]. When setting n=2, this probably result in [[0],[2]]. When setting n=2, this
will probably result in [[0,1],[2,1]]. will probably result in [[0,1],[2,1]].
Notes Notes
----- -----
-`size` and `ndim` are only there keep the same signature as other -`size` and `ndim` are only there keep the same signature as other
...@@ -1520,9 +1521,6 @@ class MRG_RandomStreams(object): ...@@ -1520,9 +1521,6 @@ class MRG_RandomStreams(object):
assert final_samples.dtype == dtype assert final_samples.dtype == dtype
return final_samples return final_samples
from theano.sandbox.gpuarray.opt import (register_opt as register_gpua,
host_from_gpu as host_from_gpua)
@register_gpua('fast_compile') @register_gpua('fast_compile')
@local_optimizer([mrg_uniform]) @local_optimizer([mrg_uniform])
......
...@@ -92,13 +92,6 @@ whitelist_flake8 = [ ...@@ -92,13 +92,6 @@ whitelist_flake8 = [
"tensor/nnet/tests/test_sigm.py", "tensor/nnet/tests/test_sigm.py",
"scalar/__init__.py", "scalar/__init__.py",
"scalar/tests/test_basic.py", "scalar/tests/test_basic.py",
"sandbox/__init__.py",
"sandbox/rng_mrg.py",
"sandbox/theano_object.py",
"sandbox/scan.py",
"sandbox/symbolic_module.py",
"sandbox/conv.py",
"sandbox/debug.py",
"sandbox/tests/test_theano_object.py", "sandbox/tests/test_theano_object.py",
"sandbox/tests/test_scan.py", "sandbox/tests/test_scan.py",
"sandbox/tests/test_neighbourhoods.py", "sandbox/tests/test_neighbourhoods.py",
......
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment