Commit b945c53b authored by Francesco

Merge pull request #4133 from taesupkim/issue_4056

flake8 sandbox/*.py
from __future__ import print_function
import sys
print("DEPRECATION: theano.sandbox.conv no longer provides conv. They have been moved to theano.tensor.nnet.conv", file=sys.stderr)
from theano.tensor.nnet.conv import *
print("DEPRECATION: theano.sandbox.conv no longer provides conv. "
"They have been moved to theano.tensor.nnet.conv", file=sys.stderr)
from __future__ import print_function

import sys
from copy import copy

from six import reraise

from theano import gof
class DebugException(Exception):
    """Exception wrapper raised by the debugging machinery in this module.

    Instances typically carry extra diagnostic attributes (``debugger``,
    ``step``, ``node``, ``original_exception``, ...) attached by the code
    that raises them.
    """
class DebugLinker(gof.WrapLinker):
    """WrapLinker that runs debugging callbacks around every program step.

    Several linkers run side by side (see ``gof.WrapLinker``).  At each
    step, the ``debug_pre`` callbacks run, then all thunks execute, the
    outputs of the first linker are recorded on the output variables
    (``.step``, ``.value``, optionally ``.original_value``), and finally
    the ``debug_post`` callbacks run — by default including a type check
    and a cross-linker comparison of the outputs.
    """

    def __init__(self,
                 linkers,
                 debug_pre=None,
                 debug_post=None,
                 copy_originals=False,
                 check_types=True,
                 compare_variables=True,
                 compare_fn=(lambda x, y: x == y)):
        """
        Parameters
        ----------
        linkers : list
            Linkers to wrap; each node is executed once per linker.
        debug_pre : callable or list of callables, optional
            Called as ``f(i, node, *thunks)`` before the thunks of step i.
        debug_post : callable or list of callables, optional
            Called as ``f(i, node, *thunks)`` after the thunks of step i.
        copy_originals : bool
            If True, also store a ``copy`` of each value in
            ``.original_value`` so later in-place changes can be inspected.
        check_types : bool, None or callable
            True registers the built-in ``check_types`` post-check, a
            callable replaces it, None/False disables it.
        compare_variables : bool, None or callable
            Same convention for the built-in ``compare_variables`` check.
        compare_fn : callable
            Binary predicate used by ``compare_variables``.
        """
        if debug_pre is None:
            debug_pre = []
        if debug_post is None:
            debug_post = []
        gof.WrapLinker.__init__(self,
                                linkers=linkers,
                                wrapper=self.wrapper)
        self.fgraph = None
        self.compare_fn = compare_fn
        self.copy_originals = copy_originals
        # A custom callable shadows the corresponding built-in method;
        # True keeps the method; None/False disables the check entirely.
        if check_types not in (None, True, False):
            self.check_types = check_types
        if compare_variables not in (None, True, False):
            self.compare_variables = compare_variables
        if not isinstance(debug_pre, (list, tuple)):
            debug_pre = [debug_pre]
        self.debug_pre = debug_pre
        if not isinstance(debug_post, (list, tuple)):
            debug_post = [debug_post]
        self.debug_post = debug_post
        # BUG FIX: the original tested ``is not None``, so passing False
        # registered the non-callable False itself and crashed inside
        # wrapper(); truthiness gives the intended semantics.
        if check_types:
            self.debug_post.append(self.check_types)
        if compare_variables:
            self.debug_post.append(self.compare_variables)

    def accept(self, fgraph, no_recycling=None):
        """Forward to ``gof.WrapLinker.accept`` with a fresh default list."""
        if no_recycling is None:
            no_recycling = []
        return gof.WrapLinker.accept(self,
                                     fgraph=fgraph,
                                     no_recycling=no_recycling)

    def store_value(self, i, node, *thunks):
        """Record the first linker's outputs on the node's output variables.

        Sets ``r.step`` and ``r.value`` for each output ``r``; also keeps a
        copy in ``r.original_value`` when ``copy_originals`` is enabled.
        """
        th1 = thunks[0]
        for r, oval in zip(node.outputs, th1.outputs):
            r.step = i
            r.value = oval[0]
            if self.copy_originals:
                r.original_value = copy(oval[0])

    def check_types(self, i, node, *thunks):
        """Post-check: validate each stored output value against its type.

        Raises a DebugException (chained to the original TypeError via
        ``six.reraise``) when ``r.type.filter(r.value, strict=True)``
        rejects a value.
        """
        for thunk, linker in zip(thunks, self.linkers):
            for r in node.outputs:
                try:
                    r.type.filter(r.value, strict=True)
                except TypeError as e:
                    exc_type, exc_value, exc_trace = sys.exc_info()
                    # BUG FIX: the original juxtaposed a string literal with
                    # a parenthesized expression ("..." ( ... )), which
                    # Python parses as *calling* the string object — an
                    # unconditional TypeError.  Rebuilt as one %-formatted
                    # message.
                    exc = DebugException(
                        e,
                        ("The output %s was filled with data with the wrong"
                         " type using linker %s. This happened at step %i"
                         " of the program." % (r, linker, i)) +
                        " For more info, inspect this exception's"
                        " 'original_exception', 'debugger',"
                        " 'output_at_fault', 'step', 'node', 'thunk' and"
                        " 'linker' fields.")
                    exc.debugger = self
                    exc.original_exception = e
                    exc.output_at_fault = r
                    exc.step = i
                    exc.node = node
                    exc.thunk = thunk
                    exc.linker = linker
                    reraise(DebugException, exc, exc_trace)

    def compare_variables(self, i, node, *thunks):
        """Post-check: compare every linker's outputs against the first's.

        Uses ``self.compare_fn``; raises a DebugException describing the
        first mismatch found.
        """
        thunk0 = thunks[0]
        linker0 = self.linkers[0]
        for thunk, linker in zip(thunks[1:], self.linkers[1:]):
            for o, output0, output in zip(node.outputs, thunk0.outputs,
                                          thunk.outputs):
                if not self.compare_fn(output0[0], output[0]):
                    # BUG FIX: the original formatted with the undefined
                    # name `step`; the step index here is `i`.
                    exc = DebugException(
                        ("The variables from %s and %s for output %s are"
                         " not the same. This happened at step %i." %
                         (linker0, linker, o, i)) +
                        " For more info, inspect this exception's"
                        " 'debugger', 'output', 'output_value1',"
                        " 'output_value2', 'step', 'node', 'thunk1',"
                        " 'thunk2', 'linker1' and 'linker2' fields.")
                    exc.debugger = self
                    exc.output = o
                    exc.output_value1 = output0
                    exc.output_value2 = output
                    exc.step = i
                    exc.node = node
                    exc.thunk1 = thunk0
                    exc.thunk2 = thunk
                    exc.linker1 = linker0
                    exc.linker2 = linker
                    raise exc

    def pre(self, f, inputs, order, thunk_groups):
        """Initialize the debug bookkeeping before the program runs.

        Tags every fgraph variable with ``step``/``value``/``original_value``
        and records the provided input values on the fgraph inputs.
        """
        fgraph = f.fgraph
        for r in fgraph.variables:
            if r.owner is None:
                r.step = "value"  # overwritten below if r is an input
            else:
                r.step = None
            r.value = None
            r.original_value = None
            if r.owner is None and r not in fgraph.inputs:
                # Constant-like variable: its data is available up front.
                r.value = r.data
                if self.copy_originals:
                    r.original_value = copy(r.data)
        for idx, (i, r) in enumerate(zip(inputs, fgraph.inputs)):
            r.step = "input %i" % idx
            r.value = i
            if self.copy_originals:
                r.original_value = copy(i)
        for node, thunk_group in zip(order, thunk_groups):
            node.step = None

    def wrapper(self, i, node, *thunks):
        """Per-step driver: run pre-hooks, the thunks, then post-hooks.

        Any exception (other than an already-wrapped DebugException) is
        re-raised as a DebugException carrying diagnostic attributes,
        preserving the original traceback via ``six.reraise``.
        """
        try:
            node.step = i
            for f in self.debug_pre:
                f(i, node, *thunks)
            for thunk in thunks:
                thunk()
            # store_value only reads thunks[0], so recording once after all
            # thunks have run is equivalent to recording inside the loop.
            self.store_value(i, node, *thunks)
            for f in self.debug_post:
                f(i, node, *thunks)
        except Exception as e:
            exc_type, exc_value, exc_trace = sys.exc_info()
            if isinstance(e, DebugException):
                raise
            exc = DebugException(
                e,
                ("An exception occurred while processing node %s at step %i"
                 " of the program." % (node, i)) +
                " For more info, inspect this exception's"
                " 'original_exception', 'debugger', 'step', 'node' and"
                " 'thunks' fields.")
            exc.debugger = self
            exc.original_exception = e
            exc.step = i
            exc.node = node
            exc.thunks = thunks
            reraise(DebugException, exc, exc_trace)
def print_info(i, node, *thunks):
    """Debug callback: print the current step index and the node."""
    message = "step %i, node %s" % (i, node)
    print(message)
def print_from(i, node, *thunks):
    """Debug callback: print the step at which each input of `node` was set.

    Each input variable is expected to carry a ``.step`` attribute
    (assigned by ``DebugLinker.pre``/``store_value``).
    """
    # Renamed the loop variable from `input` to `inp`: the original
    # shadowed the `input` builtin.
    print("parents:", ", ".join(str(inp.step) for inp in node.inputs))
def print_input_shapes(i, node, *thunks):
    """Debug callback: print the shape of each input's stored value.

    Inputs whose value has no ``shape`` attribute (e.g. plain scalars)
    are reported as 'N/A'.
    """
    shapes = []
    # Renamed the loop variable from `input` to `inp`: the original
    # shadowed the `input` builtin.
    for inp in node.inputs:
        value = inp.value
        if hasattr(value, 'shape'):
            shapes.append(str(value.shape))
        else:
            shapes.append('N/A')
    print("input shapes:", ", ".join(shapes))
def print_input_types(i, node, *thunks):
    """Debug callback: print the Python type of each input's stored value."""
    # Renamed the loop variable from `input` to `inp`: the original
    # shadowed the `input` builtin.
    print("input types:",
          ", ".join(str(type(inp.value)) for inp in node.inputs))
def print_sep(i, node, *thunks):
    """Debug callback: print a horizontal separator line between steps."""
    separator = "==================================="
    print(separator)
import numpy
def numpy_compare(a, b, tolerance=1e-6):
    """Compare two values for (approximate) equality.

    ndarrays are compared elementwise within `tolerance` (all elements
    must match); anything else falls back to plain ``==``.
    """
    if not isinstance(a, numpy.ndarray):
        return a == b
    return (abs(a - b) <= tolerance).all()
def numpy_debug_linker(pre, post=None):
    """Build a DebugLinker over ``gof.OpWiseCLinker`` that compares
    linker outputs with `numpy_compare`.

    `pre`/`post` are forwarded as the debug_pre/debug_post callbacks.
    """
    post = [] if post is None else post
    return DebugLinker([gof.OpWiseCLinker], pre, post,
                       compare_fn=numpy_compare)
......@@ -12,7 +12,7 @@ from theano.gof import Op, Apply, generic
class GradTodo(Op):
# TODO : need description for class
__props__ = ()
def make_node(self, x):
......@@ -24,6 +24,7 @@ grad_todo = GradTodo()
class FFT(Op):
# TODO : need description for parameters
"""
Fast Fourier Transform.
......@@ -44,7 +45,8 @@ class FFT(Op):
# don't return the plan object in the 'buf' output
half = False
"""Only return the first half (positive-valued) of the frequency components."""
"""Only return the first half (positive-valued) of the frequency
components."""
__props__ = ("half", "inverse")
def __init__(self, half=False, inverse=False):
......@@ -82,11 +84,13 @@ class FFT(Op):
M, N = fft.shape
if axis == 0:
if (M % 2):
raise ValueError('halfFFT on odd-length vectors is undefined')
raise ValueError(
'halfFFT on odd-length vectors is undefined')
spectrogram[0] = fft[0:M / 2, :]
elif axis == 1:
if (N % 2):
raise ValueError('halfFFT on odd-length vectors is undefined')
raise ValueError(
'halfFFT on odd-length vectors is undefined')
spectrogram[0] = fft[:, 0:N / 2]
else:
raise NotImplementedError()
......@@ -105,6 +109,7 @@ half_ifft = FFT(half=True, inverse=True)
def dct_matrix(rows, cols, unitary=True):
# TODO : need description for parameters
"""
Return a (rows x cols) matrix implementing a discrete cosine transform.
......@@ -115,7 +120,8 @@ def dct_matrix(rows, cols, unitary=True):
col_range = numpy.arange(cols)
scale = numpy.sqrt(2.0 / cols)
for i in xrange(rows):
rval[i] = numpy.cos(i * (col_range * 2 + 1) / (2.0 * cols) * numpy.pi) * scale
rval[i] = numpy.cos(
i * (col_range * 2 + 1) / (2.0 * cols) * numpy.pi) * scale
if unitary:
rval[0] *= numpy.sqrt(0.5)
......
......@@ -9,17 +9,19 @@ from theano.tests import unittest_tools as utt
class Minimal(gof.Op):
# TODO : need description for class
# if the Op has any attributes,
# consider using them in the eq function. If two Apply nodes have the same inputs and the
# ops compare equal... then they will be MERGED so they had better have computed the same
# thing!
# if the Op has any attributes, consider using them in the eq function.
# If two Apply nodes have the same inputs and the ops compare equal...
# then they will be MERGED so they had better have computed the same thing!
def __init__(self):
# If you put things here, think about whether they change the outputs computed by
# self.perform()
# - If they do, then you should take them into consideration in __eq__ and __hash__
# - If they do not, then you should not use them in __eq__ and __hash__
# If you put things here, think about whether they change the outputs
# computed by # self.perform()
# - If they do, then you should take them into consideration in
# __eq__ and __hash__
# - If they do not, then you should not use them in
# __eq__ and __hash__
super(Minimal, self).__init__()
......
......@@ -16,6 +16,7 @@ if cuda_available:
class MultinomialFromUniform(Op):
# TODO : need description for parameter 'odtype'
"""
Converts samples from a uniform into sample from a multinomial.
......@@ -197,7 +198,8 @@ class MultinomialFromUniform(Op):
class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
"""
Converts samples from a uniform into sample (without replacement) from a multinomial.
Converts samples from a uniform into sample (without replacement) from a
multinomial.
"""
......@@ -347,8 +349,8 @@ class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
(z,) = outs
if n_samples > pvals.shape[1]:
raise ValueError("Cannot sample without replacement n samples bigger "
"than the size of the distribution.")
raise ValueError("Cannot sample without replacement n samples "
"bigger than the size of the distribution.")
if unis.shape[0] != pvals.shape[0] * n_samples:
raise ValueError("unis.shape[0] != pvals.shape[0] * n_samples",
......@@ -358,7 +360,8 @@ class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
odtype = 'int64'
else:
odtype = self.odtype
if z[0] is None or not numpy.all(z[0].shape == [pvals.shape[0], n_samples]):
if (z[0] is None or
not numpy.all(z[0].shape == [pvals.shape[0], n_samples])):
z[0] = -1 * numpy.ones((pvals.shape[0], n_samples), dtype=odtype)
nb_multi = pvals.shape[0]
......@@ -374,7 +377,8 @@ class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
cummul += pvals[n, m]
if (cummul > unis_n):
z[0][n, c] = m
# set to zero and re-normalize so that it's not selected again
# set to zero and re-normalize so that it's not
# selected again
pvals[n, m] = 0.
pvals[n] /= pvals[n].sum()
break
......@@ -562,6 +566,7 @@ class GpuMultinomialFromUniform(MultinomialFromUniform, GpuOp):
@local_optimizer([MultinomialFromUniform])
def local_gpu_multinomial(node):
# TODO : need description for function
if type(node.op) is MultinomialFromUniform:
if len(node.inputs) == 2:
p, u = node.inputs
......
......@@ -116,7 +116,8 @@ class NeighbourhoodsFromImages(Op):
return dims, num_strides
# for inverse mode
# "output" here actually referes to the Op's input shape (but it's inverse mode)
# "output" here actually referes to the Op's input shape (but it's inverse
# mode)
def in_shape(self, output_shape):
out_dims = list(output_shape[:self.n_dims_before])
num_strides = []
......@@ -168,9 +169,10 @@ class NeighbourhoodsFromImages(Op):
for dim in self.dims_neighbourhoods:
prod *= dim
if x.shape[-1] != prod:
raise ValueError("Last dimension of neighbourhoods (%s) is not"
" the product of the neighbourhoods dimensions"
" (%s)" % (str(x.shape[-1]), str(prod)))
raise ValueError(
"Last dimension of neighbourhoods (%s) is not"
" the product of the neighbourhoods dimensions"
" (%s)" % (str(x.shape[-1]), str(prod)))
else:
if len(x.shape) != (self.n_dims_before +
len(self.dims_neighbourhoods)):
......@@ -195,6 +197,7 @@ class NeighbourhoodsFromImages(Op):
exec(self.code)
def make_py_code(self):
# TODO : need description for method and return
code = self._py_outerloops()
for i in xrange(len(self.strides)):
code += self._py_innerloop(i)
......@@ -202,6 +205,7 @@ class NeighbourhoodsFromImages(Op):
return code, builtins.compile(code, '<string>', 'exec')
def _py_outerloops(self):
# TODO : need description for method, parameter and return
code_before = ""
for dim_idx in xrange(self.n_dims_before):
code_before += ('\t' * (dim_idx)) + \
......@@ -210,6 +214,7 @@ class NeighbourhoodsFromImages(Op):
return code_before
def _py_innerloop(self, inner_dim_no):
# TODO : need description for method, parameter and return
base_indent = ('\t' * (self.n_dims_before + inner_dim_no * 2))
code_before = base_indent + \
"for stride_idx_%d in xrange(num_strides[%d]):\n" % \
......@@ -229,10 +234,12 @@ class NeighbourhoodsFromImages(Op):
return code_before
def _py_flattened_idx(self):
# TODO : need description for method and return
return "+".join(["neigh_strides[%d]*neigh_idx_%d" % (i, i)
for i in xrange(len(self.strides))])
def _py_assignment(self):
# TODO : need description for method and return
input_idx = "".join(["outer_idx_%d," % (i,)
for i in xrange(self.n_dims_before)])
input_idx += "".join(["dim_%d_offset+neigh_idx_%d," %
......@@ -259,6 +266,7 @@ class NeighbourhoodsFromImages(Op):
class ImagesFromNeighbourhoods(NeighbourhoodsFromImages):
# TODO : need description for class, parameters
def __init__(self, n_dims_before, dims_neighbourhoods,
strides=None, ignore_border=False):
NeighbourhoodsFromImages.__init__(self, n_dims_before,
......
Diff is collapsed.
......@@ -4,6 +4,7 @@ import theano.tensor
class ScalarSoftsign(theano.scalar.UnaryScalarOp):
# TODO : need description for class
@staticmethod
def static_impl(x):
return x / (1.0 + abs(x))
......
......@@ -24,7 +24,8 @@ class Solve(gof.Op):
# sym_pos, lower, overwrite_a, overwrite_b
# TODO: Add C code that calls the underlying LAPACK routines
# and keeps a memory workspace from call to call as a non-default Op output
# and keeps a memory workspace from call to call as a non-default Op
# output
def __eq__(self, other):
return type(self) == type(other)
......
......@@ -92,13 +92,6 @@ whitelist_flake8 = [
"tensor/nnet/tests/test_sigm.py",
"scalar/__init__.py",
"scalar/tests/test_basic.py",
"sandbox/__init__.py",
"sandbox/rng_mrg.py",
"sandbox/theano_object.py",
"sandbox/scan.py",
"sandbox/symbolic_module.py",
"sandbox/conv.py",
"sandbox/debug.py",
"sandbox/tests/test_theano_object.py",
"sandbox/tests/test_scan.py",
"sandbox/tests/test_neighbourhoods.py",
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register or sign in to comment