提交 842da1f2 authored 作者: Taesup (TS) Kim's avatar Taesup (TS) Kim

fix PEP8 in sandbox/*.py

上级 c51b2833
from __future__ import print_function from __future__ import print_function
import sys import sys
print("DEPRECATION: theano.sandbox.conv no longer provides conv. They have been moved to theano.tensor.nnet.conv", file=sys.stderr) print("DEPRECATION: theano.sandbox.conv no longer provides conv. "
"They have been moved to theano.tensor.nnet.conv", file=sys.stderr)
from theano.tensor.nnet.conv import * from theano.tensor.nnet.conv import *
...@@ -71,10 +71,15 @@ class DebugLinker(gof.WrapLinker): ...@@ -71,10 +71,15 @@ class DebugLinker(gof.WrapLinker):
r.type.filter(r.value, strict=True) r.type.filter(r.value, strict=True)
except TypeError as e: except TypeError as e:
exc_type, exc_value, exc_trace = sys.exc_info() exc_type, exc_value, exc_trace = sys.exc_info()
exc = DebugException(e, "The output %s was filled with data with the wrong type using linker " \ exc = DebugException(
("%s. This happened at step %i of the program." % (r, linker, i)) + \ e,
"For more info, inspect this exception's 'original_exception', 'debugger', " \ "The output %s was filled with data with the wrong "
"'output_at_fault', 'step', 'node', 'thunk' and 'linker' fields.") "type using linker " +
("%s. This happened at step %i of the program."
% (r, linker, i)) +
"For more info, inspect this exception's "
"'original_exception', 'debugger', 'output_at_fault', "
"'step', 'node', 'thunk' and 'linker' fields.")
exc.debugger = self exc.debugger = self
exc.original_exception = e exc.original_exception = e
exc.output_at_fault = r exc.output_at_fault = r
...@@ -88,11 +93,18 @@ class DebugLinker(gof.WrapLinker): ...@@ -88,11 +93,18 @@ class DebugLinker(gof.WrapLinker):
thunk0 = thunks[0] thunk0 = thunks[0]
linker0 = self.linkers[0] linker0 = self.linkers[0]
for thunk, linker in zip(thunks[1:], self.linkers[1:]): for thunk, linker in zip(thunks[1:], self.linkers[1:]):
for o, output0, output in zip(node.outputs, thunk0.outputs, thunk.outputs): for o, output0, output in zip(node.outputs,
thunk0.outputs,
thunk.outputs):
if not self.compare_fn(output0[0], output[0]): if not self.compare_fn(output0[0], output[0]):
exc = DebugException(("The variables from %s and %s for output %s are not the same. This happened at step %i." % (linker0, linker, o, step)) + \ exc = DebugException(
"For more info, inspect this exception's 'debugger', 'output', 'output_value1', 'output_value2', " \ ("The variables from %s and %s for output %s are not "
"'step', 'node', 'thunk1', 'thunk2', 'linker1' and 'linker2' fields.") "the same. This happened at step %i."
% (linker0, linker, o, step)) +
"For more info, inspect this exception's 'debugger', "
"'output', 'output_value1', 'output_value2', 'step', "
"'node', 'thunk1', 'thunk2', 'linker1' "
"and 'linker2' fields.")
exc.debugger = self exc.debugger = self
exc.output = o exc.output = o
exc.output_value1 = output0 exc.output_value1 = output0
...@@ -140,8 +152,12 @@ class DebugLinker(gof.WrapLinker): ...@@ -140,8 +152,12 @@ class DebugLinker(gof.WrapLinker):
exc_type, exc_value, exc_trace = sys.exc_info() exc_type, exc_value, exc_trace = sys.exc_info()
if isinstance(e, DebugException): if isinstance(e, DebugException):
raise raise
exc = DebugException(e, ("An exception occurred while processing node %s at step %i of the program." % (node, i)) + \ exc = DebugException(
"For more info, inspect this exception's 'original_exception', 'debugger', 'step', 'node' and 'thunks' fields.") e,
("An exception occurred while processing node %s at step %i "
"of the program." % (node, i)) +
"For more info, inspect this exception's 'original_exception', "
"'debugger', 'step', 'node' and 'thunks' fields.")
exc.debugger = self exc.debugger = self
exc.original_exception = e exc.original_exception = e
exc.step = i exc.step = i
...@@ -169,7 +185,8 @@ def print_input_shapes(i, node, *thunks): ...@@ -169,7 +185,8 @@ def print_input_shapes(i, node, *thunks):
def print_input_types(i, node, *thunks): def print_input_types(i, node, *thunks):
print("input types:", ", ".join(str(type(input.value)) for input in node.inputs)) print("input types:", ", ".join(str(type(input.value))
for input in node.inputs))
def print_sep(i, node, *thunks): def print_sep(i, node, *thunks):
...@@ -192,5 +209,3 @@ def numpy_debug_linker(pre, post=None): ...@@ -192,5 +209,3 @@ def numpy_debug_linker(pre, post=None):
pre, pre,
post, post,
compare_fn=numpy_compare) compare_fn=numpy_compare)
...@@ -12,7 +12,7 @@ from theano.gof import Op, Apply, generic ...@@ -12,7 +12,7 @@ from theano.gof import Op, Apply, generic
class GradTodo(Op): class GradTodo(Op):
# TODO : need description for class
__props__ = () __props__ = ()
def make_node(self, x): def make_node(self, x):
...@@ -24,6 +24,7 @@ grad_todo = GradTodo() ...@@ -24,6 +24,7 @@ grad_todo = GradTodo()
class FFT(Op): class FFT(Op):
# TODO : need description for parameters
""" """
Fast Fourier Transform. Fast Fourier Transform.
...@@ -44,7 +45,8 @@ class FFT(Op): ...@@ -44,7 +45,8 @@ class FFT(Op):
# don't return the plan object in the 'buf' output # don't return the plan object in the 'buf' output
half = False half = False
"""Only return the first half (positive-valued) of the frequency components.""" """Only return the first half (positive-valued) of the frequency
components."""
__props__ = ("half", "inverse") __props__ = ("half", "inverse")
def __init__(self, half=False, inverse=False): def __init__(self, half=False, inverse=False):
...@@ -82,11 +84,13 @@ class FFT(Op): ...@@ -82,11 +84,13 @@ class FFT(Op):
M, N = fft.shape M, N = fft.shape
if axis == 0: if axis == 0:
if (M % 2): if (M % 2):
raise ValueError('halfFFT on odd-length vectors is undefined') raise ValueError(
'halfFFT on odd-length vectors is undefined')
spectrogram[0] = fft[0:M / 2, :] spectrogram[0] = fft[0:M / 2, :]
elif axis == 1: elif axis == 1:
if (N % 2): if (N % 2):
raise ValueError('halfFFT on odd-length vectors is undefined') raise ValueError(
'halfFFT on odd-length vectors is undefined')
spectrogram[0] = fft[:, 0:N / 2] spectrogram[0] = fft[:, 0:N / 2]
else: else:
raise NotImplementedError() raise NotImplementedError()
...@@ -105,6 +109,7 @@ half_ifft = FFT(half=True, inverse=True) ...@@ -105,6 +109,7 @@ half_ifft = FFT(half=True, inverse=True)
def dct_matrix(rows, cols, unitary=True): def dct_matrix(rows, cols, unitary=True):
# TODO : need description for parameters
""" """
Return a (rows x cols) matrix implementing a discrete cosine transform. Return a (rows x cols) matrix implementing a discrete cosine transform.
...@@ -115,7 +120,8 @@ def dct_matrix(rows, cols, unitary=True): ...@@ -115,7 +120,8 @@ def dct_matrix(rows, cols, unitary=True):
col_range = numpy.arange(cols) col_range = numpy.arange(cols)
scale = numpy.sqrt(2.0 / cols) scale = numpy.sqrt(2.0 / cols)
for i in xrange(rows): for i in xrange(rows):
rval[i] = numpy.cos(i * (col_range * 2 + 1) / (2.0 * cols) * numpy.pi) * scale rval[i] = numpy.cos(
i * (col_range * 2 + 1) / (2.0 * cols) * numpy.pi) * scale
if unitary: if unitary:
rval[0] *= numpy.sqrt(0.5) rval[0] *= numpy.sqrt(0.5)
......
...@@ -9,17 +9,19 @@ from theano.tests import unittest_tools as utt ...@@ -9,17 +9,19 @@ from theano.tests import unittest_tools as utt
class Minimal(gof.Op): class Minimal(gof.Op):
# TODO : need description for class
# if the Op has any attributes, # if the Op has any attributes, consider using them in the eq function.
# consider using them in the eq function. If two Apply nodes have the same inputs and the # If two Apply nodes have the same inputs and the ops compare equal...
# ops compare equal... then they will be MERGED so they had better have computed the same # then they will be MERGED so they had better have computed the same thing!
# thing!
def __init__(self): def __init__(self):
# If you put things here, think about whether they change the outputs computed by # If you put things here, think about whether they change the outputs
# self.perform() # computed by self.perform()
# - If they do, then you should take them into consideration in __eq__ and __hash__ # - If they do, then you should take them into consideration in
# - If they do not, then you should not use them in __eq__ and __hash__ # __eq__ and __hash__
# - If they do not, then you should not use them in
# __eq__ and __hash__
super(Minimal, self).__init__() super(Minimal, self).__init__()
......
...@@ -16,6 +16,7 @@ if cuda_available: ...@@ -16,6 +16,7 @@ if cuda_available:
class MultinomialFromUniform(Op): class MultinomialFromUniform(Op):
# TODO : need description for parameter 'odtype'
""" """
Converts samples from a uniform into sample from a multinomial. Converts samples from a uniform into sample from a multinomial.
...@@ -197,7 +198,8 @@ class MultinomialFromUniform(Op): ...@@ -197,7 +198,8 @@ class MultinomialFromUniform(Op):
class MultinomialWOReplacementFromUniform(MultinomialFromUniform): class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
""" """
Converts samples from a uniform into sample (without replacement) from a multinomial. Converts samples from a uniform into sample (without replacement) from a
multinomial.
""" """
...@@ -222,8 +224,8 @@ class MultinomialWOReplacementFromUniform(MultinomialFromUniform): ...@@ -222,8 +224,8 @@ class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
(z,) = outs (z,) = outs
if n_samples > pvals.shape[1]: if n_samples > pvals.shape[1]:
raise ValueError("Cannot sample without replacement n samples bigger " raise ValueError("Cannot sample without replacement n samples "
"than the size of the distribution.") "bigger than the size of the distribution.")
if unis.shape[0] != pvals.shape[0] * n_samples: if unis.shape[0] != pvals.shape[0] * n_samples:
raise ValueError("unis.shape[0] != pvals.shape[0] * n_samples", raise ValueError("unis.shape[0] != pvals.shape[0] * n_samples",
...@@ -233,7 +235,8 @@ class MultinomialWOReplacementFromUniform(MultinomialFromUniform): ...@@ -233,7 +235,8 @@ class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
odtype = 'int64' odtype = 'int64'
else: else:
odtype = self.odtype odtype = self.odtype
if z[0] is None or not numpy.all(z[0].shape == [pvals.shape[0], n_samples]): if (z[0] is None or
not numpy.all(z[0].shape == [pvals.shape[0], n_samples])):
z[0] = -1 * numpy.ones((pvals.shape[0], n_samples), dtype=odtype) z[0] = -1 * numpy.ones((pvals.shape[0], n_samples), dtype=odtype)
nb_multi = pvals.shape[0] nb_multi = pvals.shape[0]
...@@ -249,7 +252,8 @@ class MultinomialWOReplacementFromUniform(MultinomialFromUniform): ...@@ -249,7 +252,8 @@ class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
cummul += pvals[n, m] cummul += pvals[n, m]
if (cummul > unis_n): if (cummul > unis_n):
z[0][n, c] = m z[0][n, c] = m
# set to zero and re-normalize so that it's not selected again # set to zero and re-normalize so that it's not
# selected again
pvals[n, m] = 0. pvals[n, m] = 0.
pvals[n] /= pvals[n].sum() pvals[n] /= pvals[n].sum()
break break
...@@ -443,6 +447,7 @@ class GpuMultinomialFromUniform(MultinomialFromUniform, GpuOp): ...@@ -443,6 +447,7 @@ class GpuMultinomialFromUniform(MultinomialFromUniform, GpuOp):
@local_optimizer([MultinomialFromUniform]) @local_optimizer([MultinomialFromUniform])
def local_gpu_multinomial(node): def local_gpu_multinomial(node):
# TODO : need description for function
if type(node.op) is MultinomialFromUniform: if type(node.op) is MultinomialFromUniform:
if len(node.inputs) == 2: if len(node.inputs) == 2:
p, u = node.inputs p, u = node.inputs
......
...@@ -116,7 +116,8 @@ class NeighbourhoodsFromImages(Op): ...@@ -116,7 +116,8 @@ class NeighbourhoodsFromImages(Op):
return dims, num_strides return dims, num_strides
# for inverse mode # for inverse mode
# "output" here actually refers to the Op's input shape (but it's inverse mode) # "output" here actually refers to the Op's input shape (but it's inverse
# mode)
def in_shape(self, output_shape): def in_shape(self, output_shape):
out_dims = list(output_shape[:self.n_dims_before]) out_dims = list(output_shape[:self.n_dims_before])
num_strides = [] num_strides = []
...@@ -168,7 +169,8 @@ class NeighbourhoodsFromImages(Op): ...@@ -168,7 +169,8 @@ class NeighbourhoodsFromImages(Op):
for dim in self.dims_neighbourhoods: for dim in self.dims_neighbourhoods:
prod *= dim prod *= dim
if x.shape[-1] != prod: if x.shape[-1] != prod:
raise ValueError("Last dimension of neighbourhoods (%s) is not" raise ValueError(
"Last dimension of neighbourhoods (%s) is not"
" the product of the neighbourhoods dimensions" " the product of the neighbourhoods dimensions"
" (%s)" % (str(x.shape[-1]), str(prod))) " (%s)" % (str(x.shape[-1]), str(prod)))
else: else:
...@@ -195,6 +197,7 @@ class NeighbourhoodsFromImages(Op): ...@@ -195,6 +197,7 @@ class NeighbourhoodsFromImages(Op):
exec(self.code) exec(self.code)
def make_py_code(self): def make_py_code(self):
# TODO : need description for method and return
code = self._py_outerloops() code = self._py_outerloops()
for i in xrange(len(self.strides)): for i in xrange(len(self.strides)):
code += self._py_innerloop(i) code += self._py_innerloop(i)
...@@ -202,6 +205,7 @@ class NeighbourhoodsFromImages(Op): ...@@ -202,6 +205,7 @@ class NeighbourhoodsFromImages(Op):
return code, builtins.compile(code, '<string>', 'exec') return code, builtins.compile(code, '<string>', 'exec')
def _py_outerloops(self): def _py_outerloops(self):
# TODO : need description for method, parameter and return
code_before = "" code_before = ""
for dim_idx in xrange(self.n_dims_before): for dim_idx in xrange(self.n_dims_before):
code_before += ('\t' * (dim_idx)) + \ code_before += ('\t' * (dim_idx)) + \
...@@ -210,6 +214,7 @@ class NeighbourhoodsFromImages(Op): ...@@ -210,6 +214,7 @@ class NeighbourhoodsFromImages(Op):
return code_before return code_before
def _py_innerloop(self, inner_dim_no): def _py_innerloop(self, inner_dim_no):
# TODO : need description for method, parameter and return
base_indent = ('\t' * (self.n_dims_before + inner_dim_no * 2)) base_indent = ('\t' * (self.n_dims_before + inner_dim_no * 2))
code_before = base_indent + \ code_before = base_indent + \
"for stride_idx_%d in xrange(num_strides[%d]):\n" % \ "for stride_idx_%d in xrange(num_strides[%d]):\n" % \
...@@ -229,10 +234,12 @@ class NeighbourhoodsFromImages(Op): ...@@ -229,10 +234,12 @@ class NeighbourhoodsFromImages(Op):
return code_before return code_before
def _py_flattened_idx(self): def _py_flattened_idx(self):
# TODO : need description for method and return
return "+".join(["neigh_strides[%d]*neigh_idx_%d" % (i, i) return "+".join(["neigh_strides[%d]*neigh_idx_%d" % (i, i)
for i in xrange(len(self.strides))]) for i in xrange(len(self.strides))])
def _py_assignment(self): def _py_assignment(self):
# TODO : need description for method and return
input_idx = "".join(["outer_idx_%d," % (i,) input_idx = "".join(["outer_idx_%d," % (i,)
for i in xrange(self.n_dims_before)]) for i in xrange(self.n_dims_before)])
input_idx += "".join(["dim_%d_offset+neigh_idx_%d," % input_idx += "".join(["dim_%d_offset+neigh_idx_%d," %
...@@ -259,6 +266,7 @@ class NeighbourhoodsFromImages(Op): ...@@ -259,6 +266,7 @@ class NeighbourhoodsFromImages(Op):
class ImagesFromNeighbourhoods(NeighbourhoodsFromImages): class ImagesFromNeighbourhoods(NeighbourhoodsFromImages):
# TODO : need description for class, parameters
def __init__(self, n_dims_before, dims_neighbourhoods, def __init__(self, n_dims_before, dims_neighbourhoods,
strides=None, ignore_border=False): strides=None, ignore_border=False):
NeighbourhoodsFromImages.__init__(self, n_dims_before, NeighbourhoodsFromImages.__init__(self, n_dims_before,
......
差异被折叠。
...@@ -4,6 +4,7 @@ import theano.tensor ...@@ -4,6 +4,7 @@ import theano.tensor
class ScalarSoftsign(theano.scalar.UnaryScalarOp): class ScalarSoftsign(theano.scalar.UnaryScalarOp):
# TODO : need description for class
@staticmethod @staticmethod
def static_impl(x): def static_impl(x):
return x / (1.0 + abs(x)) return x / (1.0 + abs(x))
......
...@@ -24,7 +24,8 @@ class Solve(gof.Op): ...@@ -24,7 +24,8 @@ class Solve(gof.Op):
# sym_pos, lower, overwrite_a, overwrite_b # sym_pos, lower, overwrite_a, overwrite_b
# TODO: Add C code that calls the underlying LAPACK routines # TODO: Add C code that calls the underlying LAPACK routines
# and keeps a memory workspace from call to call as a non-default Op output # and keeps a memory workspace from call to call as a non-default Op
# output
def __eq__(self, other): def __eq__(self, other):
return type(self) == type(other) return type(self) == type(other)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论