提交 b945c53b · 作者:Francesco

Merge pull request #4133 from taesupkim/issue_4056

flake8 sandbox/*.py
from __future__ import print_function
import sys
print("DEPRECATION: theano.sandbox.conv no longer provides conv. They have been moved to theano.tensor.nnet.conv", file=sys.stderr)
from theano.tensor.nnet.conv import *
print("DEPRECATION: theano.sandbox.conv no longer provides conv. "
"They have been moved to theano.tensor.nnet.conv", file=sys.stderr)
from __future__ import print_function

import sys
from copy import copy

from six import reraise

from theano import gof
class DebugException(Exception):
    """Raised by DebugLinker when a thunk fails, an output has the wrong
    type, or two linkers disagree; debugging context (node, step, linker,
    values, ...) is attached to the instance as attributes."""
class DebugLinker(gof.WrapLinker):
    """Linker that runs several linkers side by side with debug hooks.

    For every node, the thunks of all wrapped linkers are executed in
    sequence; user callbacks fire before and after, the first linker's
    outputs are recorded on the output variables, output types may be
    validated, and the outputs of the different linkers may be compared.
    Any failure is re-raised as a DebugException annotated with the full
    execution context.

    Parameters
    ----------
    linkers : list
        Linkers whose thunks are executed in parallel for each node.
    debug_pre : callable or list of callables, optional
        Hooks ``f(i, node, *thunks)`` invoked before node ``i``'s thunks.
    debug_post : callable or list of callables, optional
        Hooks ``f(i, node, *thunks)`` invoked after node ``i``'s thunks.
    copy_originals : bool
        If True, also store a copy of each computed value so later
        in-place modification cannot hide the value as first produced.
    check_types : True, None or callable
        True installs the built-in type check as a post hook, None
        disables it, any other callable replaces the built-in check.
    compare_variables : True, None or callable
        Same convention as ``check_types``, for the cross-linker output
        comparison.
    compare_fn : callable
        Predicate ``compare_fn(x, y) -> bool`` deciding whether two
        outputs agree (defaults to ``==``).
    """

    def __init__(self,
                 linkers,
                 debug_pre=None,
                 debug_post=None,
                 copy_originals=False,
                 check_types=True,
                 compare_variables=True,
                 compare_fn=(lambda x, y: x == y)):
        if debug_pre is None:
            debug_pre = []
        if debug_post is None:
            debug_post = []
        gof.WrapLinker.__init__(self,
                                linkers=linkers,
                                wrapper=self.wrapper)
        self.fgraph = None
        self.compare_fn = compare_fn
        self.copy_originals = copy_originals
        # A callable argument shadows the built-in bound method of the
        # same name; True keeps the built-in, None disables the hook below.
        if check_types not in [None, True]:
            self.check_types = check_types
        if compare_variables not in [None, True]:
            self.compare_variables = compare_variables
        if not isinstance(debug_pre, (list, tuple)):
            debug_pre = [debug_pre]
        self.debug_pre = debug_pre
        if not isinstance(debug_post, (list, tuple)):
            debug_post = [debug_post]
        self.debug_post = debug_post
        if check_types is not None:
            self.debug_post.append(self.check_types)
        if compare_variables is not None:
            self.debug_post.append(self.compare_variables)

    def accept(self, fgraph, no_recycling=None):
        """Attach this linker to ``fgraph`` (see gof.WrapLinker.accept)."""
        if no_recycling is None:
            no_recycling = []
        return gof.WrapLinker.accept(self,
                                     fgraph=fgraph,
                                     no_recycling=no_recycling)

    def store_value(self, i, node, *thunks):
        """Record on each output variable the step index and the value
        computed by the first linker's thunk."""
        th1 = thunks[0]
        for r, oval in zip(node.outputs, th1.outputs):
            r.step = i
            r.value = oval[0]
            if self.copy_originals:
                r.original_value = copy(oval[0])

    def check_types(self, i, node, *thunks):
        """Post hook: verify every output value against its declared type,
        re-raising any filter failure as an annotated DebugException."""
        for thunk, linker in zip(thunks, self.linkers):
            for r in node.outputs:
                try:
                    r.type.filter(r.value, strict=True)
                except TypeError as e:
                    exc_trace = sys.exc_info()[2]
                    # Fixed: the message was previously assembled by
                    # *calling* a string literal, which itself raised.
                    exc = DebugException(
                        e,
                        ("The output %s was filled with data with the"
                         " wrong type using linker %s. This happened at"
                         " step %i of the program." % (r, linker, i)) +
                        " For more info, inspect this exception's"
                        " 'original_exception', 'debugger',"
                        " 'output_at_fault', 'step', 'node', 'thunk'"
                        " and 'linker' fields.")
                    exc.debugger = self
                    exc.original_exception = e
                    exc.output_at_fault = r
                    exc.step = i
                    exc.node = node
                    exc.thunk = thunk
                    exc.linker = linker
                    reraise(DebugException, exc, exc_trace)

    def compare_variables(self, i, node, *thunks):
        """Post hook: compare every linker's outputs against the first
        linker's, raising DebugException on the first disagreement."""
        thunk0 = thunks[0]
        linker0 = self.linkers[0]
        for thunk, linker in zip(thunks[1:], self.linkers[1:]):
            for o, output0, output in zip(node.outputs,
                                          thunk0.outputs,
                                          thunk.outputs):
                if not self.compare_fn(output0[0], output[0]):
                    # Fixed: the message previously used the undefined
                    # name `step` instead of the step index `i`.
                    exc = DebugException(
                        ("The variables from %s and %s for output %s are"
                         " not the same. This happened at step %i."
                         % (linker0, linker, o, i)) +
                        " For more info, inspect this exception's"
                        " 'debugger', 'output', 'output_value1',"
                        " 'output_value2', 'step', 'node', 'thunk1',"
                        " 'thunk2', 'linker1' and 'linker2' fields.")
                    exc.debugger = self
                    exc.output = o
                    exc.output_value1 = output0
                    exc.output_value2 = output
                    exc.step = i
                    exc.node = node
                    exc.thunk1 = thunk0
                    exc.thunk2 = thunk
                    exc.linker1 = linker0
                    exc.linker2 = linker
                    raise exc

    def pre(self, f, inputs, order, thunk_groups):
        """Reset the debug annotations (step/value/original_value) on every
        variable of the graph and record the function inputs before the
        program starts executing."""
        fgraph = f.fgraph
        for r in fgraph.variables:
            if r.owner is None:
                r.step = "value"  # this will be overwritten if r is an input
            else:
                r.step = None
            r.value = None
            r.original_value = None
            if r.owner is None and r not in fgraph.inputs:
                # Constant/shared variable: its data is available up front.
                r.value = r.data
                if self.copy_originals:
                    r.original_value = copy(r.data)
        for idx, (i, r) in enumerate(zip(inputs, fgraph.inputs)):
            r.step = "input %i" % idx
            r.value = i
            if self.copy_originals:
                r.original_value = copy(i)
        for node, thunk_group in zip(order, thunk_groups):
            node.step = None

    def wrapper(self, i, node, *thunks):
        """Execute all thunks of node ``i`` with pre/post hooks, wrapping
        any non-DebugException failure in an annotated DebugException."""
        try:
            node.step = i
            for f in self.debug_pre:
                f(i, node, *thunks)
            for thunk in thunks:
                thunk()
            self.store_value(i, node, *thunks)
            for f in self.debug_post:
                f(i, node, *thunks)
        except Exception as e:
            exc_trace = sys.exc_info()[2]
            if isinstance(e, DebugException):
                raise
            exc = DebugException(
                e,
                ("An exception occurred while processing node %s at step"
                 " %i of the program." % (node, i)) +
                " For more info, inspect this exception's"
                " 'original_exception', 'debugger', 'step', 'node' and"
                " 'thunks' fields.")
            exc.debugger = self
            exc.original_exception = e
            exc.step = i
            exc.node = node
            exc.thunks = thunks
            reraise(DebugException, exc, exc_trace)
def print_info(i, node, *thunks):
    """Debug hook: report which node is being executed at which step."""
    message = "step %i, node %s" % (i, node)
    print(message)
def print_from(i, node, *thunks):
    """Debug hook: print the .step markers of the node's parent variables."""
    steps = [str(parent.step) for parent in node.inputs]
    print("parents:", ", ".join(steps))
def print_input_shapes(i, node, *thunks):
    """Debug hook: print the shape of each input's stored value, or 'N/A'
    when the value carries no shape attribute."""
    rendered = [str(inp.value.shape) if hasattr(inp.value, 'shape') else 'N/A'
                for inp in node.inputs]
    print("input shapes:", ", ".join(rendered))
def print_input_types(i, node, *thunks):
    """Debug hook: print the Python type of each input's stored value."""
    names = [str(type(inp.value)) for inp in node.inputs]
    print("input types:", ", ".join(names))
def print_sep(i, node, *thunks):
    """Debug hook: print a separator line between per-node reports."""
    separator = "==================================="
    print(separator)
import numpy
def numpy_compare(a, b, tolerance=1e-6):
    """Equality predicate that compares ndarrays elementwise within
    `tolerance`, and falls back to plain == for anything else."""
    if not isinstance(a, numpy.ndarray):
        return a == b
    return (abs(a - b) <= tolerance).all()
def numpy_debug_linker(pre, post=None):
    """Build a DebugLinker over gof.OpWiseCLinker whose output comparison
    uses numpy_compare (tolerant elementwise ndarray equality)."""
    callbacks_after = [] if post is None else post
    return DebugLinker([gof.OpWiseCLinker],
                       pre,
                       callbacks_after,
                       compare_fn=numpy_compare)
......@@ -12,7 +12,7 @@ from theano.gof import Op, Apply, generic
class GradTodo(Op):
# TODO : need description for class
__props__ = ()
def make_node(self, x):
......@@ -24,6 +24,7 @@ grad_todo = GradTodo()
class FFT(Op):
# TODO : need description for parameters
"""
Fast Fourier Transform.
......@@ -44,7 +45,8 @@ class FFT(Op):
# don't return the plan object in the 'buf' output
half = False
"""Only return the first half (positive-valued) of the frequency components."""
"""Only return the first half (positive-valued) of the frequency
components."""
__props__ = ("half", "inverse")
def __init__(self, half=False, inverse=False):
......@@ -82,11 +84,13 @@ class FFT(Op):
M, N = fft.shape
if axis == 0:
if (M % 2):
raise ValueError('halfFFT on odd-length vectors is undefined')
raise ValueError(
'halfFFT on odd-length vectors is undefined')
spectrogram[0] = fft[0:M / 2, :]
elif axis == 1:
if (N % 2):
raise ValueError('halfFFT on odd-length vectors is undefined')
raise ValueError(
'halfFFT on odd-length vectors is undefined')
spectrogram[0] = fft[:, 0:N / 2]
else:
raise NotImplementedError()
......@@ -105,6 +109,7 @@ half_ifft = FFT(half=True, inverse=True)
def dct_matrix(rows, cols, unitary=True):
# TODO : need description for parameters
"""
Return a (rows x cols) matrix implementing a discrete cosine transform.
......@@ -115,7 +120,8 @@ def dct_matrix(rows, cols, unitary=True):
col_range = numpy.arange(cols)
scale = numpy.sqrt(2.0 / cols)
for i in xrange(rows):
rval[i] = numpy.cos(i * (col_range * 2 + 1) / (2.0 * cols) * numpy.pi) * scale
rval[i] = numpy.cos(
i * (col_range * 2 + 1) / (2.0 * cols) * numpy.pi) * scale
if unitary:
rval[0] *= numpy.sqrt(0.5)
......
......@@ -9,17 +9,19 @@ from theano.tests import unittest_tools as utt
class Minimal(gof.Op):
# TODO : need description for class
# if the Op has any attributes,
# consider using them in the eq function. If two Apply nodes have the same inputs and the
# ops compare equal... then they will be MERGED so they had better have computed the same
# thing!
# if the Op has any attributes, consider using them in the eq function.
# If two Apply nodes have the same inputs and the ops compare equal...
# then they will be MERGED so they had better have computed the same thing!
def __init__(self):
# If you put things here, think about whether they change the outputs computed by
# self.perform()
# - If they do, then you should take them into consideration in __eq__ and __hash__
# - If they do not, then you should not use them in __eq__ and __hash__
# If you put things here, think about whether they change the outputs
# computed by # self.perform()
# - If they do, then you should take them into consideration in
# __eq__ and __hash__
# - If they do not, then you should not use them in
# __eq__ and __hash__
super(Minimal, self).__init__()
......
......@@ -16,6 +16,7 @@ if cuda_available:
class MultinomialFromUniform(Op):
# TODO : need description for parameter 'odtype'
"""
Converts samples from a uniform into sample from a multinomial.
......@@ -197,7 +198,8 @@ class MultinomialFromUniform(Op):
class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
"""
Converts samples from a uniform into sample (without replacement) from a multinomial.
Converts samples from a uniform into sample (without replacement) from a
multinomial.
"""
......@@ -347,8 +349,8 @@ class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
(z,) = outs
if n_samples > pvals.shape[1]:
raise ValueError("Cannot sample without replacement n samples bigger "
"than the size of the distribution.")
raise ValueError("Cannot sample without replacement n samples "
"bigger than the size of the distribution.")
if unis.shape[0] != pvals.shape[0] * n_samples:
raise ValueError("unis.shape[0] != pvals.shape[0] * n_samples",
......@@ -358,7 +360,8 @@ class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
odtype = 'int64'
else:
odtype = self.odtype
if z[0] is None or not numpy.all(z[0].shape == [pvals.shape[0], n_samples]):
if (z[0] is None or
not numpy.all(z[0].shape == [pvals.shape[0], n_samples])):
z[0] = -1 * numpy.ones((pvals.shape[0], n_samples), dtype=odtype)
nb_multi = pvals.shape[0]
......@@ -374,7 +377,8 @@ class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
cummul += pvals[n, m]
if (cummul > unis_n):
z[0][n, c] = m
# set to zero and re-normalize so that it's not selected again
# set to zero and re-normalize so that it's not
# selected again
pvals[n, m] = 0.
pvals[n] /= pvals[n].sum()
break
......@@ -562,6 +566,7 @@ class GpuMultinomialFromUniform(MultinomialFromUniform, GpuOp):
@local_optimizer([MultinomialFromUniform])
def local_gpu_multinomial(node):
# TODO : need description for function
if type(node.op) is MultinomialFromUniform:
if len(node.inputs) == 2:
p, u = node.inputs
......
......@@ -116,7 +116,8 @@ class NeighbourhoodsFromImages(Op):
return dims, num_strides
# for inverse mode
# "output" here actually referes to the Op's input shape (but it's inverse mode)
# "output" here actually referes to the Op's input shape (but it's inverse
# mode)
def in_shape(self, output_shape):
out_dims = list(output_shape[:self.n_dims_before])
num_strides = []
......@@ -168,9 +169,10 @@ class NeighbourhoodsFromImages(Op):
for dim in self.dims_neighbourhoods:
prod *= dim
if x.shape[-1] != prod:
raise ValueError("Last dimension of neighbourhoods (%s) is not"
" the product of the neighbourhoods dimensions"
" (%s)" % (str(x.shape[-1]), str(prod)))
raise ValueError(
"Last dimension of neighbourhoods (%s) is not"
" the product of the neighbourhoods dimensions"
" (%s)" % (str(x.shape[-1]), str(prod)))
else:
if len(x.shape) != (self.n_dims_before +
len(self.dims_neighbourhoods)):
......@@ -195,6 +197,7 @@ class NeighbourhoodsFromImages(Op):
exec(self.code)
def make_py_code(self):
# TODO : need description for method and return
code = self._py_outerloops()
for i in xrange(len(self.strides)):
code += self._py_innerloop(i)
......@@ -202,6 +205,7 @@ class NeighbourhoodsFromImages(Op):
return code, builtins.compile(code, '<string>', 'exec')
def _py_outerloops(self):
# TODO : need description for method, parameter and return
code_before = ""
for dim_idx in xrange(self.n_dims_before):
code_before += ('\t' * (dim_idx)) + \
......@@ -210,6 +214,7 @@ class NeighbourhoodsFromImages(Op):
return code_before
def _py_innerloop(self, inner_dim_no):
# TODO : need description for method, parameter and return
base_indent = ('\t' * (self.n_dims_before + inner_dim_no * 2))
code_before = base_indent + \
"for stride_idx_%d in xrange(num_strides[%d]):\n" % \
......@@ -229,10 +234,12 @@ class NeighbourhoodsFromImages(Op):
return code_before
def _py_flattened_idx(self):
# TODO : need description for method and return
return "+".join(["neigh_strides[%d]*neigh_idx_%d" % (i, i)
for i in xrange(len(self.strides))])
def _py_assignment(self):
# TODO : need description for method and return
input_idx = "".join(["outer_idx_%d," % (i,)
for i in xrange(self.n_dims_before)])
input_idx += "".join(["dim_%d_offset+neigh_idx_%d," %
......@@ -259,6 +266,7 @@ class NeighbourhoodsFromImages(Op):
class ImagesFromNeighbourhoods(NeighbourhoodsFromImages):
# TODO : need description for class, parameters
def __init__(self, n_dims_before, dims_neighbourhoods,
strides=None, ignore_border=False):
NeighbourhoodsFromImages.__init__(self, n_dims_before,
......
......@@ -11,11 +11,11 @@ import warnings
import numpy
from six.moves import xrange
from theano import Op, Apply, shared, config, Variable, Out
from theano import Op, Apply, shared, config, Variable
from theano import gradient, function
from theano import tensor
from theano.tensor import (raw_random, TensorType, as_tensor_variable,
get_vector_length, cast, opt, scal)
from theano.tensor import (TensorType, as_tensor_variable, get_vector_length,
cast, opt, scal)
from theano.tensor import sqrt, log, sin, cos, join, prod
from theano.compile import optdb
from theano.gof import local_optimizer
......@@ -23,21 +23,24 @@ from . import multinomial
import theano.sandbox.cuda
from theano.sandbox.cuda import GpuOp
if theano.sandbox.cuda.cuda_available:
from theano.sandbox.cuda import (CudaNdarrayType,
float32_shared_constructor)
from theano.sandbox.gpuarray.basic_ops import GpuKernelBase, Kernel
from theano.sandbox.gpuarray.type import GpuArrayType
from theano.sandbox.gpuarray.fp16_help import write_w
from theano.sandbox.gpuarray.opt import (register_opt as register_gpua,
host_from_gpu as host_from_gpua)
if theano.sandbox.cuda.cuda_available:
from theano.sandbox.cuda import (CudaNdarrayType,
float32_shared_constructor)
def matVecModM(A, s, m):
    """Return the matrix-vector product ``A @ s`` modulo ``m`` as int32.

    Parameters
    ----------
    A : numpy.ndarray of dtype int64
        Transition matrix (dtype asserted to avoid silent overflow).
    s : array-like
        State vector.
    m : int
        Modulus.

    The elementwise products are reduced modulo ``m`` before the row sum
    is reduced again, keeping intermediates within int64 range.
    (Diff residue had duplicated the return line; a single copy remains.)
    """
    assert A.dtype == 'int64'
    return numpy.int32(numpy.sum((A * s) % m, 1) % m)
def multMatVect(v, A, m1, B, m2):
# TODO : need description for parameter and return
"""
Multiply the first half of v by A with a modulo of m1 and the second half
by B with a modulo of m2.
......@@ -193,13 +196,13 @@ class DotModulo(Op):
# MRG31k3p
# generator constants :
# (the diff residue listed each constant twice -- old and new comment
#  style; a single, deduplicated copy remains)
M1 = numpy.asarray(numpy.int32(2147483647))  # 2^31 - 1
M2 = numpy.asarray(numpy.int32(2147462579))  # 2^31 - 21069
MASK12 = numpy.int32(511)  # 2^9 - 1
MASK13 = numpy.int32(16777215)  # 2^24 - 1
MASK2 = numpy.int32(65535)  # 2^16 - 1
MULT2 = numpy.int32(21069)
NORM = 4.656612873077392578125e-10  # 1./2^31
# A1p0 = numpy.asarray([[0, 4194304, 129], [1, 0, 0], [0, 1, 0]],
#                      dtype='int64')
......@@ -229,14 +232,17 @@ np_int32_vals = [numpy.int32(i) for i in (0, 7, 9, 15, 16, 22, 24)]
def ff_2p134(rstate):
    # Fast-forward the MRG31k3p state: one application of the precomputed
    # step matrices (A1p134 mod M1 on the first half, A2p134 mod M2 on the
    # second half) via multMatVect.
    # NOTE(review): presumably advances the stream by 2**134 draws, as the
    # name suggests -- confirm against the A1p134/A2p134 definitions.
    return multMatVect(rstate, A1p134, M1, A2p134, M2)
def ff_2p72(rstate):
    # Fast-forward the MRG31k3p state via the precomputed step matrices
    # (A1p72 mod M1, A2p72 mod M2); see multMatVect.
    # NOTE(review): presumably advances the stream by 2**72 draws, as the
    # name suggests -- confirm against the A1p72/A2p72 definitions.
    return multMatVect(rstate, A1p72, M1, A2p72, M2)
def mrg_next_value(rstate, new_rstate):
# TODO : need description for method, parameter and return
x11, x12, x13, x21, x22, x23 = rstate
assert type(x11) == numpy.int32
......@@ -286,7 +292,7 @@ def mrg_next_value(rstate, new_rstate):
class mrg_uniform_base(Op):
# TODO : need description for class, parameter
__props__ = ("output_type", "inplace")
def __init__(self, output_type, inplace=False):
......@@ -314,9 +320,9 @@ class mrg_uniform_base(Op):
[rstate.type(), self.output_type()])
def grad(self, inputs, ograd):
return [gradient.grad_undefined(
self, k, inp,
'No gradient defined through random sampling op')
return [gradient.grad_undefined(self, k, inp,
'No gradient defined through '
'random sampling op')
for k, inp in enumerate(inputs)]
def R_op(self, inputs, eval_points):
......@@ -331,7 +337,7 @@ class mrg_uniform(mrg_uniform_base):
v_size = as_tensor_variable(size)
if ndim is None:
ndim = get_vector_length(v_size)
op = cls(TensorType(dtype, (False,)*ndim))
op = cls(TensorType(dtype, (False,) * ndim))
return op(rstate, cast(v_size, 'int32'))
def perform(self, node, inp, out):
......@@ -371,9 +377,12 @@ class mrg_uniform(mrg_uniform_base):
assert isinstance(node.inputs[0].type, TensorType)
o_rstate, o_sample = out
if self.inplace:
o_rstate_requirement = 'NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_ALIGNED'
o_rstate_requirement = (
'NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_ALIGNED')
else:
o_rstate_requirement = 'NPY_ARRAY_ENSURECOPY|NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_ALIGNED'
o_rstate_requirement = (
'NPY_ARRAY_ENSURECOPY|NPY_ARRAY_C_CONTIGUOUS|'
'NPY_ARRAY_ALIGNED')
ndim = self.output_type.ndim
o_type_num = numpy.asarray(0, dtype=self.output_type.dtype).dtype.num
fail = sub['fail']
......@@ -539,7 +548,7 @@ class GPU_mrg_uniform(mrg_uniform_base, GpuOp):
v_size = as_tensor_variable(size)
if ndim is None:
ndim = get_vector_length(v_size)
op = cls(CudaNdarrayType((False,)*ndim))
op = cls(CudaNdarrayType((False,) * ndim))
return op(rstate, cast(v_size, 'int32'))
def c_support_code_apply(self, node, nodename):
......@@ -781,7 +790,7 @@ class GPUA_mrg_uniform(GpuKernelBase, mrg_uniform_base):
v_size = as_tensor_variable(size)
if ndim is None:
ndim = get_vector_length(v_size)
op = cls(GpuArrayType(dtype, (False,)*ndim))
op = cls(GpuArrayType(dtype, (False,) * ndim))
return op(rstate, cast(v_size, 'int32'))
def c_headers(self):
......@@ -1021,6 +1030,7 @@ class GPUA_mrg_uniform(GpuKernelBase, mrg_uniform_base):
def guess_n_streams(size, warn=False):
# TODO : need description for parameter 'size'
"""
Return a guess at a good number of streams.
......@@ -1035,7 +1045,7 @@ def guess_n_streams(size, warn=False):
# Note that this code was moved out of `MRG_RandomStreams` so that it can
# be easily accessed from tests, where we want to disable the warning.
if (isinstance(size, (tuple, list)) and
all([isinstance(i, int) for i in size])):
all([isinstance(i, int) for i in size])):
# We can make a guess.
r = 1
for s in size:
......@@ -1044,8 +1054,9 @@ def guess_n_streams(size, warn=False):
r = r // 6 # chosen as fastest for rbm_benchmark
# The purpose of sampling from many streams is to be able to use
# the GPU to its full capacity. It just wastes RAM and stream-initialization time to
# allocate more streams than necessary for the GPU.
# the GPU to its full capacity. It just wastes RAM and
# stream-initialization time to allocate more streams than necessary
# for the GPU.
# XXX: This number is chosen to be good for 280 and 480 architectures,
# Better would be to use pycuda to query the number of
# processors on the GPU device,
......@@ -1053,16 +1064,17 @@ def guess_n_streams(size, warn=False):
return min(r, 60 * 256)
else:
if warn:
warnings.warn((
"MRG_RandomStreams Can't determine #streams from "
"size (%s), guessing 60*256") % str(size),
stacklevel=3)
warnings.warn(
("MRG_RandomStreams Can't determine #streams "
"from size (%s), guessing 60*256") % str(size),
stacklevel=3)
return 60 * 256
class MRG_RandomStreams(object):
# TODO : need description for parameter 'use_cuda'
"""
Module component with similar interface to numpy.random
Module component with similar interface to numpy.random
(numpy.random.RandomState).
Parameters
......@@ -1077,11 +1089,13 @@ class MRG_RandomStreams(object):
"""
def updates(self):
# TODO : need description for method and return
return list(self.state_updates)
def __init__(self, seed=12345, use_cuda=None):
# A list of pairs of the form (input_r, output_r), representing the
# update rules of all the random states generated by this RandomStreams.
# update rules of all the random states generated
# by this RandomStreams.
self.state_updates = []
super(MRG_RandomStreams, self).__init__()
......@@ -1092,11 +1106,12 @@ class MRG_RandomStreams(object):
self.set_rstate(seed)
if use_cuda is None:
self.use_cuda = theano.sandbox.cuda.cuda_enabled
self.use_cuda = theano.sandbox.cuda.cuda_enabled
else:
self.use_cuda = use_cuda
def set_rstate(self, seed):
# TODO : need description for method, parameter
if isinstance(seed, int):
if seed == 0:
raise ValueError('seed should not be 0', seed)
......@@ -1158,11 +1173,12 @@ class MRG_RandomStreams(object):
start.
"""
#self.rstate = ff_2p134(self.rstate)
# self.rstate = ff_2p134(self.rstate)
self.rstate = multMatVect(self.rstate, A1p134, M1, A2p134, M2)
assert self.rstate.dtype == numpy.int32
def get_substream_rstates(self, n_streams, dtype, inc_rstate=True):
# TODO : need description for parameter and return
"""
Initialize a matrix in which each row is a MRG stream state,
and they are spaced by 2**72 samples.
......@@ -1186,7 +1202,7 @@ class MRG_RandomStreams(object):
f.input_storage[5].storage[0] = M2
for i in xrange(1, n_streams):
# Inline the following call to bypass Python overhead
#rval[i] = ff_2p72(rval[i - 1])
# rval[i] = ff_2p72(rval[i - 1])
v = rval[i - 1]
f.input_storage[1].storage[0] = v[:3]
f.input_storage[4].storage[0] = v[3:]
......@@ -1208,9 +1224,11 @@ class MRG_RandomStreams(object):
return rval
def n_streams(self, size):
# TODO : need description for method, parameter and return
return guess_n_streams(size)
def pretty_return(self, node_rstate, new_rstate, sample, size, nstreams):
# TODO : need description for method, parameter and return
sample.rstate = node_rstate
sample.update = (node_rstate, new_rstate)
self.state_updates.append((node_rstate, new_rstate, size, nstreams))
......@@ -1219,6 +1237,7 @@ class MRG_RandomStreams(object):
def uniform(self, size, low=0.0, high=1.0, ndim=None, dtype=None,
nstreams=None):
# TODO : need description for parameter 'size', 'ndim', 'nstreams'
"""
Sample a tensor of given size whose element from a uniform
distribution between low and high.
......@@ -1229,7 +1248,7 @@ class MRG_RandomStreams(object):
Parameters
----------
low
Lower bound of the interval on which values are sampled.
Lower bound of the interval on which values are sampled.
If the ``dtype`` arg is provided, ``low`` will be cast into
dtype. This bound is excluded.
high
......@@ -1306,6 +1325,7 @@ class MRG_RandomStreams(object):
def binomial(self, size=None, n=1, p=0.5, ndim=None, dtype='int64',
nstreams=None):
# TODO : need description for method, parameter and return
if n == 1:
if dtype == 'float32' and self.use_cuda:
x = self.uniform(size=size, dtype=dtype, nstreams=nstreams)
......@@ -1317,6 +1337,7 @@ class MRG_RandomStreams(object):
def multinomial(self, size=None, n=1, pvals=None, ndim=None, dtype='int64',
nstreams=None):
# TODO : need description for parameter and return
"""
Sample `n` (`n` needs to be >= 1, default 1) times from a multinomial
distribution defined by probabilities pvals.
......@@ -1347,15 +1368,15 @@ class MRG_RandomStreams(object):
size)
if size is not None:
raise ValueError("Provided a size argument to "
"MRG_RandomStreams.multinomial, which does not use "
"the size argument.")
raise ValueError(
"Provided a size argument to MRG_RandomStreams.multinomial, "
"which does not use the size argument.")
if ndim is not None:
raise ValueError("Provided an ndim argument to "
"MRG_RandomStreams.multinomial, which does not use "
"the ndim argument.")
raise ValueError(
"Provided an ndim argument to MRG_RandomStreams.multinomial, "
"which does not use the ndim argument.")
if pvals.ndim == 2:
size = pvals[:,0].shape * n
size = pvals[:, 0].shape * n
unis = self.uniform(size=size, ndim=1, nstreams=nstreams)
op = multinomial.MultinomialFromUniform(dtype)
n_samples = as_tensor_variable(n)
......@@ -1364,19 +1385,20 @@ class MRG_RandomStreams(object):
raise NotImplementedError(("MRG_RandomStreams.multinomial only"
" implemented for pvals.ndim = 2"))
def multinomial_wo_replacement(self, size=None, n=1, pvals=None, ndim=None, dtype='int64',
nstreams=None):
def multinomial_wo_replacement(self, size=None, n=1, pvals=None,
ndim=None, dtype='int64', nstreams=None):
# TODO : need description for parameter
"""
Sample `n` times *WITHOUT replacement* from a multinomial distribution
defined by probabilities pvals, and returns the indices of the sampled
elements.
`n` needs to be in [1, m], where m is the number of elements to select
from, i.e. m == pvals.shape[1]. By default n = 1.
Example : pvals = [[.98, .01, .01], [.01, .49, .50]] and n=1 will
probably result in [[0],[2]]. When setting n=2, this
will probably result in [[0,1],[2,1]].
Notes
-----
-`size` and `ndim` are only there keep the same signature as other
......@@ -1395,25 +1417,27 @@ class MRG_RandomStreams(object):
if size is not None:
raise ValueError("Provided a size argument to "
"MRG_RandomStreams.multinomial_wo_replacement, which does not use "
"the size argument.")
"MRG_RandomStreams.multinomial_wo_replacement, "
"which does not use the size argument.")
if ndim is not None:
raise ValueError("Provided an ndim argument to "
"MRG_RandomStreams.multinomial_wo_replacement, which does not use "
"the ndim argument.")
"MRG_RandomStreams.multinomial_wo_replacement, "
"which does not use the ndim argument.")
if pvals.ndim == 2:
# size = [pvals.shape[0], as_tensor_variable(n)]
size = pvals[:,0].shape * n
size = pvals[:, 0].shape * n
unis = self.uniform(size=size, ndim=1, nstreams=nstreams)
op = multinomial.MultinomialWOReplacementFromUniform(dtype)
n_samples = as_tensor_variable(n)
return op(pvals, unis, n_samples)
else:
raise NotImplementedError(("MRG_RandomStreams.multinomial_wo_replacement only"
" implemented for pvals.ndim = 2"))
raise NotImplementedError(
"MRG_RandomStreams.multinomial_wo_replacement only implemented"
" for pvals.ndim = 2")
def normal(self, size, avg=0.0, std=1.0, ndim=None,
dtype=None, nstreams=None):
# TODO : need description for method
"""
Parameters
----------
......@@ -1443,7 +1467,8 @@ class MRG_RandomStreams(object):
evened = False
constant = False
if isinstance(size, tuple) and all([isinstance(i, (numpy.integer, int)) for i in size]):
if (isinstance(size, tuple) and
all([isinstance(i, (numpy.integer, int)) for i in size])):
constant = True
# Force dtype because it defaults to float when size is empty
n_samples = numpy.prod(size, dtype='int64')
......@@ -1464,16 +1489,18 @@ class MRG_RandomStreams(object):
U1 = flattened[:prod(flattened.shape) // 2]
U2 = flattened[prod(flattened.shape) // 2:]
#normal_samples = zeros_like(flattened)
# normal_samples = zeros_like(flattened)
sqrt_ln_U1 = sqrt(-2.0 * log(U1))
# TypeError: 'TensorVariable' object does not support item assignment
# so this doesn't work...
#normal_samples[:n_samples/2] = sqrt_ln_U1 * cos(2.0*numpy.pi*U2)
#normal_samples[n_samples/2:] = sqrt_ln_U1 * sin(2.0*numpy.pi*U2)
# normal_samples[:n_samples/2] = sqrt_ln_U1 * cos(2.0*numpy.pi*U2)
# normal_samples[n_samples/2:] = sqrt_ln_U1 * sin(2.0*numpy.pi*U2)
# so trying this instead
first_half = sqrt_ln_U1 * cos(numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
second_half = sqrt_ln_U1 * sin(numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
first_half = sqrt_ln_U1 * cos(
numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
second_half = sqrt_ln_U1 * sin(
numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
normal_samples = join(0, first_half, second_half)
final_samples = None
......@@ -1494,15 +1521,13 @@ class MRG_RandomStreams(object):
assert final_samples.dtype == dtype
return final_samples
from theano.sandbox.gpuarray.opt import (register_opt as register_gpua,
host_from_gpu as host_from_gpua)
@register_gpua('fast_compile')
@local_optimizer([mrg_uniform])
def local_gpua_mrg(node):
# TODO : need description for function
if (type(node.op) == mrg_uniform and
isinstance(node.inputs[0].type, GpuArrayType)):
isinstance(node.inputs[0].type, GpuArrayType)):
outs = GPUA_mrg_uniform.new(node.inputs[0],
node.op.output_type.ndim,
node.op.output_type.dtype,
......@@ -1515,6 +1540,7 @@ MRG_RNGs = (mrg_uniform, GPU_mrg_uniform, GPUA_mrg_uniform)
@local_optimizer(MRG_RNGs)
def mrg_random_make_inplace(node):
op = node.op
if isinstance(op, MRG_RNGs) and not op.inplace:
# op might be gpu version
......
......@@ -4,6 +4,7 @@ import theano.tensor
class ScalarSoftsign(theano.scalar.UnaryScalarOp):
# TODO : need description for class
@staticmethod
def static_impl(x):
return x / (1.0 + abs(x))
......
......@@ -24,7 +24,8 @@ class Solve(gof.Op):
# sym_pos, lower, overwrite_a, overwrite_b
# TODO: Add C code that calls the underlying LAPACK routines
# and keeps a memory workspace from call to call as a non-default Op output
# and keeps a memory workspace from call to call as a non-default Op
# output
def __eq__(self, other):
return type(self) == type(other)
......
......@@ -92,13 +92,6 @@ whitelist_flake8 = [
"tensor/nnet/tests/test_sigm.py",
"scalar/__init__.py",
"scalar/tests/test_basic.py",
"sandbox/__init__.py",
"sandbox/rng_mrg.py",
"sandbox/theano_object.py",
"sandbox/scan.py",
"sandbox/symbolic_module.py",
"sandbox/conv.py",
"sandbox/debug.py",
"sandbox/tests/test_theano_object.py",
"sandbox/tests/test_scan.py",
"sandbox/tests/test_neighbourhoods.py",
......
Markdown 格式
0%
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论