提交 20a0882d authored 作者: Frederic Bastien's avatar Frederic Bastien

Whitespace/indentation fix.

上级 bf1c0d30
......@@ -37,12 +37,12 @@ def matVecModM(A, s, m):
return x
def multMatVect(v, A, m1, B, m2):
#multiply the first half of v by A with a modulo of m1
#and the second half by B with a modulo of m2
r = numpy.zeros_like(v)
r[:3] = matVecModM(A, v[:3], m1)
r[3:] = matVecModM(B, v[3:], m2)
return r
#multiply the first half of v by A with a modulo of m1
#and the second half by B with a modulo of m2
r = numpy.zeros_like(v)
r[:3] = matVecModM(A, v[:3], m1)
r[3:] = matVecModM(B, v[3:], m2)
return r
#MRG31k3p
#generator constants :
......@@ -59,7 +59,7 @@ A2p0 = numpy.asarray([[32768, 0, 32769], [1, 0, 0], [0, 1, 0]])
A1p72 = numpy.asarray([[1516919229, 758510237, 499121365],
[1884998244, 1516919229, 335398200],
[601897748, 1884998244, 358115744]])
[601897748, 1884998244, 358115744]])
A2p72 = numpy.asarray([[1228857673, 1496414766, 954677935],
[1133297478, 1407477216, 1496414766],
[2002613992, 1639496704, 1407477216]])
......@@ -154,8 +154,8 @@ class mrg_uniform_base(Op):
# this op should not be called directly.
#
# call through MRG_RandomStreams instead.
return Apply(self,
[rstate, size],
return Apply(self,
[rstate, size],
[rstate.type(), self.output_type()])
def grad(self,inputs,ograd):
......@@ -205,17 +205,17 @@ class mrg_uniform(mrg_uniform_base):
if self.inplace:
o_rstate_requirement = 'NPY_C_CONTIGUOUS|NPY_ALIGNED'
else:
o_rstate_requirement = 'NPY_ENSURECOPY|NPY_C_CONTIGUOUS|NPY_ALIGNED'
o_rstate_requirement = 'NPY_ENSURECOPY|NPY_C_CONTIGUOUS|NPY_ALIGNED'
ndim = self.output_type.ndim
o_type_num = numpy.asarray(0, dtype=self.output_type.dtype).dtype.num
fail = sub['fail']
if self.output_type.dtype == 'float32':
otype = 'float'
otype = 'float'
NORM = '4.6566126e-10f' #numpy.float32(1.0/(2**31+65))
# this was determined by finding the biggest number such that
# numpy.float32(number * M1) < 1.0
else:
otype = 'double'
otype = 'double'
NORM = '4.656612873077392578125e-10'
return """
//////// <code generated by mrg_uniform>
......@@ -368,12 +368,12 @@ class GPU_mrg_uniform(mrg_uniform_base):
def c_support_code_apply(self, node, nodename):
if self.output_type.dtype == 'float32':
otype = 'float'
otype = 'float'
NORM = '4.6566126e-10f' #numpy.float32(1.0/(2**31+65))
# this was determined by finding the biggest number such that
# numpy.float32(number * M1) < 1.0
else:
otype = 'double'
otype = 'double'
NORM = '4.656612873077392578125e-10'
return """
......@@ -450,7 +450,7 @@ class GPU_mrg_uniform(mrg_uniform_base):
state_data[idx*6+4]= x22;
state_data[idx*6+5]= x23;
}
}
}
""" %locals()
......@@ -461,9 +461,9 @@ class GPU_mrg_uniform(mrg_uniform_base):
fail = sub['fail']
if self.output_type.dtype == 'float32':
otype = 'float'
otype = 'float'
else:
otype = 'double'
otype = 'double'
SYNC="CNDA_THREAD_SYNC";
return """
......@@ -495,7 +495,7 @@ class GPU_mrg_uniform(mrg_uniform_base):
{
odims[i] = ((npy_int32*)(%(size)s->data + %(size)s->strides[0] * i))[0];
n_elements *= odims[i];
must_alloc_sample = (must_alloc_sample
must_alloc_sample = (must_alloc_sample
|| CudaNdarray_HOST_DIMS(%(o_sample)s)[i] != odims[i]);
}
if (must_alloc_sample)
......@@ -555,7 +555,7 @@ class GPU_mrg_uniform(mrg_uniform_base):
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %%s: %%s.\\n", "mrg_uniform", cudaGetErrorString(err));
%(fail)s;
......@@ -657,7 +657,7 @@ class MRG_RandomStreams(object):
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing
information.
:param: size: Can be a list of integer or Theano variable(ex: the shape of other Theano Variable)
TODO: can size be None?
"""
......@@ -671,7 +671,7 @@ class MRG_RandomStreams(object):
if self.use_cuda and dtype=='float32':
rstates = self.get_substream_rstates(nstreams)
rstates = rstates.flatten()
# HACK - we use fact that int32 and float32 have same size to
# HACK - we use fact that int32 and float32 have same size to
# sneak ints into the CudaNdarray type.
# these *SHOULD NEVER BE USED AS FLOATS*
tmp_float_buf = numpy.frombuffer(rstates.data, dtype='float32')
......@@ -683,7 +683,7 @@ class MRG_RandomStreams(object):
# we can't use the normal mrg_uniform constructor + later optimization
# because of the tmp_float_buf hack above. There is
# currently no Theano node that will do a frombuffer reinterpretation.
u = self.pretty_return(node_rstate,
u = self.pretty_return(node_rstate,
*GPU_mrg_uniform.new(node_rstate, ndim, dtype, size))
else:
node_rstate = shared(self.get_substream_rstates(nstreams))
......@@ -703,12 +703,12 @@ class MRG_RandomStreams(object):
return cast(self.uniform(size=size) < p, dtype)
else:
raise NotImplementedError("MRG_RandomStreams.binomial with n > 1")
def multinomial(self, size=None, n=1, pvals=None, ndim=None, dtype='int64'):
"""
Sample `n` (currently `n` needs to be 1) times from a multinomial distribution defined by
probabilities pvals.
Example : pvals = [[.98,.01, .01], [.01, .98 .01]] will probably result in [[1,0,0],[0,1,0]].
.. note::
......@@ -736,9 +736,9 @@ class MRG_RandomStreams(object):
evened = False
constant = False
if isinstance(size, tuple) and all([isinstance(i,int) for i in size]):
constant = True
constant = True
n_samples = numpy.prod(size)
if n_samples % 2 == 1:
n_samples += 1
evened = True
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论