提交 d0184177 authored 作者: Dumitru Erhan's avatar Dumitru Erhan

branch merge

......@@ -41,12 +41,27 @@ precise inspection of what's being computed where, when, and how, see the
:ref:`faq_wraplinker`.
How do I print a graph before or after compilation?
----------------------------------------------------------
Theano provides a function to print a graph before and after compilation:
>>> x = T.dscalar('x')
>>> y = x**2
>>> gy = T.grad(y, x)
>>> pp(gy) # print out the gradient prior to optimization
'((fill((x ** 2), 1.0) * 2) * (x ** (2 - 1)))'
>>> f = function([x], gy)
>>> pp(f.maker.env.outputs[0])
'(2.0 * x)'
The parameter in T.dscalar('x') in the first line is the name of this variable in the graph (not in Python). This name is reused when the graph is printed. Otherwise, the variable x is printed as its type, <TensorType(float64, scalar)>, which is not very readable. The string 'x' can be any string, but to keep the code comprehensible, try to pass the same name as (or a name derived from) the Python variable name.
The function I compiled is too slow, what's up?
-----------------------------------------------
First, make sure you're running in FAST_RUN mode, by passing
``mode='FAST_RUN'`` to ``theano.function`` or ``theano.make``. Some
First, make sure you're running in FAST_RUN mode, by passing ``mode='FAST_RUN'``
to ``theano.function`` or ``theano.make`` or by setting to ``PROFILE_MODE``
the flags :attr:`config.mode`. Some
operations have excruciatingly slow Python implementations and that
can negatively affect the performance of FAST_COMPILE.
......
......@@ -1211,7 +1211,7 @@ class GpuSum(Op):
class GpuReshape(tensor.Reshape):
# __hash__, __eq__, __str__ come from tensor.Subtensor
def make_node(self, x, shp):
host_reshaped = host_from_gpu(x).reshape(shp)
host_reshaped = host_from_gpu(x).reshape(shp,ndim=self.ndim)
return Apply(self, [x, shp], [CudaNdarrayType(host_reshaped.broadcastable)()])
def perform(self, node, (x, shp), (out,)):
if (len(shp) != self.ndim):
......
......@@ -198,7 +198,7 @@ class GpuConv(Op):
return ['cuda_ndarray.cuh','<stdio.h>']
def c_code_cache_version(self):
return (0,2)
return (0,3)
def c_support_code_apply(self, node, nodename):
return open(os.path.join(os.path.split(__file__)[0],'conv_kernel.cu')).read()+\
......
......@@ -626,7 +626,7 @@ CudaNdarray_conv_valid(const CudaNdarray *img, const CudaNdarray * kern,
}
else
{
PyErr_Format(PyExc_RuntimeError, "ERROR: all implementations failed! (%s)",
PyErr_Format(PyExc_RuntimeError, "ERROR: all implementations failed for CudaNdarray_conv_valid! (%s)",
cudaGetErrorString(sts));
return -1;
}
......@@ -673,7 +673,7 @@ CudaNdarray_conv_full(const CudaNdarray *img, const CudaNdarray * kern, CudaNdar
const int nkern=CudaNdarray_HOST_DIMS(kern)[0];
const int img_wid=CudaNdarray_HOST_DIMS(img)[3];
const int img_len=CudaNdarray_HOST_DIMS(img)[2];
const int kern_wid=CudaNdarray_HOST_DIMS(img)[3];
const int kern_wid=CudaNdarray_HOST_DIMS(kern)[3];
const int kern_len=CudaNdarray_HOST_DIMS(kern)[2];
const int out_wid=CudaNdarray_HOST_DIMS(out)[3];
const int out_len=CudaNdarray_HOST_DIMS(out)[2];
......@@ -821,13 +821,13 @@ CudaNdarray_conv_full(const CudaNdarray *img, const CudaNdarray * kern, CudaNdar
cudaError_t sts = cudaGetLastError();
if (cudaSuccess == sts)
{
if (verbose>1) printf("threads.x=%i, threads.y=%i, grid.x=%i, grid.y=%i,shared_size=%i, nb_threads=%i, out_len=%i, nb_split=%i, version=%i\n", threads.x, threads.y, grid.x, grid.y, shared_size, threads.x * threads.y, out_len, nb_split, version);
if (verbose>1) printf("threads.x=%i, threads.y=%i, threads.z=%i, grid.x=%i, grid.y=%i,shared_size=%i, nb_threads=%i, out_len=%i, nb_split=%i, version=%i\n", threads.x, threads.y, threads.z, grid.x, grid.y, shared_size, threads.x * threads.y * threads.z, out_len, nb_split, version);
if (verbose) printf("INFO: used 'conv_full_patch_stack_padded' nb_split=%d low_mem=%s\n",nb_split,(version==5?"true":"false"));
work_complete = true;
}
else
{
if (verbose) printf("threads.x=%i, threads.y=%i, grid.x=%i, grid.y=%i,shared_size=%i, nb_threads=%i, out_len=%i, nb_split=%i, version=%i\n", threads.x, threads.y, grid.x, grid.y, shared_size, threads.x * threads.y, out_len, nb_split, version);
if (verbose) printf("threads.x=%i, threads.y=%i, threads.z=%i, grid.x=%i, grid.y=%i,shared_size=%i, nb_threads=%i, out_len=%i, nb_split=%i, version=%i\n", threads.x, threads.y, threads.z, grid.x, grid.y, shared_size, threads.x * threads.y * threads.z, out_len, nb_split, version);
if (verbose) printf("INFO: impl 'conv_full_patch_stack_padded' %s %s failed (%s), trying next implementation\n",
version==3?"no split": "split",(version==5?"low_mem":"not_low_mem"),
cudaGetErrorString(sts));
......@@ -1013,7 +1013,7 @@ CudaNdarray_conv_full(const CudaNdarray *img, const CudaNdarray * kern, CudaNdar
if (verbose) printf("threads.x=%i, threads.y=%i, grid.x=%i, grid.y=%i, shared_size=%i, nb_threads=%i\n", n_threads, 1, n_blocks, 1, 0, n_threads);
if (verbose) printf("INFO: impl 'conv_reference_full' failed (%s), trying next implementation\n",
cudaGetErrorString(sts));
PyErr_Format(PyExc_RuntimeError, "ERROR: all implementations failed! (%s)",
PyErr_Format(PyExc_RuntimeError, "ERROR: all implementations failed for CudaNdarray_conv_full! (%s)",
cudaGetErrorString(sts));
return -1;
}
......
......@@ -5,9 +5,6 @@
#include <iostream>
#include "cuda_ndarray.cuh"
#ifndef DONT_UNROLL
#define UNROLL_LOOP
#endif
/////////////////////////
// Static helper methods
......
......@@ -1168,6 +1168,8 @@ class ScalarFromTensor(Op):
out[0] = s.flatten()[0]
def grad(self, (s,), (dt,)):
return [TensorFromScalar(dt)]
def __str__(self):
return self.__class__.__name__
scalar_from_tensor = ScalarFromTensor()
......
......@@ -71,6 +71,8 @@ class GemmRelated(Op):
return (type(self) == type(other))
def __hash__(self):
return hash(type(self))
def __str__(self):
return self.__class__.__name__
def c_support_code(self):
#return cblas_header_text()
mod_str = """
......
......@@ -1516,7 +1516,7 @@ if(mode != VALID && mode != FULL){
if(dim_zz[0]<=0 || dim_zz[1]<=0){
PyErr_Format(PyExc_ValueError,
"Output dimensions are not valid %%dx%%d",dim_zz[0],dim_zz[1]);
"Output dimensions are not valid %%ldx%%ld",(long int)dim_zz[0],(long int)dim_zz[1]);
%(fail)s;
}
......
......@@ -495,7 +495,8 @@ class CrossentropySoftmaxArgmax1HotWithBias(gof.Op):
return type(self) == type(other)
def __hash__(self):
return tensor.hashtype(self)
def __str__(self):
return self.__class__.__name__
def make_node(self, x, b, y_idx):
x = tensor.as_tensor_variable(x)
b = tensor.as_tensor_variable(b)
......@@ -673,6 +674,8 @@ class CrossentropySoftmax1HotWithBiasDx (gof.Op):
return type(self) == type(other)
def __hash__(self):
return tensor.hashtype(self)
def __str__(self):
return self.__class__.__name__
def make_node(self, dy, sm, y_idx,**kwargs):
dy = tensor.as_tensor_variable(dy)
sm = tensor.as_tensor_variable(sm)
......@@ -720,14 +723,14 @@ class CrossentropySoftmax1HotWithBiasDx (gof.Op):
}
if (%(dnll)s->dimensions[0] != %(sm)s->dimensions[0])
{
PyErr_Format(PyExc_ValueError, "dnll.shape[0] (%%d) != sm.shape[0] (%%d)",
%(dnll)s->dimensions[0], %(sm)s->dimensions[0]);
//PyErr_SetString(PyExc_ValueError, "dnll.shape[0] != sm.shape[0]");
PyErr_Format(PyExc_ValueError, "dnll.shape[0] (%%ld) != sm.shape[0] (%%ld)",
(long int)%(dnll)s->dimensions[0], (long int)%(sm)s->dimensions[0]);
%(fail)s;
}
if (%(dnll)s->dimensions[0] != %(y_idx)s->dimensions[0])
{
PyErr_SetString(PyExc_ValueError, "dnll.shape[0] != y_idx.shape[0]");
PyErr_Format(PyExc_ValueError, "dnll.shape[0] (%%ld) != y_idx.shape[0] (%%ld)",
(long int)%(dnll)s->dimensions[0], (long int)%(y_idx)s->dimensions[0]);
%(fail)s;
}
if ((NULL == %(dx)s)
......
......@@ -13,10 +13,11 @@ from theano.compile import optdb
class RandomStateType(gof.Type):
"""A Type wrapper for numpy.RandomState
The reason this exists (and `Generic` doesn't suffice) is that RandomState objects that
would appear to be equal do not compare equal with the '==' operator. This Type exists to
provide an equals function that is used by DebugMode.
The reason this exists (and `Generic` doesn't suffice) is that
RandomState objects that would appear to be equal do not compare
equal with the '==' operator. This Type exists to provide an equals
function that is used by DebugMode.
"""
def __str__(self):
return 'RandomStateType'
......@@ -53,12 +54,14 @@ class RandomFunction(gof.Op):
def __init__(self, fn, outtype, inplace=False, ndim_added=0 ):
"""
:param fn: a member function of numpy.RandomState
Technically, any function with a signature like the ones in numpy.random.RandomState
will do. This function must accept the shape (sometimes called size) of the output as
the last positional argument.
Technically, any function with a signature like the ones in
numpy.random.RandomState will do. This function must accept
the shape (sometimes called size) of the output as the last
positional argument.
:type fn: string or function reference. A string will be interpreted as the name of a
member function of numpy.random.RandomState.
:type fn: string or function reference. A string will
be interpreted as the name of a member function of
numpy.random.RandomState.
:param outtype: the theano Type of the output
......@@ -96,8 +99,6 @@ class RandomFunction(gof.Op):
self.fn = getattr(numpy.random.RandomState, fn)
else:
self.fn = fn
#backport
#self.fn = getattr(numpy.random.RandomState, fn) if isinstance(fn, str) else fn
self.outtype = outtype
self.inplace = inplace
if self.inplace:
......@@ -106,32 +107,36 @@ class RandomFunction(gof.Op):
def make_node(self, r, shape, *args):
"""
:param r: a numpy.RandomState instance, or a Variable of Type RandomStateType that will
contain a RandomState instance.
:param shape: an lvector with a shape defining how many samples to draw.
In the case of scalar distributions, it is the shape of the tensor output by this Op.
In that case, at runtime, the value associated with this lvector must have a length
equal to the number of dimensions promised by `self.outtype`.
In general, the number of output dimenstions is equal to
len(self.outtype)+self.ndim_added.
:param args: the values associated with these variables will be passed to the RandomState
function during perform as extra "*args"-style arguments. These should be castable to
variables of Type TensorType.
:param r: a numpy.RandomState instance, or a Variable of Type
RandomStateType that will contain a RandomState instance.
:param shape: an lvector with a shape defining how many samples
to draw. In the case of scalar distributions, it is the shape
of the tensor output by this Op. In that case, at runtime, the
value associated with this lvector must have a length equal to
the number of dimensions promised by `self.outtype`.
In a more general case, the number of output dimensions,
len(self.outtype), is equal to len(shape)+self.ndim_added.
The special case where len(shape) == 0 means that the smallest
shape compatible with the argument's shape will be used.
:param args: the values associated with these variables will
be passed to the RandomState function during perform as extra
"*args"-style arguments. These should be castable to variables
of Type TensorType.
:rtype: Apply
:return: Apply with two outputs. The first output is a gof.generic Variable from which
to draw further random numbers. The second output is the outtype() instance holding
the random draw.
:return: Apply with two outputs. The first output is a
gof.generic Variable from which to draw further random numbers.
The second output is the outtype() instance holding the random
draw.
"""
if shape == () or shape == []:
shape = tensor.lvector()
shape = tensor.as_tensor_variable(shape, dtype='int64')
else:
shape = tensor.as_tensor_variable(shape, ndim=1)
#print 'SHAPE TYPE', shape.type, tensor.lvector
assert shape.type.ndim == 1
assert (shape.type.dtype == 'int64') or (shape.type.dtype == 'int32')
if not isinstance(r.type, RandomStateType):
......@@ -157,96 +162,248 @@ class RandomFunction(gof.Op):
r, shape, args = inputs[0], inputs[1], inputs[2:]
assert type(r) == numpy.random.RandomState
r_orig = r
if self.outtype.ndim != len(shape) + self.ndim_added:
# If shape == [], that means no shape is enforced, and numpy is
# trusted to draw the appropriate number of samples, numpy uses
# shape "None" to represent that. Else, numpy expects a tuple.
# TODO: compute the appropriate shape, and pass it to numpy.
if len(shape) == 0:
shape = None
else:
shape = tuple(shape)
if shape is not None and self.outtype.ndim != len(shape) + self.ndim_added:
raise ValueError('Shape mismatch: self.outtype.ndim (%i) != len(shape) (%i) + self.ndim_added (%i)'\
%(self.outtype.ndim, len(shape), self.ndim_added))
if not self.inplace:
r = copy(r)
rout[0] = r
rval = self.fn(r, *(args + [tuple(shape)]))
rval = self.fn(r, *(args + [shape]))
if not isinstance(rval, numpy.ndarray) \
or str(rval.dtype) != node.outputs[1].type.dtype:
out[0] = theano._asarray(rval, dtype = node.outputs[1].type.dtype)
else:
out[0] = rval
rval = theano._asarray(rval, dtype = node.outputs[1].type.dtype)
# When shape is None, numpy has a tendency to unexpectedly
# return a scalar instead of a higher-dimension array containing
# only one element. This value should be reshaped
if shape is None and rval.ndim == 0 and self.outtype.ndim > 0:
rval = rval.reshape([1]*self.outtype.ndim)
if len(rval.shape) != self.outtype.ndim:
raise ValueError('Shape mismatch: "out" should have dimension %i, but the value produced by "perform" has dimension %i'\
% (self.outtype.ndim, len(rval.shape)))
# Check the output has the right shape
if shape is not None:
if self.ndim_added == 0 and shape != rval.shape:
raise ValueError('Shape mismatch: "out" should have shape %s, but the value produced by "perform" has shape %s'\
% (shape, rval.shape))
elif self.ndim_added > 0 and shape != rval.shape[:-self.ndim_added]:
raise ValueError('Shape mismatch: "out" should have shape starting with %s (plus %i extra dimensions), but the value produced by "perform" has shape %s'\
% (shape, self.ndim_added, rval.shape))
out[0] = rval
def grad(self, inputs, outputs):
return [None for i in inputs]
def _infer_ndim(ndim, shape):
"""returns int, variable pair, such that the int is the length of the variable, and the
variable is an integer or uint vector
def _infer_ndim(ndim, shape, *args):
"""
Infer the number of dimensions from the shape or the other arguments.
:rtype: (int, variable) pair, where the variable is an integer vector.
:returns: the first element returned is the inferred number of dimensions.
The second element's length is either the first element, or 0
(if the original shape was None).
In the special case where the shape argument is None, the variable
returned has a length of 0, meaning that the shape will be computed
at runtime from the shape of the other args.
"""
# Find the minimum value of ndim required by the *args
if len(args) > 0:
args_ndim = max(arg.ndim for arg in args)
else:
args_ndim = 0
if isinstance(shape, (tuple, list)):
v_shape = tensor.TensorConstant(type=tensor.lvector, data=theano._asarray(shape, dtype='int64'))
shape_ndim = len(shape)
if ndim is None:
ndim = shape_ndim
else:
if shape_ndim != ndim:
raise ValueError('ndim should be equal to len(shape), but\n',
'ndim = %s, len(shape) = %s, shape = %s'
% (ndim, shape_ndim, shape))
elif shape is None:
# The number of drawn samples will be determined automatically,
# but we need to know ndim
v_shape = tensor.constant([], dtype='int64')
if ndim is None:
ndim = args_ndim
else:
v_shape = tensor.as_tensor_variable(shape)
if ndim is None:
ndim = tensor.get_vector_length(v_shape)
if not (v_shape.dtype.startswith('int') or v_shape.dtype.startswith('uint')):
raise TypeError('shape must be an integer vector or list')
if ndim is None:
#infer ndim
ndim = tensor.get_vector_length(v_shape)
if args_ndim > ndim:
raise ValueError('ndim should be at least as big as required by args value',
(ndim, args_ndim), args)
return ndim, v_shape
def uniform(random_state, size=(), low=0.0, high=1.0, ndim=None):
def uniform(random_state, size=None, low=0.0, high=1.0, ndim=None):
"""
Sample from a uniform distribution between low and high.
If the size argument is ambiguous on the number of
dimensions, the first argument may be a plain integer
to supplement the missing information.
If the size argument is ambiguous on the number of dimensions, ndim
may be a plain integer to supplement the missing information.
If size is None, the output shape will be determined by the shapes
of low and high.
"""
ndim, size = _infer_ndim(ndim, size)
op = RandomFunction('uniform',
low = tensor.as_tensor_variable(low)
high = tensor.as_tensor_variable(high)
ndim, size = _infer_ndim(ndim, size, low, high)
op = RandomFunction('uniform',
tensor.TensorType(dtype = 'float64', broadcastable = (False,)*ndim) )
return op(random_state, size, low, high)
def binomial(random_state, size=(), n=1, prob=0.5, ndim=None):
def binomial(random_state, size=None, n=1, prob=0.5, ndim=None):
"""
Sample n times with probability of success prob for each trial, return the number of
successes.
Sample n times with probability of success prob for each trial,
return the number of successes.
If the size argument is ambiguous on the number of dimensions, the first argument may be a
plain integer to supplement the missing information.
If the size argument is ambiguous on the number of dimensions, ndim
may be a plain integer to supplement the missing information.
If size is None, the output shape will be determined by the shapes
of n and prob.
"""
ndim, size = _infer_ndim(ndim, size)
op = RandomFunction('binomial',
n = tensor.as_tensor_variable(n)
prob = tensor.as_tensor_variable(prob)
ndim, size = _infer_ndim(ndim, size, n, prob)
op = RandomFunction('binomial',
tensor.TensorType(dtype = 'int64', broadcastable = (False,)*ndim) )
return op(random_state, size, n, prob)
def normal(random_state, size=(), avg=0.0, std=1.0, ndim=None):
def normal(random_state, size=None, avg=0.0, std=1.0, ndim=None):
"""
Usage: normal(random_state, size,
Sample from a normal distribution centered on avg with
the specified standard deviation (std)
the specified standard deviation (std).
If the size argument is ambiguous on the number of dimensions, ndim
may be a plain integer to supplement the missing information.
If the size argument is ambiguous on the number of
dimensions, the first argument may be a plain integer
to supplement the missing information.
If size is None, the output shape will be determined by the shapes
of avg and std.
"""
ndim, size = _infer_ndim(ndim, size)
op = RandomFunction('normal',
avg = tensor.as_tensor_variable(avg)
std = tensor.as_tensor_variable(std)
ndim, size = _infer_ndim(ndim, size, avg, std)
op = RandomFunction('normal',
tensor.TensorType(dtype = 'float64', broadcastable = (False,)*ndim) )
return op(random_state, size, avg, std)
def random_integers(random_state, size=(), low=0, high=1, ndim=None):
def random_integers_helper(random_state, low, high, size):
    '''
    Helper function to draw random integers.

    This is a generalization of numpy.random.random_integers to the case where
    low and high are tensors: one scalar draw is made per output cell, with
    `low` and `high` broadcasted against each other (and against `size`, when
    given).

    :param random_state: an object with a ``random_integers(low, high)``
        method drawing one scalar sample (e.g. numpy.random.RandomState).
    :param low: ndarray of lower bounds (inclusive).
    :param high: ndarray of upper bounds (inclusive).
    :param size: the output shape, or None to infer it from the broadcasted
        shapes of `low` and `high`.
    :returns: ndarray of shape `size` (or of the broadcasted shape of `low`
        and `high` when `size` is None) containing the drawn samples.
    '''
    # Figure out the output shape
    if size is not None:
        out_ndim = len(size)
    else:
        out_ndim = max(low.ndim, high.ndim)
    # broadcast low and high to out_ndim dimensions
    if low.ndim > out_ndim:
        raise ValueError('low.ndim (%i) should not be larger than len(size) (%i)' % (low.ndim, out_ndim),
                low, size)
    if low.ndim < out_ndim:
        low = low.reshape((1,)*(out_ndim-low.ndim) + low.shape)
    if high.ndim > out_ndim:
        raise ValueError('high.ndim (%i) should not be larger than len(size) (%i)' % (high.ndim, out_ndim),
                high, size)
    if high.ndim < out_ndim:
        high = high.reshape((1,)*(out_ndim-high.ndim) + high.shape)
    if size is not None:
        out_size = tuple(size)
    else:
        # Infer the output shape: each dimension is the larger of the
        # (possibly length-1, i.e. broadcasted) low/high dimensions.
        out_size = ()
        for dim in range(out_ndim):
            dim_len = max(low.shape[dim], high.shape[dim])
            out_size = out_size + (dim_len,)

    # Build the indices over which to loop
    # This process leads to the same result as numpy.ndindex for out_ind,
    # but allows for indices of low and high to be repeated if these
    # tensors are broadcasted along some dimensions.
    # TODO: move the logic somewhere else
    out_ind = [()]
    low_ind = [()]
    high_ind = [()]
    for dim in range(out_ndim):
        _out_ind = []
        _low_ind = []
        _high_ind = []
        o_range = range(out_size[dim])
        if low.shape[dim] == out_size[dim]:
            l_range = o_range
        elif low.shape[dim] == 1: #broadcast
            l_range = (0,)*out_size[dim]
        else:
            raise ValueError('low.shape[%i] (%i) should be equal to size[%i] (%i) or to 1'\
                    % (dim, low.shape[dim], dim, out_size[dim]), low, size)
        if high.shape[dim] == out_size[dim]:
            h_range = o_range
        elif high.shape[dim] == 1: #broadcast
            h_range = (0,)*out_size[dim]
        else:
            raise ValueError('high.shape[%i] (%i) should be equal to size[%i] (%i) or to 1'\
                    % (dim, high.shape[dim], dim, out_size[dim]), high, size)
        for (ol, ll, hl) in zip(out_ind, low_ind, high_ind):
            for oi, li, hi in zip(o_range, l_range, h_range):
                _out_ind.append(ol + (oi,))
                _low_ind.append(ll + (li,))
                _high_ind.append(hl + (hi,))
        out_ind = _out_ind
        low_ind = _low_ind
        high_ind = _high_ind

    # Iterate over these indices, drawing one sample at a time from numpy
    out = numpy.ndarray(out_size)
    for oi, li, hi in zip(out_ind, low_ind, high_ind):
        # BUG FIX: the high bound must be indexed with the high index (hi),
        # not the low index (li). The original used high[li], which silently
        # read the wrong cell of `high` whenever `low` and `high` were
        # broadcasted differently (high_ind was built but never used).
        out[oi] = random_state.random_integers(low=low[li], high=high[hi])
    return out
def random_integers(random_state, size=None, low=0, high=1, ndim=None):
"""
Usage: random_integers(random_state, size, low=0, high=1)
Sample a random integer between low and high, both inclusive.
If the size argument is ambiguous on the number of
dimensions, the first argument may be a plain integer
to supplement the missing information.
If the size argument is ambiguous on the number of dimensions, ndim
may be a plain integer to supplement the missing information.
If size is None, the output shape will be determined by the shapes
of low and high.
"""
ndim, size = _infer_ndim(ndim, size)
op = RandomFunction('random_integers',
low = tensor.as_tensor_variable(low)
high = tensor.as_tensor_variable(high)
ndim, size = _infer_ndim(ndim, size, low, high)
op = RandomFunction(random_integers_helper,
tensor.TensorType(dtype = 'int64', broadcastable = (False,)*ndim) )
return op(random_state, size, low, high)
......@@ -260,7 +417,7 @@ def permutation_helper(random_state, n, shape):
output shape will be (p,q,n), because each permutation is of size n.
If you wish to perform a permutation of the elements of an existing vector,
see shuffle (to be implemented).
see shuffle_row_elements.
"""
# n should be a 0-dimension array
assert n.shape == ()
......@@ -268,49 +425,142 @@ def permutation_helper(random_state, n, shape):
# is a long, the numpy permutation function will crash on Windows.
n = int(n.item())
if shape is None:
# Draw only one permutation, equivalent to shape = ()
shape = ()
out_shape = list(shape)
out_shape.append(n)
out = numpy.zeros(out_shape, int)
for i in numpy.ndindex(*shape):
out[i] = random_state.permutation(n)
print 'RETURNING', out.shape
#print 'RETURNING', out.shape
return out
def permutation(random_state, size=(), n=1, ndim=None):
def permutation(random_state, size=None, n=1, ndim=None):
"""
Returns permutations of the integers between 0 and n-1, as many times
as required by size. For instance, if size=(p,q), p*q permutations
will be generated, and the output shape will be (p,q,n), because each
permutation is of size n.
Theano tries to infer the number of dimensions from the length of the size argument, but you
may always specify it with the `ndim` parameter.
Theano tries to infer the number of dimensions from the length of
the size argument and the shape of n, but you may always specify it
with the `ndim` parameter.
.. note::
.. note::
Note that the output will then be of dimension ndim+1.
"""
ndim, size = _infer_ndim(ndim, size)
print "NDIM", ndim, size
op = RandomFunction(permutation_helper,
#print "NDIM", ndim, size
op = RandomFunction(permutation_helper,
tensor.TensorType(dtype='int64', broadcastable=(False,)*(ndim+1)),
ndim_added=1)
return op(random_state, size, n)
def multinomial(random_state, size=(), n=1, pvals=[0.5, 0.5], ndim=None):
def multinomial_helper(random_state, n, pvals, size):
    '''
    Helper function drawing from multinomial distributions.

    This is a generalization of numpy.random.multinomial to the case where
    n and pvals are tensors: one multinomial draw (a row of length
    pvals.shape[-1]) is made per output cell, with n and pvals broadcasted
    against each other (and against size, when given).
    '''
    # The output has one extra trailing axis of length pvals.shape[-1],
    # since each multinomial draw is itself a vector.
    if size is not None:
        ndim = len(size)
    else:
        ndim = max(n.ndim, pvals.ndim - 1)

    # Broadcast n to ndim dimensions and pvals to ndim+1.
    if n.ndim > ndim:
        raise ValueError('n.ndim (%i) should not be larger than len(size) (%i)' % (n.ndim, ndim),
                n, size)
    if n.ndim < ndim:
        n = n.reshape((1,) * (ndim - n.ndim) + n.shape)
    if pvals.ndim - 1 > ndim:
        raise ValueError('pvals.ndim-1 (%i) should not be larger than len(size) (%i)' % (pvals.ndim - 1, ndim),
                pvals, size)
    if pvals.ndim - 1 < ndim:
        pvals = pvals.reshape((1,) * (ndim - pvals.ndim + 1) + pvals.shape)

    # Resolve the loop shape: given explicitly, or the broadcasted shape
    # of (the leading dims of) n and pvals.
    if size is None:
        size = tuple(max(n.shape[d], pvals.shape[d]) for d in range(ndim))
    else:
        size = tuple(size)
    out_size = size + (pvals.shape[-1],)

    # Build three parallel lists of index tuples: main_ind walks the rows
    # (inner-most 1D subtensors) of the output exactly like numpy.ndindex
    # would, while n_ind and pvals_ind repeat index 0 along any dimension
    # where the corresponding tensor is broadcasted.
    # TODO: move the logic somewhere else
    main_ind, n_ind, pvals_ind = [()], [()], [()]
    for dim in range(ndim):
        m_range = range(size[dim])
        if n.shape[dim] == size[dim]:
            n_range = m_range
        elif n.shape[dim] == 1: #broadcast
            n_range = (0,) * size[dim]
        else:
            raise ValueError('n.shape[%i] (%i) should be equal to size[%i] (%i) or to 1'\
                    % (dim, n.shape[dim], dim, size[dim]), n, size)
        if pvals.shape[dim] == size[dim]:
            p_range = m_range
        elif pvals.shape[dim] == 1: #broadcast
            p_range = (0,) * size[dim]
        else:
            raise ValueError('pvals.shape[%i] (%i) should be equal to size[%i] (%i) or to 1'\
                    % (dim, pvals.shape[dim], dim, size[dim]), pvals, size)
        expanded = [(ml + (mi,), nl + (ni,), pl + (pi,))
                    for ml, nl, pl in zip(main_ind, n_ind, pvals_ind)
                    for mi, ni, pi in zip(m_range, n_range, p_range)]
        if expanded:
            main_ind, n_ind, pvals_ind = map(list, zip(*expanded))
        else:
            main_ind, n_ind, pvals_ind = [], [], []

    # Iterate over these indices, drawing from one multinomial at a time
    out = numpy.ndarray(out_size)
    for mi, ni, pi in zip(main_ind, n_ind, pvals_ind):
        out[mi] = random_state.multinomial(n=n[ni], pvals=pvals[pi])
    return out
def multinomial(random_state, size=None, n=1, pvals=[0.5, 0.5], ndim=None):
"""
Sample n times from a multinomial distribution defined by probabilities pvals,
as many times as required by size. For instance, if size=(p,q), p*q
samples will be drawn, and the output shape will be (p,q,len(pvals)).
Sample n times from a multinomial distribution defined by
probabilities pvals, as many times as required by size. For
instance, if size=(p,q), p*q samples will be drawn, and the output
shape will be (p,q,len(pvals)).
Theano tries to infer the number of dimensions from the length of the size argument, but you
may always specify it with the `ndim` parameter.
Theano tries to infer the number of dimensions from the length of
the size argument and the shapes of n and pvals, but you may always
specify it with the `ndim` parameter.
.. note::
.. note::
Note that the output will then be of dimension ndim+1.
"""
ndim, size = _infer_ndim(ndim, size)
op = RandomFunction('multinomial',
n = tensor.as_tensor_variable(n)
pvals = tensor.as_tensor_variable(pvals)
ndim, size = _infer_ndim(ndim, size, n, pvals[0])
op = RandomFunction(multinomial_helper,
tensor.TensorType(dtype = 'int64', broadcastable = (False,)*(ndim+1)),
ndim_added=1)
return op(random_state, size, n, pvals)
......@@ -330,83 +580,86 @@ optdb.register('random_make_inplace', opt.in2out(random_make_inplace, ignore_new
class RandomStreamsBase(object):
def binomial(self, size=(), n=1, prob=0.5, ndim=None):
def binomial(self, size=None, n=1, prob=0.5, ndim=None):
"""
Sample n times with probability of success prob for each trial, return the number of
successes.
Sample n times with probability of success prob for each trial,
return the number of successes.
If the size argument is ambiguous on the number of dimensions, the first argument may be a
plain integer to supplement the missing information.
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing
information.
"""
return self.gen(binomial, size, n, prob, ndim=ndim)
def uniform(self, size=(), low=0.0, high=1.0, ndim=None):
def uniform(self, size=None, low=0.0, high=1.0, ndim=None):
"""
Sample a tensor of given size whose element from a uniform distribution between low and high.
Sample a tensor of given size whose element from a uniform
distribution between low and high.
If the size argument is ambiguous on the number of
dimensions, the first argument may be a plain integer
to supplement the missing information.
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing
information.
"""
return self.gen(uniform, size, low, high, ndim=ndim)
def normal(self, size=(), avg=0.0, std=1.0, ndim=None):
def normal(self, size=None, avg=0.0, std=1.0, ndim=None):
"""
Usage: normal(random_state, size,
Sample from a normal distribution centered on avg with
the specified standard deviation (std)
the specified standard deviation (std).
If the size argument is ambiguous on the number of
dimensions, the first argument may be a plain integer
to supplement the missing information.
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing
information.
"""
return self.gen(normal, size, avg, std, ndim=ndim)
def random_integers(self, size=(), low=0, high=1, ndim=None):
def random_integers(self, size=None, low=0, high=1, ndim=None):
"""
Usage: random_integers(random_state, size, low=0, high=1)
Sample a random integer between low and high, both inclusive.
If the size argument is ambiguous on the number of
dimensions, the first argument may be a plain integer
to supplement the missing information.
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing
information.
"""
return self.gen(random_integers, size, low, high, ndim=ndim)
def permutation(self, size=(), n=1, ndim=None):
def permutation(self, size=None, n=1, ndim=None):
"""
Returns permutations of the integers between 0 and n-1, as many times
as required by size. For instance, if size=(p,q), p*q permutations
will be generated, and the output shape will be (p,q,n), because each
permutation is of size n.
Theano tries to infer the number of dimensions from the length of the size argument, but you
may always specify it with the `ndim` parameter.
Theano tries to infer the number of dimensions from the length
of the size argument and the shape of n, but you may always
specify it with the `ndim` parameter.
.. note::
.. note::
Note that the output will then be of dimension ndim+1.
"""
return self.gen(permutation, size, n, ndim=ndim)
def multinomial(self, size=None, n=1, pvals=None, ndim=None):
    """
    Sample n times from a multinomial distribution defined by
    probabilities pvals, as many times as required by size. For
    instance, if size=(p,q), p*q samples will be drawn, and the
    output shape will be (p,q,len(pvals)).

    Theano tries to infer the number of dimensions from the length
    of the size argument and the shapes of n and pvals, but you may
    always specify it with the `ndim` parameter.

    .. note::
        Note that the output will then be of dimension ndim+1.
    """
    # None sentinel avoids a shared mutable default argument; the
    # effective default is still [0.5, 0.5] as before.
    if pvals is None:
        pvals = [0.5, 0.5]
    return self.gen(multinomial, size, n, pvals, ndim=ndim)
def shuffle_row_elements(self, input):
"""Return a variable with every row (rightmost index) shuffled.
This uses permutation random variable internally, available via the ``.permutation``
attribute of the return value.
This uses permutation random variable internally, available via
the ``.permutation`` attribute of the return value.
"""
perm = self.permutation(size=input.shape[:-1], n=input.shape[-1], ndim=input.ndim-1)
shuffled = tensor.permute_row_elements(input, perm)
......
......@@ -170,10 +170,7 @@ class T_RandomStreams(unittest.TestCase):
# ndim specified, inconsistent with shape, should raise ValueError
m3 = Module()
m3.random = RandomStreams(234)
m3.fn = Method([], m3.random.uniform((2,2), ndim=1))
made3 = m3.make()
made3.random.initialize()
self.assertRaises(ValueError, made3.fn)
self.assertRaises(ValueError, m3.random.uniform, (2,2), ndim=1)
def test_uniform(self):
"""Test that RandomStreams.uniform generates the same results as numpy"""
......
......@@ -19,7 +19,9 @@ class T_random_function(unittest.TestCase):
rng_R = random_state_type()
post_r, out = rf(rng_R, (4,))
# If calling RandomFunction directly, all args have to be specified,
# because shape will have to be moved to the end
post_r, out = rf(rng_R, (4,), 0., 1.)
assert out.type == tensor.dvector
......@@ -67,8 +69,9 @@ class T_random_function(unittest.TestCase):
rf2 = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector)
rng_R = random_state_type()
# use make_node to override some of the self.args
post_r2, out2 = rf2(rng_R, (4,))
# If calling RandomFunction directly, all args have to be specified,
# because shape will have to be moved to the end
post_r2, out2 = rf2(rng_R, (4,), 0., 1.)
f = compile.function(
[compile.In(rng_R,
......@@ -90,7 +93,7 @@ class T_random_function(unittest.TestCase):
assert not numpy.allclose(val0, val1)
def test_random_function_ndim(self):
"""Test that random_function helper function accepts ndim as first argument"""
"""Test that random_function helper function accepts argument ndim"""
rng_R = random_state_type()
# ndim is an optional argument indicating the length of the 'shape'
......@@ -102,16 +105,12 @@ class T_random_function(unittest.TestCase):
post_out2_4_4, out2_4_4= uniform(rng_R, (4, 4), ndim=2)
# ndim specified, but not compatible with shape
post_out2_4, out2_4 = uniform(rng_R, (4,), ndim=2)
self.assertRaises(ValueError, uniform, rng_R, (4,), ndim=2)
f_ok = compile.function(
[compile.In(rng_R, value=numpy.random.RandomState(55), update=post_out2_4_4, mutable=True)],
[out4, out1_4, out2_4_4],
accept_inplace=True)
f_no = compile.function(
[compile.In(rng_R, value=numpy.random.RandomState(55), update=post_out2_4, mutable=True)],
[out2_4],
accept_inplace=True)
# The correct cases should execute properly
o4, o1_4, o2_4_4 = f_ok()
......@@ -120,9 +119,6 @@ class T_random_function(unittest.TestCase):
self.assertTrue(numpy.allclose(o4, o1_4))
self.assertTrue(numpy.allclose(o4, o2_4_4[0]))
# The incorrect case should raise ValueError
self.assertRaises(ValueError, f_no)
def test_random_function_ndim_added(self):
"""Test that random_function helper function accepts ndim_added as keyword argument"""
# If using numpy's uniform distribution, ndim_added should be 0,
......@@ -395,6 +391,267 @@ class T_random_function(unittest.TestCase):
self.assertRaises(ValueError, f, rng_state0, [4])
self.assertRaises(ValueError, f, rng_state0, [4,3,4,5])
def test_default_shape(self):
    """Without a size argument, uniform() and multinomial() each draw a
    single sample that matches numpy seeded identically."""
    rng_var = random_state_type()
    updated_rng, draw = uniform(rng_var)
    sample_uniform = compile.function([rng_var], [updated_rng, draw],
            accept_inplace=True)

    # Two RandomStates from the same seed: one threaded through Theano,
    # one consumed directly by numpy as the reference.
    theano_state = numpy.random.RandomState(55)
    reference_rng = numpy.random.RandomState(55)

    state_a, drawn_a = sample_uniform(theano_state)
    state_b, drawn_b = sample_uniform(state_a)
    assert numpy.all(drawn_a == reference_rng.uniform())
    assert numpy.all(drawn_b == reference_rng.uniform())

    # Continue from the state left by the uniform draws.
    updated_rng, draw = multinomial(rng_var)
    sample_multinomial = compile.function([rng_var], [updated_rng, draw],
            accept_inplace=True)
    state_c, drawn_c = sample_multinomial(state_b)
    assert numpy.all(drawn_c == reference_rng.multinomial(n=1, pvals=[.5, .5]))
def test_vector_arguments(self):
    """uniform() accepts vector low/high arguments: the output is 1-d,
    its length follows the arguments, and an explicit symbolic size must
    stay consistent with the argument lengths."""
    rng_R = random_state_type()
    low = tensor.vector()
    # Vector low against scalar high -> 1-d output.
    post_r, out = uniform(rng_R, low=low, high=1)
    assert out.ndim == 1
    f = compile.function([rng_R, low], [post_r, out], accept_inplace=True)

    # Same seed for the Theano-threaded state and the numpy reference.
    rng_state0 = numpy.random.RandomState(55)
    numpy_rng = numpy.random.RandomState(55)
    # Each call feeds the updated state returned by the previous one.
    post0, val0 = f(rng_state0, [-5, .5, 0, 1])
    post1, val1 = f(post0, [.9])
    numpy_val0 = numpy_rng.uniform(low=[-5, .5, 0, 1], high=1)
    numpy_val1 = numpy_rng.uniform(low=[.9], high=1)
    assert numpy.all(val0 == numpy_val0)
    assert numpy.all(val1 == numpy_val1)

    # Both low and high as vectors.
    high = tensor.vector()
    post_rb, outb = uniform(rng_R, low=low, high=high)
    assert outb.ndim == 1
    fb = compile.function([rng_R, low, high], [post_rb, outb], accept_inplace=True)
    post0b, val0b = fb(post1, [-4., -2], [-1, 0])
    post1b, val1b = fb(post0b, [-4.], [-1])
    numpy_val0b = numpy_rng.uniform(low=[-4., -2], high=[-1, 0])
    numpy_val1b = numpy_rng.uniform(low=[-4.], high=[-1])
    assert numpy.all(val0b == numpy_val0b)
    assert numpy.all(val1b == numpy_val1b)
    # Mismatched argument lengths (2 vs 3) must be rejected.
    self.assertRaises(ValueError, fb, post1b, [-4., -2], [-1, 0, 1])
    #TODO: do we want that?
    #self.assertRaises(ValueError, fb, post1b, [-4., -2], [-1])

    # Symbolic size vector with explicit ndim=1.
    size = tensor.lvector()
    post_rc, outc = uniform(rng_R, low=low, high=high, size=size, ndim=1)
    fc = compile.function([rng_R, low, high, size], [post_rc, outc], accept_inplace=True)
    post0c, val0c = fc(post1b, [-4., -2], [-1, 0], [2])
    post1c, val1c = fc(post0c, [-4.], [-1], [1])
    numpy_val0c = numpy_rng.uniform(low=[-4., -2], high=[-1, 0])
    numpy_val1c = numpy_rng.uniform(low=[-4.], high=[-1])
    assert numpy.all(val0c == numpy_val0c)
    assert numpy.all(val1c == numpy_val1c)
    # size inconsistent with argument lengths, or not 1-d, is rejected.
    self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1, 0], [1])
    self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1, 0], [1,2])
    self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1, 0], [2,1])
    self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1], [1])
    #TODO: do we want that?
    #self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1], [2])
def test_broadcast_arguments(self):
    """uniform() broadcasts its parameters: a vector low against a
    column high yields a 2-d output, mirroring numpy's broadcasting."""
    rng_R = random_state_type()
    low = tensor.vector()
    high = tensor.col()
    post_r, out = uniform(rng_R, low=low, high=high)
    # vector (1-d) broadcast with col (2-d) -> 2-d output
    assert out.ndim == 2
    f = compile.function([rng_R, low, high], [post_r, out], accept_inplace=True)

    rng_state0 = numpy.random.RandomState(55)
    numpy_rng = numpy.random.RandomState(55)
    # Three broadcast shapes: (4,)x(1,1), (1,)x(3,1), (4,)x(3,1);
    # each call consumes the state returned by the previous one.
    post0, val0 = f(rng_state0, [-5, .5, 0, 1], [[1.]])
    post1, val1 = f(post0, [.9], [[1.], [1.1], [1.5]])
    post2, val2 = f(post1, [-5, .5, 0, 1], [[1.], [1.1], [1.5]])
    numpy_val0 = numpy_rng.uniform(low=[-5, .5, 0, 1], high=[1.])
    numpy_val1 = numpy_rng.uniform(low=[.9], high=[[1.], [1.1], [1.5]])
    numpy_val2 = numpy_rng.uniform(low=[-5, .5, 0, 1], high=[[1.], [1.1], [1.5]])
    assert numpy.all(val0 == numpy_val0)
    assert numpy.all(val1 == numpy_val1)
    assert numpy.all(val2 == numpy_val2)
def test_uniform_vector(self):
    """uniform() with vector low/high matches numpy elementwise; an
    explicit size must agree with the argument lengths."""
    rng_var = random_state_type()
    low = tensor.vector()
    high = tensor.vector()
    updated_rng, draw = uniform(rng_var, low=low, high=high)
    # vector parameters -> 1-d output
    assert draw.ndim == 1
    sample = compile.function([rng_var, low, high], [updated_rng, draw],
            accept_inplace=True)

    lo_vals = [.1, .2, .3]
    hi_vals = [1.1, 2.2, 3.3]
    state = numpy.random.RandomState(55)
    reference_rng = numpy.random.RandomState(55)

    # Length-3 arguments.
    state, drawn = sample(state, lo_vals, hi_vals)
    assert numpy.all(drawn == reference_rng.uniform(low=lo_vals, high=hi_vals))

    # Length-2 arguments: output length follows the arguments.
    state, drawn = sample(state, lo_vals[:-1], hi_vals[:-1])
    assert numpy.all(
            drawn == reference_rng.uniform(low=lo_vals[:-1], high=hi_vals[:-1]))

    # With an explicit size=(3,), argument lengths must match it.
    sized_sample = compile.function([rng_var, low, high],
            uniform(rng_var, low=low, high=high, size=(3,)),
            accept_inplace=True)
    state, drawn = sized_sample(state, lo_vals, hi_vals)
    assert numpy.all(
            drawn == reference_rng.uniform(low=lo_vals, high=hi_vals, size=(3,)))
    self.assertRaises(ValueError, sized_sample, state, lo_vals[:-1], hi_vals[:-1])
def test_binomial_vector(self):
    """binomial() with vector n/prob draws elementwise, matching numpy
    with the same seed; an explicit size must match argument lengths."""
    rng_R = random_state_type()
    n = tensor.lvector()
    prob = tensor.vector()
    post_r, out = binomial(rng_R, n=n, prob=prob)
    # vector parameters -> 1-d output
    assert out.ndim == 1
    f = compile.function([rng_R, n, prob], [post_r, out], accept_inplace=True)

    n_val = [1, 2, 3]
    prob_val = [.1, .2, .3]
    # Same seed: one state threaded through Theano, one numpy reference.
    rng = numpy.random.RandomState(55)
    numpy_rng = numpy.random.RandomState(55)

    # Arguments of size (3,)
    rng0, val0 = f(rng, n_val, prob_val)
    numpy_val0 = numpy_rng.binomial(n=n_val, p=prob_val)
    assert numpy.all(val0 == numpy_val0)

    # arguments of size (2,): the output length follows the arguments
    rng1, val1 = f(rng0, n_val[:-1], prob_val[:-1])
    numpy_val1 = numpy_rng.binomial(n=n_val[:-1], p=prob_val[:-1])
    assert numpy.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = compile.function([rng_R, n, prob],
            binomial(rng_R, n=n, prob=prob, size=(3,)),
            accept_inplace=True)
    rng2, val2 = g(rng1, n_val, prob_val)
    numpy_val2 = numpy_rng.binomial(n=n_val, p=prob_val, size=(3,))
    assert numpy.all(val2 == numpy_val2)
    # explicit size=(3,) is incompatible with length-2 arguments
    self.assertRaises(ValueError, g, rng2, n_val[:-1], prob_val[:-1])
def test_normal_vector(self):
    """normal() with vector avg/std draws elementwise, matching numpy
    with the same seed; an explicit size must match argument lengths."""
    rng_R = random_state_type()
    avg = tensor.vector()
    std = tensor.vector()
    post_r, out = normal(rng_R, avg=avg, std=std)
    # vector parameters -> 1-d output
    assert out.ndim == 1
    f = compile.function([rng_R, avg, std], [post_r, out], accept_inplace=True)

    avg_val = [1, 2, 3]
    std_val = [.1, .2, .3]
    # Same seed: one state threaded through Theano, one numpy reference.
    rng = numpy.random.RandomState(55)
    numpy_rng = numpy.random.RandomState(55)

    # Arguments of size (3,)
    rng0, val0 = f(rng, avg_val, std_val)
    numpy_val0 = numpy_rng.normal(loc=avg_val, scale=std_val)
    assert numpy.all(val0 == numpy_val0)

    # arguments of size (2,): the output length follows the arguments
    rng1, val1 = f(rng0, avg_val[:-1], std_val[:-1])
    numpy_val1 = numpy_rng.normal(loc=avg_val[:-1], scale=std_val[:-1])
    assert numpy.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = compile.function([rng_R, avg, std],
            normal(rng_R, avg=avg, std=std, size=(3,)),
            accept_inplace=True)
    rng2, val2 = g(rng1, avg_val, std_val)
    numpy_val2 = numpy_rng.normal(loc=avg_val, scale=std_val, size=(3,))
    assert numpy.all(val2 == numpy_val2)
    # explicit size=(3,) is incompatible with length-2 arguments
    self.assertRaises(ValueError, g, rng2, avg_val[:-1], std_val[:-1])
def test_random_integers_vector(self):
    """random_integers() with vector low/high draws one integer per
    element, matching an elementwise sequence of numpy draws.

    `low` and `high` are int64 vectors (lvector), so the test values
    must be integers: the previous float values ([.1, .2, .3], copied
    from test_uniform_vector) could not be accepted losslessly by the
    integer input type.
    """
    rng_R = random_state_type()
    low = tensor.lvector()
    high = tensor.lvector()
    post_r, out = random_integers(rng_R, low=low, high=high)
    # vector parameters -> 1-d output
    assert out.ndim == 1
    f = compile.function([rng_R, low, high], [post_r, out], accept_inplace=True)

    # Integer bounds, one (low, high) pair per output element.
    low_val = [1, 2, 3]
    high_val = [10, 20, 30]
    # Same seed: one state threaded through Theano, one numpy reference.
    rng = numpy.random.RandomState(55)
    numpy_rng = numpy.random.RandomState(55)

    # Arguments of size (3,): numpy has no vectorized random_integers,
    # so the reference draws one sample per pair, in order.
    rng0, val0 = f(rng, low_val, high_val)
    numpy_val0 = numpy.asarray([numpy_rng.random_integers(low=lv, high=hv)
        for lv, hv in zip(low_val, high_val)])
    assert numpy.all(val0 == numpy_val0)

    # arguments of size (2,): the output length follows the arguments
    rng1, val1 = f(rng0, low_val[:-1], high_val[:-1])
    numpy_val1 = numpy.asarray([numpy_rng.random_integers(low=lv, high=hv)
        for lv, hv in zip(low_val[:-1], high_val[:-1])])
    assert numpy.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = compile.function([rng_R, low, high],
            random_integers(rng_R, low=low, high=high, size=(3,)),
            accept_inplace=True)
    rng2, val2 = g(rng1, low_val, high_val)
    numpy_val2 = numpy.asarray([numpy_rng.random_integers(low=lv, high=hv)
        for lv, hv in zip(low_val, high_val)])
    assert numpy.all(val2 == numpy_val2)
    # explicit size=(3,) is incompatible with length-2 arguments
    self.assertRaises(ValueError, g, rng2, low_val[:-1], high_val[:-1])
# Vectorized permutation don't make sense: the only parameter, n,
# controls one dimension of the returned tensor.
def test_multinomial_vector(self):
    """multinomial() with vector n and matrix pvals draws one
    multinomial sample per row, matching an elementwise sequence of
    numpy draws; an explicit size must match the argument lengths."""
    rng_R = random_state_type()
    n = tensor.lvector()
    pvals = tensor.matrix()
    post_r, out = multinomial(rng_R, n=n, pvals=pvals)
    # one row of counts per (n, pvals-row) pair -> 2-d output
    assert out.ndim == 2
    f = compile.function([rng_R, n, pvals], [post_r, out], accept_inplace=True)

    n_val = [1, 2, 3]
    pvals_val = [[.1, .9], [.2, .8], [.3, .7]]
    # Same seed: one state threaded through Theano, one numpy reference.
    rng = numpy.random.RandomState(55)
    numpy_rng = numpy.random.RandomState(55)

    # Arguments of size (3,): numpy draws one multinomial per row, in order.
    rng0, val0 = f(rng, n_val, pvals_val)
    numpy_val0 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)
        for nv, pv in zip(n_val, pvals_val)])
    assert numpy.all(val0 == numpy_val0)

    # arguments of size (2,): the output length follows the arguments
    rng1, val1 = f(rng0, n_val[:-1], pvals_val[:-1])
    numpy_val1 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)
        for nv, pv in zip(n_val[:-1], pvals_val[:-1])])
    assert numpy.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = compile.function([rng_R, n, pvals],
            multinomial(rng_R, n=n, pvals=pvals, size=(3,)),
            accept_inplace=True)
    rng2, val2 = g(rng1, n_val, pvals_val)
    numpy_val2 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)
        for nv, pv in zip(n_val, pvals_val)])
    assert numpy.all(val2 == numpy_val2)
    # explicit size=(3,) is incompatible with length-2 arguments
    self.assertRaises(ValueError, g, rng2, n_val[:-1], pvals_val[:-1])
if __name__ == '__main__':
    # Run only this test module through Theano's test entry point.
    from theano.tests import main
    main("test_raw_random")
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论