提交 7331258b authored 作者: Amjad Almahairi's avatar Amjad Almahairi

inherit from MultinomialFromUniform; output is int64 by default; fix tests

上级 ae635037
......@@ -7,6 +7,7 @@ from theano.gof import local_optimizer
from theano.tensor import NotScalarConstantError, get_scalar_constant_value
from theano.scalar import as_scalar
import sys
import copy
from theano.sandbox.cuda import cuda_available, GpuOp
if cuda_available:
......@@ -194,23 +195,12 @@ class MultinomialFromUniform(Op):
break
class MultinomialWOReplacementFromUniform(Op):
class MultinomialWOReplacementFromUniform(MultinomialFromUniform):
"""
Converts samples from a uniform into sample from a multinomial.
Converts samples from a uniform into sample (without replacement) from a multinomial.
"""
__props__ = ("odtype",)
def __init__(self, odtype):
    """Store the requested output dtype.

    :param odtype: either 'auto' (let make_node pick an integer dtype)
        or an explicit numpy dtype name for the sampled output.
    """
    self.odtype = odtype
def __str__(self):
    """Render the Op as ``ClassName{odtype}`` for graph printing/debugging."""
    return '%s{%s}' % (self.__class__.__name__, self.odtype)
def __setstate__(self, dct):
    """Restore pickled state by copying the saved attribute dict onto self."""
    self.__dict__.update(dct)
def make_node(self, pvals, unis, n=1):
pvals = T.as_tensor_variable(pvals)
unis = T.as_tensor_variable(unis)
......@@ -219,22 +209,16 @@ class MultinomialWOReplacementFromUniform(Op):
if unis.ndim != 1:
raise NotImplementedError('unis ndim should be 1', unis.ndim)
if self.odtype == 'auto':
if sys.maxsize > 2**32:
odtype = 'int64'
else:
odtype = 'int32'
odtype = 'int64'
else:
odtype = self.odtype
out = T.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
return Apply(self, [pvals, unis, as_scalar(n)], [out])
def grad(self, ins, outgrads):
    """Return a zero gradient for every input.

    NOTE(review): gz is unpacked but intentionally unused — the output is
    a discrete sample, so the gradient w.r.t. pvals, unis and n is taken
    to be zero (presumably because sampling is non-differentiable —
    confirm against the Theano gradient conventions).
    """
    pvals, unis, n = ins
    (gz,) = outgrads
    return [T.zeros_like(x) for x in ins]
def perform(self, node, ins, outs):
(pvals, unis, n_samples) = ins
# make a copy so we do not overwrite the input
pvals = copy.copy(pvals)
(z,) = outs
if n_samples > pvals.shape[1]:
......@@ -245,10 +229,10 @@ class MultinomialWOReplacementFromUniform(Op):
raise ValueError("unis.shape[0] != pvals.shape[0] * n_samples",
unis.shape[0], pvals.shape[0], n_samples)
if sys.maxsize > 2**32:
if self.odtype == 'auto':
odtype = 'int64'
else:
odtype = 'int32'
odtype = self.odtype
if z[0] is None or not numpy.all(z[0].shape == [pvals.shape[0], n_samples]):
z[0] = -1 * numpy.ones((pvals.shape[0], n_samples), dtype=odtype)
......@@ -270,6 +254,12 @@ class MultinomialWOReplacementFromUniform(Op):
pvals[n] /= pvals[n].sum()
break
def c_code_cache_version(self):
    """Return None: no versioned C code to cache (c_code is unimplemented)."""
    return None
def c_code(self, node, name, ins, outs, sub):
    """No C implementation; Theano falls back to the Python perform()."""
    raise NotImplementedError('no C implementation yet!')
class GpuMultinomialFromUniform(MultinomialFromUniform, GpuOp):
"""
......
......@@ -68,7 +68,7 @@ class test_OP(unittest.TestCase):
numpy.random.seed(12345)
pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
avg_pvals = numpy.zeros((n_elements,))
avg_pvals = numpy.zeros((n_elements,), dtype=config.floatX)
for rep in range(10000):
uni = numpy.random.rand(n_selected).astype(config.floatX)
......@@ -76,7 +76,8 @@ class test_OP(unittest.TestCase):
res = numpy.squeeze(res)
avg_pvals[res] += 1
avg_pvals /= avg_pvals.sum()
assert numpy.mean(abs(avg_pvals - pvals)) < mean_rtol
avg_diff = numpy.mean(abs(avg_pvals - pvals))
assert avg_diff < mean_rtol, avg_diff
class test_function(unittest.TestCase):
......@@ -143,11 +144,12 @@ class test_function(unittest.TestCase):
numpy.random.seed(12345)
pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
pvals /= pvals.sum(1)
avg_pvals = numpy.zeros((n_elements,))
avg_pvals = numpy.zeros((n_elements,), dtype=config.floatX)
for rep in range(10000):
res = f(pvals, n_selected)
res = numpy.squeeze(res)
avg_pvals[res] += 1
avg_pvals /= avg_pvals.sum()
assert numpy.mean(abs(avg_pvals - pvals)) < mean_rtol
avg_diff = numpy.mean(abs(avg_pvals - pvals))
assert avg_diff < mean_rtol
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论