提交 39db1f8e · 作者: lamblin

Merge pull request #1465 from nouiz/tests

Tests
......@@ -1392,7 +1392,8 @@ class _CThunk(object):
trace = ()
try:
exc_type, _exc_value, exc_trace = self.error_storage
self.position_of_error = self.nodes.index(task)
if task in self.nodes:
self.position_of_error = self.nodes.index(task)
# this can be used to retrieve the location the Op was declared
exc_value = exc_type(_exc_value)
exc_value.__thunk_trace__ = trace
......
......@@ -909,15 +909,17 @@ def _populate_grad_dict(var_to_app_to_idx,
orig_output, new_output_grad = packed
if not hasattr(orig_output, 'shape'):
continue
if isinstance(new_output_grad.type, DisconnectedType):
continue
for orig_output_v, new_output_grad_v in get_debug_values(
node.outputs, new_output_grads):
*packed):
o_shape = orig_output_v.shape
g_shape = new_output_grad_v.shape
if o_shape != g_shape:
raise ValueError("Got a gradient of shape " + \
str(o_shape) + " on an output of shape " + \
str(g_shape))
raise ValueError(
"Got a gradient of shape " +
str(o_shape) + " on an output of shape " +
str(g_shape))
input_grads = node.op.grad(inputs, new_output_grads)
......
......@@ -219,7 +219,7 @@ def test_rop_lop():
raised = False
try:
tmp = tensor.Rop(
tensor.Rop(
theano.clone(y, replace={mx: break_op(mx)}),
mx,
mv)
......@@ -283,7 +283,7 @@ class test_diag(unittest.TestCase):
test_diag test makes sure that linalg.diag instantiates
the right op based on the dimension of the input.
"""
def __init__(self, name, mode=None, shared=tensor.shared,
def __init__(self, name, mode=None, shared=tensor._shared,
floatX=None, type=tensor.TensorType):
self.mode = mode
self.shared = shared
......
......@@ -2750,9 +2750,9 @@ class T_Scan(unittest.TestCase):
outputs_info=[numpy.asarray([0.0, 0.0], theano.config.floatX),
None])
f = theano.function([inp], [i_t, i_tm1])
val = numpy.arange(10).reshape(5, 2)
val = numpy.arange(10).reshape(5, 2).astype(theano.config.floatX)
ret = f(val)
utt.assert_allclose(ret[0], val+10)
utt.assert_allclose(ret[0], val + 10)
utt.assert_allclose(ret[1], [[0., 0.],
[10., 11.],
[12., 13.],
......
......@@ -2677,6 +2677,27 @@ class TrueDot(gof.op.Op):
rval = x.dot(y)
if not scipy.sparse.issparse(rval):
rval = getattr(scipy.sparse, x.format + '_matrix')(rval)
#x.dot call tocsr() that will "upcast" to ['int8', 'uint8', 'short',
# 'ushort', 'intc', 'uintc', 'longlong', 'ulonglong', 'single',
# 'double', 'longdouble', 'csingle', 'cdouble', 'clongdouble']
# But ulonglong is uint64 on x86-64, but with a different typenum!
if rval.dtype.num != numpy.dtype(str(rval.dtype)).num:
assert str(rval.dtype) == node.outputs[0].dtype
# Create a view with the expected typenum.
format = node.outputs[0].type.format
data = rval.data.view(dtype=node.outputs[0].dtype)
indices = rval.indices
indptr = rval.indptr
shape = rval.shape
# No need to copy indices and indptr as in CSM.perform(),
# as there is only one user of them.
if format == 'csc':
rval = scipy.sparse.csc_matrix((data, indices, indptr),
shape, copy=False)
else:
assert format == 'csr'
rval = scipy.sparse.csr_matrix((data, indices, indptr),
shape, copy=False)
out[0] = rval
def grad(self, (x, y), (gz, )):
......
......@@ -130,9 +130,9 @@ class StructuredDotCSC(gof.Op):
if node.inputs[4].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for b')
typenum_z = node.outputs[0].type.dtype_specs()[-1] # retrieve dtype number
typenum_a_val = node.inputs[0].type.dtype_specs()[-1] # retrieve dtype number
typenum_b = node.inputs[4].type.dtype_specs()[-1] # retrieve dtype number
typenum_z = node.outputs[0].type.dtype_specs()[2] # retrieve dtype number
typenum_a_val = node.inputs[0].type.dtype_specs()[2] # retrieve dtype number
typenum_b = node.inputs[4].type.dtype_specs()[2] # retrieve dtype number
rval = """
......@@ -318,7 +318,7 @@ class StructuredDotCSR(gof.Op):
@param sub: TODO, not too sure, something to do with weave probably
"""
# retrieve dtype number
typenum_z = tensor.TensorType(self.dtype_out, []).dtype_specs()[-1]
typenum_z = tensor.TensorType(self.dtype_out, []).dtype_specs()[2]
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a_val')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
......@@ -550,11 +550,11 @@ class UsmmCscDense(gof.Op):
conv_type = "double"
axpy = "daxpy_"
# retrieve dtype numbers
typenum_alpha = node.inputs[0].type.dtype_specs()[-1]
typenum_x_val = node.inputs[1].type.dtype_specs()[-1]
typenum_y = node.inputs[5].type.dtype_specs()[-1]
typenum_z = node.inputs[6].type.dtype_specs()[-1]
typenum_zn = node.outputs[0].type.dtype_specs()[-1]
typenum_alpha = node.inputs[0].type.dtype_specs()[2]
typenum_x_val = node.inputs[1].type.dtype_specs()[2]
typenum_y = node.inputs[5].type.dtype_specs()[2]
typenum_z = node.inputs[6].type.dtype_specs()[2]
typenum_zn = node.outputs[0].type.dtype_specs()[2]
inplace = int(self.inplace)
......@@ -761,7 +761,7 @@ class CSMGradC(gof.Op):
def c_code(self, node, name, (a_val, a_ind, a_ptr, a_dim,
b_val, b_ind, b_ptr, b_dim), (z,), sub):
# retrieve dtype number
typenum_z = node.outputs[0].type.dtype_specs()[-1]
typenum_z = node.outputs[0].type.dtype_specs()[2]
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a_val')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
......@@ -1558,15 +1558,15 @@ class SamplingDotCSR(gof.Op):
cdot = "ddot_"
# retrieve dtype number
typenum_x = node.inputs[0].type.dtype_specs()[-1]
typenum_y = node.inputs[1].type.dtype_specs()[-1]
typenum_p = node.inputs[2].type.dtype_specs()[-1]
typenum_x = node.inputs[0].type.dtype_specs()[2]
typenum_y = node.inputs[1].type.dtype_specs()[2]
typenum_p = node.inputs[2].type.dtype_specs()[2]
typenum_zd = tensor.TensorType(node.outputs[0].dtype,
[]).dtype_specs()[-1]
[]).dtype_specs()[2]
typenum_zi = tensor.TensorType(node.outputs[1].dtype,
[]).dtype_specs()[-1]
[]).dtype_specs()[2]
typenum_zp = tensor.TensorType(node.outputs[2].dtype,
[]).dtype_specs()[-1]
[]).dtype_specs()[2]
rval = """
if (PyArray_NDIM(%(x)s) != 2) {
......
import sys
import traceback
from copy import copy
from itertools import izip
......@@ -10,7 +9,7 @@ from theano import gof
from theano.gof import Apply, Op
from theano import scalar
from theano.scalar import Scalar
from theano.printing import min_informative_str, pprint
from theano.printing import pprint
from theano.gof.python25 import all, any
from theano.tensor.utils import hash_from_dict
from theano.gradient import DisconnectedType
......@@ -741,7 +740,7 @@ class Elemwise(Op):
scalar_ograds = map(as_scalar, ograds)
scalar_igrads = self.scalar_op.grad(scalar_inputs, scalar_ograds)
for igrad in scalar_igrads:
assert igrad is not None
assert igrad is not None, self.scalar_op
finally:
......
......@@ -30,10 +30,10 @@ def good_seed_param(seed):
return True
AddConfigVar('unittests.rseed',
"Seed to use for randomized unit tests. "
"Special value 'random' means using a seed of None.",
StrParam(666, is_valid=good_seed_param),
in_c_key=False)
"Seed to use for randomized unit tests. "
"Special value 'random' means using a seed of None.",
StrParam(666, is_valid=good_seed_param),
in_c_key=False)
def fetch_seed(pseed=None):
......@@ -41,15 +41,15 @@ def fetch_seed(pseed=None):
Returns the seed to use for running the unit tests.
If an explicit seed is given, it will be used for seeding numpy's rng.
If not, it will use config.unittest.rseed (its default value is 666).
If config.unittest.rseed is set to "random", it will seed the rng with None,
which is equivalent to seeding with a random seed.
If config.unittest.rseed is set to "random", it will seed the rng with
None, which is equivalent to seeding with a random seed.
Useful for seeding RandomState objects.
>>> rng = numpy.random.RandomState(unittest_tools.fetch_seed())
"""
seed = pseed or config.unittests.rseed
if seed=='random':
if seed == 'random':
seed = None
try:
......@@ -58,8 +58,8 @@ def fetch_seed(pseed=None):
else:
seed = None
except ValueError:
print >> sys.stderr, 'Error: config.unittests.rseed contains '\
'invalid seed, using None instead'
print >> sys.stderr, ('Error: config.unittests.rseed contains '
'invalid seed, using None instead')
seed = None
return seed
......@@ -72,7 +72,7 @@ def seed_rng(pseed=None):
"""
seed = fetch_seed(pseed)
if pseed and pseed!=seed:
if pseed and pseed != seed:
print >> sys.stderr, 'Warning: using seed given by config.unittests.rseed=%i'\
'instead of seed %i given as parameter' % (seed, pseed)
numpy.random.seed(seed)
......@@ -155,7 +155,8 @@ class T_OpContractMixin(object):
assert op_i == self.clone(op_i)
assert op_i != self.other_op
for j, op_j in enumerate(self.ops):
if i == j: continue
if i == j:
continue
assert op_i != op_j
def test_hash(self):
......@@ -167,7 +168,8 @@ class T_OpContractMixin(object):
assert h_i == hash(self.clone(op_i))
assert h_i != hash(self.other_op)
for j, op_j in enumerate(self.ops):
if i == j: continue
if i == j:
continue
assert op_i != hash(op_j)
def test_name(self):
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论