Commit fd552f23 authored by notoraptor

Add a COp test in test_wrapper.

Rewrite `Wrap` so that it depends on a Wrapper. Simplify code.
Parent 5260c149
......@@ -799,22 +799,15 @@ class Op(utils.object2, PureOp, CLinkerOp):
# We add a default get_params() implementation which will try to detect params from the op
# if params_type is set to a Wrapper. If not, we raise a MethodNotDefined exception.
def get_params(self, node):
    """Default ``get_params`` implementation: build params from op attributes.

    If ``self.params_type`` is a :class:`theano.gof.wrapper.Wrapper` and every
    wrapper field is listed in the op's ``__props__``, collect the matching op
    attributes into a :class:`theano.gof.wrapper.Wrap` (filtering each value
    through the field's type) and return it.

    :param node: the Apply node for which params are requested (unused here).
    :raises MethodNotDefined: when no params can be extracted, so callers know
        this op defines no params.
    """
    # NOTE: the diff residue of this page contained both the old and new
    # implementations; this is the post-commit version (Wrap now takes its
    # owning Wrapper as first argument).
    if hasattr(self, 'params_type') and isinstance(self.params_type, theano.gof.wrapper.Wrapper):
        wrapper = self.params_type
        # Only ops that declare every wrapper field in __props__ qualify:
        # __props__ guarantees the attributes exist and define op identity.
        if hasattr(self, '__props__') and all(field in self.__props__ for field in wrapper.fields):
            # filter() converts/validates each attribute to its field type;
            # non-strict with downcast allowed, mirroring user-facing inputs.
            wrap_dict = dict(
                (field, _type.filter(getattr(self, field), strict=False, allow_downcast=True))
                for field, _type in zip(wrapper.fields, wrapper.types))
            return theano.gof.wrapper.Wrap(wrapper, **wrap_dict)
    raise theano.gof.utils.MethodNotDefined('get_params')
def prepare_node(self, node, storage_map, compute_map, impl):
......
#section support_code_apply
/* In-place elemwise update: tensor[i] = a*tensor[i]^2 + b*tensor[i] + c.
 * Iterates with the NumPy NpyIter external-loop API so any memory layout
 * is handled. Returns 0 on success, -1 on failure with a Python error set. */
int APPLY_SPECIFIC(quadratic_function)(PyArrayObject* tensor, DTYPE_INPUT_0 a, DTYPE_INPUT_0 b, DTYPE_INPUT_0 c) {
    NpyIter* iterator = NpyIter_New(tensor,
        NPY_ITER_READWRITE | NPY_ITER_EXTERNAL_LOOP | NPY_ITER_REFS_OK,
        NPY_KEEPORDER, NPY_NO_CASTING, NULL);
    if (iterator == NULL) {
        PyErr_SetString(PyExc_RuntimeError, "Unable to iterate over a tensor for an elemwise operation.");
        return -1;
    }
    NpyIter_IterNextFunc* get_next = NpyIter_GetIterNext(iterator, NULL);
    if (get_next == NULL) {
        /* Fix: previously unchecked; a NULL iternext function would be
         * called through in the do/while below and crash. The error is
         * already set by NpyIter_GetIterNext. */
        NpyIter_Deallocate(iterator);
        return -1;
    }
    char** data_ptr = NpyIter_GetDataPtrArray(iterator);
    npy_intp* stride_ptr = NpyIter_GetInnerStrideArray(iterator);
    npy_intp* innersize_ptr = NpyIter_GetInnerLoopSizePtr(iterator);
    /* External loop: each iteration yields one contiguous-by-stride run. */
    do {
        char* data = *data_ptr;
        npy_intp stride = *stride_ptr;
        npy_intp count = *innersize_ptr;
        while (count) {
            DTYPE_INPUT_0 x = *((DTYPE_INPUT_0*)data);
            *((DTYPE_INPUT_0*)data) = a*x*x + b*x + c;
            data += stride;
            --count;
        }
    } while (get_next(iterator));
    NpyIter_Deallocate(iterator);
    return 0;
}
/* Allocate output Y with X's shape/dtype, copy X into it, then apply the
 * quadratic a*x^2 + b*x + c in place. Coefficients come from the params
 * wrapper: a is a 0-d TensorType, b a Scalar, c a Generic (Python float).
 * Returns 0 on success, non-zero on failure with a Python error set. */
int APPLY_SPECIFIC(compute_quadratic)(PyArrayObject* X, PyArrayObject** Y, QUADRATIC_WRAPPER* coeff) {
    DTYPE_INPUT_0 a = (DTYPE_INPUT_0) (*(COEFF_TYPE*) PyArray_GETPTR1(coeff->a, 0)); // 0-D TensorType.
    DTYPE_INPUT_0 b = coeff->b; // Scalar.
    DTYPE_INPUT_0 c = (DTYPE_INPUT_0)PyFloat_AsDouble(coeff->c); // Generic.
    Py_XDECREF(*Y);
    *Y = (PyArrayObject*)PyArray_EMPTY(PyArray_NDIM(X), PyArray_DIMS(X), TYPENUM_INPUT_0, PyArray_IS_F_CONTIGUOUS(X));
    if (*Y == NULL) {
        /* Fix: allocation failure was previously unchecked; PyArray_CopyInto
         * would then dereference NULL. */
        PyErr_SetString(PyExc_MemoryError, "Unable to allocate the output array.");
        return 1;
    }
    if (PyArray_CopyInto(*Y, X) != 0) {
        PyErr_SetString(PyExc_RuntimeError, "Unable to copy input into output.");
        return 1;
    }
    if (APPLY_SPECIFIC(quadratic_function)(*Y, a, b, c) != 0) {
        PyErr_SetString(PyExc_RuntimeError, "Unable to compute quadratic function.");
        return 1;
    }
    return 0;
}
......@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
import theano
import numpy
from unittest import TestCase
from theano.gof import Op, Apply
from theano.gof import Op, COp, Apply
from theano import Generic
from theano.scalar import Scalar
from theano.tensor import TensorType
......@@ -18,7 +18,7 @@ generic_type = Generic()
# A test op to compute `y = a*x^2 + bx + c` for any tensor x, with a, b, c as op params.
class QuadraticFunction(Op):
class QuadraticOpFunc(Op):
__props__ = ('a', 'b', 'c')
params_type = Wrapper(a=tensor_type_0d,
b=scalar_type,
......@@ -39,7 +39,7 @@ class QuadraticFunction(Op):
y[0] = coefficients.a * (x**2) + coefficients.b * x + coefficients.c
def c_code_cache_version(self):
    """Version tag for the generated C code cache.

    Bumped from (1, 2) to (1, 3) in this commit because the generated C
    code changed (params are now accessed through a pointer: ``coeff->a``
    instead of ``coeff.a``), so cached compiled modules must be invalidated.
    The diff residue showed both returns; this is the post-commit value.
    """
    return (1, 3)
def c_support_code_apply(self, node, name):
float_type = node.inputs[0].type.dtype_specs()[1]
......@@ -82,9 +82,9 @@ class QuadraticFunction(Op):
float_typenum = numpy.dtype(node.inputs[0].type.dtype).num
coeff_type = 'npy_' + numpy.dtype(dtype).name
return """
%(float_type)s a = (%(float_type)s) (*(%(coeff_type)s*) PyArray_GETPTR1(%(coeff)s.a, 0)); // 0-D TensorType.
%(float_type)s b = %(coeff)s.b; // Scalar.
%(float_type)s c = (%(float_type)s)PyFloat_AsDouble(%(coeff)s.c); // Generic.
%(float_type)s a = (%(float_type)s) (*(%(coeff_type)s*) PyArray_GETPTR1(%(coeff)s->a, 0)); // 0-D TensorType.
%(float_type)s b = %(coeff)s->b; // Scalar.
%(float_type)s c = (%(float_type)s)PyFloat_AsDouble(%(coeff)s->c); // Generic.
Py_XDECREF(%(Y)s);
%(Y)s = (PyArrayObject*)PyArray_EMPTY(PyArray_NDIM(%(X)s), PyArray_DIMS(%(X)s), %(float_typenum)s, PyArray_IS_F_CONTIGUOUS(%(X)s));
if (PyArray_CopyInto(%(Y)s, %(X)s) != 0) {
......@@ -98,29 +98,54 @@ class QuadraticFunction(Op):
""" % locals()
# Same op as above, but implemented as a COp (with C code in an external file).
class QuadraticCOpFunc(COp):
    """Compute ``y = a*x**2 + b*x + c`` elemwise, with a, b, c as op params.

    The implementation lives in the external C file
    ``test_quadratic_function.c``; params are passed to C through a Wrapper.
    (Indentation restored here: the extracted page had flattened it.)
    """

    __props__ = ('a', 'b', 'c')
    params_type = Wrapper(a=tensor_type_0d,
                          b=scalar_type,
                          c=generic_type)

    def __init__(self, a, b, c):
        # COp is initialized with the external C file and the entry point
        # (the function name is expanded by the APPLY_SPECIFIC macro).
        super(QuadraticCOpFunc, self).__init__('test_quadratic_function.c',
                                               'APPLY_SPECIFIC(compute_quadratic)')
        self.a = a
        self.b = b
        self.c = c

    def get_op_params(self):
        # Macros substituted into the external C file: the generated params
        # struct name, and the C dtype of the 0-d coefficient tensor.
        return [('QUADRATIC_WRAPPER', self.params_type.name),
                ('COEFF_TYPE', 'npy_' + numpy.dtype(dtype).name)]

    def make_node(self, x):
        x = tensor.as_tensor_variable(x)
        # One output with the same type as the (tensor-converted) input.
        return Apply(self, [x], [x.type()])
class TestWrapper(TestCase):
def test_hash_and_eq_wrap(self):
    """Wrap equality and hash must depend on the owning Wrapper, the
    attribute names, the attribute values, and NumPy array contents.

    Reconstructed post-commit version: the old and new test bodies were
    interleaved in the extracted diff; `Wrap` now takes its Wrapper as
    first argument.
    """
    wp1 = Wrapper(a=Generic(), array=TensorType('int32', (False,)), floatting=Scalar('float32'),
                  npy_scalar=TensorType('float64', tuple()))
    wp2 = Wrapper(a=Generic(), array=TensorType('int32', (False,)), floatting=Scalar('float32'),
                  npy_scalar=TensorType('float64', tuple()))
    w1 = Wrap(wp1, a=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
    w2 = Wrap(wp2, a=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
    assert w1 == w2
    assert not (w1 != w2)
    assert hash(w1) == hash(w2)
    # NOTE(review): the extracted diff listed 'b' in this check, but the new
    # Wraps above define no 'b' field; dropped it here — confirm upstream.
    assert all(hasattr(w1, key) for key in ('a', 'array', 'floatting', 'npy_scalar'))
    # Changing attribute names only (a -> other_name).
    wp2_other = Wrapper(other_name=Generic(), array=TensorType('int32', (False,)), floatting=Scalar('float32'),
                        npy_scalar=TensorType('float64', tuple()))
    w2 = Wrap(wp2_other, other_name=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
    assert w1 != w2
    # Changing attribute values only (now a=2).
    w2 = Wrap(wp2, a=2, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
    assert w1 != w2
    # Changing NumPy array values (5 -> -5).
    w2 = Wrap(wp2, a=1, array=numpy.asarray([1, 2, 4, -5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
    assert w1 != w2
def test_wrapper_hash_and_eq(self):
def test_hash_and_eq_wrapper(self):
w1 = Wrapper(a1=TensorType('int64', (False, False)),
a2=TensorType('int64', (False, True, False, False, True)),
a3=Generic())
......@@ -133,7 +158,7 @@ class TestWrapper(TestCase):
assert w1.name == w2.name
# Changing attributes names only.
w2 = Wrapper(a1=TensorType('int64', (False, False)),
other_name=TensorType('int64', (False, True, False, False, True)),
other_name=TensorType('int64', (False, True, False, False, True)), # a2 -> other_name
a3=Generic())
assert w1 != w2
# Changing attributes types only.
......@@ -157,7 +182,8 @@ class TestWrapper(TestCase):
a3=Generic())
# With a value that does not match the wrapper.
o = Wrap(a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int64'),
o = Wrap(w,
a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int64'),
a2=random_tensor.astype('float32'),
a3=2000)
# should fail (o.a1 is not int32, o.a2 is not float64)
......@@ -168,7 +194,8 @@ class TestWrapper(TestCase):
w.filter(o, strict=False, allow_downcast=True)
# With a value that matches the wrapper.
o1 = Wrap(a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
o1 = Wrap(w,
a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
a2=random_tensor.astype('float64'),
a3=2000)
# All should pass.
......@@ -176,15 +203,17 @@ class TestWrapper(TestCase):
w.filter(o1, strict=False, allow_downcast=False)
w.filter(o1, strict=False, allow_downcast=True)
# Check value_eq and value_eq_approx.
o2 = Wrap(a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
# Check values_eq and values_eq_approx.
o2 = Wrap(w,
a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
a2=random_tensor.astype('float64'),
a3=2000)
assert w.values_eq(o1, o2)
assert w.values_eq_approx(o1, o2)
# Check value_eq_approx.
o3 = Wrap(a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('float32'),
o3 = Wrap(w,
a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('float32'),
a2=random_tensor.astype('float64'),
a3=2000.0)
assert w.values_eq_approx(o1, o3)
......@@ -192,14 +221,18 @@ class TestWrapper(TestCase):
def test_op_params(self):
    """The pure-Python Op and the COp must agree with each other and with
    the NumPy reference ``a*x**2 + b*x + c``.

    Reconstructed post-commit version: the extracted diff interleaved the
    old single-op body with the new two-op body; this keeps the new one.
    """
    a, b, c = 2, 3, -7
    x = tensor.matrix()
    y1 = QuadraticOpFunc(a, b, c)(x)
    y2 = QuadraticCOpFunc(a, b, c)(x)
    f1 = theano.function([x], y1)
    f2 = theano.function([x], y2)
    shape = (100, 100)
    # The for-loop is here just to force profiling to print something
    # interesting. When running this test without this loop, profiling does
    # not print either the list of classes or the list of ops (maybe because
    # the function is extremely fast?).
    for i in range(50):
        vx = numpy.random.normal(size=shape[0] * shape[1]).astype(dtype).reshape(*shape)
        vy1 = f1(vx)
        vy2 = f2(vx)
        ref = a * (vx**2) + b * vx + c
        utt.assert_allclose(vy1, vy2)
        utt.assert_allclose(ref, vy1)
Diff is collapsed.
Markdown format
0%
You are about to add 0 people to the discussion. Proceed with caution.
Please finish editing this comment first!
Register or sign in to comment