提交 e04f0531 authored 作者: notoraptor's avatar notoraptor

Important renaming for classes and files:

- class Wrap -> Params
- class Wrapper -> ParamsType
- file wrapper.py -> params_type.py
- file test_wrapper.py -> test_params_type.py

Protect Wrap private fields and rewrite Wrap __repr__().
上级 3c440a74
...@@ -74,9 +74,9 @@ attribute :attr:`params_type` to an instance of your params Type. ...@@ -74,9 +74,9 @@ attribute :attr:`params_type` to an instance of your params Type.
.. note:: .. note::
If you want to have multiple parameters, Theano provides the convenient class If you want to have multiple parameters, Theano provides the convenient class
:class:`theano.gof.wrapper.Wrapper` that allows to bundle many parameters into :class:`theano.gof.params_type.ParamsType` that allows to bundle many parameters into
one object that will be available in both Python (as a Python object) and C code (as a struct). one object that will be available in both Python (as a Python object) and C code (as a struct).
See :ref:`Wrapper tutorial and API documentation <libdoc_gof_wrapper>` for more infos. See :ref:`ParamsType tutorial and API documentation <libdoc_gof_wrapper>` for more infos.
For example if we decide to use an int as the params the following For example if we decide to use an int as the params the following
would be appropriate: would be appropriate:
......
...@@ -17,5 +17,5 @@ ...@@ -17,5 +17,5 @@
fgraph fgraph
toolbox toolbox
type type
wrapper params_type
utils utils
.. _libdoc_gof_wrapper: .. _libdoc_gof_wrapper:
======================================================== ============================================================
:mod:`theano.gof.wrapper` -- Wrapper class for op params :mod:`theano.gof.params_type` -- Wrapper class for op params
======================================================== ============================================================
--------- ---------
Reference Reference
--------- ---------
.. automodule:: theano.gof.wrapper .. automodule:: theano.gof.params_type
:platform: Unix, Windows :platform: Unix, Windows
:synopsis: Wrapper class for op params :synopsis: Wrapper class for op params
:members: :members:
......
...@@ -80,7 +80,7 @@ from theano.gof.type import \ ...@@ -80,7 +80,7 @@ from theano.gof.type import \
from theano.gof.utils import \ from theano.gof.utils import \
hashtype, object2, MethodNotDefined hashtype, object2, MethodNotDefined
from theano.gof.wrapper import Wrapper, Wrap from theano.gof.params_type import ParamsType, Params
import theano import theano
......
...@@ -797,18 +797,18 @@ class Op(utils.object2, PureOp, CLinkerOp): ...@@ -797,18 +797,18 @@ class Op(utils.object2, PureOp, CLinkerOp):
""" """
# We add a default get_params() implementation which will try to detect params from the op # We add a default get_params() implementation which will try to detect params from the op
# if params_type is set to a Wrapper. If not, we raise a MethodNotDefined exception. # if params_type is set to a ParamsType. If not, we raise a MethodNotDefined exception.
def get_params(self, node): def get_params(self, node):
if hasattr(self, 'params_type') and isinstance(self.params_type, theano.gof.Wrapper): if hasattr(self, 'params_type') and isinstance(self.params_type, theano.gof.ParamsType):
wrapper = self.params_type wrapper = self.params_type
if not all(hasattr(self, field) for field in wrapper.fields): if not all(hasattr(self, field) for field in wrapper.fields):
raise AttributeError('%s: missing attributes for Wrapper parameter.' % type(self).__name__) raise AttributeError('%s: missing attributes for ParamsType parameter.' % type(self).__name__)
wrap_dict = dict() wrap_dict = dict()
for i in range(wrapper.length): for i in range(wrapper.length):
field = wrapper.fields[i] field = wrapper.fields[i]
_type = wrapper.types[i] _type = wrapper.types[i]
wrap_dict[field] = _type.filter(getattr(self, field), strict=False, allow_downcast=True) wrap_dict[field] = _type.filter(getattr(self, field), strict=False, allow_downcast=True)
return theano.gof.Wrap(wrapper, **wrap_dict) return theano.gof.Params(wrapper, **wrap_dict)
raise theano.gof.utils.MethodNotDefined('get_params') raise theano.gof.utils.MethodNotDefined('get_params')
def prepare_node(self, node, storage_map, compute_map, impl): def prepare_node(self, node, storage_map, compute_map, impl):
...@@ -1393,19 +1393,19 @@ class COp(Op): ...@@ -1393,19 +1393,19 @@ class COp(Op):
The names must be strings that are not a C keyword and the The names must be strings that are not a C keyword and the
values must be strings of literal C representations. values must be strings of literal C representations.
If op uses a :class:`theano.gof.wrapper.Wrapper` as ``params_type``, If op uses a :class:`theano.gof.params_type.ParamsType` as ``params_type``,
it returns: it returns:
- a default macro ``APPLY_SPECIFIC_WRAPPER`` which defines the class name of the - a default macro ``PARAMS_TYPE`` which defines the class name of the
corresponding C struct. corresponding C struct.
- a macro ``DTYPE_PARAM_key`` for every ``key`` in the Wrapper for which associated - a macro ``DTYPE_PARAM_key`` for every ``key`` in the ParamsType for which associated
type implements the method :func:`theano.gof.type.CLinkerType.c_element_type`. type implements the method :func:`theano.gof.type.CLinkerType.c_element_type`.
``DTYPE_PARAM_key`` defines the primitive C type name of an item in a variable ``DTYPE_PARAM_key`` defines the primitive C type name of an item in a variable
associated to ``key``. associated to ``key``.
""" """
if hasattr(self, 'params_type') and isinstance(self.params_type, theano.gof.Wrapper): if hasattr(self, 'params_type') and isinstance(self.params_type, theano.gof.ParamsType):
wrapper = self.params_type wrapper = self.params_type
params = [('APPLY_SPECIFIC_WRAPPER', wrapper.name)] params = [('PARAMS_TYPE', wrapper.name)]
for i in range(wrapper.length): for i in range(wrapper.length):
try: try:
params.append(('DTYPE_PARAM_' + wrapper.fields[i], wrapper.types[i].c_element_type())) params.append(('DTYPE_PARAM_' + wrapper.fields[i], wrapper.types[i].c_element_type()))
......
...@@ -6,7 +6,7 @@ from theano.gof import Op, COp, Apply ...@@ -6,7 +6,7 @@ from theano.gof import Op, COp, Apply
from theano import Generic from theano import Generic
from theano.scalar import Scalar from theano.scalar import Scalar
from theano.tensor import TensorType from theano.tensor import TensorType
from theano.gof import Wrapper, Wrap from theano.gof import ParamsType, Params
from theano import tensor from theano import tensor
from theano.tests import unittest_tools as utt from theano.tests import unittest_tools as utt
...@@ -18,7 +18,7 @@ generic_type = Generic() ...@@ -18,7 +18,7 @@ generic_type = Generic()
# A test op to compute `y = a*x^2 + bx + c` for any tensor x, with a, b, c as op params. # A test op to compute `y = a*x^2 + bx + c` for any tensor x, with a, b, c as op params.
class QuadraticOpFunc(Op): class QuadraticOpFunc(Op):
__props__ = ('a', 'b', 'c') __props__ = ('a', 'b', 'c')
params_type = Wrapper(a=tensor_type_0d, params_type = ParamsType(a=tensor_type_0d,
b=scalar_type, b=scalar_type,
c=generic_type) c=generic_type)
...@@ -93,7 +93,7 @@ class QuadraticOpFunc(Op): ...@@ -93,7 +93,7 @@ class QuadraticOpFunc(Op):
# Same op as above, but implemented as a COp (with C code in an external file). # Same op as above, but implemented as a COp (with C code in an external file).
class QuadraticCOpFunc(COp): class QuadraticCOpFunc(COp):
__props__ = ('a', 'b', 'c') __props__ = ('a', 'b', 'c')
params_type = Wrapper(a=tensor_type_0d, params_type = ParamsType(a=tensor_type_0d,
b=scalar_type, b=scalar_type,
c=generic_type) c=generic_type)
...@@ -114,35 +114,35 @@ class QuadraticCOpFunc(COp): ...@@ -114,35 +114,35 @@ class QuadraticCOpFunc(COp):
y[0] = coefficients.a * (x**2) + coefficients.b * x + coefficients.c y[0] = coefficients.a * (x**2) + coefficients.b * x + coefficients.c
class TestWrapper(TestCase): class TestParamsType(TestCase):
def test_hash_and_eq_wrap(self): def test_hash_and_eq_params(self):
wp1 = Wrapper(a=Generic(), array=TensorType('int64', (False,)), floatting=Scalar('float64'), wp1 = ParamsType(a=Generic(), array=TensorType('int64', (False,)), floatting=Scalar('float64'),
npy_scalar=TensorType('float64', tuple())) npy_scalar=TensorType('float64', tuple()))
wp2 = Wrapper(a=Generic(), array=TensorType('int64', (False,)), floatting=Scalar('float64'), wp2 = ParamsType(a=Generic(), array=TensorType('int64', (False,)), floatting=Scalar('float64'),
npy_scalar=TensorType('float64', tuple())) npy_scalar=TensorType('float64', tuple()))
w1 = Wrap(wp1, a=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12)) w1 = Params(wp1, a=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
w2 = Wrap(wp2, a=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12)) w2 = Params(wp2, a=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
assert w1 == w2 assert w1 == w2
assert not (w1 != w2) assert not (w1 != w2)
assert hash(w1) == hash(w2) assert hash(w1) == hash(w2)
# Changing attributes names only (a -> other_name). # Changing attributes names only (a -> other_name).
wp2_other = Wrapper(other_name=Generic(), array=TensorType('int64', (False,)), floatting=Scalar('float64'), wp2_other = ParamsType(other_name=Generic(), array=TensorType('int64', (False,)), floatting=Scalar('float64'),
npy_scalar=TensorType('float64', tuple())) npy_scalar=TensorType('float64', tuple()))
w2 = Wrap(wp2_other, other_name=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12)) w2 = Params(wp2_other, other_name=1, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
assert w1 != w2 assert w1 != w2
# Changing attributes values only (now a=2). # Changing attributes values only (now a=2).
w2 = Wrap(wp2, a=2, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12)) w2 = Params(wp2, a=2, array=numpy.asarray([1, 2, 4, 5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
assert w1 != w2 assert w1 != w2
# Changing NumPy array values (5 -> -5). # Changing NumPy array values (5 -> -5).
w2 = Wrap(wp2, a=1, array=numpy.asarray([1, 2, 4, -5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12)) w2 = Params(wp2, a=1, array=numpy.asarray([1, 2, 4, -5, 7]), floatting=-4.5, npy_scalar=numpy.asarray(12))
assert w1 != w2 assert w1 != w2
def test_hash_and_eq_wrapper(self): def test_hash_and_eq_params_type(self):
w1 = Wrapper(a1=TensorType('int64', (False, False)), w1 = ParamsType(a1=TensorType('int64', (False, False)),
a2=TensorType('int64', (False, True, False, False, True)), a2=TensorType('int64', (False, True, False, False, True)),
a3=Generic()) a3=Generic())
w2 = Wrapper(a1=TensorType('int64', (False, False)), w2 = ParamsType(a1=TensorType('int64', (False, False)),
a2=TensorType('int64', (False, True, False, False, True)), a2=TensorType('int64', (False, True, False, False, True)),
a3=Generic()) a3=Generic())
assert w1 == w2 assert w1 == w2
...@@ -150,32 +150,32 @@ class TestWrapper(TestCase): ...@@ -150,32 +150,32 @@ class TestWrapper(TestCase):
assert hash(w1) == hash(w2) assert hash(w1) == hash(w2)
assert w1.name == w2.name assert w1.name == w2.name
# Changing attributes names only. # Changing attributes names only.
w2 = Wrapper(a1=TensorType('int64', (False, False)), w2 = ParamsType(a1=TensorType('int64', (False, False)),
other_name=TensorType('int64', (False, True, False, False, True)), # a2 -> other_name other_name=TensorType('int64', (False, True, False, False, True)), # a2 -> other_name
a3=Generic()) a3=Generic())
assert w1 != w2 assert w1 != w2
# Changing attributes types only. # Changing attributes types only.
w2 = Wrapper(a1=TensorType('int64', (False, False)), w2 = ParamsType(a1=TensorType('int64', (False, False)),
a2=Generic(), # changing class a2=Generic(), # changing class
a3=Generic()) a3=Generic())
assert w1 != w2 assert w1 != w2
# Changing attributes types characteristics only. # Changing attributes types characteristics only.
w2 = Wrapper(a1=TensorType('int64', (False, True)), # changing broadcasting w2 = ParamsType(a1=TensorType('int64', (False, True)), # changing broadcasting
a2=TensorType('int64', (False, True, False, False, True)), a2=TensorType('int64', (False, True, False, False, True)),
a3=Generic()) a3=Generic())
assert w1 != w2 assert w1 != w2
def test_wrapper_filtering(self): def test_params_type_filtering(self):
shape_tensor5 = (1, 2, 2, 3, 2) shape_tensor5 = (1, 2, 2, 3, 2)
size_tensor5 = shape_tensor5[0] * shape_tensor5[1] * shape_tensor5[2] * shape_tensor5[3] * shape_tensor5[4] size_tensor5 = shape_tensor5[0] * shape_tensor5[1] * shape_tensor5[2] * shape_tensor5[3] * shape_tensor5[4]
random_tensor = numpy.random.normal(size=size_tensor5).reshape(shape_tensor5) random_tensor = numpy.random.normal(size=size_tensor5).reshape(shape_tensor5)
w = Wrapper(a1=TensorType('int32', (False, False)), w = ParamsType(a1=TensorType('int32', (False, False)),
a2=TensorType('float64', (False, False, False, False, False)), a2=TensorType('float64', (False, False, False, False, False)),
a3=Generic()) a3=Generic())
# With a value that does not match the wrapper. # With a value that does not match the params type.
o = Wrap(w, o = Params(w,
a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int64'), a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int64'),
a2=random_tensor.astype('float32'), a2=random_tensor.astype('float32'),
a3=2000) a3=2000)
...@@ -186,8 +186,8 @@ class TestWrapper(TestCase): ...@@ -186,8 +186,8 @@ class TestWrapper(TestCase):
# Should pass. # Should pass.
w.filter(o, strict=False, allow_downcast=True) w.filter(o, strict=False, allow_downcast=True)
# With a value that matches the wrapper. # With a value that matches the params type.
o1 = Wrap(w, o1 = Params(w,
a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'), a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
a2=random_tensor.astype('float64'), a2=random_tensor.astype('float64'),
a3=2000) a3=2000)
...@@ -197,7 +197,7 @@ class TestWrapper(TestCase): ...@@ -197,7 +197,7 @@ class TestWrapper(TestCase):
w.filter(o1, strict=False, allow_downcast=True) w.filter(o1, strict=False, allow_downcast=True)
# Check values_eq and values_eq_approx. # Check values_eq and values_eq_approx.
o2 = Wrap(w, o2 = Params(w,
a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'), a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
a2=random_tensor.astype('float64'), a2=random_tensor.astype('float64'),
a3=2000) a3=2000)
...@@ -207,7 +207,7 @@ class TestWrapper(TestCase): ...@@ -207,7 +207,7 @@ class TestWrapper(TestCase):
# Check value_eq_approx. # Check value_eq_approx.
# NB: I don't know exactly which kind of differences is rejected by values_eq but accepted by values_eq_approx. # NB: I don't know exactly which kind of differences is rejected by values_eq but accepted by values_eq_approx.
# So, I just play a little with float values. # So, I just play a little with float values.
o3 = Wrap(w, o3 = Params(w,
a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'), a1=numpy.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]).astype('int32'),
a2=(random_tensor.astype('float32') * 10 / 2.2 * 2.19999999999 / 10).astype('float64'), a2=(random_tensor.astype('float32') * 10 / 2.2 * 2.19999999999 / 10).astype('float64'),
a3=2000.0 - 0.00000000000000001) a3=2000.0 - 0.00000000000000001)
......
...@@ -26,7 +26,7 @@ int APPLY_SPECIFIC(quadratic_function)(PyArrayObject* tensor, DTYPE_INPUT_0 a, D ...@@ -26,7 +26,7 @@ int APPLY_SPECIFIC(quadratic_function)(PyArrayObject* tensor, DTYPE_INPUT_0 a, D
return 0; return 0;
} }
int APPLY_SPECIFIC(compute_quadratic)(PyArrayObject* X, PyArrayObject** Y, APPLY_SPECIFIC_WRAPPER* coeff) { int APPLY_SPECIFIC(compute_quadratic)(PyArrayObject* X, PyArrayObject** Y, PARAMS_TYPE* coeff) {
DTYPE_INPUT_0 a = (DTYPE_INPUT_0) (*(DTYPE_PARAM_a*) PyArray_GETPTR1(coeff->a, 0)); // 0-D TensorType. DTYPE_INPUT_0 a = (DTYPE_INPUT_0) (*(DTYPE_PARAM_a*) PyArray_GETPTR1(coeff->a, 0)); // 0-D TensorType.
DTYPE_INPUT_0 b = coeff->b; // Scalar. DTYPE_INPUT_0 b = coeff->b; // Scalar.
DTYPE_INPUT_0 c = (DTYPE_INPUT_0) PyFloat_AsDouble(coeff->c); // Generic. DTYPE_INPUT_0 c = (DTYPE_INPUT_0) PyFloat_AsDouble(coeff->c); // Generic.
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论