提交 b61e972f authored 作者: Iban Harlouchet's avatar Iban Harlouchet

numpydoc for theano/tensor/type.py

上级 c621d24d
...@@ -12,30 +12,35 @@ _logger = logging.getLogger("theano.tensor.type") ...@@ -12,30 +12,35 @@ _logger = logging.getLogger("theano.tensor.type")
class TensorType(Type): class TensorType(Type):
"""Symbolic `Type` representing a numpy.ndarray value."""
filter_checks_isfinite = False
"""
When this is True, strict filtering rejects data containing NaN or
Inf entries. (Used in `DebugMode`)
""" """
Symbolic `Type` representing a numpy.ndarray value.
def __init__(self, dtype, broadcastable, name=None, sparse_grad=False): Initialize self.dtype and self.broadcastable.
"""Initialize self.dtype and self.broadcastable.
:Parameters: Parameters
- `dtype`: str corresponding to numpy dtype (e.g., 'int64') ----------
dtype: str
Corresponding to numpy dtype (e.g., 'int64')
The value (ndarray) associated to a `Variable` of this `Type` will The value (ndarray) associated to a `Variable` of this `Type` will
have this dtype. have this dtype.
- `broadcastable`: tuple, list, or array of boolean values broadcastable: tuple, list, or array of boolean values
This argument serves two purposes. First, the True elements of this This argument serves two purposes. First, the True elements of this
list indicate the dimensions where the shape of an associated value list indicate the dimensions where the shape of an associated value
must be 1. Secondly, the length of this list is the number of must be 1. Secondly, the length of this list is the number of
dimensions that an associated value must have. See dimensions that an associated value must have. See
:doc:`broadcasting` for an explanation of how this list is used. :doc:`broadcasting` for an explanation of how this list is used.
- `name`: str name : str
Optional name for this type. Optional name for this type.
""" """
filter_checks_isfinite = False
"""
When this is True, strict filtering rejects data containing NaN or
Inf entries. (Used in `DebugMode`)
"""
def __init__(self, dtype, broadcastable, name=None, sparse_grad=False):
self.dtype = str(dtype) self.dtype = str(dtype)
if self.dtype == 'floatX': if self.dtype == 'floatX':
self.dtype = config.floatX self.dtype = config.floatX
...@@ -56,6 +61,7 @@ class TensorType(Type): ...@@ -56,6 +61,7 @@ class TensorType(Type):
""" """
Return a copy of the type optionally with a new dtype or Return a copy of the type optionally with a new dtype or
broadcastable pattern. broadcastable pattern.
""" """
if dtype is None: if dtype is None:
dtype = self.dtype dtype = self.dtype
...@@ -65,11 +71,13 @@ class TensorType(Type): ...@@ -65,11 +71,13 @@ class TensorType(Type):
sparse_grad=self.sparse_grad) sparse_grad=self.sparse_grad)
def filter(self, data, strict=False, allow_downcast=None): def filter(self, data, strict=False, allow_downcast=None):
"""Convert `data` to something which can be associated to a """
Convert `data` to something which can be associated to a
`TensorVariable`. `TensorVariable`.
This function is not meant to be called in user code. It is for This function is not meant to be called in user code. It is for
`Linker` instances to use when running a compiled graph. `Linker` instances to use when running a compiled graph.
""" """
# Explicit error message when one accidentally uses a Variable as # Explicit error message when one accidentally uses a Variable as
# input (typical mistake, especially with shared variables). # input (typical mistake, especially with shared variables).
...@@ -191,11 +199,13 @@ class TensorType(Type): ...@@ -191,11 +199,13 @@ class TensorType(Type):
return data return data
def filter_variable(self, other, allow_convert=True): def filter_variable(self, other, allow_convert=True):
"""Convert a symbolic Variable into a TensorType, if compatible. """
Convert a symbolic Variable into a TensorType, if compatible.
For the moment, only a TensorType or CudaNdarrayType will be For the moment, only a TensorType or CudaNdarrayType will be
converted, provided they have the same number of dimensions, converted, provided they have the same number of dimensions,
broadcastable pattern, and dtype. broadcastable pattern, and dtype.
""" """
if hasattr(other, '_as_TensorVariable'): if hasattr(other, '_as_TensorVariable'):
other = other._as_TensorVariable() other = other._as_TensorVariable()
...@@ -230,10 +240,12 @@ class TensorType(Type): ...@@ -230,10 +240,12 @@ class TensorType(Type):
return "value is valid" return "value is valid"
def dtype_specs(self): def dtype_specs(self):
"""Return a tuple (python type, c type, numpy typenum) that corresponds """
Return a tuple (python type, c type, numpy typenum) that corresponds
to self.dtype. to self.dtype.
This function is used internally as part of C code generation. This function is used internally as part of C code generation.
""" """
# TODO: add more type correspondences for e.g. int32, int64, float32, # TODO: add more type correspondences for e.g. int32, int64, float32,
# complex64, etc. # complex64, etc.
...@@ -261,7 +273,10 @@ class TensorType(Type): ...@@ -261,7 +273,10 @@ class TensorType(Type):
return scal.get_scalar_type(dtype=self.dtype) return scal.get_scalar_type(dtype=self.dtype)
def __eq__(self, other): def __eq__(self, other):
"""Compare True iff other is the same kind of TensorType""" """
Compare True iff other is the same kind of TensorType.
"""
return type(self) == type(other) and other.dtype == self.dtype \ return type(self) == type(other) and other.dtype == self.dtype \
and other.broadcastable == self.broadcastable and other.broadcastable == self.broadcastable
...@@ -305,14 +320,19 @@ class TensorType(Type): ...@@ -305,14 +320,19 @@ class TensorType(Type):
def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False, def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False,
rtol=None, atol=None): rtol=None, atol=None):
""" """
:param allow_remove_inf: If True, when there is an inf in a, Parameters
we allow any value in b in that position. ----------
Even -inf allow_remove_inf
:param allow_remove_nan: If True, when there is a nan in a, If True, when there is an inf in a, we allow any value in b in
we allow any value in b in that position. that position. Even -inf
Even +-inf allow_remove_nan
:param rtol: relative tolerance, passed to _allclose If True, when there is a nan in a, we allow any value in b in
:param atol: absolute tolerance, passed to _allclose that position. Even +-inf
rtol
Relative tolerance, passed to _allclose.
atol
Absolute tolerance, passed to _allclose.
""" """
if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray): if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray):
if a.shape != b.shape: if a.shape != b.shape:
...@@ -389,7 +409,8 @@ class TensorType(Type): ...@@ -389,7 +409,8 @@ class TensorType(Type):
ndim = property(lambda self: len(self.broadcastable), ndim = property(lambda self: len(self.broadcastable),
doc="number of dimensions") doc="number of dimensions")
"""Number of dimensions """
Number of dimensions.
This read-only property is the preferred way to get the number of This read-only property is the preferred way to get the number of
dimensions of a `TensorType`. dimensions of a `TensorType`.
...@@ -397,12 +418,15 @@ class TensorType(Type): ...@@ -397,12 +418,15 @@ class TensorType(Type):
""" """
def make_variable(self, name=None): def make_variable(self, name=None):
"""Return a `TensorVariable` of this type """
Return a `TensorVariable` of this type.
:Parameters: Parameters
- `name`: str ----------
name : str
A pretty name to identify this `Variable` when printing and A pretty name to identify this `Variable` when printing and
debugging debugging
""" """
return self.Variable(self, name=name) return self.Variable(self, name=name)
...@@ -430,7 +454,10 @@ class TensorType(Type): ...@@ -430,7 +454,10 @@ class TensorType(Type):
# "TensorType{%s, %s}" % (str(self.dtype), str(self.broadcastable)) # "TensorType{%s, %s}" % (str(self.dtype), str(self.broadcastable))
def c_declare(self, name, sub, check_input=True): def c_declare(self, name, sub, check_input=True):
"""Override `CLinkerType.c_declare` """ """
Override `CLinkerType.c_declare`.
"""
if(check_input): if(check_input):
check = """ check = """
typedef %(dtype)s dtype_%(name)s; typedef %(dtype)s dtype_%(name)s;
...@@ -444,13 +471,19 @@ class TensorType(Type): ...@@ -444,13 +471,19 @@ class TensorType(Type):
return declaration + check return declaration + check
def c_init(self, name, sub): def c_init(self, name, sub):
"""Override `CLinkerType.c_init` """ """
Override `CLinkerType.c_init`.
"""
return """ return """
%(name)s = NULL; %(name)s = NULL;
""" % dict(sub, name=name, type_num=self.dtype_specs()[2]) """ % dict(sub, name=name, type_num=self.dtype_specs()[2])
def c_extract(self, name, sub, check_input=True): def c_extract(self, name, sub, check_input=True):
"""Override `CLinkerType.c_extract` """ """
Override `CLinkerType.c_extract`.
"""
if(check_input): if(check_input):
check = """ check = """
%(name)s = NULL; %(name)s = NULL;
...@@ -509,7 +542,10 @@ class TensorType(Type): ...@@ -509,7 +542,10 @@ class TensorType(Type):
""" % dict(sub, name=name, type_num=self.dtype_specs()[2]) """ % dict(sub, name=name, type_num=self.dtype_specs()[2])
def c_cleanup(self, name, sub): def c_cleanup(self, name, sub):
"""Override `CLinkerType.c_cleanup` """ """
Override `CLinkerType.c_cleanup`.
"""
return """ return """
if (%(name)s) { if (%(name)s) {
Py_XDECREF(%(name)s); Py_XDECREF(%(name)s);
...@@ -517,7 +553,10 @@ class TensorType(Type): ...@@ -517,7 +553,10 @@ class TensorType(Type):
""" % locals() """ % locals()
def c_sync(self, name, sub): def c_sync(self, name, sub):
"""Override `CLinkerType.c_sync` """ """
Override `CLinkerType.c_sync`.
"""
fail = sub['fail'] fail = sub['fail']
type_num = self.dtype_specs()[2] type_num = self.dtype_specs()[2]
return """ return """
...@@ -558,7 +597,10 @@ class TensorType(Type): ...@@ -558,7 +597,10 @@ class TensorType(Type):
""" % locals() """ % locals()
def c_headers(self): def c_headers(self):
"""Override `CLinkerObject.c_headers` """ """
Override `CLinkerObject.c_headers`.
"""
return scal.get_scalar_type(self.dtype).c_headers() return scal.get_scalar_type(self.dtype).c_headers()
def c_libraries(self): def c_libraries(self):
...@@ -568,7 +610,10 @@ class TensorType(Type): ...@@ -568,7 +610,10 @@ class TensorType(Type):
return scal.get_scalar_type(self.dtype).c_compile_args() return scal.get_scalar_type(self.dtype).c_compile_args()
def c_support_code(self): def c_support_code(self):
"""Override `CLinkerObject.c_support_code` """ """
Override `CLinkerObject.c_support_code`.
"""
return scal.get_scalar_type(self.dtype).c_support_code() return scal.get_scalar_type(self.dtype).c_support_code()
def c_init_code(self): def c_init_code(self):
...@@ -584,6 +629,7 @@ class TensorType(Type): ...@@ -584,6 +629,7 @@ class TensorType(Type):
def value_zeros(self, shape): def value_zeros(self, shape):
""" """
Create a numpy ndarray full of 0 values. Create a numpy ndarray full of 0 values.
""" """
return numpy.zeros(shape, dtype=self.dtype) return numpy.zeros(shape, dtype=self.dtype)
...@@ -604,17 +650,31 @@ class TensorType(Type): ...@@ -604,17 +650,31 @@ class TensorType(Type):
``get_size()`` will be called on the output of this function ``get_size()`` will be called on the output of this function
when printing the memory profile. when printing the memory profile.
:param obj: The object that this Type represents during execution Parameters
:return: Python object that ``self.get_size()`` understands ----------
obj
The object that this Type represents during execution.
Returns
-------
Python object that ``self.get_size()`` understands.
""" """
return obj.shape return obj.shape
def get_size(self, shape_info): def get_size(self, shape_info):
""" Number of bytes taken by the object represented by shape_info. """
Number of bytes taken by the object represented by shape_info.
Parameters
----------
shape_info
The output of the call to get_shape_info().
Returns
-------
The number of bytes taken by the object described by ``shape_info``.
:param shape_info: the output of the call to get_shape_info()
:return: the number of bytes taken by the object described by
``shape_info``.
""" """
if shape_info: if shape_info:
return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论