Commit 909aa3d7, authored by Iban Harlouchet

numpydoc for theano/sandbox/cuda/type.py

Parent commit: 49af6efe
"""Provide CudaNdarrayType """
Provide CudaNdarrayType.
""" """
from __future__ import print_function from __future__ import print_function
import os import os
...@@ -31,36 +33,47 @@ class CudaNdarrayType(Type): ...@@ -31,36 +33,47 @@ class CudaNdarrayType(Type):
dtype = 'float32' dtype = 'float32'
Variable = None Variable = None
""" This will be set to the Variable type corresponding to this class. """
This will be set to the Variable type corresponding to this class.
That variable type is `CudaNdarrayVariable` defined in the That variable type is `CudaNdarrayVariable` defined in the
``var.py`` file beside this one. ``var.py`` file beside this one.
:note: The var file depends on the file basic_ops.py, which Notes
depends on this file. A cyclic dependency is avoided by not -----
hardcoding ``Variable = CudaNdarrayVariable``. The var file depends on the file basic_ops.py, which depends on this file.
A cyclic dependency is avoided by not hardcoding
``Variable = CudaNdarrayVariable``.
""" """
Constant = None Constant = None
""" This will be set to `CudaNdarrayConstant` defined in ``var.py`` """
This will be set to `CudaNdarrayConstant` defined in ``var.py``.
:note: Notes
-----
The var file depends on the file basic_ops.py, which depends on this file. The var file depends on the file basic_ops.py, which depends on this file.
A cyclic dependency is avoided by not hardcoding this class. A cyclic dependency is avoided by not hardcoding this class.
""" """
SharedVariable = None SharedVariable = None
""" This will be set to `CudaNdarraySharedVariable` defined in ``var.py`` """
This will be set to `CudaNdarraySharedVariable` defined in ``var.py``.
:note: Notes
-----
The var file depends on the file basic_ops.py, which depends on this file. The var file depends on the file basic_ops.py, which depends on this file.
A cyclic dependency is avoided by not hardcoding this class. A cyclic dependency is avoided by not hardcoding this class.
""" """
if cuda is not None: if cuda is not None:
value_zeros = staticmethod(cuda.CudaNdarray.zeros) value_zeros = staticmethod(cuda.CudaNdarray.zeros)
""" """
Create an CudaNdarray full of 0 values Create an CudaNdarray full of 0 values.
""" """
def __init__(self, broadcastable, name=None, dtype=None): def __init__(self, broadcastable, name=None, dtype=None):
...@@ -120,11 +133,13 @@ class CudaNdarrayType(Type): ...@@ -120,11 +133,13 @@ class CudaNdarrayType(Type):
data) data)
def filter_variable(self, other, allow_convert=True): def filter_variable(self, other, allow_convert=True):
"""Convert a Variable into a CudaNdarrayType, if compatible. """
Convert a Variable into a CudaNdarrayType, if compatible.
This Variable should either already be a CudaNdarrayType, or be This Variable should either already be a CudaNdarrayType, or be
a TensorType. It has to have the right number of dimensions, a TensorType. It has to have the right number of dimensions,
broadcastable pattern, and dtype. broadcastable pattern, and dtype.
""" """
if hasattr(other, '_as_CudaNdarrayVariable'): if hasattr(other, '_as_CudaNdarrayVariable'):
other = other._as_CudaNdarrayVariable() other = other._as_CudaNdarrayVariable()
...@@ -209,10 +224,12 @@ class CudaNdarrayType(Type): ...@@ -209,10 +224,12 @@ class CudaNdarrayType(Type):
) )
def dtype_specs(self): def dtype_specs(self):
"""Return a tuple (python type, c type, numpy typenum) that """
corresponds to self.dtype. Return a tuple (python type, c type, numpy typenum) that corresponds
to self.dtype.
This function is used internally as part of C code generation. This function is used internally as part of C code generation.
""" """
# TODO: add more type correspondances for e.g. int32, int64, float32, # TODO: add more type correspondances for e.g. int32, int64, float32,
# complex64, etc. # complex64, etc.
...@@ -236,7 +253,10 @@ class CudaNdarrayType(Type): ...@@ -236,7 +253,10 @@ class CudaNdarrayType(Type):
self.__class__.__name__, self.dtype)) self.__class__.__name__, self.dtype))
def __eq__(self, other):
    """
    Compare True iff other is the same kind of CudaNdarrayType.

    Two instances compare equal exactly when they have the same class
    and the same broadcastable pattern.
    """
    # Guard clause: a different class can never be "the same kind".
    if type(self) != type(other):
        return False
    return self.broadcastable == other.broadcastable
...@@ -248,12 +268,16 @@ class CudaNdarrayType(Type): ...@@ -248,12 +268,16 @@ class CudaNdarrayType(Type):
return theano.tensor.patternbroadcast(var, self.broadcastable) return theano.tensor.patternbroadcast(var, self.broadcastable)
def __hash__(self):
    """
    Hash equal for same kinds of CudaNdarrayType.

    Kept consistent with ``__eq__``: the hash folds together the class
    identity and the broadcastable pattern, so equal instances hash alike.
    """
    # XOR is commutative, so the operand order does not change the result.
    return hash(self.broadcastable) ^ hash(type(self))
ndim = property(lambda self: len(self.broadcastable), ndim = property(lambda self: len(self.broadcastable),
doc="number of dimensions") doc="number of dimensions")
"""Number of dimensions """
Number of dimensions.
This read-only property is the preferred way to get the number of This read-only property is the preferred way to get the number of
dimensions of a `CudaNdarrayType`. dimensions of a `CudaNdarrayType`.
...@@ -261,12 +285,14 @@ class CudaNdarrayType(Type): ...@@ -261,12 +285,14 @@ class CudaNdarrayType(Type):
""" """
def make_variable(self, name=None):
    """
    Return a `TensorVariable` of this type.

    The concrete class instantiated is ``self.Variable`` (set on the
    class elsewhere in this file).

    Parameters
    ----------
    name : str
        A pretty name to identify this `Variable` when printing and
        debugging.
    """
    variable_cls = self.Variable
    return variable_cls(self, name=name)
...@@ -381,7 +407,9 @@ class CudaNdarrayType(Type): ...@@ -381,7 +407,9 @@ class CudaNdarrayType(Type):
return sio.getvalue() return sio.getvalue()
def c_extract_out(self, name, sub, check_input=True, check_broadcast=True): def c_extract_out(self, name, sub, check_input=True, check_broadcast=True):
""" To allow the hack to skip check_broadcast. """
To allow the hack to skip check_broadcast.
""" """
return """ return """
if (py_%(name)s == Py_None) if (py_%(name)s == Py_None)
...@@ -411,7 +439,10 @@ class CudaNdarrayType(Type): ...@@ -411,7 +439,10 @@ class CudaNdarrayType(Type):
""" % locals() """ % locals()
def c_sync(self, name, sub): def c_sync(self, name, sub):
"""Override `CLinkerOp.c_sync` """ """
Override `CLinkerOp.c_sync`.
"""
return """ return """
//std::cerr << "sync\\n"; //std::cerr << "sync\\n";
if (NULL == %(name)s) { if (NULL == %(name)s) {
...@@ -433,11 +464,17 @@ class CudaNdarrayType(Type): ...@@ -433,11 +464,17 @@ class CudaNdarrayType(Type):
""" % locals() """ % locals()
def c_headers(self):
    """
    Override `CLinkerOp.c_headers`.

    Returns
    -------
    list of str
        The header files required by C code generated for this type.
    """
    headers = ['cuda_ndarray.cuh']
    return headers
def c_header_dirs(self): def c_header_dirs(self):
"""Override `CLinkerOp.c_headers` """ """
Override `CLinkerOp.c_headers`.
"""
ret = [os.path.dirname(cuda_ndarray.__file__)] ret = [os.path.dirname(cuda_ndarray.__file__)]
cuda_root = config.cuda.root cuda_root = config.cuda.root
if cuda_root: if cuda_root:
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论