提交 245adb5e authored 作者: Nicolas Bouchard's avatar Nicolas Bouchard

Fix bugs in docstring of sparse.basic.

上级 9350272e
...@@ -159,8 +159,8 @@ def verify_grad_sparse(op, pt, structured=False, *args, **kwargs): ...@@ -159,8 +159,8 @@ def verify_grad_sparse(op, pt, structured=False, *args, **kwargs):
:param pt: List of inputs to realize the tests. :param pt: List of inputs to realize the tests.
:param structured: True to tests with a structured grad, :param structured: True to tests with a structured grad,
False otherwise. False otherwise.
:param *args: Other `verify_grad` parameters if any. :param args: Other `verify_grad` parameters if any.
:param **kwargs: Other `verify_grad` keywords if any. :param kwargs: Other `verify_grad` keywords if any.
:return: None :return: None
""" """
...@@ -543,9 +543,8 @@ class CSMProperties(gof.Op): ...@@ -543,9 +543,8 @@ class CSMProperties(gof.Op):
:return: (data, indices, indptr, shape), the properties :return: (data, indices, indptr, shape), the properties
of `csm`. of `csm`.
:note: :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured. `infer_shape` method is not available for this op.
- `infer_shape` method is not available for this op.
""" """
# NOTE # NOTE
...@@ -653,9 +652,8 @@ class CSM(gof.Op): ...@@ -653,9 +652,8 @@ class CSM(gof.Op):
:return: A sparse matrix having the properties :return: A sparse matrix having the properties
specified by the inputs. specified by the inputs.
:note: :note: The grad method returns a dense vector, so it provides
- The grad method returns a dense vector, so it provides a regular grad.
a regular grad.
""" """
# should view the other inputs too, but viewing multiple inputs is not # should view the other inputs too, but viewing multiple inputs is not
...@@ -968,8 +966,8 @@ class Cast(gof.op.Op): ...@@ -968,8 +966,8 @@ class Cast(gof.op.Op):
:return: Same as `x` but having `out_type` as dtype. :return: Same as `x` but having `out_type` as dtype.
:note: :note: The grad implemented is regular, i.e. not
- The grad implemented is regular, i.e. not structured. structured.
""" """
def __init__(self, out_type): def __init__(self, out_type):
...@@ -1026,12 +1024,11 @@ class DenseFromSparse(gof.op.Op): ...@@ -1026,12 +1024,11 @@ class DenseFromSparse(gof.op.Op):
:return: A dense matrix, the same as `x`. :return: A dense matrix, the same as `x`.
:note: :note: The grad implementation can be controlled
- The grad implementation can be controlled through the constructor via the `structured`
through the constructor via the `structured` parameter. `True` will provide a structured
parameter. `True` will provide a structured grad while `False` will provide a regular
grad while `False` will provide a regular grad. By default, the grad is structured.
grad. By default, the grad is structured.
""" """
def __init__(self, structured=True): def __init__(self, structured=True):
...@@ -1090,11 +1087,10 @@ class SparseFromDense(gof.op.Op): ...@@ -1090,11 +1087,10 @@ class SparseFromDense(gof.op.Op):
:return: The same as `x` in a sparse matrix :return: The same as `x` in a sparse matrix
format. format.
:note: :note: The grad implementation is regular, i.e.
- The grad implementation is regular, i.e. not structured.
not structured. :note: The output sparse format can also be controlled
- The output sparse format can also be controlled via the `format` parameter in the constructor.
via the `format` parameter in the constructor.
""" """
def __init__(self, format): def __init__(self, format):
...@@ -1171,8 +1167,7 @@ class GetItem2d(gof.op.Op): ...@@ -1171,8 +1167,7 @@ class GetItem2d(gof.op.Op):
:return: The slice corresponding in `x`. :return: The slice corresponding in `x`.
:note: :note: The grad is not implemented for this op.
- The grad is not implemented for this op.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -1269,8 +1264,7 @@ class GetItemScalar(gof.op.Op): ...@@ -1269,8 +1264,7 @@ class GetItemScalar(gof.op.Op):
:return: The item corresponding in `x`. :return: The item corresponding in `x`.
:note: :note: The grad is not implemented for this op.
- The grad is not implemented for this op.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -1324,11 +1318,11 @@ class Transpose(gof.op.Op): ...@@ -1324,11 +1318,11 @@ class Transpose(gof.op.Op):
:return: `x` transposed. :return: `x` transposed.
:note: :note: The returned matrix will not be in the
- The returned matrix will not be in the same format. `csc` same format. `csc` matrix will be changed
matrix will be changed in `csr` matrix and `csr` matrix in in `csr` matrix and `csr` matrix in `csc`
`csc` matrix. matrix.
- The grad is regular, i.e. not structured. :note: The grad is regular, i.e. not structured.
""" """
format_map = {'csr': 'csc', format_map = {'csr': 'csc',
...@@ -1371,8 +1365,7 @@ class Neg(gof.op.Op): ...@@ -1371,8 +1365,7 @@ class Neg(gof.op.Op):
:return: -`x`. :return: -`x`.
:note: :note: The grad is regular, i.e. not structured.
- The grad is regular, i.e. not structured.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -1413,8 +1406,7 @@ class ColScaleCSC(gof.op.Op): ...@@ -1413,8 +1406,7 @@ class ColScaleCSC(gof.op.Op):
# each column had been multiply by the corresponding # each column had been multiply by the corresponding
# element of `s`. # element of `s`.
# :note: # :note: The grad implemented is structured.
# - The grad implemented is structured.
def __eq__(self, other): def __eq__(self, other):
return type(self) == type(other) return type(self) == type(other)
...@@ -1461,8 +1453,7 @@ class RowScaleCSC(gof.op.Op): ...@@ -1461,8 +1453,7 @@ class RowScaleCSC(gof.op.Op):
# each row had been multiply by the corresponding # each row had been multiply by the corresponding
# element of `s`. # element of `s`.
# :note: # :note: The grad implemented is structured.
# - The grad implemented is structured.
def __eq__(self, other): def __eq__(self, other):
return type(self) == type(other) return type(self) == type(other)
...@@ -1511,8 +1502,7 @@ def col_scale(x, s): ...@@ -1511,8 +1502,7 @@ def col_scale(x, s):
each column had been multiplied by the corresponding each column had been multiplied by the corresponding
element of `s`. element of `s`.
:note: :note: The grad implemented is structured.
- The grad implemented is structured.
""" """
if x.format == 'csc': if x.format == 'csc':
...@@ -1535,8 +1525,7 @@ def row_scale(x, s): ...@@ -1535,8 +1525,7 @@ def row_scale(x, s):
each row had been multiplied by the corresponding each row had been multiplied by the corresponding
element of `s`. element of `s`.
:note: :note: The grad implemented is structured.
- The grad implemented is structured.
""" """
return col_scale(x.T, s).T return col_scale(x.T, s).T
...@@ -1554,13 +1543,12 @@ class SpSum(gof.op.Op): ...@@ -1554,13 +1543,12 @@ class SpSum(gof.op.Op):
:return: The sum of `x` in a dense format. :return: The sum of `x` in a dense format.
:note: :note: The grad implementation is controlled with the `sparse_grad`
- The grad implementation is controlled with the `sparse_grad` parameter. `True` will provide a structured grad and `False`
parameter. `True` will provide a structured grad and `False` will provide a regular grad. For both choices, the grad
will provide a regular grad. For both choices, the grad returns a sparse matrix having the same format as `x`.
returns a sparse matrix having the same format as `x`. :note: This op does not return a sparse matrix, but a dense tensor
- This op does not return a sparse matrix, but a dense tensor matrix.
matrix.
""" """
def __init__(self, axis=None, sparse_grad=True): def __init__(self, axis=None, sparse_grad=True):
...@@ -1658,9 +1646,8 @@ class Diag(gof.op.Op): ...@@ -1658,9 +1646,8 @@ class Diag(gof.op.Op):
:return: A dense vector representing the diagonal elements. :return: A dense vector representing the diagonal elements.
:note: :note: The grad implemented is regular, i.e. not structured, since
- The grad implemented is regular, i.e. not structured, since the output is a dense vector.
the output is a dense vector.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -1698,8 +1685,7 @@ class SquareDiagonal(gof.op.Op): ...@@ -1698,8 +1685,7 @@ class SquareDiagonal(gof.op.Op):
:return: A sparse matrix having `x` as diagonal. :return: A sparse matrix having `x` as diagonal.
:note: :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -1750,8 +1736,7 @@ class EnsureSortedIndices(gof.op.Op): ...@@ -1750,8 +1736,7 @@ class EnsureSortedIndices(gof.op.Op):
:return: The same as `x` with indices sorted. :return: The same as `x` with indices sorted.
:note: :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured.
""" """
def __init__(self, inplace): def __init__(self, inplace):
...@@ -1802,8 +1787,7 @@ def clean(x): ...@@ -1802,8 +1787,7 @@ def clean(x):
:return: The same as `x` with indices sorted and zeros :return: The same as `x` with indices sorted and zeros
removed. removed.
:note: :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured.
""" """
return ensure_sorted_indices(remove0(x)) return ensure_sorted_indices(remove0(x))
...@@ -1816,8 +1800,7 @@ class AddSS(gof.op.Op): ...@@ -1816,8 +1800,7 @@ class AddSS(gof.op.Op):
:return: `x`+`y` :return: `x`+`y`
:note: :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -1866,9 +1849,9 @@ class AddSSData(gof.op.Op): ...@@ -1866,9 +1849,9 @@ class AddSSData(gof.op.Op):
:return: The sum of the two sparse matrix element wise. :return: The sum of the two sparse matrix element wise.
:note: :note: `x` and `y` are assumed to have the same
- `x` and `y` are assumed to have the same sparsity pattern. sparsity pattern.
- The grad implemented is structured. :note: The grad implemented is structured.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -1917,8 +1900,7 @@ class AddSD(gof.op.Op): ...@@ -1917,8 +1900,7 @@ class AddSD(gof.op.Op):
:return: `x`+`y` :return: `x`+`y`
:note: :note: The grad implemented is structured on `x`.
- The grad implemented is structured on `x`.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -2026,10 +2008,9 @@ class StructuredAddSVCSR(gof.Op): ...@@ -2026,10 +2008,9 @@ class StructuredAddSVCSR(gof.Op):
# :return: A sparse matrix containing the addition of the vector to # :return: A sparse matrix containing the addition of the vector to
# the data of the sparse matrix. # the data of the sparse matrix.
# :note: # :note: The a_* are the properties of a sparse matrix in csr
# - The a_* are the properties of a sparse matrix in csr # format.
# format. # :note: This op is used as an optimization for StructuredAddSV.
# - This op is used as an optimization for StructuredAddSV.
def __eq__(self, other): def __eq__(self, other):
return (type(self) == type(other)) return (type(self) == type(other))
...@@ -2145,10 +2126,9 @@ def add(x, y): ...@@ -2145,10 +2126,9 @@ def add(x, y):
:return: `x` + `y` :return: `x` + `y`
:note: :note: At least one of `x` and `y` must be a sparse matrix.
- At least one of `x` and `y` must be a sparse matrix. :note: The grad will be structured only when one of the
- The grad will be structured only when one of the variable variable will be a dense matrix.
will be a dense matrix.
""" """
if hasattr(x, 'getnnz'): if hasattr(x, 'getnnz'):
...@@ -2181,10 +2161,9 @@ def sub(x, y): ...@@ -2181,10 +2161,9 @@ def sub(x, y):
:return: `x` - `y` :return: `x` - `y`
:note: :note: At least one of `x` and `y` must be a sparse matrix.
- At least one of `x` and `y` must be a sparse matrix. :note: The grad will be structured only when one of the variable
- The grad will be structured only when one of the variable will be a dense matrix.
will be a dense matrix.
""" """
return x + (-y) return x + (-y)
...@@ -2198,8 +2177,7 @@ class MulSS(gof.op.Op): ...@@ -2198,8 +2177,7 @@ class MulSS(gof.op.Op):
:return: `x` * `y` :return: `x` * `y`
:note: :note: At least one of `x` and `y` must be a sparse matrix.
- At least one of `x` and `y` must be a sparse matrix.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -2242,8 +2220,7 @@ class MulSD(gof.op.Op): ...@@ -2242,8 +2220,7 @@ class MulSD(gof.op.Op):
:return: `x` * `y` :return: `x` * `y`
:note: :note: The grad is regular, i.e. not structured.
- The grad is regular, i.e. not structured.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -2336,12 +2313,11 @@ class MulSDCSC(gof.Op): ...@@ -2336,12 +2313,11 @@ class MulSDCSC(gof.Op):
# :return: The multiplication of the two matrix element wise. # :return: The multiplication of the two matrix element wise.
# :note: # :note: `a_data`, `a_indices` and `a_indptr` must be the properties
# - `a_data`, `a_indices` and `a_indptr` must be the properties # of a sparse matrix in csc format.
# of a sparse matrix in csc format. # :note: The dtype of `a_data`, i.e. the dtype of the sparse matrix,
# - The dtype of `a_data`, i.e. the dtype of the sparse matrix, # cannot be a complex type.
# cannot be a complex type. # :note: This op is used as an optimization of mul_s_d.
# - This op is used as an optimization of mul_s_d.
def __eq__(self, other): def __eq__(self, other):
return (type(self) == type(other)) return (type(self) == type(other))
...@@ -2450,12 +2426,11 @@ class MulSDCSR(gof.Op): ...@@ -2450,12 +2426,11 @@ class MulSDCSR(gof.Op):
# :return: The multiplication of the two matrix element wise. # :return: The multiplication of the two matrix element wise.
# :note: # :note: `a_data`, `a_indices` and `a_indptr` must be the properties
# - `a_data`, `a_indices` and `a_indptr` must be the properties # of a sparse matrix in csr format.
# of a sparse matrix in csr format. # :note: The dtype of `a_data`, i.e. the dtype of the sparse matrix,
# - The dtype of `a_data`, i.e. the dtype of the sparse matrix, # cannot be a complex type.
# cannot be a complex type. # :note: This op is used as an optimization of mul_s_d.
# - This op is used as an optimization of mul_s_d.
def __eq__(self, other): def __eq__(self, other):
return (type(self) == type(other)) return (type(self) == type(other))
...@@ -2562,8 +2537,7 @@ class MulSV(gof.op.Op): ...@@ -2562,8 +2537,7 @@ class MulSV(gof.op.Op):
:Return: The product x * y element wise. :Return: The product x * y element wise.
:note: :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -2614,12 +2588,11 @@ class MulSVCSR(gof.Op): ...@@ -2614,12 +2588,11 @@ class MulSVCSR(gof.Op):
# :return: The multiplication of the two matrix element wise. # :return: The multiplication of the two matrix element wise.
# :note: # :note: `a_data`, `a_indices` and `a_indptr` must be the properties
# - `a_data`, `a_indices` and `a_indptr` must be the properties # of a sparse matrix in csr format.
# of a sparse matrix in csr format. # :note: The dtype of `a_data`, i.e. the dtype of the sparse matrix,
# - The dtype of `a_data`, i.e. the dtype of the sparse matrix, # cannot be a complex type.
# cannot be a complex type. # :note: This op is used as an optimization of MulSV.
# - This op is used as an optimization of MulSV.
def __eq__(self, other): def __eq__(self, other):
return (type(self) == type(other)) return (type(self) == type(other))
...@@ -2724,9 +2697,8 @@ def mul(x, y): ...@@ -2724,9 +2697,8 @@ def mul(x, y):
:return: `x` + `y` :return: `x` + `y`
:note: :note: At least one of `x` and `y` must be a sparse matrix.
- At least one of `x` and `y` must be a sparse matrix. :note: The grad is regular, i.e. not structured.
- The grad is regular, i.e. not structured.
""" """
x = as_sparse_or_tensor_variable(x) x = as_sparse_or_tensor_variable(x)
...@@ -2756,9 +2728,8 @@ class HStack(gof.op.Op): ...@@ -2756,9 +2728,8 @@ class HStack(gof.op.Op):
:return: The concatenation of the sparse arrays column wise. :return: The concatenation of the sparse arrays column wise.
:note: :note: The number of lines of the sparse matrix must agree.
- The number of lines of the sparse matrix must agree. :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured.
""" """
def __init__(self, format=None, dtype=None): def __init__(self, format=None, dtype=None):
...@@ -2838,9 +2809,8 @@ def hstack(blocks, format=None, dtype=None): ...@@ -2838,9 +2809,8 @@ def hstack(blocks, format=None, dtype=None):
:return: The concatenation of the sparse array column wise. :return: The concatenation of the sparse array column wise.
:note: :note: The number of lines of the sparse matrix must agree.
- The number of lines of the sparse matrix must agree. :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured.
""" """
blocks = [as_sparse_variable(i) for i in blocks] blocks = [as_sparse_variable(i) for i in blocks]
...@@ -2859,9 +2829,8 @@ class VStack(HStack): ...@@ -2859,9 +2829,8 @@ class VStack(HStack):
:return: The concatenation of the sparse arrays row wise. :return: The concatenation of the sparse arrays row wise.
:note: :note: The number of columns of the sparse matrix must agree.
- The number of columns of the sparse matrix must agree. :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured.
""" """
def perform(self, node, block, (out, )): def perform(self, node, block, (out, )):
...@@ -2912,9 +2881,8 @@ def vstack(blocks, format=None, dtype=None): ...@@ -2912,9 +2881,8 @@ def vstack(blocks, format=None, dtype=None):
:return: The concatenation of the sparse array row wise. :return: The concatenation of the sparse array row wise.
:note: :note: The number of columns of the sparse matrix must agree.
- The number of columns of the sparse matrix must agree. :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured.
""" """
blocks = [as_sparse_variable(i) for i in blocks] blocks = [as_sparse_variable(i) for i in blocks]
...@@ -2924,8 +2892,14 @@ def vstack(blocks, format=None, dtype=None): ...@@ -2924,8 +2892,14 @@ def vstack(blocks, format=None, dtype=None):
class Remove0(gof.Op): class Remove0(gof.Op):
""" """Remove explicit zeros from a sparse matrix, and
Remove explicit zeros from a sparse matrix, and resort indices resort indices.
:param x: Sparse matrix.
:return: Exactly `x` but with a data attribute
exempt of zeros.
:note: The grad implemented is regular, i.e. not structured.
""" """
def __init__(self, inplace=False, *args, **kwargs): def __init__(self, inplace=False, *args, **kwargs):
...@@ -3207,8 +3181,7 @@ class StructuredDot(gof.Op): ...@@ -3207,8 +3181,7 @@ class StructuredDot(gof.Op):
:return: The dot product of `a` and `b`. :return: The dot product of `a` and `b`.
:note: :note: The grad implemented is structured.
- The grad implemented is structured.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -3303,8 +3276,7 @@ def structured_dot(x, y): ...@@ -3303,8 +3276,7 @@ def structured_dot(x, y):
:return: The dot product of `a` and `b`. :return: The dot product of `a` and `b`.
:note: :note: The grad implemented is structured.
- The grad implemented is structured.
""" """
# @todo: Maybe the triple-transposition formulation (when x is dense) # @todo: Maybe the triple-transposition formulation (when x is dense)
...@@ -3342,9 +3314,8 @@ class StructuredDotCSC(gof.Op): ...@@ -3342,9 +3314,8 @@ class StructuredDotCSC(gof.Op):
# :return: The dot product of `a` and `b`. # :return: The dot product of `a` and `b`.
# :note: # :note: The grad implemented is structured.
# - The grad implemented is structured. # :note: This op is used as an optimization for StructuredDot.
# - This op is used as an optimization for StructuredDot.
def __eq__(self, other): def __eq__(self, other):
return (type(self) == type(other)) return (type(self) == type(other))
...@@ -3532,9 +3503,8 @@ class StructuredDotCSR(gof.Op): ...@@ -3532,9 +3503,8 @@ class StructuredDotCSR(gof.Op):
# :return: The dot product of `a` and `b`. # :return: The dot product of `a` and `b`.
# :note: # :note: The grad implemented is structured.
# - The grad implemented is structured. # :note: This op is used as an optimization for StructuredDot.
# - This op is used as an optimization for StructuredDot.
def __eq__(self, other): def __eq__(self, other):
return (type(self) == type(other)) return (type(self) == type(other))
...@@ -3704,8 +3674,7 @@ class SamplingDot(gof.op.Op): ...@@ -3704,8 +3674,7 @@ class SamplingDot(gof.op.Op):
:return: A dense matrix containing the dot product of `x` by `y`.T only :return: A dense matrix containing the dot product of `x` by `y`.T only
where `p` is 1. where `p` is 1.
:note: :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -3784,11 +3753,10 @@ class SamplingDotCSR(gof.Op): ...@@ -3784,11 +3753,10 @@ class SamplingDotCSR(gof.Op):
# :return: A dense matrix containing the dot product of `x` by `y`.T only # :return: A dense matrix containing the dot product of `x` by `y`.T only
# where `p` is 1. # where `p` is 1.
# :note: # :note: If we have the input of mixed dtype, we insert cast elemwise
# - If we have the input of mixed dtype, we insert cast elemwise # in the graph to be able to call blas function as they don't
# in the graph to be able to call blas function as they don't # allow mixed dtype.
# allow mixed dtype. # :note: This op is used as an optimization for SamplingDot.
# - This op is used as an optimization for SamplingDot.
def __eq__(self, other): def __eq__(self, other):
return type(self) == type(other) return type(self) == type(other)
...@@ -4028,10 +3996,9 @@ class StructuredDotGradCSC(gof.Op): ...@@ -4028,10 +3996,9 @@ class StructuredDotGradCSC(gof.Op):
# :return: The grad of `a`.`b` for `a` accumulated # :return: The grad of `a`.`b` for `a` accumulated
# with g_ab. # with g_ab.
# :note: # :note: The grad implemented is structured.
# - The grad implemented is structured. # :note: a_* are the corresponding properties of a sparse
# - a_* are the corresponding properties of a sparse # matrix in csc format.
# matrix in csc format.
def __eq__(self, other): def __eq__(self, other):
return (type(self) == type(other)) return (type(self) == type(other))
...@@ -4164,10 +4131,9 @@ class StructuredDotGradCSR(gof.Op): ...@@ -4164,10 +4131,9 @@ class StructuredDotGradCSR(gof.Op):
# :return: The grad of `a`.`b` for `a` accumulated # :return: The grad of `a`.`b` for `a` accumulated
# with g_ab. # with g_ab.
# :note: # :note: The grad implemented is structured.
# - The grad implemented is structured. # :note: a_* are the corresponding properties of a sparse
# - a_* are the corresponding properties of a sparse # matrix in csr format.
# matrix in csr format.
def __eq__(self, other): def __eq__(self, other):
return (type(self) == type(other)) return (type(self) == type(other))
...@@ -4302,9 +4268,8 @@ class Dot(gof.op.Op): ...@@ -4302,9 +4268,8 @@ class Dot(gof.op.Op):
:return: The dot product `x`.`y` in a dense format. :return: The dot product `x`.`y` in a dense format.
:note: :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured. :note: At least one of `x` or `y` must be a sparse matrix.
- At least one of `x` or `y` must be a sparse matrix.
""" """
def __eq__(self, other): def __eq__(self, other):
...@@ -4381,9 +4346,8 @@ def dot(x, y): ...@@ -4381,9 +4346,8 @@ def dot(x, y):
:return: The dot product `x`.`y` in a dense format. :return: The dot product `x`.`y` in a dense format.
:note: :note: The grad implemented is regular, i.e. not structured.
- The grad implemented is regular, i.e. not structured. :note: At least one of `x` or `y` must be a sparse matrix.
- At least one of `x` or `y` must be a sparse matrix.
""" """
if hasattr(x, 'getnnz'): if hasattr(x, 'getnnz'):
...@@ -4410,9 +4374,8 @@ class Usmm(gof.op.Op): ...@@ -4410,9 +4374,8 @@ class Usmm(gof.op.Op):
:return: The dense matrix resulting from `alpha` * `x` `y` + `z`. :return: The dense matrix resulting from `alpha` * `x` `y` + `z`.
:note: :note: The grad is not implemented for this op.
- The grad is not implemented for this op. :note: At least one of `x` or `y` must be a sparse matrix.
- At least one of `x` or `y` must be a sparse matrix.
""" """
# We don't implement the infer_shape as it is # We don't implement the infer_shape as it is
...@@ -4484,10 +4447,9 @@ class UsmmCscDense(gof.Op): ...@@ -4484,10 +4447,9 @@ class UsmmCscDense(gof.Op):
# :return: The dense matrix resulting from `alpha` * `x` `y` + `z`. # :return: The dense matrix resulting from `alpha` * `x` `y` + `z`.
# :note: # :note: The grad is not implemented for this op.
# - The grad is not implemented for this op. # :note: Optimized version of Usmm when `x` is in csc format and
# - Optimized version of Usmm when `x` is in csc format and # `y` is dense.
# `y` is dense.
def __init__(self, inplace): def __init__(self, inplace):
self.inplace = inplace self.inplace = inplace
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论