Commit 94f5ddfd authored by: Brandon T. Willard Committed by: Brandon T. Willard

Rename SparseType to SparseTensorType

Parent b8c1c463
......@@ -167,7 +167,7 @@ def get_scalar_constant_value(v):
"""
# Is it necessary to test for presence of aesara.sparse at runtime?
sparse = globals().get("sparse")
if sparse and isinstance(v.type, sparse.SparseType):
if sparse and isinstance(v.type, sparse.SparseTensorType):
if v.owner is not None and isinstance(v.owner.op, sparse.CSM):
data = v.owner.inputs[0]
return tensor.get_scalar_constant_value(data)
......
......@@ -17,7 +17,7 @@ class CType(Type, CLinkerType):
- `TensorType`: for numpy.ndarray
- `SparseType`: for scipy.sparse
- `SparseTensorType`: for scipy.sparse
But you are encouraged to write your own, as described in WRITEME.
......
......@@ -12,7 +12,7 @@ from aesara.tensor.type import TensorType
try:
import scipy.sparse
from aesara.sparse.basic import SparseType
from aesara.sparse.basic import SparseTensorType
def _is_sparse(a):
return scipy.sparse.issparse(a)
......@@ -64,4 +64,4 @@ def may_share_memory(a, b, raise_other_type=True):
if a_gpua or b_gpua:
return False
return SparseType.may_share_memory(a, b)
return SparseTensorType.may_share_memory(a, b)
......@@ -9,7 +9,7 @@ except ImportError:
enable_sparse = False
warn("SciPy can't be imported. Sparse matrix support is disabled.")
from aesara.sparse.type import SparseType, _is_sparse
from aesara.sparse.type import SparseTensorType, _is_sparse
if enable_sparse:
......
......@@ -22,7 +22,7 @@ from aesara.graph.op import Op
from aesara.link.c.op import COp
from aesara.link.c.type import generic
from aesara.misc.safe_asarray import _asarray
from aesara.sparse.type import SparseType, _is_sparse
from aesara.sparse.type import SparseTensorType, _is_sparse
from aesara.sparse.utils import hash_from_sparse
from aesara.tensor import basic as at
from aesara.tensor.basic import Split
......@@ -80,11 +80,11 @@ def _is_sparse_variable(x):
if not isinstance(x, Variable):
raise NotImplementedError(
"this function should only be called on "
"*variables* (of type sparse.SparseType "
"*variables* (of type sparse.SparseTensorType "
"or TensorType, for instance), not ",
x,
)
return isinstance(x.type, SparseType)
return isinstance(x.type, SparseTensorType)
def _is_dense_variable(x):
......@@ -100,7 +100,7 @@ def _is_dense_variable(x):
if not isinstance(x, Variable):
raise NotImplementedError(
"this function should only be called on "
"*variables* (of type sparse.SparseType or "
"*variables* (of type sparse.SparseTensorType or "
"TensorType, for instance), not ",
x,
)
......@@ -159,13 +159,15 @@ def as_sparse_variable(x, name=None, ndim=None, **kwargs):
else:
x = x.outputs[0]
if isinstance(x, Variable):
if not isinstance(x.type, SparseType):
raise TypeError("Variable type field must be a SparseType.", x, x.type)
if not isinstance(x.type, SparseTensorType):
raise TypeError(
"Variable type field must be a SparseTensorType.", x, x.type
)
return x
try:
return constant(x, name=name)
except TypeError:
raise TypeError(f"Cannot convert {x} to SparseType", type(x))
raise TypeError(f"Cannot convert {x} to SparseTensorType", type(x))
as_sparse = as_sparse_variable
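For context, a minimal sketch of how the converter above behaves after the rename, assuming SciPy is installed; the `eye` input is illustrative:

```python
# Sketch: converting a scipy.sparse matrix yields a constant whose type
# is the renamed SparseTensorType.
import numpy as np
import scipy.sparse

from aesara.sparse.basic import SparseTensorType, as_sparse_variable

v = as_sparse_variable(scipy.sparse.csr_matrix(np.eye(2)))
assert isinstance(v.type, SparseTensorType)
```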
......@@ -198,10 +200,10 @@ def constant(x, name=None):
raise TypeError("sparse.constant must be called on a " "scipy.sparse.spmatrix")
try:
return SparseConstant(
SparseType(format=x.format, dtype=x.dtype), x.copy(), name=name
SparseTensorType(format=x.format, dtype=x.dtype), x.copy(), name=name
)
except TypeError:
raise TypeError(f"Could not convert {x} to SparseType", type(x))
raise TypeError(f"Could not convert {x} to SparseTensorType", type(x))
def sp_ones_like(x):
......@@ -259,7 +261,7 @@ def override_dense(*methods):
self = self.toarray()
new_args = [
arg.toarray()
if hasattr(arg, "type") and isinstance(arg.type, SparseType)
if hasattr(arg, "type") and isinstance(arg.type, SparseTensorType)
else arg
for arg in args
]
......@@ -503,15 +505,15 @@ class SparseConstant(TensorConstant, _sparse_py_operators):
return str(self)
SparseType.variable_type = SparseVariable
SparseType.constant_type = SparseConstant
SparseTensorType.variable_type = SparseVariable
SparseTensorType.constant_type = SparseConstant
# for more dtypes, call SparseType(format, dtype)
# for more dtypes, call SparseTensorType(format, dtype)
def matrix(format, name=None, dtype=None):
if dtype is None:
dtype = config.floatX
type = SparseType(format=format, dtype=dtype)
type = SparseTensorType(format=format, dtype=dtype)
return type(name)
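A usage sketch for the `matrix` helper above; the variable name and dtype are illustrative:

```python
# Usage sketch: build a symbolic csr matrix variable via the helper.
import aesara.sparse as sparse

x = sparse.matrix("csr", name="x", dtype="float64")
assert x.type.format == "csr" and x.type.dtype == "float64"
```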
......@@ -527,15 +529,15 @@ def bsr_matrix(name=None, dtype=None):
return matrix("bsr", name, dtype)
# for more dtypes, call SparseType(format, dtype)
csc_dmatrix = SparseType(format="csc", dtype="float64")
csr_dmatrix = SparseType(format="csr", dtype="float64")
bsr_dmatrix = SparseType(format="bsr", dtype="float64")
csc_fmatrix = SparseType(format="csc", dtype="float32")
csr_fmatrix = SparseType(format="csr", dtype="float32")
bsr_fmatrix = SparseType(format="bsr", dtype="float32")
# for more dtypes, call SparseTensorType(format, dtype)
csc_dmatrix = SparseTensorType(format="csc", dtype="float64")
csr_dmatrix = SparseTensorType(format="csr", dtype="float64")
bsr_dmatrix = SparseTensorType(format="bsr", dtype="float64")
csc_fmatrix = SparseTensorType(format="csc", dtype="float32")
csr_fmatrix = SparseTensorType(format="csr", dtype="float32")
bsr_fmatrix = SparseTensorType(format="bsr", dtype="float32")
all_dtypes = list(SparseType.dtype_specs_map.keys())
all_dtypes = list(SparseTensorType.dtype_specs_map.keys())
complex_dtypes = [t for t in all_dtypes if t[:7] == "complex"]
float_dtypes = [t for t in all_dtypes if t[:5] == "float"]
int_dtypes = [t for t in all_dtypes if t[:3] == "int"]
......@@ -725,7 +727,7 @@ class CSM(Op):
return Apply(
self,
[data, indices, indptr, shape],
[SparseType(dtype=data.type.dtype, format=self.format)()],
[SparseTensorType(dtype=data.type.dtype, format=self.format)()],
)
def perform(self, node, inputs, outputs):
......@@ -931,7 +933,9 @@ class Cast(Op):
def make_node(self, x):
x = as_sparse_variable(x)
assert x.format in ("csr", "csc")
return Apply(self, [x], [SparseType(dtype=self.out_type, format=x.format)()])
return Apply(
self, [x], [SparseTensorType(dtype=self.out_type, format=x.format)()]
)
def perform(self, node, inputs, outputs):
(x,) = inputs
......@@ -1014,7 +1018,7 @@ class DenseFromSparse(Op):
return f"{self.__class__.__name__}{{structured_grad={self.sparse_grad}}}"
def __call__(self, x):
if not isinstance(x.type, SparseType):
if not isinstance(x.type, SparseTensorType):
return x
return super().__call__(x)
......@@ -1097,7 +1101,7 @@ class SparseFromDense(Op):
return f"{self.__class__.__name__}{{{self.format}}}"
def __call__(self, x):
if isinstance(x.type, SparseType):
if isinstance(x.type, SparseTensorType):
return x
return super().__call__(x)
......@@ -1116,12 +1120,14 @@ class SparseFromDense(Op):
else:
assert x.ndim == 2
return Apply(self, [x], [SparseType(dtype=x.type.dtype, format=self.format)()])
return Apply(
self, [x], [SparseTensorType(dtype=x.type.dtype, format=self.format)()]
)
def perform(self, node, inputs, outputs):
(x,) = inputs
(out,) = outputs
out[0] = SparseType.format_cls[self.format](x)
out[0] = SparseTensorType.format_cls[self.format](x)
def grad(self, inputs, gout):
(x,) = inputs
......@@ -1585,7 +1591,11 @@ class Transpose(Op):
return Apply(
self,
[x],
[SparseType(dtype=x.type.dtype, format=self.format_map[x.type.format])()],
[
SparseTensorType(
dtype=x.type.dtype, format=self.format_map[x.type.format]
)()
],
)
def perform(self, node, inputs, outputs):
......@@ -2002,7 +2012,7 @@ class SquareDiagonal(Op):
if diag.type.ndim != 1:
raise TypeError("data argument must be a vector", diag.type)
return Apply(self, [diag], [SparseType(dtype=diag.dtype, format="csc")()])
return Apply(self, [diag], [SparseTensorType(dtype=diag.dtype, format="csc")()])
def perform(self, node, inputs, outputs):
(z,) = outputs
......@@ -2146,7 +2156,7 @@ class AddSS(Op):
assert y.format in ("csr", "csc")
out_dtype = aes.upcast(x.type.dtype, y.type.dtype)
return Apply(
self, [x, y], [SparseType(dtype=out_dtype, format=x.type.format)()]
self, [x, y], [SparseTensorType(dtype=out_dtype, format=x.type.format)()]
)
def perform(self, node, inputs, outputs):
......@@ -2183,7 +2193,7 @@ class AddSSData(Op):
if x.type.format != y.type.format:
raise NotImplementedError()
return Apply(
self, [x, y], [SparseType(dtype=x.type.dtype, format=x.type.format)()]
self, [x, y], [SparseTensorType(dtype=x.type.dtype, format=x.type.format)()]
)
def perform(self, node, inputs, outputs):
......@@ -2286,7 +2296,7 @@ class StructuredAddSV(Op):
if x.type.dtype != y.type.dtype:
raise NotImplementedError()
return Apply(
self, [x, y], [SparseType(dtype=x.type.dtype, format=x.type.format)()]
self, [x, y], [SparseTensorType(dtype=x.type.dtype, format=x.type.format)()]
)
def perform(self, node, inputs, outputs):
......@@ -2426,7 +2436,7 @@ class MulSS(Op):
assert y.format in ("csr", "csc")
out_dtype = aes.upcast(x.type.dtype, y.type.dtype)
return Apply(
self, [x, y], [SparseType(dtype=out_dtype, format=x.type.format)()]
self, [x, y], [SparseTensorType(dtype=out_dtype, format=x.type.format)()]
)
def perform(self, node, inputs, outputs):
......@@ -2469,7 +2479,7 @@ class MulSD(Op):
# Broadcasting of the sparse matrix is not supported.
# We support nd == 0 used by grad of SpSum()
assert y.type.ndim in (0, 2)
out = SparseType(dtype=dtype, format=x.type.format)()
out = SparseTensorType(dtype=dtype, format=x.type.format)()
return Apply(self, [x, y], [out])
def perform(self, node, inputs, outputs):
......@@ -2559,7 +2569,7 @@ class MulSV(Op):
f"Got {x.type.dtype} and {y.type.dtype}."
)
return Apply(
self, [x, y], [SparseType(dtype=x.type.dtype, format=x.type.format)()]
self, [x, y], [SparseTensorType(dtype=x.type.dtype, format=x.type.format)()]
)
def perform(self, node, inputs, outputs):
......@@ -2694,7 +2704,9 @@ class __ComparisonOpSS(Op):
if x.type.format != y.type.format:
raise NotImplementedError()
return Apply(self, [x, y], [SparseType(dtype="uint8", format=x.type.format)()])
return Apply(
self, [x, y], [SparseTensorType(dtype="uint8", format=x.type.format)()]
)
def perform(self, node, inputs, outputs):
(x, y) = inputs
......@@ -3050,7 +3062,9 @@ class HStack(Op):
for x in var:
assert x.format in ("csr", "csc")
return Apply(self, var, [SparseType(dtype=self.dtype, format=self.format)()])
return Apply(
self, var, [SparseTensorType(dtype=self.dtype, format=self.format)()]
)
def perform(self, node, block, outputs):
(out,) = outputs
......@@ -3578,7 +3592,7 @@ class TrueDot(Op):
raise NotImplementedError()
inputs = [x, y] # Need to convert? e.g. assparse
outputs = [SparseType(dtype=x.type.dtype, format=myformat)()]
outputs = [SparseTensorType(dtype=x.type.dtype, format=myformat)()]
return Apply(self, inputs, outputs)
def perform(self, node, inp, out_):
......@@ -3702,7 +3716,7 @@ class StructuredDot(Op):
raise NotImplementedError("non-matrix b")
if _is_sparse_variable(b):
return Apply(self, [a, b], [SparseType(a.type.format, dtype_out)()])
return Apply(self, [a, b], [SparseTensorType(a.type.format, dtype_out)()])
else:
return Apply(
self,
......@@ -3719,7 +3733,7 @@ class StructuredDot(Op):
)
variable = a * b
if isinstance(node.outputs[0].type, SparseType):
if isinstance(node.outputs[0].type, SparseTensorType):
assert _is_sparse(variable)
out[0] = variable
return
......
......@@ -7,7 +7,7 @@ from aesara.graph.basic import Apply
from aesara.graph.op import Op
from aesara.sparse.basic import (
Remove0,
SparseType,
SparseTensorType,
_is_sparse,
as_sparse_variable,
remove0,
......@@ -108,7 +108,9 @@ class Binomial(Op):
assert shape.dtype in discrete_dtypes
return Apply(
self, [n, p, shape], [SparseType(dtype=self.dtype, format=self.format)()]
self,
[n, p, shape],
[SparseTensorType(dtype=self.dtype, format=self.format)()],
)
def perform(self, node, inputs, outputs):
......
......@@ -3,7 +3,7 @@ import copy
import scipy.sparse
from aesara.compile import SharedVariable, shared_constructor
from aesara.sparse.basic import SparseType, _sparse_py_operators
from aesara.sparse.basic import SparseTensorType, _sparse_py_operators
class SparseTensorSharedVariable(_sparse_py_operators, SharedVariable):
......@@ -16,7 +16,7 @@ def sparse_constructor(
value, name=None, strict=False, allow_downcast=None, borrow=False, format=None
):
"""
SharedVariable Constructor for SparseType.
SharedVariable Constructor for SparseTensorType.
writeme
......@@ -29,7 +29,7 @@ def sparse_constructor(
if format is None:
format = value.format
type = SparseType(format=format, dtype=value.dtype)
type = SparseTensorType(format=format, dtype=value.dtype)
if not borrow:
value = copy.deepcopy(value)
return SparseTensorSharedVariable(
......
......@@ -25,9 +25,8 @@ def _is_sparse(x):
return isinstance(x, scipy.sparse.spmatrix)
class SparseType(TensorType, HasDataType):
"""
Fundamental way to create a sparse node.
class SparseTensorType(TensorType, HasDataType):
"""A `Type` for sparse tensors.
Parameters
----------
......@@ -42,8 +41,7 @@ class SparseType(TensorType, HasDataType):
Notes
-----
As far as I can tell, L{scipy.sparse} objects must be matrices, i.e.
have dimension 2.
Currently, sparse tensors can only be matrices (i.e. have two dimensions).
"""
......@@ -126,15 +124,13 @@ class SparseType(TensorType, HasDataType):
raise NotImplementedError()
return sp
@staticmethod
def may_share_memory(a, b):
# This is Fred suggestion for a quick and dirty way of checking
# aliasing .. this can potentially be further refined (ticket #374)
@classmethod
def may_share_memory(cls, a, b):
if _is_sparse(a) and _is_sparse(b):
return (
SparseType.may_share_memory(a, b.data)
or SparseType.may_share_memory(a, b.indices)
or SparseType.may_share_memory(a, b.indptr)
cls.may_share_memory(a, b.data)
or cls.may_share_memory(a, b.indices)
or cls.may_share_memory(a, b.indptr)
)
if _is_sparse(b) and isinstance(a, np.ndarray):
a, b = b, a
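A hedged sketch of the classmethod above (the full sparse-vs-ndarray branch is truncated in this hunk), consistent with `test_may_share_memory` and `test_sparse_shared_memory` later in this diff:

```python
# Sketch: a sparse matrix trivially shares memory with itself and with
# the ndarray buffers (.data/.indices/.indptr) that back it.
import numpy as np
import scipy.sparse

from aesara.sparse.type import SparseTensorType

a = scipy.sparse.csr_matrix(np.eye(3))
assert SparseTensorType.may_share_memory(a, a)
assert SparseTensorType.may_share_memory(a, a.data)
```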
......@@ -151,7 +147,7 @@ class SparseType(TensorType, HasDataType):
def convert_variable(self, var):
res = super().convert_variable(var)
if res and not isinstance(res.type, SparseType):
if res and not isinstance(res.type, type(self)):
# TODO: Convert to this sparse format
raise NotImplementedError()
......@@ -232,9 +228,8 @@ class SparseType(TensorType, HasDataType):
return False
# Register SparseType's C code for ViewOp.
aesara.compile.register_view_op_c_code(
SparseType,
SparseTensorType,
"""
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
......@@ -242,3 +237,6 @@ aesara.compile.register_view_op_c_code(
""",
1,
)
# This is a deprecated alias used for (temporary) backward-compatibility
SparseType = SparseTensorType
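Given the alias above, downstream code that still imports the old name continues to work unchanged:

```python
# Backward-compatibility sketch: both names resolve to the same class.
from aesara.sparse.type import SparseTensorType, SparseType

assert SparseType is SparseTensorType
x = SparseType(format="csr", dtype="float64")()  # old spelling still works
```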
......@@ -314,9 +314,9 @@ def get_scalar_constant_value(
except ValueError:
raise NotScalarConstantError()
from aesara.sparse.type import SparseType
from aesara.sparse.type import SparseTensorType
if isinstance(v.type, SparseType):
if isinstance(v.type, SparseTensorType):
raise NotScalarConstantError()
return data
......
......@@ -44,7 +44,7 @@ usual dense tensors. In particular, in the
instead of ``as_tensor_variable(x)``.
Another difference is that you need to use ``SparseVariable`` and
``SparseType`` instead of ``TensorVariable`` and ``TensorType``.
``SparseTensorType`` instead of ``TensorVariable`` and ``TensorType``.
Do not forget that we support only sparse matrices (so only 2 dimensions)
and (like in SciPy) they do not support broadcasting operations by default
......@@ -55,7 +55,7 @@ you can create output variables like this:
.. code-block:: python
out_format = inputs[0].format # or 'csr' or 'csc' if the output format is fixed
SparseType(dtype=inputs[0].dtype, format=out_format).make_variable()
SparseTensorType(dtype=inputs[0].dtype, format=out_format).make_variable()
See the sparse :class:`Aesara.sparse.basic.Cast` `Op` code for a good example of
a sparse `Op` with Python code.
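Putting the documented pattern together, a minimal hypothetical `make_node` might look like this (only `MyStructuredOp` is invented; the rest mirrors `Cast` in this diff):

```python
# Minimal sketch of a sparse Op's make_node using SparseTensorType.
from aesara.graph.basic import Apply
from aesara.graph.op import Op
from aesara.sparse.basic import as_sparse_variable
from aesara.sparse.type import SparseTensorType


class MyStructuredOp(Op):  # hypothetical example Op
    def make_node(self, x):
        x = as_sparse_variable(x)
        out_format = x.type.format  # or "csr"/"csc" if the output format is fixed
        return Apply(
            self, [x], [SparseTensorType(dtype=x.type.dtype, format=out_format)()]
        )
```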
......@@ -226,7 +226,7 @@ along with pointers to the relevant documentation.
primitive type. The C type associated with this Aesara type is the
represented C primitive itself.
* :ref:`SparseType <sparse_ops>` : Aesara `Type` used to represent sparse
* :ref:`SparseTensorType <sparse_ops>` : Aesara `Type` used to represent sparse
tensors. There is no equivalent C type for this Aesara `Type` but you
can split a sparse variable into its parts as TensorVariables. Those
can then be used as inputs to an op with C code.
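A sketch of that splitting, using the `csm_properties` helper exercised in the tests below (assuming it is exported from `aesara.sparse`):

```python
# Sketch: decompose a sparse variable into dense-compatible parts.
import aesara
import aesara.sparse as sparse

x = sparse.matrix("csr", name="x", dtype="float64")
data, indices, indptr, shape = sparse.csm_properties(x)
f = aesara.function([x], [data, indices, indptr, shape])
```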
......
......@@ -751,8 +751,8 @@ class TestAliasingRules:
# operations are used) and to break the elemwise composition
# with some non-elemwise op (here dot)
x = sparse.SparseType("csc", dtype="float64")()
y = sparse.SparseType("csc", dtype="float64")()
x = sparse.SparseTensorType("csc", dtype="float64")()
y = sparse.SparseTensorType("csc", dtype="float64")()
f = function([In(x, mutable=True), In(y, mutable=True)], (x + y) + (x + y))
# Test 1. If the same variable is given twice
......
......@@ -38,7 +38,7 @@ from aesara.sparse import (
Remove0,
SamplingDot,
SparseFromDense,
SparseType,
SparseTensorType,
SquareDiagonal,
StructuredDot,
StructuredDotGradCSC,
......@@ -413,7 +413,7 @@ class TestSparseInferShape(utt.InferShapeTester):
pass
def test_getitem_scalar(self):
x = SparseType("csr", dtype=config.floatX)()
x = SparseTensorType("csr", dtype=config.floatX)()
self._compile_and_check(
[x],
[x[2, 2]],
......@@ -451,7 +451,7 @@ class TestSparseInferShape(utt.InferShapeTester):
)
def test_transpose(self):
x = SparseType("csr", dtype=config.floatX)()
x = SparseTensorType("csr", dtype=config.floatX)()
self._compile_and_check(
[x],
[x.T],
......@@ -460,7 +460,7 @@ class TestSparseInferShape(utt.InferShapeTester):
)
def test_neg(self):
x = SparseType("csr", dtype=config.floatX)()
x = SparseTensorType("csr", dtype=config.floatX)()
self._compile_and_check(
[x],
[-x],
......@@ -469,8 +469,8 @@ class TestSparseInferShape(utt.InferShapeTester):
)
def test_add_ss(self):
x = SparseType("csr", dtype=config.floatX)()
y = SparseType("csr", dtype=config.floatX)()
x = SparseTensorType("csr", dtype=config.floatX)()
y = SparseTensorType("csr", dtype=config.floatX)()
self._compile_and_check(
[x, y],
[x + y],
......@@ -482,7 +482,7 @@ class TestSparseInferShape(utt.InferShapeTester):
)
def test_add_sd(self):
x = SparseType("csr", dtype=config.floatX)()
x = SparseTensorType("csr", dtype=config.floatX)()
y = matrix()
self._compile_and_check(
[x, y],
......@@ -495,8 +495,8 @@ class TestSparseInferShape(utt.InferShapeTester):
)
def test_mul_ss(self):
x = SparseType("csr", dtype=config.floatX)()
y = SparseType("csr", dtype=config.floatX)()
x = SparseTensorType("csr", dtype=config.floatX)()
y = SparseTensorType("csr", dtype=config.floatX)()
self._compile_and_check(
[x, y],
[x * y],
......@@ -508,7 +508,7 @@ class TestSparseInferShape(utt.InferShapeTester):
)
def test_mul_sd(self):
x = SparseType("csr", dtype=config.floatX)()
x = SparseTensorType("csr", dtype=config.floatX)()
y = matrix()
self._compile_and_check(
[x, y],
......@@ -522,7 +522,7 @@ class TestSparseInferShape(utt.InferShapeTester):
)
def test_remove0(self):
x = SparseType("csr", dtype=config.floatX)()
x = SparseTensorType("csr", dtype=config.floatX)()
self._compile_and_check(
[x],
[Remove0()(x)],
......@@ -531,8 +531,8 @@ class TestSparseInferShape(utt.InferShapeTester):
)
def test_dot(self):
x = SparseType("csc", dtype=config.floatX)()
y = SparseType("csc", dtype=config.floatX)()
x = SparseTensorType("csc", dtype=config.floatX)()
y = SparseTensorType("csc", dtype=config.floatX)()
self._compile_and_check(
[x, y],
[Dot()(x, y)],
......@@ -545,12 +545,12 @@ class TestSparseInferShape(utt.InferShapeTester):
def test_dot_broadcast(self):
for x, y in [
(SparseType("csr", "float32")(), vector()[:, None]),
(SparseType("csr", "float32")(), vector()[None, :]),
(SparseType("csr", "float32")(), matrix()),
(vector()[:, None], SparseType("csr", "float32")()),
(vector()[None, :], SparseType("csr", "float32")()),
(matrix(), SparseType("csr", "float32")()),
(SparseTensorType("csr", "float32")(), vector()[:, None]),
(SparseTensorType("csr", "float32")(), vector()[None, :]),
(SparseTensorType("csr", "float32")(), matrix()),
(vector()[:, None], SparseTensorType("csr", "float32")()),
(vector()[None, :], SparseTensorType("csr", "float32")()),
(matrix(), SparseTensorType("csr", "float32")()),
]:
sparse_out = at.dot(x, y)
......@@ -562,8 +562,8 @@ class TestSparseInferShape(utt.InferShapeTester):
assert dense_out.broadcastable == sparse_out.broadcastable
def test_structured_dot(self):
x = SparseType("csc", dtype=config.floatX)()
y = SparseType("csc", dtype=config.floatX)()
x = SparseTensorType("csc", dtype=config.floatX)()
y = SparseTensorType("csc", dtype=config.floatX)()
self._compile_and_check(
[x, y],
[structured_dot(x, y)],
......@@ -583,8 +583,8 @@ class TestSparseInferShape(utt.InferShapeTester):
("csc", StructuredDotGradCSC),
("csr", StructuredDotGradCSR),
]:
x = SparseType(format, dtype=config.floatX)()
y = SparseType(format, dtype=config.floatX)()
x = SparseTensorType(format, dtype=config.floatX)()
y = SparseTensorType(format, dtype=config.floatX)()
grads = aesara.grad(dense_from_sparse(structured_dot(x, y)).sum(), [x, y])
self._compile_and_check(
[x, y],
......@@ -606,7 +606,7 @@ class TestSparseInferShape(utt.InferShapeTester):
)
def test_dense_from_sparse(self):
x = SparseType("csr", dtype=config.floatX)()
x = SparseTensorType("csr", dtype=config.floatX)()
self._compile_and_check(
[x],
[dense_from_sparse(x)],
......@@ -1130,7 +1130,7 @@ class TestCsmProperties:
for format in ("csc", "csr"):
for dtype in ("float32", "float64"):
x = SparseType(format, dtype=dtype)()
x = SparseTensorType(format, dtype=dtype)()
f = aesara.function([x], csm_properties(x))
spmat = sp_types[format](random_lil((4, 3), dtype, 3))
......@@ -1288,7 +1288,7 @@ class TestStructuredDot:
for dense_dtype in typenames:
for sparse_dtype in typenames:
correct_dtype = aesara.scalar.upcast(sparse_dtype, dense_dtype)
a = SparseType("csc", dtype=sparse_dtype)()
a = SparseTensorType("csc", dtype=sparse_dtype)()
b = matrix(dtype=dense_dtype)
d = structured_dot(a, b)
assert d.type.dtype == correct_dtype
......@@ -1375,8 +1375,8 @@ class TestStructuredDot:
for sparse_format_a in ["csc", "csr", "bsr"]:
for sparse_format_b in ["csc", "csr", "bsr"]:
a = SparseType(sparse_format_a, dtype=sparse_dtype)()
b = SparseType(sparse_format_b, dtype=sparse_dtype)()
a = SparseTensorType(sparse_format_a, dtype=sparse_dtype)()
b = SparseTensorType(sparse_format_b, dtype=sparse_dtype)()
d = at.dot(a, b)
f = aesara.function([a, b], Out(d, borrow=True))
for M, N, K, nnz in [
......@@ -1397,7 +1397,7 @@ class TestStructuredDot:
sparse_dtype = "float64"
dense_dtype = "float64"
a = SparseType("csc", dtype=sparse_dtype)()
a = SparseTensorType("csc", dtype=sparse_dtype)()
b = matrix(dtype=dense_dtype)
d = at.dot(a, b)
f = aesara.function([a, b], Out(d, borrow=True))
......@@ -1445,7 +1445,7 @@ class TestStructuredDot:
sparse_dtype = "float32"
dense_dtype = "float32"
a = SparseType("csr", dtype=sparse_dtype)()
a = SparseTensorType("csr", dtype=sparse_dtype)()
b = matrix(dtype=dense_dtype)
d = at.dot(a, b)
f = aesara.function([a, b], d)
......@@ -1567,8 +1567,8 @@ class TestDots(utt.InferShapeTester):
("csr", "csc"),
("csr", "csr"),
]:
x = sparse.SparseType(format=x_f, dtype=d1)("x")
y = sparse.SparseType(format=x_f, dtype=d2)("x")
x = sparse.SparseTensorType(format=x_f, dtype=d1)("x")
y = sparse.SparseTensorType(format=x_f, dtype=d2)("x")
def f_a(x, y):
return x * y
......@@ -1886,7 +1886,7 @@ class TestZerosLike:
def test_shape_i():
sparse_dtype = "float32"
a = SparseType("csr", dtype=sparse_dtype)()
a = SparseTensorType("csr", dtype=sparse_dtype)()
f = aesara.function([a], a.shape[1])
assert f(sp.sparse.csr_matrix(random_lil((100, 10), sparse_dtype, 3))) == 10
......@@ -1896,7 +1896,7 @@ def test_shape():
# does not actually create a dense tensor in the process.
sparse_dtype = "float32"
a = SparseType("csr", dtype=sparse_dtype)()
a = SparseTensorType("csr", dtype=sparse_dtype)()
f = aesara.function([a], a.shape)
assert np.all(
f(sp.sparse.csr_matrix(random_lil((100, 10), sparse_dtype, 3))) == (100, 10)
......@@ -1946,7 +1946,7 @@ def test_may_share_memory():
(b.transpose(), a, False),
]:
assert SparseType.may_share_memory(a_, b_) == rep
assert SparseTensorType.may_share_memory(a_, b_) == rep
def test_sparse_shared_memory():
......@@ -1955,8 +1955,8 @@ def test_sparse_shared_memory():
a = random_lil((3, 4), "float32", 3).tocsr()
m1 = random_lil((4, 4), "float32", 3).tocsr()
m2 = random_lil((4, 4), "float32", 3).tocsr()
x = SparseType("csr", dtype="float32")()
y = SparseType("csr", dtype="float32")()
x = SparseTensorType("csr", dtype="float32")()
y = SparseTensorType("csr", dtype="float32")()
sdot = sparse.structured_dot
z = sdot(x * 3, m1) + sdot(y * 2, m2)
......@@ -1966,7 +1966,7 @@ def test_sparse_shared_memory():
def f_(x, y, m1=m1, m2=m2):
return ((x * 3) * m1) + ((y * 2) * m2)
assert SparseType.may_share_memory(a, a) # This is trivial
assert SparseTensorType.may_share_memory(a, a) # This is trivial
result = f(a, a)
result_ = f_(a, a)
assert (result_.todense() == result.todense()).all()
......@@ -3192,7 +3192,7 @@ class TestMulSV:
for format in ("csr", "csc"):
for dtype in ("float32", "float64"):
x = sparse.SparseType(format, dtype=dtype)()
x = sparse.SparseTensorType(format, dtype=dtype)()
y = vector(dtype=dtype)
f = aesara.function([x, y], mul_s_v(x, y))
......@@ -3220,7 +3220,7 @@ class TestStructuredAddSV:
for format in ("csr", "csc"):
for dtype in ("float32", "float64"):
x = sparse.SparseType(format, dtype=dtype)()
x = sparse.SparseTensorType(format, dtype=dtype)()
y = vector(dtype=dtype)
f = aesara.function([x, y], structured_add_s_v(x, y))
......
import pytest
from aesara.sparse import matrix as sp_matrix
from aesara.sparse.type import SparseType
from aesara.sparse.type import SparseTensorType
from aesara.tensor import dmatrix
def test_clone():
st = SparseType("csr", "float64")
st = SparseTensorType("csr", "float64")
assert st == st.clone()
......
......@@ -7,7 +7,7 @@ from scipy.sparse.csr import csr_matrix
import aesara
import aesara.sparse as sparse
import aesara.tensor as at
from aesara.sparse.type import SparseType
from aesara.sparse.type import SparseTensorType
from aesara.tensor.type import DenseTensorType
......@@ -16,7 +16,7 @@ class TestSparseVariable:
"method, exp_type, cm",
[
("__abs__", DenseTensorType, None),
("__neg__", SparseType, ExitStack()),
("__neg__", SparseTensorType, ExitStack()),
("__ceil__", DenseTensorType, None),
("__floor__", DenseTensorType, None),
("__trunc__", DenseTensorType, None),
......@@ -65,7 +65,7 @@ class TestSparseVariable:
("conj", DenseTensorType, None),
("round", DenseTensorType, None),
("trace", DenseTensorType, None),
("zeros_like", SparseType, ExitStack()),
("zeros_like", SparseTensorType, ExitStack()),
("ones_like", DenseTensorType, ExitStack()),
("cumsum", DenseTensorType, None),
("cumprod", DenseTensorType, None),
......@@ -83,7 +83,7 @@ class TestSparseVariable:
if cm is None:
cm = pytest.warns(UserWarning, match=".*converted to dense.*")
if exp_type == SparseType:
if exp_type == SparseTensorType:
exp_res_type = csr_matrix
else:
exp_res_type = np.ndarray
......@@ -112,16 +112,16 @@ class TestSparseVariable:
@pytest.mark.parametrize(
"method, exp_type",
[
("__lt__", SparseType),
("__le__", SparseType),
("__gt__", SparseType),
("__ge__", SparseType),
("__lt__", SparseTensorType),
("__le__", SparseTensorType),
("__gt__", SparseTensorType),
("__ge__", SparseTensorType),
("__and__", DenseTensorType),
("__or__", DenseTensorType),
("__xor__", DenseTensorType),
("__add__", SparseType),
("__sub__", SparseType),
("__mul__", SparseType),
("__add__", SparseTensorType),
("__sub__", SparseTensorType),
("__mul__", SparseTensorType),
("__pow__", DenseTensorType),
("__mod__", DenseTensorType),
("__divmod__", DenseTensorType),
......@@ -137,7 +137,7 @@ class TestSparseVariable:
method_to_call = getattr(x, method)
if exp_type == SparseType:
if exp_type == SparseTensorType:
exp_res_type = csr_matrix
cm = ExitStack()
else:
......@@ -198,7 +198,7 @@ class TestSparseVariable:
x = sparse.csr_from_dense(x)
z = x[:, :2]
assert isinstance(z.type, SparseType)
assert isinstance(z.type, SparseTensorType)
f = aesara.function([x], z)
exp_res = f([[1.1, 0.0, 2.0], [-1.0, 0.0, 0.0]])
......@@ -211,7 +211,7 @@ class TestSparseVariable:
y = sparse.csr_from_dense(y)
z = x.__dot__(y)
assert isinstance(z.type, SparseType)
assert isinstance(z.type, SparseTensorType)
f = aesara.function([x, y], z)
exp_res = f(
......
......@@ -451,7 +451,7 @@ class TestIndex:
def test_sparse(self):
sp = pytest.importorskip("scipy")
mySymbolicSparseList = TypedListType(
sparse.SparseType("csr", aesara.config.floatX)
sparse.SparseTensorType("csr", aesara.config.floatX)
)()
mySymbolicSparse = sparse.csr_matrix()
......@@ -519,7 +519,7 @@ class TestCount:
def test_sparse(self):
sp = pytest.importorskip("scipy")
mySymbolicSparseList = TypedListType(
sparse.SparseType("csr", aesara.config.floatX)
sparse.SparseTensorType("csr", aesara.config.floatX)
)()
mySymbolicSparse = sparse.csr_matrix()
......