提交 470b9d60 authored 作者: Brandon T. Willard's avatar Brandon T. Willard 提交者: Brandon T. Willard

Rename Scalar to ScalarType

上级 9136eb66
......@@ -717,7 +717,7 @@ def ensure_dt(val, default, name, dtype):
val = constant(val)
if hasattr(val, "ndim") and val.ndim == 0:
val = as_scalar(val)
if not isinstance(val.type, aesara.scalar.Scalar):
if not isinstance(val.type, aesara.scalar.ScalarType):
raise TypeError(f"{name}: expected a scalar value")
if val.type.dtype != dtype:
val = val.astype(dtype)
......
......@@ -8,7 +8,7 @@ from aesara.graph.basic import Apply
from aesara.graph.op import _NoPythonOp
from aesara.graph.utils import MethodNotDefined
from aesara.link.c.interface import HideC
from aesara.scalar import Composite, Scalar
from aesara.scalar import Composite, ScalarType
from aesara.scalar.basic import complex_types, upgrade_to_float_no_complex
from aesara.scalar.math import Erfcinv, Erfinv
from aesara.tensor.elemwise import CAReduceDtype, DimShuffle, Elemwise
......@@ -1083,7 +1083,7 @@ class GpuCAReduceCuda(GpuKernelBase, HideC, CAReduceDtype, _NoPythonOp):
if self.pre_scalar_op: # TODO: multiple dtypes
# dtype = node.inputs[0].dtype
dummy_var = aes.Scalar(dtype=dtype)()
dummy_var = aes.ScalarType(dtype=dtype)()
dummy_node = self.pre_scalar_op.make_node(dummy_var)
......@@ -1128,8 +1128,8 @@ class GpuCAReduceCuda(GpuKernelBase, HideC, CAReduceDtype, _NoPythonOp):
in_dtype = x.dtype
out_dtype = node.outputs[0].dtype
dummy_left = Scalar(dtype=out_dtype)()
dummy_right = Scalar(dtype=in_dtype)()
dummy_left = ScalarType(dtype=out_dtype)()
dummy_right = ScalarType(dtype=in_dtype)()
dummy_node = self.scalar_op.make_node(dummy_left, dummy_right)
......@@ -1955,12 +1955,12 @@ class GpuCAReduceCuda(GpuKernelBase, HideC, CAReduceDtype, _NoPythonOp):
# now we insert versions for the ops on which we depend...
scalar_node = Apply(
self.scalar_op,
[Scalar(dtype=input.type.dtype)() for input in node.inputs],
[Scalar(dtype=output.type.dtype)() for output in node.outputs],
[ScalarType(dtype=input.type.dtype)() for input in node.inputs],
[ScalarType(dtype=output.type.dtype)() for output in node.outputs],
)
version.extend(self.scalar_op.c_code_cache_version_apply(scalar_node))
for i in node.inputs + node.outputs:
version.extend(Scalar(dtype=i.type.dtype).c_code_cache_version())
version.extend(ScalarType(dtype=i.type.dtype).c_code_cache_version())
version.extend(self.kernel_version(node))
if all(version):
return tuple(version)
......
......@@ -161,7 +161,7 @@ from aesara.ifelse import IfElse
from aesara.link.c.basic import CLinker
from aesara.misc.ordered_set import OrderedSet
from aesara.raise_op import Assert
from aesara.scalar.basic import Cast, Pow, Scalar, log, neg, true_div
from aesara.scalar.basic import Cast, Pow, ScalarType, log, neg, true_div
from aesara.scalar.math import Erfcinv, Erfinv
from aesara.scan.op import Scan
from aesara.scan.opt import ScanInplaceOptimizer
......@@ -811,7 +811,7 @@ def local_gpua_elemwise(fgraph, op, context_name, inputs, outputs):
new_inputs = []
for inp in inputs:
if inp.dtype != out_dtype:
gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
gpu_cast_op = GpuElemwise(Cast(ScalarType(out_dtype)))
new_inputs.append(gpu_cast_op(as_gpuarray_variable(inp, context_name)))
else:
new_inputs.append(as_gpuarray_variable(inp, context_name))
......@@ -1387,7 +1387,7 @@ def local_gpua_gemmbatch(fgraph, op, context_name, inputs, outputs):
# In case of mismatched dtypes, we also have to upcast
out_dtype = outputs[0].dtype
if a.dtype != out_dtype or b.dtype != out_dtype:
gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
gpu_cast_op = GpuElemwise(Cast(ScalarType(out_dtype)))
if a.dtype != out_dtype:
a = gpu_cast_op(a)
if b.dtype != out_dtype:
......
......@@ -649,9 +649,9 @@ class CLinker(Linker):
for r in self.variables
if isinstance(r, Constant) and r not in self.inputs
)
# C type constants (aesara.scalar.Scalar). They don't request an object
# C type constants (aesara.scalar.ScalarType). They don't request an object
self.consts = []
# Move c type from orphans (aesara.scalar.Scalar) to self.consts
# Move c type from orphans (aesara.scalar.ScalarType) to self.consts
for variable in self.orphans:
if (
isinstance(variable, Constant)
......
......@@ -7,7 +7,7 @@ used to create a Params object that is compatible with the ParamsType defined.
The Params object will be available in both Python code (as a standard Python object) and C code
(as a specific struct with parameters as struct fields). To be fully-available in C code, Aesara
types wrapped into a ParamsType must provide a C interface (e.g. TensorType, Scalar, GpuArrayType,
types wrapped into a ParamsType must provide a C interface (e.g. TensorType, ScalarType, GpuArrayType,
or your own type. See :ref:`extending_op_params` for more details).
Example of usage
......@@ -23,13 +23,13 @@ Importation:
# If you want to use a tensor and a scalar as parameters,
# you should import required Aesara types.
from aesara.tensor.type import TensorType
from aesara.scalar import Scalar
from aesara.scalar import ScalarType
In your Op sub-class:
.. code-block:: python
params_type = ParamsType(attr1=TensorType('int32', (False, False)), attr2=Scalar('float64'))
params_type = ParamsType(attr1=TensorType('int32', (False, False)), attr2=ScalarType('float64'))
If your op contains attributes ``attr1`` **and** ``attr2``, the default ``op.get_params()``
implementation will automatically try to look for it and generate an appropriate Params object.
......@@ -236,11 +236,11 @@ class Params(dict):
.. code-block:: python
from aesara.link.c.params_type import ParamsType, Params
from aesara.scalar import Scalar
from aesara.scalar import ScalarType
# You must create a ParamsType first:
params_type = ParamsType(attr1=Scalar('int32'),
key2=Scalar('float32'),
field3=Scalar('int64'))
params_type = ParamsType(attr1=ScalarType('int32'),
key2=ScalarType('float32'),
field3=ScalarType('int64'))
# Then you can create a Params object with
# the params type defined above and values for attributes.
params = Params(params_type, attr1=1, key2=2.0, field3=3)
......@@ -498,9 +498,9 @@ class ParamsType(CType):
from aesara.graph.params_type import ParamsType
from aesara.link.c.type import EnumType, EnumList
from aesara.scalar import Scalar
from aesara.scalar import ScalarType
wrapper = ParamsType(scalar=Scalar('int32'),
wrapper = ParamsType(scalar=ScalarType('int32'),
letters=EnumType(A=1, B=2, C=3),
digits=EnumList('ZERO', 'ONE', 'TWO'))
print(wrapper.get_enum('C')) # 3
......@@ -527,9 +527,9 @@ class ParamsType(CType):
from aesara.graph.params_type import ParamsType
from aesara.link.c.type import EnumType, EnumList
from aesara.scalar import Scalar
from aesara.scalar import ScalarType
wrapper = ParamsType(scalar=Scalar('int32'),
wrapper = ParamsType(scalar=ScalarType('int32'),
letters=EnumType(A=(1, 'alpha'), B=(2, 'beta'), C=3),
digits=EnumList(('ZERO', 'nothing'), ('ONE', 'unit'), ('TWO', 'couple')))
print(wrapper.get_enum('C')) # 3
......@@ -574,14 +574,14 @@ class ParamsType(CType):
import numpy
from aesara.graph.params_type import ParamsType
from aesara.tensor.type import dmatrix
from aesara.scalar import Scalar
from aesara.scalar import ScalarType
class MyObject:
def __init__(self):
self.a = 10
self.b = numpy.asarray([[1, 2, 3], [4, 5, 6]])
params_type = ParamsType(a=Scalar('int32'), b=dmatrix, c=Scalar('bool'))
params_type = ParamsType(a=ScalarType('int32'), b=dmatrix, c=ScalarType('bool'))
o = MyObject()
value_for_c = False
......
......@@ -25,7 +25,7 @@ from aesara.link.utils import (
fgraph_to_python,
unique_name_generator,
)
from aesara.scalar.basic import Scalar
from aesara.scalar.basic import ScalarType
from aesara.scalar.math import Softplus
from aesara.tensor.blas import BatchedDot
from aesara.tensor.math import Dot
......@@ -86,7 +86,7 @@ def get_numba_type(
):
return numba_dtype
return numba.types.Array(numba_dtype, aesara_type.ndim, layout)
elif isinstance(aesara_type, Scalar):
elif isinstance(aesara_type, ScalarType):
dtype = np.dtype(aesara_type.dtype)
numba_dtype = numba.from_dtype(dtype)
return numba_dtype
......
......@@ -8,7 +8,7 @@ import aesara.tensor as at
from aesara.configdefaults import config
from aesara.graph.basic import Apply
from aesara.link.c.op import COp
from aesara.scalar import Scalar, as_scalar
from aesara.scalar import ScalarType, as_scalar
from aesara.tensor.type import discrete_dtypes
......@@ -72,7 +72,7 @@ class MultinomialFromUniform(COp):
if self.odtype == "auto":
t = f"PyArray_TYPE({pvals})"
else:
t = Scalar(self.odtype).dtype_specs()[1]
t = ScalarType(self.odtype).dtype_specs()[1]
if t.startswith("aesara_complex"):
t = t.replace("aesara_complex", "NPY_COMPLEX")
else:
......@@ -264,7 +264,7 @@ class ChoiceFromUniform(MultinomialFromUniform):
if self.odtype == "auto":
t = "NPY_INT64"
else:
t = Scalar(self.odtype).dtype_specs()[1]
t = ScalarType(self.odtype).dtype_specs()[1]
if t.startswith("aesara_complex"):
t = t.replace("aesara_complex", "NPY_COMPLEX")
else:
......
......@@ -102,7 +102,7 @@ def upcast(dtype, *dtypes):
def as_common_dtype(*vars):
"""
For for aesara.scalar.Scalar and TensorVariable.
For aesara.scalar.ScalarType and TensorVariable.
"""
dtype = upcast(*[v.dtype for v in vars])
return (v.astype(dtype) for v in vars)
......@@ -275,7 +275,7 @@ def convert(x, dtype=None):
return x_
class Scalar(CType, HasDataType):
class ScalarType(CType, HasDataType):
"""
Internal class, should not be used by clients.
......@@ -356,7 +356,7 @@ class Scalar(CType, HasDataType):
def c_headers(self, c_compiler=None, **kwargs):
l = ["<math.h>"]
# These includes are needed by Scalar and TensorType,
# These includes are needed by ScalarType and TensorType,
# we declare them here and they will be re-used by TensorType
l.append("<numpy/arrayobject.h>")
l.append("<numpy/arrayscalars.h>")
......@@ -428,7 +428,7 @@ class Scalar(CType, HasDataType):
return str(self.dtype)
def __repr__(self):
return f"Scalar({self.dtype})"
return f"ScalarType({self.dtype})"
def c_literal(self, data):
if "complex" in self.dtype:
......@@ -677,15 +677,19 @@ class Scalar(CType, HasDataType):
return shape_info
def get_scalar_type(dtype) -> Scalar:
# Deprecated alias for backward compatibility
Scalar = ScalarType
def get_scalar_type(dtype) -> ScalarType:
"""
Return a Scalar(dtype) object.
Return a ScalarType(dtype) object.
This caches objects to save allocation and run time.
"""
if dtype not in get_scalar_type.cache:
get_scalar_type.cache[dtype] = Scalar(dtype=dtype)
get_scalar_type.cache[dtype] = ScalarType(dtype=dtype)
return get_scalar_type.cache[dtype]
......@@ -694,7 +698,7 @@ get_scalar_type.cache = {}
# Register C code for ViewOp on Scalars.
aesara.compile.register_view_op_c_code(
Scalar,
ScalarType,
"""
%(oname)s = %(iname)s;
""",
......@@ -702,22 +706,22 @@ aesara.compile.register_view_op_c_code(
)
bool: Scalar = get_scalar_type("bool")
int8: Scalar = get_scalar_type("int8")
int16: Scalar = get_scalar_type("int16")
int32: Scalar = get_scalar_type("int32")
int64: Scalar = get_scalar_type("int64")
uint8: Scalar = get_scalar_type("uint8")
uint16: Scalar = get_scalar_type("uint16")
uint32: Scalar = get_scalar_type("uint32")
uint64: Scalar = get_scalar_type("uint64")
float16: Scalar = get_scalar_type("float16")
float32: Scalar = get_scalar_type("float32")
float64: Scalar = get_scalar_type("float64")
complex64: Scalar = get_scalar_type("complex64")
complex128: Scalar = get_scalar_type("complex128")
_ScalarTypes: TypeAlias = Tuple[Scalar, ...]
bool: ScalarType = get_scalar_type("bool")
int8: ScalarType = get_scalar_type("int8")
int16: ScalarType = get_scalar_type("int16")
int32: ScalarType = get_scalar_type("int32")
int64: ScalarType = get_scalar_type("int64")
uint8: ScalarType = get_scalar_type("uint8")
uint16: ScalarType = get_scalar_type("uint16")
uint32: ScalarType = get_scalar_type("uint32")
uint64: ScalarType = get_scalar_type("uint64")
float16: ScalarType = get_scalar_type("float16")
float32: ScalarType = get_scalar_type("float32")
float64: ScalarType = get_scalar_type("float64")
complex64: ScalarType = get_scalar_type("complex64")
complex128: ScalarType = get_scalar_type("complex128")
_ScalarTypes: TypeAlias = Tuple[ScalarType, ...]
int_types: _ScalarTypes = (int8, int16, int32, int64)
uint_types: _ScalarTypes = (uint8, uint16, uint32, uint64)
float_types: _ScalarTypes = (float16, float32, float64)
......@@ -732,7 +736,7 @@ discrete_dtypes = tuple(t.dtype for t in discrete_types)
class _scalar_py_operators:
# So that we can simplify checking code when we have a mixture of Scalar
# So that we can simplify checking code when we have a mixture of ScalarType
# variables and Tensor variables
ndim = 0
......@@ -844,7 +848,7 @@ class ScalarVariable(_scalar_py_operators, Variable):
pass
Scalar.variable_type = ScalarVariable
ScalarType.variable_type = ScalarVariable
class ScalarConstant(ScalarVariable, Constant):
......@@ -852,8 +856,8 @@ class ScalarConstant(ScalarVariable, Constant):
Constant.__init__(self, *args, **kwargs)
# Register ScalarConstant as the type of Constant corresponding to Scalar
Scalar.constant_type = ScalarConstant
# Register ScalarConstant as the type of Constant corresponding to ScalarType
ScalarType.constant_type = ScalarConstant
def constant(x, name=None, dtype=None) -> ScalarConstant:
......@@ -876,16 +880,16 @@ def as_scalar(x, name=None) -> ScalarConstant:
else:
x = x.outputs[0]
if isinstance(x, Variable):
if isinstance(x.type, Scalar):
if isinstance(x.type, ScalarType):
return x
elif isinstance(x.type, TensorType) and x.ndim == 0:
return scalar_from_tensor(x)
else:
raise TypeError("Variable type field must be a Scalar.", x, x.type)
raise TypeError("Variable type field must be a ScalarType.", x, x.type)
try:
return constant(x)
except TypeError:
raise TypeError(f"Cannot convert {x} to Scalar", type(x))
raise TypeError(f"Cannot convert {x} to ScalarType", type(x))
# Easy constructors
......@@ -898,7 +902,7 @@ complexs128 = apply_across_args(complex128)
def upcast_out(*types):
dtype = Scalar.upcast(*types)
dtype = ScalarType.upcast(*types)
return (get_scalar_type(dtype),)
......@@ -932,7 +936,9 @@ def upgrade_to_float(*types):
uint32: float64,
uint64: float64,
}
return (get_scalar_type(Scalar.upcast(*[conv.get(type, type) for type in types])),)
return (
get_scalar_type(ScalarType.upcast(*[conv.get(type, type) for type in types])),
)
def upgrade_to_float64(*types):
......@@ -962,7 +968,7 @@ def same_out_min8(type):
def upcast_out_no_complex(*types):
if any(type in complex_types for type in types):
raise TypeError("complex type are not supported")
return (get_scalar_type(dtype=Scalar.upcast(*types)),)
return (get_scalar_type(dtype=ScalarType.upcast(*types)),)
def same_out_float_only(type):
......@@ -2452,7 +2458,7 @@ identity = Identity(same_out, name="identity")
# CASTING OPERATIONS
class Cast(UnaryScalarOp):
def __init__(self, o_type, name=None):
if not isinstance(o_type, Scalar):
if not isinstance(o_type, ScalarType):
raise TypeError(o_type)
super().__init__(specific_out(o_type), name=name)
self.o_type = o_type
......@@ -2539,7 +2545,7 @@ _cast_mapping = {
def cast(x, dtype):
"""
Symbolically cast `x` to a Scalar of given `dtype`.
Symbolically cast `x` to a ScalarType of given `dtype`.
"""
if dtype == "floatX":
......@@ -3926,7 +3932,7 @@ class Complex(BinaryScalarOp):
if y in complex_types:
raise TypeError(y)
up = Scalar.upcast(x, y)
up = ScalarType.upcast(x, y)
if up in ("float64", "int64", "uint64", "int32", "uint32"):
return [complex128]
else:
......
......@@ -19,11 +19,7 @@ way (as scan does) to create a shared variable of this kind.
import numpy as np
from aesara.compile import SharedVariable
from .basic import Scalar, _scalar_py_operators
__docformat__ = "restructuredtext en"
from aesara.scalar.basic import ScalarType, _scalar_py_operators
class ScalarSharedVariable(_scalar_py_operators, SharedVariable):
......@@ -54,7 +50,7 @@ def shared(value, name=None, strict=False, allow_downcast=None):
dtype = str(dtype)
value = getattr(np, dtype)(value)
scalar_type = Scalar(dtype=dtype)
scalar_type = ScalarType(dtype=dtype)
rval = ScalarSharedVariable(
type=scalar_type,
value=value,
......
......@@ -62,7 +62,7 @@ def safe_new(
return nwx
else:
return x
# Note, `as_tensor_variable` will convert the `Scalar` into a
# Note, `as_tensor_variable` will convert the `ScalarType` into a
# `TensorScalar` that will require a `ScalarFromTensor` `Op`, making the
# push-out optimization fail
elif isinstance(x, aes.ScalarVariable):
......
......@@ -12,7 +12,7 @@ def as_tensor_variable(
) -> "TensorVariable":
"""Convert `x` into an equivalent `TensorVariable`.
This function can be used to turn ndarrays, numbers, `Scalar` instances,
This function can be used to turn ndarrays, numbers, `ScalarType` instances,
`Apply` instances and `TensorVariable` instances into valid input list
elements.
......
......@@ -537,8 +537,8 @@ class TensorFromScalar(Op):
__props__ = ()
def make_node(self, s):
if not isinstance(s.type, aes.Scalar):
raise TypeError("Input must be a `Scalar` `Type`")
if not isinstance(s.type, aes.ScalarType):
raise TypeError("Input must be a `ScalarType` `Type`")
return Apply(self, [s], [tensor(dtype=s.type.dtype, shape=())])
......
......@@ -16,7 +16,7 @@ from aesara.misc.frozendict import frozendict
from aesara.misc.safe_asarray import _asarray
from aesara.printing import FunctionPrinter, Printer, pprint
from aesara.scalar import get_scalar_type
from aesara.scalar.basic import Scalar
from aesara.scalar.basic import ScalarType
from aesara.scalar.basic import bool as scalar_bool
from aesara.scalar.basic import identity as scalar_identity
from aesara.scalar.basic import transfer_type, upcast
......@@ -815,7 +815,7 @@ second dimension
# there must be some input that is not broadcastable in
# dimension 'dim'
for ishp, i in zip(i_shapes, node.inputs):
if isinstance(i.type, Scalar):
if isinstance(i.type, ScalarType):
continue # we skip scalar
if not i.type.broadcastable[dim]:
# input i is not broadcastable in position dim
......
......@@ -1811,7 +1811,7 @@ def bilinear_kernel_2D(ratio, normalize=True):
Parameters
----------
ratio: int or Constant/Scalar Aesara tensor of int* dtype
ratio: int or Constant/ScalarType Aesara tensor of int* dtype
the ratio by which an image will be upsampled by the returned filter
in the 2D space.
......@@ -1847,7 +1847,7 @@ def bilinear_kernel_1D(ratio, normalize=True):
Parameters
----------
ratio: int or Constant/Scalar Aesara tensor of int* dtype
ratio: int or Constant/ScalarType Aesara tensor of int* dtype
the ratio by which an image will be upsampled by the returned filter
in the 2D space.
......@@ -1998,7 +1998,7 @@ def bilinear_upsampling(
input: symbolic 4D tensor
mini-batch of feature map stacks, of shape (batch size,
input channels, input rows, input columns) that will be upsampled.
ratio: `int or Constant or Scalar Tensor of int* dtype`
ratio: `int or Constant or ScalarType Tensor of int* dtype`
the ratio by which the input is upsampled in the 2D space (row and
col size).
frac_ratio: None, tuple of int or tuple of tuples of int
......
......@@ -30,7 +30,6 @@ from aesara.link.c.op import COp
from aesara.raise_op import Assert
from aesara.scalar import UnaryScalarOp
from aesara.tensor import basic as at
from aesara.tensor import extra_ops, math_opt
from aesara.tensor.basic import ARange, as_tensor_variable
from aesara.tensor.basic_opt import (
register_canonicalize,
......@@ -39,6 +38,7 @@ from aesara.tensor.basic_opt import (
)
from aesara.tensor.elemwise import DimShuffle, Elemwise
from aesara.tensor.exceptions import NotScalarConstantError
from aesara.tensor.extra_ops import Unique
from aesara.tensor.math import (
MaxAndArgmax,
Sum,
......@@ -57,6 +57,7 @@ from aesara.tensor.math import (
)
from aesara.tensor.math import sum as at_sum
from aesara.tensor.math import tanh, tensordot, true_div
from aesara.tensor.math_opt import local_mul_canonizer
from aesara.tensor.nnet.blocksparse import sparse_block_dot
from aesara.tensor.shape import Shape, shape_padleft
from aesara.tensor.subtensor import AdvancedIncSubtensor, AdvancedSubtensor
......@@ -1291,7 +1292,7 @@ def softmax_simplifier(numerators, denominators):
return numerators, denominators
math_opt.local_mul_canonizer.add_simplifier(softmax_simplifier, "softmax_simplifier")
local_mul_canonizer.add_simplifier(softmax_simplifier, "softmax_simplifier")
class CrossentropySoftmaxArgmax1HotWithBias(COp):
......@@ -2974,7 +2975,7 @@ def confusion_matrix(actual, pred):
if pred.ndim != 1:
raise ValueError("pred must be 1-d tensor variable")
order = extra_ops.Unique(False, False, False)(at.concatenate([actual, pred]))
order = Unique(False, False, False)(at.concatenate([actual, pred]))
colA = actual.dimshuffle(0, "x")
colP = pred.dimshuffle(0, "x")
......
......@@ -21,11 +21,11 @@ class BNComposite(Composite):
@config.change_flags(compute_test_value="off")
def __init__(self, dtype):
self.dtype = dtype
x = aesara.scalar.Scalar(dtype=dtype).make_variable()
mean = aesara.scalar.Scalar(dtype=dtype).make_variable()
std = aesara.scalar.Scalar(dtype=dtype).make_variable()
gamma = aesara.scalar.Scalar(dtype=dtype).make_variable()
beta = aesara.scalar.Scalar(dtype=dtype).make_variable()
x = aesara.scalar.ScalarType(dtype=dtype).make_variable()
mean = aesara.scalar.ScalarType(dtype=dtype).make_variable()
std = aesara.scalar.ScalarType(dtype=dtype).make_variable()
gamma = aesara.scalar.ScalarType(dtype=dtype).make_variable()
beta = aesara.scalar.ScalarType(dtype=dtype).make_variable()
o = add(mul(true_div(sub(x, mean), std), gamma), beta)
inputs = [x, mean, std, gamma, beta]
outputs = [o]
......
......@@ -150,7 +150,7 @@ def hard_sigmoid(x):
"""
# Use the same dtype as determined by "upgrade_to_float",
# and perform computation in that dtype.
out_dtype = aes.upgrade_to_float(aes.Scalar(dtype=x.dtype))[0].dtype
out_dtype = aes.upgrade_to_float(aes.ScalarType(dtype=x.dtype))[0].dtype
slope = constant(0.2, dtype=out_dtype)
shift = constant(0.5, dtype=out_dtype)
x = (x * slope) + shift
......
......@@ -624,7 +624,7 @@ def get_constant_idx(
Example usage where `v` and `a` are appropriately typed Aesara variables :
>>> b = a[v, 1:3]
>>> b.owner.op.idx_list
(Scalar(int64), slice(Scalar(int64), Scalar(int64), None))
(ScalarType(int64), slice(ScalarType(int64), ScalarType(int64), None))
>>> get_constant_idx(b.owner.op.idx_list, b.owner.inputs, allow_partial=True)
[v, slice(1, 3, None)]
>>> get_constant_idx(b.owner.op.idx_list, b.owner.inputs)
......@@ -656,7 +656,7 @@ def get_constant_idx(
def as_nontensor_scalar(a: Variable) -> aes.ScalarVariable:
"""Convert a value to a `Scalar` variable."""
"""Convert a value to a `ScalarType` variable."""
# Since aes.as_scalar does not know about tensor types (it would
# create a circular import) , this method converts either a
# TensorVariable or a ScalarVariable to a scalar.
......@@ -1196,7 +1196,7 @@ class SubtensorPrinter(Printer):
sidxs = []
getattr(pstate, "precedence", None)
for entry in idxs:
if isinstance(entry, aes.Scalar):
if isinstance(entry, aes.ScalarType):
with set_precedence(pstate):
sidxs.append(pstate.pprinter.process(inputs.pop()))
elif isinstance(entry, slice):
......
......@@ -562,8 +562,8 @@ def local_subtensor_remove_broadcastable_index(fgraph, node):
remove_dim = []
node_inputs_idx = 1
for dim, elem in enumerate(idx):
if isinstance(elem, (aes.Scalar)):
# The idx is a Scalar, ie a Type. This means the actual index
if isinstance(elem, (aes.ScalarType)):
# The idx is a ScalarType, ie a Type. This means the actual index
# is contained in node.inputs[1]
dim_index = node.inputs[node_inputs_idx]
if isinstance(dim_index, aes.ScalarConstant):
......@@ -741,7 +741,7 @@ def local_subtensor_make_vector(fgraph, node):
if isinstance(node.op, Subtensor):
(idx,) = node.op.idx_list
if isinstance(idx, (aes.Scalar, TensorType)):
if isinstance(idx, (aes.ScalarType, TensorType)):
old_idx, idx = idx, node.inputs[1]
assert idx.type.is_super(old_idx)
elif isinstance(node.op, AdvancedSubtensor1):
......
......@@ -130,7 +130,7 @@ prefix. The complete list can be found in the documentation for
for the variables handled by this Aesara type. For example,
for a matrix of 32-bit signed NumPy integers, it should return
``"npy_int32"``. If C type may change from an instance to another
(e.g. ``Scalar('int32')`` vs ``Scalar('int64')``), consider
(e.g. ``ScalarType('int32')`` vs ``ScalarType('int64')``), consider
implementing this method. If C type is fixed across instances,
this method may be useless (as you already know the C type
when you work with the C code).
......
......@@ -222,7 +222,7 @@ along with pointers to the relevant documentation.
type). Variables of this Aesara type are represented in C as objects
of class `PyListObject <https://docs.python.org/2/c-api/list.html>`_.
* :ref:`Scalar <libdoc_scalar>` : Aesara type that represents a C
* :ref:`ScalarType <libdoc_scalar>` : Aesara type that represents a C
primitive type. The C type associated with this Aesara type is the
represented C primitive itself.
......
......@@ -64,17 +64,17 @@ Example:
class Add(Op):
#...
def make_node(self, x, y):
# note 1: constant, int64 and Scalar are defined in aesara.scalar
# note 1: constant, int64 and ScalarType are defined in aesara.scalar
# note 2: constant(x) is equivalent to Constant(type = int64, data = x)
# note 3: the call int64() is equivalent to Variable(type = int64) or Variable(type = Scalar(dtype = 'int64'))
# note 3: the call int64() is equivalent to Variable(type = int64) or Variable(type = ScalarType(dtype = 'int64'))
if isinstance(x, int):
x = constant(x)
elif not isinstance(x, Variable) or not x.type == int64:
raise TypeError("expected an int64 Scalar")
raise TypeError("expected an int64 ScalarType")
if isinstance(y, int):
y = constant(y)
elif not isinstance(y, Variable) or not x.type == int64:
raise TypeError("expected an int64 Scalar")
raise TypeError("expected an int64 ScalarType")
inputs = [x, y]
outputs = [int64()]
node = Apply(op = self, inputs = inputs, outputs = outputs)
......
......@@ -258,10 +258,10 @@ def test_unify_Type():
s = unify(t1, etuple(TensorType, "float64", (1, None)))
assert s == {}
from aesara.scalar.basic import Scalar
from aesara.scalar.basic import ScalarType
st1 = Scalar(np.float64)
st2 = Scalar(np.float64)
st1 = ScalarType(np.float64)
st2 = ScalarType(np.float64)
s = unify(st1, st2)
assert s == {}
......
......@@ -28,7 +28,7 @@ int APPLY_SPECIFIC(quadratic_function)(PyArrayObject* tensor, DTYPE_INPUT_0 a, D
int APPLY_SPECIFIC(compute_quadratic)(PyArrayObject* X, PyArrayObject** Y, PARAMS_TYPE* coeff) {
DTYPE_INPUT_0 a = (DTYPE_INPUT_0) (*(DTYPE_PARAM_a*) PyArray_GETPTR1(coeff->a, 0)); // 0-D TensorType.
DTYPE_INPUT_0 b = coeff->b; // Scalar.
DTYPE_INPUT_0 b = coeff->b; // ScalarType.
DTYPE_INPUT_0 c = (DTYPE_INPUT_0) PyFloat_AsDouble(coeff->c); // Generic.
Py_XDECREF(*Y);
*Y = (PyArrayObject*)PyArray_EMPTY(PyArray_NDIM(X), PyArray_DIMS(X), TYPENUM_INPUT_0, PyArray_IS_F_CONTIGUOUS(X));
......
......@@ -7,13 +7,13 @@ from aesara.graph.basic import Apply
from aesara.link.c.op import COp, ExternalCOp
from aesara.link.c.params_type import Params, ParamsType
from aesara.link.c.type import EnumList, Generic
from aesara.scalar import Scalar
from aesara.scalar import ScalarType
from aesara.tensor.type import TensorType, matrix
from tests import unittest_tools as utt
tensor_type_0d = TensorType("float64", tuple())
scalar_type = Scalar("float64")
scalar_type = ScalarType("float64")
generic_type = Generic()
......@@ -77,7 +77,7 @@ class QuadraticOpFunc(COp):
def c_code(self, node, name, inputs, outputs, sub):
return """
%(float_type)s a = (%(float_type)s) (*(npy_float64*) PyArray_GETPTR1(%(coeff)s->a, 0)); // 0-D TensorType.
%(float_type)s b = %(coeff)s->b; // Scalar.
%(float_type)s b = %(coeff)s->b; // ScalarType.
%(float_type)s c = (%(float_type)s) PyFloat_AsDouble(%(coeff)s->c); // Generic.
Py_XDECREF(%(Y)s);
%(Y)s = (PyArrayObject*)PyArray_EMPTY(PyArray_NDIM(%(X)s), PyArray_DIMS(%(X)s), PyArray_TYPE(%(X)s), PyArray_IS_F_CONTIGUOUS(%(X)s));
......@@ -128,13 +128,13 @@ class TestParamsType:
wp1 = ParamsType(
a=Generic(),
array=TensorType("int64", (False,)),
floatting=Scalar("float64"),
floatting=ScalarType("float64"),
npy_scalar=TensorType("float64", tuple()),
)
wp2 = ParamsType(
a=Generic(),
array=TensorType("int64", (False,)),
floatting=Scalar("float64"),
floatting=ScalarType("float64"),
npy_scalar=TensorType("float64", tuple()),
)
w1 = Params(
......@@ -158,7 +158,7 @@ class TestParamsType:
wp2_other = ParamsType(
other_name=Generic(),
array=TensorType("int64", (False,)),
floatting=Scalar("float64"),
floatting=ScalarType("float64"),
npy_scalar=TensorType("float64", tuple()),
)
w2 = Params(
......
"""
These routines are not well-tested. They are also old.
OB says that it is not important to test them well because Scalar Ops
are rarely used by themselves, instead they are the basis for Tensor Ops
(which should be checked thoroughly). Moreover, Scalar will be changed
to use numpy's scalar routines.
If you do want to rewrite these tests, bear in mind:
* You don't need to use Composite.
* FunctionGraph and DualLinker are old, use aesara.compile.function.function instead.
"""
import numpy as np
import pytest
......@@ -20,7 +10,7 @@ from aesara.scalar.basic import (
ComplexError,
Composite,
InRange,
Scalar,
ScalarType,
add,
and_,
arccos,
......@@ -357,8 +347,8 @@ class TestUpgradeToFloat:
xi = int8("xi")
yi = int8("yi")
xf = Scalar(aesara.config.floatX)("xf")
yf = Scalar(aesara.config.floatX)("yf")
xf = ScalarType(aesara.config.floatX)("xf")
yf = ScalarType(aesara.config.floatX)("yf")
ei = true_div(xi, yi)
fi = aesara.function([xi, yi], ei)
......
......@@ -3,7 +3,7 @@ import numpy as np
from aesara.configdefaults import config
from aesara.scalar.basic import (
IntDiv,
Scalar,
ScalarType,
TrueDiv,
complex64,
float32,
......@@ -14,7 +14,7 @@ from aesara.scalar.basic import (
def test_numpy_dtype():
test_type = Scalar(np.int32)
test_type = ScalarType(np.int32)
assert test_type.dtype == "int32"
......@@ -37,9 +37,9 @@ def test_div_types():
def test_filter_float_subclass():
"""Make sure `Scalar.filter` can handle `float` subclasses."""
"""Make sure `ScalarType.filter` can handle `float` subclasses."""
with config.change_flags(floatX="float64"):
test_type = Scalar("float64")
test_type = ScalarType("float64")
nan = np.array([np.nan], dtype="float64")[0]
assert isinstance(nan, float)
......@@ -49,7 +49,7 @@ def test_filter_float_subclass():
with config.change_flags(floatX="float32"):
# Try again, except this time `nan` isn't a `float`
test_type = Scalar("float32")
test_type = ScalarType("float32")
nan = np.array([np.nan], dtype="float32")[0]
assert isinstance(nan, np.floating)
......@@ -63,6 +63,6 @@ def test_filter_float_subclass():
def test_clone():
st = Scalar("int64")
st = ScalarType("int64")
assert st == st.clone()
assert st.clone("float64").dtype == "float64"
......@@ -3448,7 +3448,7 @@ class TestGetScalarConstantValue:
assert get_scalar_constant_value(mv[np.int32(0)]) == 1
assert get_scalar_constant_value(mv[np.int64(1)]) == 2
assert get_scalar_constant_value(mv[np.uint(2)]) == 3
t = aes.Scalar("int64")
t = aes.ScalarType("int64")
with pytest.raises(NotScalarConstantError):
get_scalar_constant_value(mv[t()])
......
......@@ -1971,7 +1971,7 @@ class TestCastCast:
def test_consecutive(self):
x = fmatrix()
o = Elemwise(aes.Cast(aes.Scalar("float64")))(x.astype("float64"))
o = Elemwise(aes.Cast(aes.ScalarType("float64")))(x.astype("float64"))
f = function([x], o, mode=self.mode)
dx = np.random.random((5, 4)).astype("float32")
f(dx)
......@@ -1980,7 +1980,7 @@ class TestCastCast:
assert isinstance(topo[0].op.scalar_op, aes.basic.Cast)
x = dmatrix()
o = Elemwise(aes.Cast(aes.Scalar("float32")))(x.astype("float32"))
o = Elemwise(aes.Cast(aes.ScalarType("float32")))(x.astype("float32"))
f = function([x], o, mode=self.mode)
dx = np.random.random((5, 4))
f(dx)
......@@ -1991,7 +1991,7 @@ class TestCastCast:
def test_upcast(self):
# Upcast followed by any other cast
x = fmatrix()
o = Elemwise(aes.Cast(aes.Scalar("complex128")))(x.astype("complex64"))
o = Elemwise(aes.Cast(aes.ScalarType("complex128")))(x.astype("complex64"))
f = function([x], o, mode=self.mode)
dx = np.random.random((5, 4)).astype("float32")
f(dx)
......@@ -2001,7 +2001,7 @@ class TestCastCast:
# Upcast followed by a downcast back to the base type
x = fmatrix()
o = Elemwise(aes.Cast(aes.Scalar("float32")))(x.astype("float64"))
o = Elemwise(aes.Cast(aes.ScalarType("float32")))(x.astype("float64"))
f = function([x], o, mode=self.mode)
dx = np.random.random((5, 4)).astype("float32")
f(dx)
......@@ -2012,7 +2012,7 @@ class TestCastCast:
# Downcast followed by an upcast back to the base type
# Optimization shouldn't be applied
x = dmatrix()
o = Elemwise(aes.Cast(aes.Scalar("float64")))(x.astype("float32"))
o = Elemwise(aes.Cast(aes.ScalarType("float64")))(x.astype("float32"))
f = function([x], o, mode=self.mode)
dx = np.random.random((5, 4))
f(dx)
......@@ -2641,7 +2641,7 @@ def test_local_tensor_scalar_tensor(dtype):
],
)
def test_local_scalar_tensor_scalar(dtype):
s_type = aes.Scalar(dtype=dtype)
s_type = aes.ScalarType(dtype=dtype)
s = s_type()
t = at.tensor_from_scalar(s)
s2 = at.scalar_from_tensor(t)
......
......@@ -2269,7 +2269,7 @@ class TestArithmeticCast:
return np.array([1], dtype=dtype)
def Aesara_i_scalar(dtype):
return aes.Scalar(str(dtype))()
return aes.ScalarType(str(dtype))()
def numpy_i_scalar(dtype):
return numpy_scalar(dtype)
......
......@@ -2621,4 +2621,4 @@ def test_index_vars_to_types():
index_vars_to_types(1)
res = index_vars_to_types(iscalar)
assert isinstance(res, scal.Scalar)
assert isinstance(res, scal.ScalarType)
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论