Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
6d3c7568
提交
6d3c7568
authored
8月 27, 2023
作者:
Purna Chandra Mansingh
提交者:
Ricardo Vieira
9月 01, 2023
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Rename module pytensor.tensor.var to pytensor.tensor.variable
上级
288a3f34
隐藏空白字符变更
内嵌
并排
正在显示
37 个修改的文件
包含
1141 行增加
和
1112 行删除
+1141
-1112
.pre-commit-config.yaml
.pre-commit-config.yaml
+1
-1
basic.py
pytensor/link/basic.py
+1
-1
subtensor.py
pytensor/link/jax/dispatch/subtensor.py
+1
-1
op.py
pytensor/scan/op.py
+1
-1
rewriting.py
pytensor/scan/rewriting.py
+1
-1
utils.py
pytensor/scan/utils.py
+1
-1
basic.py
pytensor/sparse/basic.py
+5
-1
__init__.py
pytensor/tensor/__init__.py
+1
-1
basic.py
pytensor/tensor/basic.py
+1
-1
abstract_conv.py
pytensor/tensor/conv/abstract_conv.py
+1
-1
elemwise.py
pytensor/tensor/elemwise.py
+1
-1
extra_ops.py
pytensor/tensor/extra_ops.py
+1
-1
fourier.py
pytensor/tensor/fourier.py
+1
-1
math.py
pytensor/tensor/math.py
+1
-1
nlinalg.py
pytensor/tensor/nlinalg.py
+3
-2
op.py
pytensor/tensor/random/op.py
+1
-1
utils.py
pytensor/tensor/random/utils.py
+1
-1
basic.py
pytensor/tensor/rewriting/basic.py
+1
-1
elemwise.py
pytensor/tensor/rewriting/elemwise.py
+1
-1
jax.py
pytensor/tensor/rewriting/jax.py
+1
-1
math.py
pytensor/tensor/rewriting/math.py
+1
-1
subtensor.py
pytensor/tensor/rewriting/subtensor.py
+1
-1
shape.py
pytensor/tensor/shape.py
+1
-1
sharedvar.py
pytensor/tensor/sharedvar.py
+1
-1
slinalg.py
pytensor/tensor/slinalg.py
+1
-1
type.py
pytensor/tensor/type.py
+1
-1
var.py
pytensor/tensor/var.py
+7
-1075
variable.py
pytensor/tensor/variable.py
+1079
-0
basic.py
pytensor/typed_list/basic.py
+1
-1
mypy-failing.txt
scripts/mypy-failing.txt
+3
-2
test_basic.py
tests/graph/test_basic.py
+1
-1
test_vm.py
tests/link/test_vm.py
+1
-1
test_math.py
tests/tensor/rewriting/test_math.py
+1
-1
test_basic.py
tests/tensor/test_basic.py
+1
-1
test_shape.py
tests/tensor/test_shape.py
+1
-1
test_variable.py
tests/tensor/test_variable.py
+13
-1
test_basic.py
tests/typed_list/test_basic.py
+1
-1
没有找到文件。
.pre-commit-config.yaml
浏览文件 @
6d3c7568
...
...
@@ -16,7 +16,7 @@ repos:
pytensor/graph/op\.py|
pytensor/compile/nanguardmode\.py|
pytensor/graph/rewriting/basic\.py|
pytensor/tensor/var\.py|
pytensor/tensor/var
iable
\.py|
)$
-
id
:
check-merge-conflict
-
repo
:
https://github.com/asottile/pyupgrade
...
...
pytensor/link/basic.py
浏览文件 @
6d3c7568
...
...
@@ -30,7 +30,7 @@ if TYPE_CHECKING:
OutputStorageType
,
StorageMapType
,
)
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
ThunkAndContainersType
=
Tuple
[
"BasicThunkType"
,
List
[
"Container"
],
List
[
"Container"
]]
...
...
pytensor/link/jax/dispatch/subtensor.py
浏览文件 @
6d3c7568
...
...
@@ -33,7 +33,7 @@ slice length.
def
subtensor_assert_indices_jax_compatible
(
node
,
idx_list
):
from
pytensor.graph.basic
import
Constant
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
ilist
=
indices_from_subtensor
(
node
.
inputs
[
1
:],
idx_list
)
for
idx
in
ilist
:
...
...
pytensor/scan/op.py
浏览文件 @
6d3c7568
...
...
@@ -82,7 +82,7 @@ from pytensor.tensor.basic import as_tensor_variable
from
pytensor.tensor.math
import
minimum
from
pytensor.tensor.shape
import
Shape_i
from
pytensor.tensor.type
import
TensorType
,
integer_dtypes
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
# Logging function for sending warning or info
...
...
pytensor/scan/rewriting.py
浏览文件 @
6d3c7568
...
...
@@ -69,7 +69,7 @@ from pytensor.tensor.subtensor import (
get_slice_elements
,
set_subtensor
,
)
from
pytensor.tensor.var
import
TensorConstant
,
get_unique_constant_value
from
pytensor.tensor.var
iable
import
TensorConstant
,
get_unique_constant_value
list_opt_slice
=
[
...
...
pytensor/scan/utils.py
浏览文件 @
6d3c7568
...
...
@@ -21,7 +21,7 @@ from pytensor.graph.type import HasDataType
from
pytensor.graph.utils
import
TestValueError
from
pytensor.tensor.basic
import
AllocEmpty
,
cast
from
pytensor.tensor.subtensor
import
set_subtensor
from
pytensor.tensor.var
import
TensorConstant
from
pytensor.tensor.var
iable
import
TensorConstant
if
TYPE_CHECKING
:
...
...
pytensor/sparse/basic.py
浏览文件 @
6d3c7568
...
...
@@ -51,7 +51,11 @@ from pytensor.tensor.type import TensorType
from
pytensor.tensor.type
import
continuous_dtypes
as
tensor_continuous_dtypes
from
pytensor.tensor.type
import
discrete_dtypes
as
tensor_discrete_dtypes
from
pytensor.tensor.type
import
iscalar
,
ivector
,
scalar
,
tensor
,
vector
from
pytensor.tensor.var
import
TensorConstant
,
TensorVariable
,
_tensor_py_operators
from
pytensor.tensor.variable
import
(
TensorConstant
,
TensorVariable
,
_tensor_py_operators
,
)
sparse_formats
=
[
"csc"
,
"csr"
]
...
...
pytensor/tensor/__init__.py
浏览文件 @
6d3c7568
...
...
@@ -146,7 +146,7 @@ from pytensor.tensor.sort import argsort, argtopk, sort, topk, topk_and_argtopk
from
pytensor.tensor.subtensor
import
*
# noqa
from
pytensor.tensor.type
import
*
# noqa
from
pytensor.tensor.type_other
import
*
# noqa
from
pytensor.tensor.var
import
TensorConstant
,
TensorVariable
# noqa
from
pytensor.tensor.var
iable
import
TensorConstant
,
TensorVariable
# noqa
# Allow accessing numpy constants from pytensor.tensor
from
numpy
import
e
,
euler_gamma
,
inf
,
infty
,
nan
,
newaxis
,
pi
# noqa
...
...
pytensor/tensor/basic.py
浏览文件 @
6d3c7568
...
...
@@ -62,7 +62,7 @@ from pytensor.tensor.type import (
uint_dtypes
,
values_eq_approx_always_true
,
)
from
pytensor.tensor.var
import
(
from
pytensor.tensor.var
iable
import
(
TensorConstant
,
TensorVariable
,
get_unique_constant_value
,
...
...
pytensor/tensor/conv/abstract_conv.py
浏览文件 @
6d3c7568
...
...
@@ -29,7 +29,7 @@ from pytensor.tensor.basic import (
get_underlying_scalar_constant_value
,
)
from
pytensor.tensor.exceptions
import
NotScalarConstantError
from
pytensor.tensor.var
import
TensorConstant
,
TensorVariable
from
pytensor.tensor.var
iable
import
TensorConstant
,
TensorVariable
_logger
=
logging
.
getLogger
(
__name__
)
...
...
pytensor/tensor/elemwise.py
浏览文件 @
6d3c7568
...
...
@@ -29,7 +29,7 @@ from pytensor.tensor.type import (
float_dtypes
,
lvector
,
)
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
from
pytensor.utils
import
uniq
...
...
pytensor/tensor/extra_ops.py
浏览文件 @
6d3c7568
...
...
@@ -35,7 +35,7 @@ from pytensor.tensor.math import sum as at_sum
from
pytensor.tensor.math
import
switch
from
pytensor.tensor.subtensor
import
advanced_inc_subtensor1
,
set_subtensor
from
pytensor.tensor.type
import
TensorType
,
dvector
,
int_dtypes
,
integer_dtypes
,
vector
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
from
pytensor.utils
import
LOCAL_BITWIDTH
,
PYTHON_INT_BITWIDTH
...
...
pytensor/tensor/fourier.py
浏览文件 @
6d3c7568
...
...
@@ -16,7 +16,7 @@ from pytensor.tensor.math import exp, lt, outer, tensordot
from
pytensor.tensor.shape
import
shape
from
pytensor.tensor.subtensor
import
set_subtensor
from
pytensor.tensor.type
import
TensorType
,
integer_dtypes
from
pytensor.tensor.var
import
TensorConstant
from
pytensor.tensor.var
iable
import
TensorConstant
class
Fourier
(
Op
):
...
...
pytensor/tensor/math.py
浏览文件 @
6d3c7568
...
...
@@ -40,7 +40,7 @@ from pytensor.tensor.type import (
)
from
pytensor.tensor.type_other
import
NoneConst
from
pytensor.tensor.utils
import
as_list
from
pytensor.tensor.var
import
TensorConstant
,
_tensor_py_operators
from
pytensor.tensor.var
iable
import
TensorConstant
,
_tensor_py_operators
if
TYPE_CHECKING
:
...
...
pytensor/tensor/nlinalg.py
浏览文件 @
6d3c7568
import
typing
from
functools
import
partial
from
typing
import
Tuple
from
typing
import
Callable
,
Tuple
import
numpy
as
np
...
...
@@ -299,7 +300,7 @@ class Eigh(Eig):
"""
_numop
=
staticmethod
(
np
.
linalg
.
eigh
)
_numop
=
typing
.
cast
(
Callable
,
staticmethod
(
np
.
linalg
.
eigh
)
)
__props__
=
(
"UPLO"
,)
def
__init__
(
self
,
UPLO
=
"L"
):
...
...
pytensor/tensor/random/op.py
浏览文件 @
6d3c7568
...
...
@@ -21,7 +21,7 @@ from pytensor.tensor.random.utils import normalize_size_param, params_broadcast_
from
pytensor.tensor.shape
import
shape_tuple
from
pytensor.tensor.type
import
TensorType
,
all_dtypes
from
pytensor.tensor.type_other
import
NoneConst
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
class
RandomVariable
(
Op
):
...
...
pytensor/tensor/random/utils.py
浏览文件 @
6d3c7568
...
...
@@ -14,7 +14,7 @@ from pytensor.tensor.extra_ops import broadcast_to
from
pytensor.tensor.math
import
maximum
from
pytensor.tensor.shape
import
specify_shape
from
pytensor.tensor.type
import
int_dtypes
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
if
TYPE_CHECKING
:
...
...
pytensor/tensor/rewriting/basic.py
浏览文件 @
6d3c7568
...
...
@@ -72,7 +72,7 @@ from pytensor.tensor.math import eq
from
pytensor.tensor.shape
import
Shape_i
,
shape_padleft
from
pytensor.tensor.sort
import
TopKOp
from
pytensor.tensor.type
import
DenseTensorType
,
TensorType
from
pytensor.tensor.var
import
TensorConstant
,
TensorVariable
from
pytensor.tensor.var
iable
import
TensorConstant
,
TensorVariable
from
pytensor.utils
import
NoDuplicateOptWarningFilter
...
...
pytensor/tensor/rewriting/elemwise.py
浏览文件 @
6d3c7568
...
...
@@ -39,7 +39,7 @@ from pytensor.tensor.rewriting.basic import (
register_specialize
,
)
from
pytensor.tensor.shape
import
shape_padleft
from
pytensor.tensor.var
import
TensorConstant
,
get_unique_constant_value
from
pytensor.tensor.var
iable
import
TensorConstant
,
get_unique_constant_value
class
InplaceElemwiseOptimizer
(
GraphRewriter
):
...
...
pytensor/tensor/rewriting/jax.py
浏览文件 @
6d3c7568
...
...
@@ -6,7 +6,7 @@ from pytensor.tensor.elemwise import DimShuffle
from
pytensor.tensor.math
import
Sum
from
pytensor.tensor.shape
import
Reshape
from
pytensor.tensor.subtensor
import
AdvancedIncSubtensor
,
AdvancedSubtensor
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
@node_rewriter
([
AdvancedIncSubtensor
])
...
...
pytensor/tensor/rewriting/math.py
浏览文件 @
6d3c7568
...
...
@@ -101,7 +101,7 @@ from pytensor.tensor.type import (
values_eq_approx_remove_inf_nan
,
values_eq_approx_remove_nan
,
)
from
pytensor.tensor.var
import
TensorConstant
,
get_unique_constant_value
from
pytensor.tensor.var
iable
import
TensorConstant
,
get_unique_constant_value
def
scalarconsts_rest
(
inputs
,
elemwise
=
True
,
only_process_constants
=
False
):
...
...
pytensor/tensor/rewriting/subtensor.py
浏览文件 @
6d3c7568
...
...
@@ -81,7 +81,7 @@ from pytensor.tensor.subtensor import (
)
from
pytensor.tensor.type
import
TensorType
from
pytensor.tensor.type_other
import
NoneTypeT
,
SliceConstant
,
SliceType
from
pytensor.tensor.var
import
TensorConstant
,
TensorVariable
from
pytensor.tensor.var
iable
import
TensorConstant
,
TensorVariable
def
register_useless
(
lopt
,
*
tags
,
**
kwargs
):
...
...
pytensor/tensor/shape.py
浏览文件 @
6d3c7568
...
...
@@ -19,7 +19,7 @@ from pytensor.tensor import get_vector_length
from
pytensor.tensor.exceptions
import
NotScalarConstantError
from
pytensor.tensor.type
import
DenseTensorType
,
TensorType
,
int_dtypes
,
tensor
from
pytensor.tensor.type_other
import
NoneConst
from
pytensor.tensor.var
import
TensorConstant
,
TensorVariable
from
pytensor.tensor.var
iable
import
TensorConstant
,
TensorVariable
ShapeValueType
=
Union
[
None
,
np
.
integer
,
int
,
Variable
]
...
...
pytensor/tensor/sharedvar.py
浏览文件 @
6d3c7568
...
...
@@ -6,7 +6,7 @@ from pytensor.compile import SharedVariable, shared_constructor
from
pytensor.misc.safe_asarray
import
_asarray
from
pytensor.tensor
import
_get_vector_length
from
pytensor.tensor.type
import
TensorType
from
pytensor.tensor.var
import
_tensor_py_operators
from
pytensor.tensor.var
iable
import
_tensor_py_operators
def
load_shared_variable
(
val
):
...
...
pytensor/tensor/slinalg.py
浏览文件 @
6d3c7568
...
...
@@ -16,7 +16,7 @@ from pytensor.tensor import math as atm
from
pytensor.tensor.nlinalg
import
matrix_dot
from
pytensor.tensor.shape
import
reshape
from
pytensor.tensor.type
import
matrix
,
tensor
,
vector
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
if
TYPE_CHECKING
:
...
...
pytensor/tensor/type.py
浏览文件 @
6d3c7568
...
...
@@ -18,7 +18,7 @@ from pytensor.utils import apply_across_args
if
TYPE_CHECKING
:
from
numpy.typing
import
DTypeLike
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
_logger
=
logging
.
getLogger
(
"pytensor.tensor.type"
)
...
...
pytensor/tensor/var.py
浏览文件 @
6d3c7568
import
copy
import
traceback
as
tb
import
warnings
from
collections.abc
import
Iterable
from
numbers
import
Number
from
typing
import
Optional
,
TypeVar
import
numpy
as
np
from
pytensor
import
tensor
as
at
from
pytensor.configdefaults
import
config
from
pytensor.graph.basic
import
Constant
,
OptionalApplyType
,
Variable
from
pytensor.graph.utils
import
MetaType
from
pytensor.scalar
import
ComplexError
,
IntegerDivisionError
from
pytensor.tensor
import
_get_vector_length
from
pytensor.tensor.exceptions
import
AdvancedIndexingError
from
pytensor.tensor.type
import
TensorType
from
pytensor.tensor.type_other
import
NoneConst
from
pytensor.tensor.utils
import
hash_from_ndarray
from
pytensor.tensor.variable
import
*
# noqa
_TensorTypeType
=
TypeVar
(
"_TensorTypeType"
,
bound
=
TensorType
)
class
_tensor_py_operators
:
def
__abs__
(
self
):
return
at
.
math
.
abs
(
self
)
def
__neg__
(
self
):
return
at
.
math
.
neg
(
self
)
# These won't work because Python requires an int return value
# def __int__(self): return convert_to_int32(self)
# def __float__(self): return convert_to_float64(self)
# def __complex__(self): return convert_to_complex128(self)
_is_nonzero
=
True
def
__lt__
(
self
,
other
):
rval
=
at
.
math
.
lt
(
self
,
other
)
rval
.
_is_nonzero
=
False
return
rval
def
__le__
(
self
,
other
):
rval
=
at
.
math
.
le
(
self
,
other
)
rval
.
_is_nonzero
=
False
return
rval
def
__gt__
(
self
,
other
):
rval
=
at
.
math
.
gt
(
self
,
other
)
rval
.
_is_nonzero
=
False
return
rval
def
__ge__
(
self
,
other
):
rval
=
at
.
math
.
ge
(
self
,
other
)
rval
.
_is_nonzero
=
False
return
rval
def
__bool__
(
self
):
# This is meant to prohibit stuff like a < b < c, which is internally
# implemented as (a < b) and (b < c). The trouble with this is the
# side-effect that checking for a non-NULL a by typing "if a: ..."
# uses the same __nonzero__ method. We want these both to work, but
# it seems impossible. Currently, all vars evaluate to nonzero except
# the return values of comparison operators, which raise this
# exception. If you can think of a better solution, go for it!
#
# __bool__ is Python 3.x data model. __nonzero__ is Python 2.x.
if
self
.
_is_nonzero
:
return
True
else
:
raise
TypeError
(
"Variables do not support boolean operations."
)
def
__invert__
(
self
):
return
at
.
math
.
invert
(
self
)
def
__and__
(
self
,
other
):
return
at
.
math
.
and_
(
self
,
other
)
def
__or__
(
self
,
other
):
return
at
.
math
.
or_
(
self
,
other
)
def
__xor__
(
self
,
other
):
return
at
.
math
.
xor
(
self
,
other
)
def
__rand__
(
self
,
other
):
return
at
.
math
.
and_
(
other
,
self
)
def
__ror__
(
self
,
other
):
return
at
.
math
.
or_
(
other
,
self
)
def
__rxor__
(
self
,
other
):
return
at
.
math
.
xor
(
other
,
self
)
# def __iand__(self, other):
# return _and_inplace(self, other)
#
# def __ior__(self, other):
# return _or_inplace(self, other)
#
# def __ixor__(self, other):
# return _xor_inplace(self, other)
def
__add__
(
self
,
other
):
try
:
return
at
.
math
.
add
(
self
,
other
)
# We should catch the minimum number of exception here.
# Otherwise this will convert error when PyTensor flags
# compute_test_value is used
# Evidently, we need to catch NotImplementedError
# TypeError from as_tensor_variable are caught in Elemwise.make_node
# Otherwise TensorVariable * SparseVariable won't work!
except
(
NotImplementedError
,
TypeError
):
# We must return NotImplemented and not an
# NotImplementedError or raise an NotImplementedError.
# That way python will give a good error message like this
# `TypeError: unsupported operand type(s) for +:
# 'TensorVariable' and 'TensorVariable'`
return
NotImplemented
def
__sub__
(
self
,
other
):
# See explanation in __add__ for the error caught
# and the return value in that case
try
:
return
at
.
math
.
sub
(
self
,
other
)
except
(
NotImplementedError
,
TypeError
):
return
NotImplemented
def
__mul__
(
self
,
other
):
# See explanation in __add__ for the error caught
# and the return value in that case
try
:
return
at
.
math
.
mul
(
self
,
other
)
except
(
NotImplementedError
,
TypeError
):
return
NotImplemented
def
__div__
(
self
,
other
):
# See explanation in __add__ for the error caught
# and the return value in that case
try
:
return
at
.
math
.
div_proxy
(
self
,
other
)
except
IntegerDivisionError
:
# This is to raise the exception that occurs when trying to divide
# two integer arrays (currently forbidden).
raise
except
(
NotImplementedError
,
TypeError
):
return
NotImplemented
def
__pow__
(
self
,
other
):
# See explanation in __add__ for the error caught
# and the return value in that case
try
:
return
at
.
math
.
pow
(
self
,
other
)
except
(
NotImplementedError
,
TypeError
):
return
NotImplemented
def
__mod__
(
self
,
other
):
# See explanation in __add__ for the error caught
# and the return value in that case
try
:
return
at
.
math
.
mod_check
(
self
,
other
)
except
ComplexError
:
# This is to raise the exception that occurs when trying to compute
# x % y with either x or y a complex number.
raise
except
(
NotImplementedError
,
TypeError
):
return
NotImplemented
def
__divmod__
(
self
,
other
):
return
at
.
math
.
divmod
(
self
,
other
)
def
__truediv__
(
self
,
other
):
return
at
.
math
.
true_div
(
self
,
other
)
def
__floordiv__
(
self
,
other
):
return
at
.
math
.
floor_div
(
self
,
other
)
def
__rtruediv__
(
self
,
other
):
return
at
.
math
.
true_div
(
other
,
self
)
def
__rfloordiv__
(
self
,
other
):
return
at
.
math
.
floor_div
(
other
,
self
)
# Do not use these; in-place `Op`s should be inserted by optimizations
# only!
# def __iadd__(self, other):
# return _add_inplace(self, other)
# def __isub__(self, other):
# return _sub_inplace(self, other)
#
# def __imul__(self, other):
# return _mul_inplace(self, other)
#
# def __idiv__(self, other):
# return _div_inplace(self, other)
#
# def __ipow__(self, other):
# return _pow_inplace(self, other)
def
__radd__
(
self
,
other
):
return
at
.
math
.
add
(
other
,
self
)
def
__rsub__
(
self
,
other
):
return
at
.
math
.
sub
(
other
,
self
)
def
__rmul__
(
self
,
other
):
return
at
.
math
.
mul
(
other
,
self
)
def
__rdiv__
(
self
,
other
):
return
at
.
math
.
div_proxy
(
other
,
self
)
def
__rmod__
(
self
,
other
):
return
at
.
math
.
mod
(
other
,
self
)
def
__rdivmod__
(
self
,
other
):
return
at
.
math
.
divmod
(
other
,
self
)
def
__rpow__
(
self
,
other
):
return
at
.
math
.
pow
(
other
,
self
)
def
__ceil__
(
self
):
return
at
.
math
.
ceil
(
self
)
def
__floor__
(
self
):
return
at
.
math
.
floor
(
self
)
def
__trunc__
(
self
):
return
at
.
math
.
trunc
(
self
)
# NumPy-like transpose property
@property
def
T
(
self
):
return
at
.
basic
.
transpose
(
self
)
def
transpose
(
self
,
*
axes
):
"""Transpose this array.
Returns
-------
object
`tensor.transpose(self, axes)` or `tensor.transpose(self, axes[0])`.
If only one `axes` argument is provided and it is iterable, then it is
assumed to be the entire axes tuple, and passed intact to
tensor.transpose.
"""
if
len
(
axes
)
==
0
:
return
at
.
basic
.
transpose
(
self
)
try
:
iter
(
axes
[
0
])
iterable
=
True
except
TypeError
:
iterable
=
False
if
len
(
axes
)
==
1
and
iterable
:
return
at
.
basic
.
transpose
(
self
,
axes
[
0
])
else
:
return
at
.
basic
.
transpose
(
self
,
axes
)
@property
def
shape
(
self
):
return
at
.
shape
(
self
)
@property
def
size
(
self
):
if
self
.
ndim
==
1
:
return
self
.
shape
[
0
]
else
:
return
at
.
math
.
prod
(
self
.
shape
)
def
any
(
self
,
axis
=
None
,
keepdims
=
False
):
return
at
.
math
.
any
(
self
,
axis
=
axis
,
keepdims
=
keepdims
)
def
all
(
self
,
axis
=
None
,
keepdims
=
False
):
return
at
.
math
.
all
(
self
,
axis
=
axis
,
keepdims
=
keepdims
)
# Old note: "We can't implement this because Python requests that this
# function returns an integer."
# TODO: We could use `get_vector_length` and let it raise an exception just like
# `__iter__` does
# def __len__(self):
# raise Exception("PyTensor Variables can't work with len(PyTensor "
# "Variable) due to Python restriction. You can use "
# "PyTensorVariable.shape[0] instead.")
def
reshape
(
self
,
shape
,
*
,
ndim
=
None
):
"""Return a reshaped view/copy of this variable.
Parameters
----------
shape
Something that can be converted to a symbolic vector of integers.
ndim
The length of the shape. Passing None here means for
PyTensor to try and guess the length of `shape`.
.. warning:: This has a different signature than numpy's
ndarray.reshape!
In numpy you do not need to wrap the shape arguments
in a tuple, in pytensor you do need to.
"""
if
ndim
is
not
None
:
if
not
isinstance
(
ndim
,
int
):
raise
ValueError
(
"Expected ndim to be an integer, is "
+
str
(
type
(
ndim
))
)
return
at
.
reshape
(
self
,
shape
,
ndim
=
ndim
)
def
dimshuffle
(
self
,
*
pattern
):
"""
Reorder the dimensions of this variable, optionally inserting
broadcasted dimensions.
Parameters
----------
pattern
List/tuple of int mixed with 'x' for broadcastable dimensions.
Examples
--------
For example, to create a 3D view of a [2D] matrix, call
``dimshuffle([0,'x',1])``. This will create a 3D view such that the
middle dimension is an implicit broadcasted dimension. To do the same
thing on the transpose of that matrix, call ``dimshuffle([1, 'x', 0])``.
Notes
-----
This function supports the pattern passed as a tuple, or as a
variable-length argument (e.g. ``a.dimshuffle(pattern)`` is equivalent
to ``a.dimshuffle(*pattern)`` where ``pattern`` is a list/tuple of ints
mixed with 'x' characters).
See Also
--------
DimShuffle
"""
if
(
len
(
pattern
)
==
1
)
and
(
isinstance
(
pattern
[
0
],
(
list
,
tuple
))):
pattern
=
pattern
[
0
]
op
=
at
.
elemwise
.
DimShuffle
(
list
(
self
.
type
.
broadcastable
),
pattern
)
return
op
(
self
)
def
flatten
(
self
,
ndim
=
1
):
return
at
.
basic
.
flatten
(
self
,
ndim
)
def
ravel
(
self
):
return
at
.
basic
.
flatten
(
self
)
def
diagonal
(
self
,
offset
=
0
,
axis1
=
0
,
axis2
=
1
):
return
at
.
basic
.
diagonal
(
self
,
offset
,
axis1
,
axis2
)
def
transfer
(
self
,
target
):
"""Transfer this this array's data to another device.
If `target` is `'cpu'` this will transfer to a TensorType (if
not already one). Other types may define additional targets.
Parameters
----------
target : str
The desired location of the output variable
"""
return
at
.
basic
.
transfer
(
self
,
target
)
def
arccos
(
self
):
return
at
.
math
.
arccos
(
self
)
def
arccosh
(
self
):
return
at
.
math
.
arccosh
(
self
)
def
arcsin
(
self
):
return
at
.
math
.
arcsin
(
self
)
def
arcsinh
(
self
):
return
at
.
math
.
arcsinh
(
self
)
def
arctan
(
self
):
return
at
.
math
.
arctan
(
self
)
def
arctanh
(
self
):
return
at
.
math
.
arctanh
(
self
)
def
ceil
(
self
):
return
at
.
math
.
ceil
(
self
)
def
cos
(
self
):
return
at
.
math
.
cos
(
self
)
def
cosh
(
self
):
return
at
.
math
.
cosh
(
self
)
def
deg2rad
(
self
):
return
at
.
math
.
deg2rad
(
self
)
def
exp
(
self
):
return
at
.
math
.
exp
(
self
)
def
exp2
(
self
):
return
at
.
math
.
exp2
(
self
)
def
expm1
(
self
):
return
at
.
math
.
expm1
(
self
)
def
floor
(
self
):
return
at
.
math
.
floor
(
self
)
def
log
(
self
):
return
at
.
math
.
log
(
self
)
def
log10
(
self
):
return
at
.
math
.
log10
(
self
)
def
log1p
(
self
):
return
at
.
math
.
log1p
(
self
)
def
log2
(
self
):
return
at
.
math
.
log2
(
self
)
def
rad2deg
(
self
):
return
at
.
math
.
rad2deg
(
self
)
def
sin
(
self
):
return
at
.
math
.
sin
(
self
)
def
sinh
(
self
):
return
at
.
math
.
sinh
(
self
)
def
sqrt
(
self
):
return
at
.
math
.
sqrt
(
self
)
def
tan
(
self
):
return
at
.
math
.
tan
(
self
)
def
tanh
(
self
):
return
at
.
math
.
tanh
(
self
)
def
trunc
(
self
):
return
at
.
math
.
trunc
(
self
)
def
astype
(
self
,
dtype
):
return
at
.
basic
.
cast
(
self
,
dtype
)
def
__getitem__
(
self
,
args
):
def
includes_bool
(
args_el
):
if
isinstance
(
args_el
,
(
np
.
bool_
,
bool
))
or
(
hasattr
(
args_el
,
"dtype"
)
and
args_el
.
dtype
==
"bool"
):
return
True
if
not
isinstance
(
args_el
,
Variable
)
and
isinstance
(
args_el
,
Iterable
):
for
el
in
args_el
:
if
includes_bool
(
el
):
return
True
return
False
if
isinstance
(
args
,
list
)
and
any
(
isinstance
(
a
,
slice
)
for
a
in
args
):
pass
elif
not
isinstance
(
args
,
tuple
):
args
=
(
args
,)
# Count the dimensions, check for bools and find ellipses.
ellipses
=
[]
index_dim_count
=
0
for
i
,
arg
in
enumerate
(
args
):
if
arg
is
np
.
newaxis
or
arg
is
NoneConst
:
# no increase in index_dim_count
pass
elif
arg
is
Ellipsis
:
# no increase in index_dim_count
ellipses
.
append
(
i
)
elif
(
isinstance
(
arg
,
(
np
.
ndarray
,
Variable
))
and
hasattr
(
arg
,
"dtype"
)
and
arg
.
dtype
==
"bool"
):
index_dim_count
+=
arg
.
ndim
else
:
# Python arrays can contain a mixture of bools and integers,
# which requires complex rules to handle all special cases.
# These rules differ slightly between NumPy versions.
# Since earlier versions of PyTensor did not support any boolean
# indexing, it is safe to throw an error if we encounter
# any of these difficult cases.
if
includes_bool
(
arg
):
raise
TypeError
(
"TensorType does not support Python bools "
"for indexing, such as tensor[[True, False]]. "
"To use a boolean mask, convert the mask to "
"a NumPy array first, e.g., "
"tensor[numpy.array([True, False])]."
)
index_dim_count
+=
1
# Check if the number of dimensions isn't too large.
if
self
.
ndim
<
index_dim_count
:
raise
IndexError
(
"too many indices for array"
)
# Convert an Ellipsis if provided into an appropriate number of
# slice(None).
if
len
(
ellipses
)
>
1
:
raise
IndexError
(
"an index can only have a single Ellipsis (`...`)"
)
elif
len
(
ellipses
)
==
1
:
ellipsis_at
=
ellipses
[
0
]
args
=
list
(
args
)
args
[
ellipsis_at
:
ellipsis_at
+
1
]
=
[
slice
(
None
)]
*
(
self
.
ndim
-
index_dim_count
)
def
is_empty_array
(
val
):
return
(
isinstance
(
val
,
(
tuple
,
list
))
and
len
(
val
)
==
0
)
or
(
isinstance
(
val
,
np
.
ndarray
)
and
val
.
size
==
0
)
# Force input to be an int datatype if input is an empty list or tuple
# Else leave it as is if it is a real number
# Convert python literals to pytensor constants
args
=
tuple
(
[
at
.
subtensor
.
as_index_constant
(
np
.
array
(
inp
,
dtype
=
np
.
uint8
)
if
is_empty_array
(
inp
)
else
inp
)
for
inp
in
args
]
)
# Determine if advanced indexing is needed or not. The logic is
# already in `index_vars_to_types`: if it succeeds, standard indexing is
# used; if it fails with `AdvancedIndexingError`, advanced indexing is
# used
advanced
=
False
for
i
,
arg
in
enumerate
(
args
):
if
includes_bool
(
arg
):
advanced
=
True
break
if
arg
is
not
np
.
newaxis
and
arg
is
not
NoneConst
:
try
:
at
.
subtensor
.
index_vars_to_types
(
arg
)
except
AdvancedIndexingError
:
if
advanced
:
break
else
:
advanced
=
True
if
advanced
:
return
at
.
subtensor
.
advanced_subtensor
(
self
,
*
args
)
else
:
if
np
.
newaxis
in
args
or
NoneConst
in
args
:
# `np.newaxis` (i.e. `None`) in NumPy indexing mean "add a new
# broadcastable dimension at this location". Since PyTensor adds
# new broadcastable dimensions via the `DimShuffle` `Op`, the
# following code uses said `Op` to add one of the new axes and
# then uses recursion to apply any other indices and add any
# remaining new axes.
counter
=
0
pattern
=
[]
new_args
=
[]
for
arg
in
args
:
if
arg
is
np
.
newaxis
or
arg
is
NoneConst
:
pattern
.
append
(
"x"
)
new_args
.
append
(
slice
(
None
,
None
,
None
))
else
:
pattern
.
append
(
counter
)
counter
+=
1
new_args
.
append
(
arg
)
pattern
.
extend
(
list
(
range
(
counter
,
self
.
ndim
)))
view
=
self
.
dimshuffle
(
pattern
)
full_slices
=
True
for
arg
in
new_args
:
# We can't do arg == slice(None, None, None) as in
# Python 2.7, this call __lt__ if we have a slice
# with some symbolic variable.
if
not
(
isinstance
(
arg
,
slice
)
and
(
arg
.
start
is
None
or
arg
.
start
is
NoneConst
)
and
(
arg
.
stop
is
None
or
arg
.
stop
is
NoneConst
)
and
(
arg
.
step
is
None
or
arg
.
step
is
NoneConst
)
):
full_slices
=
False
if
full_slices
:
return
view
else
:
return
view
.
__getitem__
(
tuple
(
new_args
))
else
:
return
at
.
subtensor
.
Subtensor
(
args
)(
self
,
*
at
.
subtensor
.
get_slice_elements
(
args
,
lambda
entry
:
isinstance
(
entry
,
Variable
)
),
)
def
take
(
self
,
indices
,
axis
=
None
,
mode
=
"raise"
):
return
at
.
subtensor
.
take
(
self
,
indices
,
axis
,
mode
)
def
copy
(
self
,
name
=
None
):
"""Return a symbolic copy and optionally assign a name.
Does not copy the tags.
"""
copied_variable
=
at
.
basic
.
tensor_copy
(
self
)
copied_variable
.
name
=
name
return
copied_variable
def
__iter__
(
self
):
try
:
for
i
in
range
(
at
.
basic
.
get_vector_length
(
self
)):
yield
self
[
i
]
except
TypeError
:
# This prevents accidental iteration via sum(self)
raise
TypeError
(
"TensorType does not support iteration.
\n
"
"
\t
Did you pass a PyTensor variable to a function that expects a list?
\n
"
"
\t
Maybe you are using builtins.sum instead of pytensor.tensor.sum?"
)
@property
def ndim(self) -> int:
    """The rank (number of dimensions) of this tensor."""
    return self.type.ndim

@property
def broadcastable(self):
    """The broadcastable signature of this tensor.

    See Also
    --------
    broadcasting
    """
    return self.type.broadcastable

@property
def dtype(self):
    """The dtype of this tensor."""
    return self.type.dtype

def __dot__(left, right):
    """Matrix/vector product; see :func:`pytensor.tensor.math.dense_dot`."""
    return at.math.dense_dot(left, right)

def __rdot__(right, left):
    """Reflected matrix/vector product (``left @ right``)."""
    return at.math.dense_dot(left, right)

# `dot` and the `@` operator all share the same implementation.
dot = __dot__
__matmul__ = __dot__
__rmatmul__ = __rdot__
def sum(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """See :func:`pytensor.tensor.math.sum`."""
    return at.math.sum(
        self, axis=axis, dtype=dtype, keepdims=keepdims, acc_dtype=acc_dtype
    )

def prod(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """See :func:`pytensor.tensor.math.prod`."""
    return at.math.prod(
        self, axis=axis, dtype=dtype, keepdims=keepdims, acc_dtype=acc_dtype
    )

def norm(self, L, axis=None, keepdims=False):
    """Compute the L-norm of this tensor along `axis`.

    Only finite, non-zero orders `L` are supported.
    """
    if L == 0:
        raise NotImplementedError()
    if np.isinf(L):
        raise NotImplementedError()
    # optimizations will/should catch cases like L=1, L=2
    powered_sum = at.math.pow(at.math.abs(self), L).sum(axis=axis)
    result = at.math.pow(powered_sum, 1.0 / L)
    if keepdims:
        return at.math.makeKeepDims(self, result, axis)
    return result
def mean(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """See :func:`pytensor.tensor.math.mean`."""
    return at.math.mean(
        self, axis=axis, dtype=dtype, keepdims=keepdims, acc_dtype=acc_dtype
    )

def var(self, axis=None, ddof=0, keepdims=False, corrected=False):
    """See :func:`pytensor.tensor.math.var`."""
    return at.math.var(
        self, axis=axis, ddof=ddof, keepdims=keepdims, corrected=corrected
    )

def std(self, axis=None, ddof=0, keepdims=False, corrected=False):
    """See :func:`pytensor.tensor.math.std`."""
    return at.math.std(
        self, axis=axis, ddof=ddof, keepdims=keepdims, corrected=corrected
    )
def min(self, axis=None, keepdims=False):
    """See :func:`pytensor.tensor.math.min`."""
    return at.math.min(self, axis, keepdims=keepdims)

def max(self, axis=None, keepdims=False):
    """See :func:`pytensor.tensor.math.max`."""
    return at.math.max(self, axis, keepdims=keepdims)

def argmin(self, axis=None, keepdims=False):
    """See :func:`pytensor.tensor.math.argmin`."""
    return at.math.argmin(self, axis, keepdims=keepdims)

def argmax(self, axis=None, keepdims=False):
    """See :func:`pytensor.tensor.math.argmax`."""
    return at.math.argmax(self, axis, keepdims=keepdims)
def nonzero(self, return_matrix=False):
    """See :func:`pytensor.tensor.basic.nonzero`."""
    return at.nonzero(self, return_matrix=return_matrix)

def nonzero_values(self):
    """See :func:`pytensor.tensor.basic.nonzero_values`."""
    return at.nonzero_values(self)

def sort(self, axis=-1, kind="quicksort", order=None):
    """See :func:`pytensor.tensor.sort.sort`."""
    return at.sort(self, axis, kind, order)

def argsort(self, axis=-1, kind="quicksort", order=None):
    """See :func:`pytensor.tensor.sort.argsort`."""
    # Imported locally to avoid a circular import at module load time.
    from pytensor.tensor.sort import argsort

    return argsort(self, axis, kind, order)
def clip(self, a_min, a_max):
    """See :func:`pytensor.tensor.math.clip`."""
    return at.math.clip(self, a_min, a_max)

def conj(self):
    """See :func:`pytensor.tensor.math.conj`."""
    return at.math.conj(self)

# NumPy-compatible alias.
conjugate = conj

def repeat(self, repeats, axis=None):
    """See :func:`pytensor.tensor.basic.repeat`."""
    return at.extra_ops.repeat(self, repeats, axis)

def round(self, mode=None):
    """See :func:`pytensor.tensor.math.round`."""
    return at.math.round(self, mode)

def trace(self):
    """Sum along the main diagonal; see :func:`pytensor.tensor.linalg.trace`."""
    return at.linalg.trace(self)
# This value is set so that PyTensor arrays will trump NumPy operators.
# (NumPy consults `__array_priority__` to decide whose reflected operator
# wins in mixed `ndarray` / `TensorVariable` expressions.)
__array_priority__ = 1000

def get_underlying_scalar_constant(self):
    # Delegates to the module-level helper, which raises if `self` cannot
    # be reduced to a scalar constant value.
    return at.basic.get_underlying_scalar_constant_value(self)
def zeros_like(model, dtype=None):
    """Return a tensor of zeros with the same shape (and optionally dtype)."""
    return at.basic.zeros_like(model, dtype=dtype)

def ones_like(model, dtype=None):
    """Return a tensor of ones with the same shape (and optionally dtype)."""
    return at.basic.ones_like(model, dtype=dtype)

def cumsum(self, axis=None):
    """Cumulative sum; see :func:`pytensor.tensor.extra_ops.cumsum`."""
    return at.extra_ops.cumsum(self, axis)

def cumprod(self, axis=None):
    """Cumulative product; see :func:`pytensor.tensor.extra_ops.cumprod`."""
    return at.extra_ops.cumprod(self, axis)
def searchsorted(self, v, side="left", sorter=None):
    """See :func:`pytensor.tensor.extra_ops.searchsorted`."""
    return at.extra_ops.searchsorted(self, v, side, sorter)

def ptp(self, axis=None):
    """See :func:`pytensor.tensor.math.ptp`."""
    return at.math.ptp(self, axis)

def swapaxes(self, axis1, axis2):
    """See :func:`pytensor.tensor.basic.swapaxes`.

    If a matrix is provided with the right axes, its transpose
    will be returned.
    """
    return at.basic.swapaxes(self, axis1, axis2)

def fill(self, value):
    """Fill inputted tensor with the assigned value."""
    return at.basic.fill(self, value)
def choose(self, choices, mode="raise"):
    """
    Construct an array from an index array and a set of arrays to choose
    from.

    Parameters
    ----------
    choices
        Sequence of arrays (or an array) from which elements are chosen.
    mode : str
        Out-of-range index handling, as in `numpy.choose`
        (``"raise"``, ``"wrap"``, or ``"clip"``). Defaults to ``"raise"``.
    """
    # BUG FIX: the original hard-coded `mode="raise"` in the delegated
    # call, silently ignoring the caller-supplied `mode` argument.
    return at.basic.choose(self, choices, mode=mode)
def squeeze(self):
    """
    Remove broadcastable dimensions from the shape of an array.

    It returns the input array, but with the broadcastable dimensions
    removed. This is always `x` itself or a view into `x`.
    """
    return at.extra_ops.squeeze(self)

def compress(self, a, axis=None):
    """Return selected slices only."""
    return at.extra_ops.compress(self, a, axis=axis)
class TensorVariable(
    _tensor_py_operators, Variable[_TensorTypeType, OptionalApplyType]
):
    """
    Subclass to add the tensor operators to the basic `Variable` class.
    """

    def __init__(
        self,
        type: _TensorTypeType,
        owner: OptionalApplyType,
        index=None,
        name=None,
    ):
        super().__init__(type, owner, index=index, name=name)
        # Optionally alert the user whenever a float64 variable is built;
        # behavior is selected by the `warn_float64` config flag.
        if config.warn_float64 != "ignore" and type.dtype == "float64":
            msg = (
                "You are creating a TensorVariable "
                "with float64 dtype. You requested an action via "
                "the PyTensor flag warn_float64={ignore,warn,raise,pdb}."
            )
            if config.warn_float64 == "warn":
                # Get the user stack. We don't want function inside the
                # tensor and graph directory to be shown to the user.
                x = tb.extract_stack()
                nb_rm = 0
                while x:
                    file_path = x[-1][0]
                    rm = False
                    # BUG FIX: the original list contained
                    # "pytensor\\tensor\\" twice; the Windows form of the
                    # graph directory was missing, so graph-internal frames
                    # were not stripped on Windows.
                    for p in [
                        "pytensor/tensor/",
                        "pytensor\\tensor\\",
                        "pytensor/graph/",
                        "pytensor\\graph\\",
                    ]:
                        if p in file_path:
                            x = x[:-1]
                            nb_rm += 1
                            rm = True
                            break
                    if not rm:
                        break
                warnings.warn(msg, stacklevel=1 + nb_rm)
            elif config.warn_float64 == "raise":
                raise Exception(msg)
            elif config.warn_float64 == "pdb":
                import pdb

                pdb.set_trace()
@_get_vector_length.register(TensorVariable)
def _get_vector_length_TensorVariable(op_or_var, var):
    """Return the static length of vector `var` from its type's shape."""
    static_length = var.type.shape[0]
    if static_length is None:
        raise ValueError(f"Length of {var} cannot be determined")
    return static_length
# Register `TensorVariable` as the variable class that `TensorType`
# instantiates for its outputs.
TensorType.variable_type = TensorVariable
class TensorConstantSignature(tuple):
    r"""A signature object for comparing `TensorConstant` instances.
    An instance is a pair with the type ``(Type, ndarray)``.
    TODO FIXME: Subclassing `tuple` is unnecessary, and it appears to be
    preventing the use of a much more convenient `__init__` that removes the
    need for all these lazy computations and their safety checks.
    Also, why do we even need this signature stuff? We could simply implement
    good `Constant.__eq__` and `Constant.__hash__` implementations.
    We could also produce plain `tuple`\s with hashable values.
    """

    def __eq__(self, other):
        # Signatures of different (sub)classes never compare equal.
        if type(self) != type(other):
            return False
        try:
            (t0, d0), (t1, d1) = self, other
        except Exception:
            # `other` is not a 2-tuple of pairs; treat as unequal.
            return False
        # N.B. compare shape to ensure no broadcasting in ==
        if t0 != t1 or d0.shape != d1.shape:
            return False
        self.no_nan  # Ensure has_nan is computed.
        # Note that in the comparisons below, the elementwise comparisons
        # come last because they are the most expensive checks.
        if self.has_nan:
            other.no_nan  # Ensure has_nan is computed.
            return (
                other.has_nan
                and self.sum == other.sum
                and (self.no_nan.mask == other.no_nan.mask).all()
                and
                # Note that the second test below (==) may crash e.g. for
                # a single scalar NaN value, so we do not run it when all
                # values are missing.
                (self.no_nan.mask.all() or (self.no_nan == other.no_nan).all())
            )
        else:
            # Simple case where we do not need to worry about NaN values.
            # (note that if there are NaN values in d1, this will return
            # False, which is why we do not bother with testing `other.has_nan`
            # here).
            return (self.sum == other.sum) and np.all(d0 == d1)

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # The hash mixes the class, the type, the data shape, and the
        # NaN/Inf-filtered sum (a cheap content fingerprint).
        t, d = self
        return hash((type(self), t, d.shape, self.sum))

    def pytensor_hash(self):
        # Content-based hash of the raw ndarray data.
        _, d = self
        return hash_from_ndarray(d)

    @property
    def sum(self):
        """Compute sum of non NaN / Inf values in the array."""
        # Lazily computed and cached in `self._sum`.
        try:
            return self._sum
        except AttributeError:
            # Prevent warnings when there are `inf`s and `-inf`s present
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=RuntimeWarning)
                self._sum = self.no_nan.sum()
            # The following 2 lines are needed as in Python 3.3 with NumPy
            # 1.7.1, numpy.ndarray and numpy.memmap aren't hashable.
            if isinstance(self._sum, np.memmap):
                self._sum = np.asarray(self._sum).item()
            if self.has_nan and self.no_nan.mask.all():
                # In this case the sum is not properly computed by numpy.
                self._sum = 0
            if np.isinf(self._sum) or np.isnan(self._sum):
                # NaN may happen when there are both -inf and +inf values.
                if self.has_nan:
                    # Filter both NaN and Inf values.
                    mask = self.no_nan.mask + np.isinf(self[1])
                else:
                    # Filter only Inf values.
                    mask = np.isinf(self[1])
                if mask.all():
                    self._sum = 0
                else:
                    self._sum = np.ma.masked_array(self[1], mask).sum()
                # At this point there should be no more NaN.
                assert not np.isnan(self._sum)
            if isinstance(self._sum, np.ma.core.MaskedConstant):
                self._sum = 0
            return self._sum

    @property
    def no_nan(self):
        # NaN-masked view of the data; also sets `self.has_nan` as a
        # side effect (callers rely on this — see `__eq__`).
        try:
            return self._no_nan
        except AttributeError:
            nans = np.isnan(self[1])
            self._no_nan = np.ma.masked_array(self[1], nans)
            self.has_nan = np.any(nans)
            return self._no_nan
def get_unique_constant_value(x: TensorVariable) -> Optional[Number]:
    """Return the unique value of a tensor, if there is one.

    Returns ``None`` unless `x` is a `Constant` whose data is a
    non-scalar, non-empty ndarray in which every element equals the
    first one.
    """
    if not isinstance(x, Constant):
        return None
    data = x.data
    if not (isinstance(data, np.ndarray) and data.ndim > 0):
        return None
    flat = data.ravel()
    if flat.shape[0] and (flat == flat[0]).all():
        return flat[0]
    return None
class TensorConstant(TensorVariable, Constant[_TensorTypeType]):
    """Subclass to add the tensor operators to the basic `Constant` class."""

    def __init__(self, type: _TensorTypeType, data, name=None):
        # Validate that `data`'s shape is consistent with the declared
        # type: same rank, and matching extent on every statically-known
        # dimension (None entries in `type.shape` are unconstrained).
        data_shape = np.shape(data)
        if len(data_shape) != type.ndim or any(
            ds != ts
            for ds, ts in zip(np.shape(data), type.shape)
            if ts is not None
        ):
            raise ValueError(
                f"Shape of data ({data_shape}) does not match shape of type ({type.shape})"
            )
        # We want all the shape information from `data`
        new_type = type.clone(shape=data_shape)
        assert not any(s is None for s in new_type.shape)
        Constant.__init__(self, new_type, data, name)

    def signature(self):
        # Comparable/hashable fingerprint of (type, data); see
        # `TensorConstantSignature`.
        return TensorConstantSignature((self.type, self.data))

    def equals(self, other):
        # Override Constant.equals to allow to compare with
        # numpy.ndarray, and python type.
        if isinstance(other, (np.ndarray, int, float)):
            # Make a TensorConstant to be able to compare
            other = at.basic.constant(other)
        return (
            isinstance(other, TensorConstant)
            and self.signature() == other.signature()
        )

    def __copy__(self):
        # We need to do this to remove the cached attribute
        return type(self)(self.type, self.data, self.name)

    def __deepcopy__(self, memo):
        # We need to do this to remove the cached attribute
        return type(self)(
            copy.deepcopy(self.type, memo),
            copy.deepcopy(self.data, memo),
            copy.deepcopy(self.name, memo),
        )
# Register `TensorConstant` as the constant class that `TensorType`
# instantiates for constant values.
TensorType.constant_type = TensorConstant
class DenseVariableMeta(MetaType):
    """Metaclass whose `isinstance` check treats plain `TensorVariable`
    instances (exact type only) as dense variables, along with instances
    of any class using this metaclass."""

    def __instancecheck__(self, o):
        # NOTE(review): this deliberately uses an exact-type check on
        # `TensorVariable`, not `isinstance` — subclasses are excluded.
        return type(o) == TensorVariable or isinstance(o, DenseVariableMeta)


class DenseTensorVariable(TensorType, metaclass=DenseVariableMeta):
    r"""A `Variable` for dense tensors.
    Instances of this class and `TensorVariable`\s are considered dense
    `Variable`\s.
    """
class DenseConstantMeta(MetaType):
    """Metaclass whose `isinstance` check treats plain `TensorConstant`
    instances (exact type only) as dense constants, along with instances
    of any class using this metaclass."""

    def __instancecheck__(self, o):
        # NOTE(review): exact-type check on `TensorConstant` is intentional;
        # subclasses are excluded.
        return type(o) == TensorConstant or isinstance(o, DenseConstantMeta)


class DenseTensorConstant(TensorType, metaclass=DenseConstantMeta):
    r"""A `Constant` for dense tensors.
    Instances of this class and `TensorConstant`\s are considered dense
    `Constant`\s.
    """
# Emitted at import time: this module (`pytensor.tensor.var`) was renamed;
# importing the old path still works but raises a `DeprecationWarning`
# pointing callers at the new module name.
warnings.warn(
    "The module 'pytensor.tensor.var' has been deprecated. "
    "Use 'pytensor.tensor.variable' instead.",
    category=DeprecationWarning,
    stacklevel=2,
)
pytensor/tensor/variable.py
0 → 100644
浏览文件 @
6d3c7568
import
copy
import
traceback
as
tb
import
warnings
from
collections.abc
import
Iterable
from
numbers
import
Number
from
typing
import
Optional
,
TypeVar
import
numpy
as
np
from
pytensor
import
tensor
as
at
from
pytensor.configdefaults
import
config
from
pytensor.graph.basic
import
Constant
,
OptionalApplyType
,
Variable
from
pytensor.graph.utils
import
MetaType
from
pytensor.scalar
import
ComplexError
,
IntegerDivisionError
from
pytensor.tensor
import
_get_vector_length
from
pytensor.tensor.exceptions
import
AdvancedIndexingError
from
pytensor.tensor.type
import
TensorType
from
pytensor.tensor.type_other
import
NoneConst
from
pytensor.tensor.utils
import
hash_from_ndarray
_TensorTypeType
=
TypeVar
(
"_TensorTypeType"
,
bound
=
TensorType
)
class
_tensor_py_operators
:
def __abs__(self):
    """Elementwise absolute value."""
    return at.math.abs(self)

def __neg__(self):
    """Elementwise negation."""
    return at.math.neg(self)

# These won't work because Python requires an int return value
# def __int__(self): return convert_to_int32(self)
# def __float__(self): return convert_to_float64(self)
# def __complex__(self): return convert_to_complex128(self)

# Comparison results are flagged non-truthy so `__bool__` can reject them.
_is_nonzero = True

def __lt__(self, other):
    """Elementwise less-than; result is not usable as a Python bool."""
    result = at.math.lt(self, other)
    result._is_nonzero = False
    return result

def __le__(self, other):
    """Elementwise less-or-equal; result is not usable as a Python bool."""
    result = at.math.le(self, other)
    result._is_nonzero = False
    return result

def __gt__(self, other):
    """Elementwise greater-than; result is not usable as a Python bool."""
    result = at.math.gt(self, other)
    result._is_nonzero = False
    return result

def __ge__(self, other):
    """Elementwise greater-or-equal; result is not usable as a Python bool."""
    result = at.math.ge(self, other)
    result._is_nonzero = False
    return result
def __bool__(self):
    # This is meant to prohibit stuff like a < b < c, which is internally
    # implemented as (a < b) and (b < c). The trouble with this is the
    # side-effect that checking for a non-NULL a by typing "if a: ..."
    # uses the same __nonzero__ method. We want these both to work, but
    # it seems impossible. Currently, all vars evaluate to nonzero except
    # the return values of comparison operators, which raise this
    # exception. If you can think of a better solution, go for it!
    #
    # __bool__ is Python 3.x data model. __nonzero__ is Python 2.x.
    if not self._is_nonzero:
        raise TypeError("Variables do not support boolean operations.")
    return True

def __invert__(self):
    """Elementwise bitwise NOT."""
    return at.math.invert(self)

def __and__(self, other):
    """Elementwise bitwise AND."""
    return at.math.and_(self, other)

def __or__(self, other):
    """Elementwise bitwise OR."""
    return at.math.or_(self, other)

def __xor__(self, other):
    """Elementwise bitwise XOR."""
    return at.math.xor(self, other)

def __rand__(self, other):
    """Reflected bitwise AND."""
    return at.math.and_(other, self)

def __ror__(self, other):
    """Reflected bitwise OR."""
    return at.math.or_(other, self)

def __rxor__(self, other):
    """Reflected bitwise XOR."""
    return at.math.xor(other, self)

# In-place bitwise operators (__iand__/__ior__/__ixor__) are deliberately
# not defined; in-place `Op`s are inserted by rewrites only.
def __add__(self, other):
    """Elementwise addition."""
    try:
        return at.math.add(self, other)
    # We should catch the minimum number of exception here.
    # Otherwise this will convert error when PyTensor flags
    # compute_test_value is used
    # Evidently, we need to catch NotImplementedError
    # TypeError from as_tensor_variable are caught in Elemwise.make_node
    # Otherwise TensorVariable * SparseVariable won't work!
    except (NotImplementedError, TypeError):
        # We must return NotImplemented and not an
        # NotImplementedError or raise an NotImplementedError.
        # That way python will give a good error message like this
        # `TypeError: unsupported operand type(s) for +:
        # 'TensorVariable' and 'TensorVariable'`
        return NotImplemented

def __sub__(self, other):
    """Elementwise subtraction; see `__add__` for the exception policy."""
    try:
        return at.math.sub(self, other)
    except (NotImplementedError, TypeError):
        return NotImplemented

def __mul__(self, other):
    """Elementwise multiplication; see `__add__` for the exception policy."""
    try:
        return at.math.mul(self, other)
    except (NotImplementedError, TypeError):
        return NotImplemented

def __div__(self, other):
    """Elementwise division; see `__add__` for the exception policy."""
    try:
        return at.math.div_proxy(self, other)
    except IntegerDivisionError:
        # This is to raise the exception that occurs when trying to divide
        # two integer arrays (currently forbidden).
        raise
    except (NotImplementedError, TypeError):
        return NotImplemented

def __pow__(self, other):
    """Elementwise power; see `__add__` for the exception policy."""
    try:
        return at.math.pow(self, other)
    except (NotImplementedError, TypeError):
        return NotImplemented

def __mod__(self, other):
    """Elementwise modulo; see `__add__` for the exception policy."""
    try:
        return at.math.mod_check(self, other)
    except ComplexError:
        # This is to raise the exception that occurs when trying to compute
        # x % y with either x or y a complex number.
        raise
    except (NotImplementedError, TypeError):
        return NotImplemented

def __divmod__(self, other):
    """Elementwise (quotient, remainder) pair."""
    return at.math.divmod(self, other)

def __truediv__(self, other):
    """Elementwise true division."""
    return at.math.true_div(self, other)

def __floordiv__(self, other):
    """Elementwise floor division."""
    return at.math.floor_div(self, other)

def __rtruediv__(self, other):
    """Reflected true division."""
    return at.math.true_div(other, self)

def __rfloordiv__(self, other):
    """Reflected floor division."""
    return at.math.floor_div(other, self)

# Do not use these; in-place `Op`s should be inserted by optimizations
# only! (__iadd__/__isub__/__imul__/__idiv__/__ipow__ are deliberately
# left undefined.)

def __radd__(self, other):
    """Reflected addition."""
    return at.math.add(other, self)

def __rsub__(self, other):
    """Reflected subtraction."""
    return at.math.sub(other, self)

def __rmul__(self, other):
    """Reflected multiplication."""
    return at.math.mul(other, self)

def __rdiv__(self, other):
    """Reflected division."""
    return at.math.div_proxy(other, self)

def __rmod__(self, other):
    """Reflected modulo."""
    return at.math.mod(other, self)

def __rdivmod__(self, other):
    """Reflected divmod."""
    return at.math.divmod(other, self)

def __rpow__(self, other):
    """Reflected power."""
    return at.math.pow(other, self)

def __ceil__(self):
    """Elementwise ceiling (supports `math.ceil`)."""
    return at.math.ceil(self)

def __floor__(self):
    """Elementwise floor (supports `math.floor`)."""
    return at.math.floor(self)

def __trunc__(self):
    """Elementwise truncation (supports `math.trunc`)."""
    return at.math.trunc(self)
# NumPy-like transpose property
@property
def T(self):
    """Transposed view of this tensor (all axes reversed)."""
    return at.basic.transpose(self)

def transpose(self, *axes):
    """Transpose this array.

    Returns
    -------
    object
        `tensor.transpose(self, axes)` or `tensor.transpose(self, axes[0])`.

    If only one `axes` argument is provided and it is iterable, then it is
    assumed to be the entire axes tuple, and passed intact to
    tensor.transpose.
    """
    if len(axes) == 0:
        return at.basic.transpose(self)
    try:
        iter(axes[0])
        first_is_iterable = True
    except TypeError:
        first_is_iterable = False
    if len(axes) == 1 and first_is_iterable:
        # A single iterable argument is the whole axes specification.
        return at.basic.transpose(self, axes[0])
    return at.basic.transpose(self, axes)

@property
def shape(self):
    """Symbolic shape of this tensor."""
    return at.shape(self)

@property
def size(self):
    """Symbolic total number of elements."""
    if self.ndim == 1:
        return self.shape[0]
    return at.math.prod(self.shape)

def any(self, axis=None, keepdims=False):
    """See :func:`pytensor.tensor.math.any`."""
    return at.math.any(self, axis=axis, keepdims=keepdims)

def all(self, axis=None, keepdims=False):
    """See :func:`pytensor.tensor.math.all`."""
    return at.math.all(self, axis=axis, keepdims=keepdims)
# Old note: "We can't implement this because Python requests that this
# function returns an integer."
# TODO: We could use `get_vector_length` and let it raise an exception just like
# `__iter__` does
# def __len__(self):
# raise Exception("PyTensor Variables can't work with len(PyTensor "
# "Variable) due to Python restriction. You can use "
# "PyTensorVariable.shape[0] instead.")
def reshape(self, shape, *, ndim=None):
    """Return a reshaped view/copy of this variable.

    Parameters
    ----------
    shape
        Something that can be converted to a symbolic vector of integers.
    ndim
        The length of the shape. Passing None here means for
        PyTensor to try and guess the length of `shape`.

    .. warning:: This has a different signature than numpy's
                 ndarray.reshape!
                 In numpy you do not need to wrap the shape arguments
                 in a tuple, in pytensor you do need to.
    """
    if ndim is not None and not isinstance(ndim, int):
        raise ValueError("Expected ndim to be an integer, is " + str(type(ndim)))
    return at.reshape(self, shape, ndim=ndim)

def dimshuffle(self, *pattern):
    """
    Reorder the dimensions of this variable, optionally inserting
    broadcasted dimensions.

    Parameters
    ----------
    pattern
        List/tuple of int mixed with 'x' for broadcastable dimensions.

    Examples
    --------
    For example, to create a 3D view of a [2D] matrix, call
    ``dimshuffle([0,'x',1])``. This will create a 3D view such that the
    middle dimension is an implicit broadcasted dimension. To do the same
    thing on the transpose of that matrix, call ``dimshuffle([1, 'x', 0])``.

    Notes
    -----
    This function supports the pattern passed as a tuple, or as a
    variable-length argument (e.g. ``a.dimshuffle(pattern)`` is equivalent
    to ``a.dimshuffle(*pattern)`` where ``pattern`` is a list/tuple of ints
    mixed with 'x' characters).

    See Also
    --------
    DimShuffle
    """
    # Accept the whole pattern as a single list/tuple argument.
    if len(pattern) == 1 and isinstance(pattern[0], (list, tuple)):
        pattern = pattern[0]
    shuffle_op = at.elemwise.DimShuffle(list(self.type.broadcastable), pattern)
    return shuffle_op(self)
def flatten(self, ndim=1):
    """Collapse this tensor to `ndim` dimensions."""
    return at.basic.flatten(self, ndim)

def ravel(self):
    """Collapse this tensor to one dimension."""
    return at.basic.flatten(self)

def diagonal(self, offset=0, axis1=0, axis2=1):
    """See :func:`pytensor.tensor.basic.diagonal`."""
    return at.basic.diagonal(self, offset, axis1, axis2)

def transfer(self, target):
    """Transfer this array's data to another device.

    If `target` is `'cpu'` this will transfer to a TensorType (if
    not already one). Other types may define additional targets.

    Parameters
    ----------
    target : str
        The desired location of the output variable
    """
    return at.basic.transfer(self, target)
def arccos(self):
    """Elementwise inverse cosine."""
    return at.math.arccos(self)

def arccosh(self):
    """Elementwise inverse hyperbolic cosine."""
    return at.math.arccosh(self)

def arcsin(self):
    """Elementwise inverse sine."""
    return at.math.arcsin(self)

def arcsinh(self):
    """Elementwise inverse hyperbolic sine."""
    return at.math.arcsinh(self)

def arctan(self):
    """Elementwise inverse tangent."""
    return at.math.arctan(self)

def arctanh(self):
    """Elementwise inverse hyperbolic tangent."""
    return at.math.arctanh(self)

def ceil(self):
    """Elementwise ceiling."""
    return at.math.ceil(self)

def cos(self):
    """Elementwise cosine."""
    return at.math.cos(self)

def cosh(self):
    """Elementwise hyperbolic cosine."""
    return at.math.cosh(self)

def deg2rad(self):
    """Convert degrees to radians, elementwise."""
    return at.math.deg2rad(self)

def exp(self):
    """Elementwise exponential."""
    return at.math.exp(self)

def exp2(self):
    """Elementwise base-2 exponential."""
    return at.math.exp2(self)

def expm1(self):
    """Elementwise ``exp(x) - 1``."""
    return at.math.expm1(self)

def floor(self):
    """Elementwise floor."""
    return at.math.floor(self)

def log(self):
    """Elementwise natural logarithm."""
    return at.math.log(self)

def log10(self):
    """Elementwise base-10 logarithm."""
    return at.math.log10(self)

def log1p(self):
    """Elementwise ``log(1 + x)``."""
    return at.math.log1p(self)

def log2(self):
    """Elementwise base-2 logarithm."""
    return at.math.log2(self)

def rad2deg(self):
    """Convert radians to degrees, elementwise."""
    return at.math.rad2deg(self)

def sin(self):
    """Elementwise sine."""
    return at.math.sin(self)

def sinh(self):
    """Elementwise hyperbolic sine."""
    return at.math.sinh(self)

def sqrt(self):
    """Elementwise square root."""
    return at.math.sqrt(self)

def tan(self):
    """Elementwise tangent."""
    return at.math.tan(self)

def tanh(self):
    """Elementwise hyperbolic tangent."""
    return at.math.tanh(self)

def trunc(self):
    """Elementwise truncation toward zero."""
    return at.math.trunc(self)

def astype(self, dtype):
    """Cast this tensor to `dtype`."""
    return at.basic.cast(self, dtype)
def __getitem__(self, args):
    """Index this tensor, dispatching between basic and advanced indexing.

    NumPy-style indexing: integers, slices, `Ellipsis`, `np.newaxis`/`None`
    insert/select dimensions; boolean or integer arrays trigger advanced
    indexing via `advanced_subtensor`.
    """

    def includes_bool(args_el):
        # True if `args_el` is (or recursively contains) a boolean
        # scalar/array index.
        if isinstance(args_el, (np.bool_, bool)) or (
            hasattr(args_el, "dtype") and args_el.dtype == "bool"
        ):
            return True
        if not isinstance(args_el, Variable) and isinstance(args_el, Iterable):
            for el in args_el:
                if includes_bool(el):
                    return True
        return False

    # A list containing a slice is kept as-is (NumPy treats it as a
    # tuple-like index); any other non-tuple index is wrapped in a tuple.
    if isinstance(args, list) and any(isinstance(a, slice) for a in args):
        pass
    elif not isinstance(args, tuple):
        args = (args,)

    # Count the dimensions, check for bools and find ellipses.
    ellipses = []
    index_dim_count = 0
    for i, arg in enumerate(args):
        if arg is np.newaxis or arg is NoneConst:
            # no increase in index_dim_count
            pass
        elif arg is Ellipsis:
            # no increase in index_dim_count
            ellipses.append(i)
        elif (
            isinstance(arg, (np.ndarray, Variable))
            and hasattr(arg, "dtype")
            and arg.dtype == "bool"
        ):
            # A boolean mask consumes as many dimensions as its rank.
            index_dim_count += arg.ndim
        else:
            # Python arrays can contain a mixture of bools and integers,
            # which requires complex rules to handle all special cases.
            # These rules differ slightly between NumPy versions.
            # Since earlier versions of PyTensor did not support any boolean
            # indexing, it is safe to throw an error if we encounter
            # any of these difficult cases.
            if includes_bool(arg):
                raise TypeError(
                    "TensorType does not support Python bools "
                    "for indexing, such as tensor[[True, False]]. "
                    "To use a boolean mask, convert the mask to "
                    "a NumPy array first, e.g., "
                    "tensor[numpy.array([True, False])]."
                )
            index_dim_count += 1

    # Check if the number of dimensions isn't too large.
    if self.ndim < index_dim_count:
        raise IndexError("too many indices for array")

    # Convert an Ellipsis if provided into an appropriate number of
    # slice(None).
    if len(ellipses) > 1:
        raise IndexError("an index can only have a single Ellipsis (`...`)")
    elif len(ellipses) == 1:
        ellipsis_at = ellipses[0]
        args = list(args)
        args[ellipsis_at : ellipsis_at + 1] = [slice(None)] * (
            self.ndim - index_dim_count
        )

    def is_empty_array(val):
        # Empty Python sequence or empty ndarray.
        return (isinstance(val, (tuple, list)) and len(val) == 0) or (
            isinstance(val, np.ndarray) and val.size == 0
        )

    # Force input to be an int datatype if input is an empty list or tuple
    # Else leave it as is if it is a real number
    # Convert python literals to pytensor constants
    args = tuple(
        [
            at.subtensor.as_index_constant(
                np.array(inp, dtype=np.uint8) if is_empty_array(inp) else inp
            )
            for inp in args
        ]
    )

    # Determine if advanced indexing is needed or not. The logic is
    # already in `index_vars_to_types`: if it succeeds, standard indexing is
    # used; if it fails with `AdvancedIndexingError`, advanced indexing is
    # used
    advanced = False
    for i, arg in enumerate(args):
        if includes_bool(arg):
            advanced = True
            break
        if arg is not np.newaxis and arg is not NoneConst:
            try:
                at.subtensor.index_vars_to_types(arg)
            except AdvancedIndexingError:
                if advanced:
                    break
                else:
                    advanced = True

    if advanced:
        return at.subtensor.advanced_subtensor(self, *args)
    else:
        if np.newaxis in args or NoneConst in args:
            # `np.newaxis` (i.e. `None`) in NumPy indexing mean "add a new
            # broadcastable dimension at this location". Since PyTensor adds
            # new broadcastable dimensions via the `DimShuffle` `Op`, the
            # following code uses said `Op` to add one of the new axes and
            # then uses recursion to apply any other indices and add any
            # remaining new axes.
            counter = 0
            pattern = []
            new_args = []
            for arg in args:
                if arg is np.newaxis or arg is NoneConst:
                    pattern.append("x")
                    new_args.append(slice(None, None, None))
                else:
                    pattern.append(counter)
                    counter += 1
                    new_args.append(arg)
            pattern.extend(list(range(counter, self.ndim)))
            view = self.dimshuffle(pattern)
            full_slices = True
            for arg in new_args:
                # We can't do arg == slice(None, None, None) as in
                # Python 2.7, this call __lt__ if we have a slice
                # with some symbolic variable.
                if not (
                    isinstance(arg, slice)
                    and (arg.start is None or arg.start is NoneConst)
                    and (arg.stop is None or arg.stop is NoneConst)
                    and (arg.step is None or arg.step is NoneConst)
                ):
                    full_slices = False
            if full_slices:
                # Only full slices remain: the dimshuffled view is the answer.
                return view
            else:
                # Recurse to apply the remaining (non-newaxis) indices.
                return view.__getitem__(tuple(new_args))
        else:
            # Basic indexing: build a `Subtensor` op, passing the symbolic
            # slice components as explicit inputs.
            return at.subtensor.Subtensor(args)(
                self,
                *at.subtensor.get_slice_elements(
                    args, lambda entry: isinstance(entry, Variable)
                ),
            )
def
take
(
self
,
indices
,
axis
=
None
,
mode
=
"raise"
):
return
at
.
subtensor
.
take
(
self
,
indices
,
axis
,
mode
)
def
copy
(
self
,
name
=
None
):
"""Return a symbolic copy and optionally assign a name.
Does not copy the tags.
"""
copied_variable
=
at
.
basic
.
tensor_copy
(
self
)
copied_variable
.
name
=
name
return
copied_variable
def
__iter__
(
self
):
try
:
for
i
in
range
(
at
.
basic
.
get_vector_length
(
self
)):
yield
self
[
i
]
except
TypeError
:
# This prevents accidental iteration via sum(self)
raise
TypeError
(
"TensorType does not support iteration.
\n
"
"
\t
Did you pass a PyTensor variable to a function that expects a list?
\n
"
"
\t
Maybe you are using builtins.sum instead of pytensor.tensor.sum?"
)
@property
def
ndim
(
self
)
->
int
:
"""The rank of this tensor."""
return
self
.
type
.
ndim
@property
def
broadcastable
(
self
):
"""
The broadcastable signature of this tensor.
See Also
--------
broadcasting
"""
return
self
.
type
.
broadcastable
@property
def dtype(self):
    """The dtype of this tensor."""
    tensor_type = self.type
    return tensor_type.dtype
def __dot__(left, right):
    """Dense dot product; backs ``left.dot(right)`` and the ``@`` operator."""
    product = at.math.dense_dot(left, right)
    return product
def __rdot__(right, left):
    """Reflected dense dot product; backs ``left @ right`` when `left` is not a tensor."""
    product = at.math.dense_dot(left, right)
    return product
# NumPy-style aliases: `a.dot(b)` and the `@` matrix-multiplication operator
# all delegate to the dense-dot wrappers defined above.
dot = __dot__
__matmul__ = __dot__
__rmatmul__ = __rdot__
def sum(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """See :func:`pytensor.tensor.math.sum`."""
    reduce_kwargs = {
        "axis": axis,
        "dtype": dtype,
        "keepdims": keepdims,
        "acc_dtype": acc_dtype,
    }
    return at.math.sum(self, **reduce_kwargs)
def prod(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """See :func:`pytensor.tensor.math.prod`."""
    reduce_kwargs = {
        "axis": axis,
        "dtype": dtype,
        "keepdims": keepdims,
        "acc_dtype": acc_dtype,
    }
    return at.math.prod(self, **reduce_kwargs)
def norm(self, L, axis=None, keepdims=False):
    """Compute the L-norm of this tensor along `axis`.

    ``L == 0`` and infinite ``L`` are not implemented.
    """
    if L == 0:
        raise NotImplementedError()
    if np.isinf(L):
        raise NotImplementedError()
    # optimizations will/should catch cases like L=1, L=2
    summed = at.math.pow(at.math.abs(self), L).sum(axis=axis)
    y = at.math.pow(summed, 1.0 / L)
    if not keepdims:
        return y
    return at.math.makeKeepDims(self, y, axis)
def mean(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """See :func:`pytensor.tensor.math.mean`."""
    reduce_kwargs = {
        "axis": axis,
        "dtype": dtype,
        "keepdims": keepdims,
        "acc_dtype": acc_dtype,
    }
    return at.math.mean(self, **reduce_kwargs)
def var(self, axis=None, ddof=0, keepdims=False, corrected=False):
    """See :func:`pytensor.tensor.math.var`."""
    opts = {"axis": axis, "ddof": ddof, "keepdims": keepdims, "corrected": corrected}
    return at.math.var(self, **opts)
def std(self, axis=None, ddof=0, keepdims=False, corrected=False):
    """See :func:`pytensor.tensor.math.std`."""
    opts = {"axis": axis, "ddof": ddof, "keepdims": keepdims, "corrected": corrected}
    return at.math.std(self, **opts)
def min(self, axis=None, keepdims=False):
    """See :func:`pytensor.tensor.math.min`."""
    result = at.math.min(self, axis, keepdims=keepdims)
    return result
def max(self, axis=None, keepdims=False):
    """See :func:`pytensor.tensor.math.max`."""
    result = at.math.max(self, axis, keepdims=keepdims)
    return result
def argmin(self, axis=None, keepdims=False):
    """See :func:`pytensor.tensor.math.argmin`."""
    result = at.math.argmin(self, axis, keepdims=keepdims)
    return result
def argmax(self, axis=None, keepdims=False):
    """See :func:`pytensor.tensor.math.argmax`."""
    result = at.math.argmax(self, axis, keepdims=keepdims)
    return result
def nonzero(self, return_matrix=False):
    """See :func:`pytensor.tensor.basic.nonzero`."""
    result = at.nonzero(self, return_matrix=return_matrix)
    return result
def nonzero_values(self):
    """See :func:`pytensor.tensor.basic.nonzero_values`."""
    result = at.nonzero_values(self)
    return result
def sort(self, axis=-1, kind="quicksort", order=None):
    """See :func:`pytensor.tensor.sort.sort`."""
    sorted_result = at.sort(self, axis, kind, order)
    return sorted_result
def argsort(self, axis=-1, kind="quicksort", order=None):
    """See :func:`pytensor.tensor.sort.argsort`."""
    # Imported locally (as in the original) to avoid a circular import.
    from pytensor.tensor.sort import argsort

    result = argsort(self, axis, kind, order)
    return result
def clip(self, a_min, a_max):
    "See :func:`pytensor.tensor.math.clip`."
    clipped = at.math.clip(self, a_min, a_max)
    return clipped
def conj(self):
    """Complex conjugate; see :func:`pytensor.tensor.math.conj`."""
    result = at.math.conj(self)
    return result

# NumPy-compatible alias for `conj`.
conjugate = conj
def repeat(self, repeats, axis=None):
    """See :func:`pytensor.tensor.basic.repeat`."""
    repeated = at.extra_ops.repeat(self, repeats, axis)
    return repeated
def round(self, mode=None):
    """See :func:`pytensor.tensor.math.round`."""
    rounded = at.math.round(self, mode)
    return rounded
def trace(self):
    """Sum along the main diagonal; see :func:`pytensor.tensor.linalg.trace`."""
    result = at.linalg.trace(self)
    return result
# This value is set so that PyTensor arrays will trump NumPy operators.
# (NumPy defers binary-op dispatch to the operand with the higher
# `__array_priority__`, letting e.g. `ndarray + TensorVariable` build a graph.)
__array_priority__ = 1000
def get_underlying_scalar_constant(self):
    """See :func:`pytensor.tensor.basic.get_underlying_scalar_constant_value`."""
    value = at.basic.get_underlying_scalar_constant_value(self)
    return value
def zeros_like(model, dtype=None):
    """Symbolic tensor of zeros with the same shape as `model`; see :func:`pytensor.tensor.basic.zeros_like`."""
    zeros = at.basic.zeros_like(model, dtype=dtype)
    return zeros
def ones_like(model, dtype=None):
    """Symbolic tensor of ones with the same shape as `model`; see :func:`pytensor.tensor.basic.ones_like`."""
    ones = at.basic.ones_like(model, dtype=dtype)
    return ones
def cumsum(self, axis=None):
    """Cumulative sum along `axis`; see :func:`pytensor.tensor.extra_ops.cumsum`."""
    result = at.extra_ops.cumsum(self, axis)
    return result
def cumprod(self, axis=None):
    """Cumulative product along `axis`; see :func:`pytensor.tensor.extra_ops.cumprod`."""
    result = at.extra_ops.cumprod(self, axis)
    return result
def searchsorted(self, v, side="left", sorter=None):
    """Insertion indices keeping order; see :func:`pytensor.tensor.extra_ops.searchsorted`."""
    indices = at.extra_ops.searchsorted(self, v, side, sorter)
    return indices
def ptp(self, axis=None):
    """See :func:`pytensor.tensor.math.ptp`."""
    result = at.math.ptp(self, axis)
    return result
def swapaxes(self, axis1, axis2):
    """See :func:`pytensor.tensor.basic.swapaxes`.

    If a matrix is provided with the right axes, its transpose
    will be returned.
    """
    swapped = at.basic.swapaxes(self, axis1, axis2)
    return swapped
def fill(self, value):
    """Fill inputted tensor with the assigned value."""
    filled = at.basic.fill(self, value)
    return filled
def choose(self, choices, mode="raise"):
    """Construct an array from an index array and a set of arrays to choose
    from.

    Parameters
    ----------
    choices
        The array(s) values are taken from.
    mode
        How out-of-bounds indices are handled; forwarded to
        :func:`pytensor.tensor.basic.choose`.
    """
    # BUG FIX: the call previously hard-coded mode="raise", silently
    # ignoring the caller-supplied `mode` argument.
    return at.basic.choose(self, choices, mode=mode)
def squeeze(self):
    """Remove broadcastable dimensions from the shape of an array.

    It returns the input array, but with the broadcastable dimensions
    removed. This is always `x` itself or a view into `x`.
    """
    squeezed = at.extra_ops.squeeze(self)
    return squeezed
def compress(self, a, axis=None):
    """Return selected slices only.

    NOTE(review): `self` is forwarded in the *first* argument position of
    :func:`pytensor.tensor.extra_ops.compress` and `a` in the second, so the
    argument roles (condition vs. array) may be the reverse of
    ``numpy.ndarray.compress`` — confirm against `extra_ops.compress`'s
    signature before relying on NumPy-compatible semantics.
    """
    return at.extra_ops.compress(self, a, axis=axis)
class TensorVariable(_tensor_py_operators, Variable[_TensorTypeType, OptionalApplyType]):
    """
    Subclass to add the tensor operators to the basic `Variable` class.

    On construction, optionally warns/raises/drops into pdb when a float64
    variable is created, according to ``config.warn_float64``.
    """

    def __init__(
        self,
        type: _TensorTypeType,
        owner: OptionalApplyType,
        index=None,
        name=None,
    ):
        super().__init__(type, owner, index=index, name=name)
        if config.warn_float64 != "ignore" and type.dtype == "float64":
            msg = (
                "You are creating a TensorVariable "
                "with float64 dtype. You requested an action via "
                "the PyTensor flag warn_float64={ignore,warn,raise,pdb}."
            )
            if config.warn_float64 == "warn":
                # Get the user stack. We don't want function inside the
                # tensor and graph directory to be shown to the user.
                x = tb.extract_stack()
                nb_rm = 0
                while x:
                    file_path = x[-1][0]
                    rm = False
                    # BUG FIX: the fourth entry used to be a duplicate of the
                    # Windows tensor path ("pytensor\\tensor\\"); it is now the
                    # Windows graph path so `pytensor\graph\` frames are also
                    # filtered on Windows, mirroring the POSIX entries.
                    for p in [
                        "pytensor/tensor/",
                        "pytensor\\tensor\\",
                        "pytensor/graph/",
                        "pytensor\\graph\\",
                    ]:
                        if p in file_path:
                            # Drop the most recent (internal) frame and retry.
                            x = x[:-1]
                            nb_rm += 1
                            rm = True
                            break
                    if not rm:
                        break
                warnings.warn(msg, stacklevel=1 + nb_rm)
            elif config.warn_float64 == "raise":
                raise Exception(msg)
            elif config.warn_float64 == "pdb":
                import pdb

                pdb.set_trace()
@_get_vector_length.register(TensorVariable)
def _get_vector_length_TensorVariable(op_or_var, var):
    """Return the static length of a vector variable from its type's shape."""
    static_length = var.type.shape[0]
    if static_length is None:
        raise ValueError(f"Length of {var} cannot be determined")
    return static_length
# Register `TensorVariable` as the `Variable` subclass that `TensorType`
# instantiates for its symbolic variables.
TensorType.variable_type = TensorVariable
class TensorConstantSignature(tuple):
    r"""A signature object for comparing `TensorConstant` instances.

    An instance is a pair with the type ``(Type, ndarray)``.

    Equality and hashing are NaN/Inf-aware: two signatures compare equal when
    their types, shapes, NaN masks, and non-NaN values all agree.  The
    `sum` and `no_nan` properties are lazily computed and cached on the
    instance (`_sum`, `_no_nan`, `has_nan`).

    TODO FIXME: Subclassing `tuple` is unnecessary, and it appears to be
    preventing the use of a much more convenient `__init__` that removes the
    need for all these lazy computations and their safety checks.

    Also, why do we even need this signature stuff? We could simply implement
    good `Constant.__eq__` and `Constant.__hash__` implementations.

    We could also produce plain `tuple`\s with hashable values.
    """

    def __eq__(self, other):
        # Only signatures of the same concrete class can be equal.
        if type(self) != type(other):
            return False
        try:
            (t0, d0), (t1, d1) = self, other
        except Exception:
            # `other` is not a (type, data) pair.
            return False

        # N.B. compare shape to ensure no broadcasting in ==
        if t0 != t1 or d0.shape != d1.shape:
            return False

        self.no_nan  # Ensure has_nan is computed.
        # Note that in the comparisons below, the elementwise comparisons
        # come last because they are the most expensive checks.
        if self.has_nan:
            other.no_nan  # Ensure has_nan is computed.
            return (
                other.has_nan
                and self.sum == other.sum
                and (self.no_nan.mask == other.no_nan.mask).all()
                and
                # Note that the second test below (==) may crash e.g. for
                # a single scalar NaN value, so we do not run it when all
                # values are missing.
                (self.no_nan.mask.all() or (self.no_nan == other.no_nan).all())
            )
        else:
            # Simple case where we do not need to worry about NaN values.
            # (note that if there are NaN values in d1, this will return
            # False, which is why we do not bother with testing `other.has_nan`
            # here).
            return (self.sum == other.sum) and np.all(d0 == d1)

    def __ne__(self, other):
        # Defined explicitly because `__eq__` is overridden.
        return not self == other

    def __hash__(self):
        # Hash on type, shape, and the NaN/Inf-filtered sum; consistent with
        # `__eq__`, which requires all three to match.
        t, d = self
        return hash((type(self), t, d.shape, self.sum))

    def pytensor_hash(self):
        # Content hash of the underlying ndarray (independent of the type).
        _, d = self
        return hash_from_ndarray(d)

    @property
    def sum(self):
        """Compute sum of non NaN / Inf values in the array."""
        try:
            return self._sum
        except AttributeError:
            # Prevent warnings when there are `inf`s and `-inf`s present
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=RuntimeWarning)
                self._sum = self.no_nan.sum()
            # The following 2 lines are needed as in Python 3.3 with NumPy
            # 1.7.1, numpy.ndarray and numpy.memmap aren't hashable.
            if isinstance(self._sum, np.memmap):
                self._sum = np.asarray(self._sum).item()
            if self.has_nan and self.no_nan.mask.all():
                # In this case the sum is not properly computed by numpy.
                self._sum = 0
            if np.isinf(self._sum) or np.isnan(self._sum):
                # NaN may happen when there are both -inf and +inf values.
                if self.has_nan:
                    # Filter both NaN and Inf values.
                    mask = self.no_nan.mask + np.isinf(self[1])
                else:
                    # Filter only Inf values.
                    mask = np.isinf(self[1])
                if mask.all():
                    self._sum = 0
                else:
                    self._sum = np.ma.masked_array(self[1], mask).sum()
                # At this point there should be no more NaN.
                assert not np.isnan(self._sum)
        if isinstance(self._sum, np.ma.core.MaskedConstant):
            self._sum = 0
        return self._sum

    @property
    def no_nan(self):
        """Masked view of the data with NaNs masked out; also sets `has_nan`."""
        try:
            return self._no_nan
        except AttributeError:
            nans = np.isnan(self[1])
            self._no_nan = np.ma.masked_array(self[1], nans)
            self.has_nan = np.any(nans)
        return self._no_nan
def get_unique_constant_value(x: TensorVariable) -> Optional[Number]:
    """Return the unique value of a tensor, if there is one"""
    if not isinstance(x, Constant):
        return None
    data = x.data
    # Only non-scalar ndarray constants can be collapsed to a single value.
    if not (isinstance(data, np.ndarray) and data.ndim > 0):
        return None
    flat_data = data.ravel()
    if flat_data.shape[0] and (flat_data == flat_data[0]).all():
        return flat_data[0]
    return None
class TensorConstant(TensorVariable, Constant[_TensorTypeType]):
    """Subclass to add the tensor operators to the basic `Constant` class."""

    def __init__(self, type: _TensorTypeType, data, name=None):
        # Validate that `data`'s shape is compatible with the declared type:
        # same rank, and agreement on every statically known dimension.
        data_shape = np.shape(data)
        if len(data_shape) != type.ndim or any(
            ds != ts
            for ds, ts in zip(np.shape(data), type.shape)
            if ts is not None
        ):
            raise ValueError(
                f"Shape of data ({data_shape}) does not match shape of type ({type.shape})"
            )

        # We want all the shape information from `data`
        new_type = type.clone(shape=data_shape)

        # A constant's type must be fully static.
        assert not any(s is None for s in new_type.shape)

        Constant.__init__(self, new_type, data, name)

    def signature(self):
        # (type, data) pair used for NaN-aware equality/hashing.
        return TensorConstantSignature((self.type, self.data))

    def equals(self, other):
        # Override Constant.equals to allow to compare with
        # numpy.ndarray, and python type.
        if isinstance(other, (np.ndarray, int, float)):
            # Make a TensorConstant to be able to compare
            other = at.basic.constant(other)
        return (
            isinstance(other, TensorConstant)
            and self.signature() == other.signature()
        )

    def __copy__(self):
        # We need to do this to remove the cached attribute
        return type(self)(self.type, self.data, self.name)

    def __deepcopy__(self, memo):
        # We need to do this to remove the cached attribute
        return type(self)(
            copy.deepcopy(self.type, memo),
            copy.deepcopy(self.data, memo),
            copy.deepcopy(self.name, memo),
        )
# Register `TensorConstant` as the `Constant` subclass that `TensorType`
# instantiates for its constant values.
TensorType.constant_type = TensorConstant
class DenseVariableMeta(MetaType):
    """Metaclass whose `isinstance` check accepts plain `TensorVariable`\\s."""

    def __instancecheck__(self, o):
        # Plain (non-subclassed) `TensorVariable` instances count as dense.
        if type(o) == TensorVariable:
            return True
        return isinstance(o, DenseVariableMeta)
# NOTE(review): this subclasses `TensorType` (not `TensorVariable`); it exists
# only so `isinstance(x, DenseTensorVariable)` works via `DenseVariableMeta` —
# confirm before treating it as a real variable class.
class DenseTensorVariable(TensorType, metaclass=DenseVariableMeta):
    r"""A `Variable` for dense tensors.

    Instances of this class and `TensorVariable`\s are considered dense
    `Variable`\s.
    """
class DenseConstantMeta(MetaType):
    """Metaclass whose `isinstance` check accepts plain `TensorConstant`\\s."""

    def __instancecheck__(self, o):
        # Plain (non-subclassed) `TensorConstant` instances count as dense.
        if type(o) == TensorConstant:
            return True
        return isinstance(o, DenseConstantMeta)
# NOTE(review): this subclasses `TensorType` (not `TensorConstant`); it exists
# only so `isinstance(x, DenseTensorConstant)` works via `DenseConstantMeta` —
# confirm before treating it as a real constant class.
class DenseTensorConstant(TensorType, metaclass=DenseConstantMeta):
    r"""A `Constant` for dense tensors.

    Instances of this class and `TensorConstant`\s are considered dense
    `Constant`\s.
    """
pytensor/typed_list/basic.py
浏览文件 @
6d3c7568
...
...
@@ -8,7 +8,7 @@ from pytensor.graph.op import Op
from
pytensor.link.c.op
import
COp
from
pytensor.tensor.type
import
scalar
from
pytensor.tensor.type_other
import
SliceType
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
from
pytensor.typed_list.type
import
TypedListType
...
...
scripts/mypy-failing.txt
浏览文件 @
6d3c7568
...
...
@@ -32,4 +32,5 @@ pytensor/tensor/slinalg.py
pytensor/tensor/subtensor.py
pytensor/tensor/type.py
pytensor/tensor/type_other.py
pytensor/tensor/var.py
\ No newline at end of file
pytensor/tensor/variable.py
pytensor/tensor/nlinalg.py
\ No newline at end of file
tests/graph/test_basic.py
浏览文件 @
6d3c7568
...
...
@@ -33,7 +33,7 @@ from pytensor.graph.type import Type
from
pytensor.tensor.math
import
max_and_argmax
from
pytensor.tensor.type
import
TensorType
,
iscalars
,
matrix
,
scalars
,
vector
from
pytensor.tensor.type_other
import
NoneConst
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
from
tests.graph.utils
import
MyInnerGraphOp
...
...
tests/link/test_vm.py
浏览文件 @
6d3c7568
...
...
@@ -18,7 +18,7 @@ from pytensor.link.utils import map_storage
from
pytensor.link.vm
import
VM
,
Loop
,
Stack
,
VMLinker
from
pytensor.tensor.math
import
cosh
,
tanh
from
pytensor.tensor.type
import
lscalar
,
scalar
,
scalars
,
vector
,
vectors
from
pytensor.tensor.var
import
TensorConstant
from
pytensor.tensor.var
iable
import
TensorConstant
from
tests
import
unittest_tools
as
utt
...
...
tests/tensor/rewriting/test_math.py
浏览文件 @
6d3c7568
...
...
@@ -125,7 +125,7 @@ from pytensor.tensor.type import (
vectors
,
zscalar
,
)
from
pytensor.tensor.var
import
TensorConstant
from
pytensor.tensor.var
iable
import
TensorConstant
from
tests
import
unittest_tools
as
utt
...
...
tests/tensor/test_basic.py
浏览文件 @
6d3c7568
...
...
@@ -125,7 +125,7 @@ from pytensor.tensor.type import (
vectors
,
wvector
,
)
from
pytensor.tensor.var
import
TensorConstant
from
pytensor.tensor.var
iable
import
TensorConstant
from
pytensor.utils
import
PYTHON_INT_BITWIDTH
from
tests
import
unittest_tools
as
utt
from
tests.tensor.utils
import
(
...
...
tests/tensor/test_shape.py
浏览文件 @
6d3c7568
...
...
@@ -47,7 +47,7 @@ from pytensor.tensor.type import (
vector
,
)
from
pytensor.tensor.type_other
import
NoneConst
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
from
pytensor.typed_list
import
make_list
from
tests
import
unittest_tools
as
utt
from
tests.graph.utils
import
MyType2
...
...
tests/tensor/test_var.py
→
tests/tensor/test_var
iable
.py
浏览文件 @
6d3c7568
...
...
@@ -29,7 +29,7 @@ from pytensor.tensor.type import (
tensor3
,
)
from
pytensor.tensor.type_other
import
MakeSlice
,
NoneConst
from
pytensor.tensor.var
import
(
from
pytensor.tensor.var
iable
import
(
DenseTensorConstant
,
DenseTensorVariable
,
TensorConstant
,
...
...
@@ -405,3 +405,15 @@ class TestTensorInstanceMethods:
assert_array_equal
(
X
.
take
(
indices
,
1
)
.
eval
({
X
:
x
}),
x
.
take
(
indices
,
1
))
# Test equivalent advanced indexing
assert_array_equal
(
X
[:,
indices
]
.
eval
({
X
:
x
}),
x
[:,
indices
])
def
test_deprecated_import
():
with
pytest
.
warns
(
DeprecationWarning
,
match
=
"The module 'pytensor.tensor.var' has been deprecated."
,
):
import
pytensor.tensor.var
as
_var
# Make sure the deprecated import provides access to 'variable' module
assert
hasattr
(
_var
,
"TensorVariable"
)
assert
hasattr
(
_var
,
"TensorConstant"
)
tests/typed_list/test_basic.py
浏览文件 @
6d3c7568
...
...
@@ -13,7 +13,7 @@ from pytensor.tensor.type import (
vector
,
)
from
pytensor.tensor.type_other
import
SliceType
from
pytensor.tensor.var
import
TensorVariable
from
pytensor.tensor.var
iable
import
TensorVariable
from
pytensor.typed_list.basic
import
(
Append
,
Count
,
...
...
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论