提交 ec6a3153 authored 作者: Ricardo Vieira's avatar Ricardo Vieira 提交者: Ricardo Vieira

Update README dprint output

上级 1944353c
...@@ -22,69 +22,69 @@ Getting started ...@@ -22,69 +22,69 @@ Getting started
.. code-block:: python .. code-block:: python
import pytensor import pytensor
from pytensor import tensor as pt from pytensor import tensor as pt
# Declare two symbolic floating-point scalars # Declare two symbolic floating-point scalars
a = pt.dscalar("a") a = pt.dscalar("a")
b = pt.dscalar("b") b = pt.dscalar("b")
# Create a simple example expression # Create a simple example expression
c = a + b c = a + b
# Convert the expression into a callable object that takes `(a, b)` # Convert the expression into a callable object that takes `(a, b)`
# values as input and computes the value of `c`. # values as input and computes the value of `c`.
f_c = pytensor.function([a, b], c) f_c = pytensor.function([a, b], c)
assert f_c(1.5, 2.5) == 4.0 assert f_c(1.5, 2.5) == 4.0
# Compute the gradient of the example expression with respect to `a` # Compute the gradient of the example expression with respect to `a`
dc = pytensor.grad(c, a) dc = pytensor.grad(c, a)
f_dc = pytensor.function([a, b], dc) f_dc = pytensor.function([a, b], dc)
assert f_dc(1.5, 2.5) == 1.0 assert f_dc(1.5, 2.5) == 1.0
# Compiling functions with `pytensor.function` also optimizes # Compiling functions with `pytensor.function` also optimizes
# expression graphs by removing unnecessary operations and # expression graphs by removing unnecessary operations and
# replacing computations with more efficient ones. # replacing computations with more efficient ones.
v = pt.vector("v") v = pt.vector("v")
M = pt.matrix("M") M = pt.matrix("M")
d = a/a + (M + a).dot(v) d = a/a + (M + a).dot(v)
pytensor.dprint(d) pytensor.dprint(d)
# Elemwise{add,no_inplace} [id A] '' # Add [id A]
# |InplaceDimShuffle{x} [id B] '' # ├─ ExpandDims{axis=0} [id B]
# | |Elemwise{true_div,no_inplace} [id C] '' # │ └─ True_div [id C]
# | |a [id D] # │ ├─ a [id D]
# | |a [id D] # │ └─ a [id D]
# |dot [id E] '' # └─ dot [id E]
# |Elemwise{add,no_inplace} [id F] '' # ├─ Add [id F]
# | |M [id G] # │ ├─ M [id G]
# | |InplaceDimShuffle{x,x} [id H] '' # │ └─ ExpandDims{axes=[0, 1]} [id H]
# | |a [id D] # │ └─ a [id D]
# |v [id I] # └─ v [id I]
f_d = pytensor.function([a, v, M], d) f_d = pytensor.function([a, v, M], d)
# `a/a` -> `1` and the dot product is replaced with a BLAS function # `a/a` -> `1` and the dot product is replaced with a BLAS function
# (i.e. CGemv) # (i.e. CGemv)
pytensor.dprint(f_d) pytensor.dprint(f_d)
# Elemwise{Add}[(0, 1)] [id A] '' 5 # Add [id A] 5
# |TensorConstant{(1,) of 1.0} [id B] # ├─ [1.] [id B]
# |CGemv{inplace} [id C] '' 4 # └─ CGemv{inplace} [id C] 4
# |AllocEmpty{dtype='float64'} [id D] '' 3 # ├─ AllocEmpty{dtype='float64'} [id D] 3
# | |Shape_i{0} [id E] '' 2 # │ └─ Shape_i{0} [id E] 2
# | |M [id F] # │ └─ M [id F]
# |TensorConstant{1.0} [id G] # ├─ 1.0 [id G]
# |Elemwise{add,no_inplace} [id H] '' 1 # ├─ Add [id H] 1
# | |M [id F] # │ ├─ M [id F]
# | |InplaceDimShuffle{x,x} [id I] '' 0 # │ └─ ExpandDims{axes=[0, 1]} [id I] 0
# | |a [id J] # │ └─ a [id J]
# |v [id K] # ├─ v [id K]
# |TensorConstant{0.0} [id L] # └─ 0.0 [id L]
See `the PyTensor documentation <https://pytensor.readthedocs.io/en/latest/>`__ for in-depth tutorials. See `the PyTensor documentation <https://pytensor.readthedocs.io/en/latest/>`__ for in-depth tutorials.
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论