提交 02974b9d authored 作者: Maxim Kochurov's avatar Maxim Kochurov 提交者: Maxim Kochurov

remove deprecated pytensor.tensor.nnet

上级 15637d23
...@@ -77,7 +77,7 @@ jobs: ...@@ -77,7 +77,7 @@ jobs:
- "tests/tensor tests/sparse --ignore=tests/tensor/test_basic.py --ignore=tests/tensor/test_math.py --ignore=tests/tensor/test_math_scipy.py --ignore=tests/tensor/test_inplace.py --ignore=tests/tensor/test_elemwise.py --ignore=tests/tensor/rewriting/test_basic.py --ignore=tests/tensor/rewriting/test_math.py --ignore=tests/tensor/nnet --ignore=tests/tensor/signal" - "tests/tensor tests/sparse --ignore=tests/tensor/test_basic.py --ignore=tests/tensor/test_math.py --ignore=tests/tensor/test_math_scipy.py --ignore=tests/tensor/test_inplace.py --ignore=tests/tensor/test_elemwise.py --ignore=tests/tensor/rewriting/test_basic.py --ignore=tests/tensor/rewriting/test_math.py --ignore=tests/tensor/nnet --ignore=tests/tensor/signal"
- "tests/tensor/test_basic.py tests/tensor/test_math.py tests/tensor/test_math_scipy.py tests/tensor/test_inplace.py" - "tests/tensor/test_basic.py tests/tensor/test_math.py tests/tensor/test_math_scipy.py tests/tensor/test_inplace.py"
- "tests/tensor/test_elemwise.py tests/tensor/rewriting/test_basic.py tests/tensor/rewriting/test_math.py" - "tests/tensor/test_elemwise.py tests/tensor/rewriting/test_basic.py tests/tensor/rewriting/test_math.py"
- "tests/tensor/nnet/test_conv.py" - "tests/tensor/conv/test_abstract_conv.py"
include: include:
- python-version: "3.7" - python-version: "3.7"
fast-compile: 1 fast-compile: 1
......
=========================================
:mod:`tensor.conv` -- Tensor Convolutions
=========================================
.. module:: tensor.conv
:platform: Unix, Windows
:synopsis: Tensor Convolutions
.. moduleauthor:: LISA, PyMC Developers, PyTensor Developers
.. automodule:: pytensor.tensor.conv
:members:
\ No newline at end of file
...@@ -26,5 +26,6 @@ They are grouped into the following sections: ...@@ -26,5 +26,6 @@ They are grouped into the following sections:
slinalg slinalg
nlinalg nlinalg
fft fft
conv
math_opt math_opt
basic_opt basic_opt
# Deprecation shim: importing `pytensor.scalar.basic_scipy` warns that the
# module now lives at `pytensor.scalar.math`.  stacklevel=2 points the
# warning at the importing module rather than at this shim.
import warnings

warnings.warn(
    "The module `pytensor.scalar.basic_scipy` is deprecated "
    "and has been renamed to `pytensor.scalar.math`",
    DeprecationWarning,
    stacklevel=2,
)
# Public API of `pytensor.tensor.conv`: re-export the user-facing
# convolution helpers from the `abstract_conv` implementation module.
from .abstract_conv import (
    bilinear_upsampling,
    causal_conv1d,
    conv2d,
    conv2d_transpose,
    conv3d,
    frac_bilinear_upsampling,
    separable_conv2d,
    separable_conv3d,
)
...@@ -5,14 +5,8 @@ Abstract conv interface ...@@ -5,14 +5,8 @@ Abstract conv interface
import logging import logging
import sys import sys
try:
from math import gcd
except ImportError:
from fractions import gcd
import warnings import warnings
from math import gcd
import numpy as np import numpy as np
...@@ -35,8 +29,7 @@ from pytensor.tensor.exceptions import NotScalarConstantError ...@@ -35,8 +29,7 @@ from pytensor.tensor.exceptions import NotScalarConstantError
from pytensor.tensor.var import TensorConstant, TensorVariable from pytensor.tensor.var import TensorConstant, TensorVariable
__docformat__ = "restructuredtext en" _logger = logging.getLogger(__name__)
_logger = logging.getLogger("pytensor.tensor.nnet.abstract_conv")
def get_conv_output_shape( def get_conv_output_shape(
...@@ -678,7 +671,7 @@ def abstract_conv2d( ...@@ -678,7 +671,7 @@ def abstract_conv2d(
stack of 2D inputs with a set of 2D filters. The implementation is modelled stack of 2D inputs with a set of 2D filters. The implementation is modelled
after Convolutional Neural Networks (CNN). after Convolutional Neural Networks (CNN).
Refer to :func:`nnet.conv2d <pytensor.tensor.nnet.conv2d>` for a more detailed documentation. Refer to :func:`nnet.conv2d <pytensor.tensor.conv.conv2d>` for a more detailed documentation.
""" """
input = as_tensor_variable(input) input = as_tensor_variable(input)
...@@ -2430,7 +2423,7 @@ class BaseAbstractConv(Op): ...@@ -2430,7 +2423,7 @@ class BaseAbstractConv(Op):
class AbstractConv(BaseAbstractConv): class AbstractConv(BaseAbstractConv):
"""Abstract Op for the forward convolution. """Abstract Op for the forward convolution.
Refer to :func:`BaseAbstractConv <pytensor.tensor.nnet.abstract_conv.BaseAbstractConv>` Refer to :func:`BaseAbstractConv <pytensor.tensor.conv.abstract_conv.BaseAbstractConv>`
for a more detailed documentation. for a more detailed documentation.
""" """
...@@ -2646,7 +2639,7 @@ class AbstractConv(BaseAbstractConv): ...@@ -2646,7 +2639,7 @@ class AbstractConv(BaseAbstractConv):
class AbstractConv2d(AbstractConv): class AbstractConv2d(AbstractConv):
"""Abstract Op for the forward convolution. """Abstract Op for the forward convolution.
Refer to :func:`BaseAbstractConv <pytensor.tensor.nnet.abstract_conv.BaseAbstractConv>` Refer to :func:`BaseAbstractConv <pytensor.tensor.conv.abstract_conv.BaseAbstractConv>`
for a more detailed documentation. for a more detailed documentation.
""" """
...@@ -2708,7 +2701,7 @@ class AbstractConv2d(AbstractConv): ...@@ -2708,7 +2701,7 @@ class AbstractConv2d(AbstractConv):
class AbstractConv3d(AbstractConv): class AbstractConv3d(AbstractConv):
"""Abstract Op for the forward convolution. """Abstract Op for the forward convolution.
Refer to :func:`BaseAbstractConv <pytensor.tensor.nnet.abstract_conv.BaseAbstractConv>` Refer to :func:`BaseAbstractConv <pytensor.tensor.conv.abstract_conv.BaseAbstractConv>`
for a more detailed documentation. for a more detailed documentation.
""" """
...@@ -3489,11 +3482,9 @@ def conv2d( ...@@ -3489,11 +3482,9 @@ def conv2d(
border_mode="valid", border_mode="valid",
subsample=(1, 1), subsample=(1, 1),
filter_flip=True, filter_flip=True,
image_shape=None,
filter_dilation=(1, 1), filter_dilation=(1, 1),
num_groups=1, num_groups=1,
unshared=False, unshared=False,
**kwargs,
): ):
""" """
This function will build the symbolic graph for convolving a mini-batch of a This function will build the symbolic graph for convolving a mini-batch of a
...@@ -3584,36 +3575,6 @@ def conv2d( ...@@ -3584,36 +3575,6 @@ def conv2d(
of shape (batch size, output channels, output rows, output columns) of shape (batch size, output channels, output rows, output columns)
""" """
if "imshp_logical" in kwargs or "kshp_logical" in kwargs:
raise ValueError(
"Keyword arguments 'imshp_logical' and 'kshp_logical' for conv2d "
"are not supported anymore (and have not been a reliable way to "
"perform upsampling). That feature is still available by calling "
"pytensor.tensor.nnet.conv.conv2d() for the time being."
)
if len(kwargs.keys()) > 0:
warnings.warn(
str(kwargs.keys()) + " are now deprecated in "
"`tensor.nnet.abstract_conv.conv2d` interface"
" and will be ignored.",
stacklevel=2,
)
if image_shape is not None:
warnings.warn(
"The `image_shape` keyword argument to "
"`tensor.nnet.conv2d` is deprecated, it has been "
"renamed to `input_shape`.",
stacklevel=2,
)
if input_shape is None:
input_shape = image_shape
else:
raise ValueError(
"input_shape and image_shape should not"
" be provided at the same time."
)
return abstract_conv2d( return abstract_conv2d(
input, input,
filters, filters,
......
# Deprecated `pytensor.tensor.nnet` package entry point: warn on import,
# then re-export the legacy public names so existing user code keeps
# working until the announced removal.
import warnings

warnings.warn(
    "The module `pytensor.tensor.nnet` is deprecated and will "
    "be removed from PyTensor in version 2.9.0",
    DeprecationWarning,
    stacklevel=2,
)

# Imported for its side effects — presumably registers graph rewrites;
# TODO(review): confirm nothing here relies on the bound name itself.
import pytensor.tensor.nnet.rewriting
from pytensor.tensor.nnet.abstract_conv import (
    abstract_conv2d,
    conv2d,
    conv2d_grad_wrt_inputs,
    conv2d_transpose,
    conv3d,
    separable_conv2d,
)
from pytensor.tensor.nnet.basic import (
    binary_crossentropy,
    categorical_crossentropy,
    confusion_matrix,
    crossentropy_categorical_1hot,
    crossentropy_categorical_1hot_grad,
    crossentropy_softmax_1hot,
    crossentropy_softmax_1hot_with_bias,
    crossentropy_softmax_1hot_with_bias_dx,
    crossentropy_softmax_argmax_1hot_with_bias,
    crossentropy_softmax_max_and_argmax_1hot,
    crossentropy_softmax_max_and_argmax_1hot_with_bias,
    crossentropy_to_crossentropy_with_softmax,
    crossentropy_to_crossentropy_with_softmax_with_bias,
    elu,
    graph_merge_softmax_with_crossentropy_softmax,
    h_softmax,
    logsoftmax,
    prepend_0_to_each_row,
    prepend_1_to_each_row,
    prepend_scalar_to_each_row,
    relu,
    selu,
    sigmoid_binary_crossentropy,
    softmax,
    softmax_grad_legacy,
    softmax_legacy,
    softmax_simplifier,
    softmax_with_bias,
    softsign,
)
from pytensor.tensor.nnet.batchnorm import batch_normalization
from pytensor.tensor.nnet.sigm import hard_sigmoid, ultra_fast_sigmoid
差异被折叠。
差异被折叠。
from typing import List
import numpy as np
import pytensor
from pytensor.gradient import grad_undefined
from pytensor.graph.basic import Apply
from pytensor.graph.op import Op
from pytensor.tensor.type import discrete_dtypes
class SparseBlockGemv(Op):
    """
    This op computes the dot product of specified pieces of vectors
    and matrices, returning pieces of vectors::

        for b in range(batch_size):
            for j in range(o.shape[1]):
                for i in range(h.shape[1]):
                    o[b, j, :] += numpy.dot(h[b, i], W[iIdx[b, i], oIdx[b, j]])

    where b, h, W, o, iIdx, oIdx are defined in the docstring of `make_node`.

    .. image:: ../../../images/blocksparse.png
        :scale: 50 %

    """

    __props__ = ("inplace",)
    registered_opts: List = []

    def __init__(self, inplace=False):
        # When inplace, the output reuses (and destroys) the first input `o`.
        self.inplace = inplace
        if self.inplace:
            self.destroy_map = {0: [0]}

    def make_node(self, o, W, h, inputIdx, outputIdx):
        """
        Compute the dot product of the specified pieces of vectors
        and matrices.

        The parameter types are actually their expected shapes
        relative to each other.

        Parameters
        ----------
        o : batch, oWin, oSize
            output vector
        W : iBlocks, oBlocks, iSize, oSize
            weight matrix
        h : batch, iWin, iSize
            input from lower layer (sparse)
        inputIdx : batch, iWin
            indexes of the input blocks
        outputIdx : batch, oWin
            indexes of the output blocks

        Returns
        -------
        (batch, oWin, oSize)
            dot(W[i, j], h[i]) + o[j]

        Notes
        -----
        - `batch` is the number of examples in a minibatch (batch size).
        - `iBlocks` is the total number of blocks in the input (from lower
          layer).
        - `iSize` is the size of each of these input blocks.
        - `iWin` is the number of blocks that will be used as inputs. Which
          blocks will be used is specified in `inputIdx`.
        - `oBlocks` is the number or possible output blocks.
        - `oSize` is the size of each of these output blocks.
        - `oWin` is the number of output blocks that will actually be computed.
          Which blocks will be computed is specified in `outputIdx`.

        """
        o = pytensor.tensor.as_tensor_variable(o)
        W = pytensor.tensor.as_tensor_variable(W)
        h = pytensor.tensor.as_tensor_variable(h)
        inputIdx = pytensor.tensor.as_tensor_variable(inputIdx)
        outputIdx = pytensor.tensor.as_tensor_variable(outputIdx)

        if o.ndim != 3:
            # BUGFIX: message previously said "2D" while the check requires 3D.
            raise TypeError("The output o must be a 3D tensor")
        if W.ndim != 4:
            raise TypeError("The weight matrix W must be a 4D tensor")
        if h.ndim != 3:
            raise TypeError("The input h must be a 3D tensor")
        if inputIdx.ndim != 2:
            raise TypeError("The input indices inputIdx must be a 2D tensor")
        if outputIdx.ndim != 2:
            raise TypeError("The output indices outputIdx must be a 2D tensor")

        # Index tensors must hold integers (they are used for fancy indexing).
        assert inputIdx.type.dtype in discrete_dtypes
        assert outputIdx.type.dtype in discrete_dtypes

        return Apply(self, [o, W, h, inputIdx, outputIdx], [o.type()])

    def perform(self, node, inp, out_):
        # Reference (pure-NumPy) implementation of the block-sparse gemv.
        o, W, h, iIdx, oIdx = inp[:5]

        if not self.inplace:
            # Work on a copy so the input buffer is left untouched.
            o = o.copy()

        for b in range(o.shape[0]):
            for j in range(o.shape[1]):
                outputIdx = oIdx[b, j]

                for i in range(h.shape[1]):
                    inputIdx = iIdx[b, i]
                    w = W[inputIdx, outputIdx]
                    # Accumulate the contribution of input block i into
                    # output block j of example b.
                    o[b, j, :] += np.dot(h[b, i], w)

        out_[0][0] = o

    def infer_shape(self, fgraph, node, input_shapes):
        # Output has exactly the shape of the `o` input.
        return [input_shapes[0]]

    def grad(self, inputs, grads):
        o, W, h, inputIdx, outputIdx = inputs
        go = grads[0]

        outer_fun = SparseBlockOuter(self.inplace)
        gemv_fun = SparseBlockGemv(self.inplace)

        # dL/dW: outer products of the used h blocks with the output grads.
        Wgrad = outer_fun(W.zeros_like(), h, go, inputIdx, outputIdx)
        # dL/dh: transpose the block structure of W and run the gemv backward.
        hgrad = gemv_fun(
            h.zeros_like(), W.dimshuffle((1, 0, 3, 2)), go, outputIdx, inputIdx
        )
        return [
            go,
            Wgrad,
            hgrad,
            grad_undefined(self, 3, inputIdx, "grad of inputIdx makes no sense"),
            grad_undefined(self, 4, outputIdx, "grad of outputIdx makes no sense"),
        ]
class SparseBlockOuter(Op):
    """
    This computes the outer product of two sets of pieces of vectors
    updating a full matrix with the results::

        for b in range(batch_size):
            o[xIdx[b, i], yIdx[b, j]] += (alpha * outer(x[b, i], y[b, j]))

    This op is involved in the gradient of SparseBlockGemv.

    """

    __props__ = ("inplace",)
    registered_opts: List = []

    def __init__(self, inplace=False):
        # When inplace, the output reuses (and destroys) the first input `o`.
        self.inplace = inplace
        if self.inplace:
            self.destroy_map = {0: [0]}

    def make_node(self, o, x, y, xIdx, yIdx, alpha=None):
        """
        Compute the dot product of the specified pieces of vectors
        and matrices.

        The parameter types are actually their expected shapes
        relative to each other.

        Parameters
        ----------
        o : xBlocks, yBlocks, xSize, ySize
        x : batch, xWin, xSize
        y : batch, yWin, ySize
        xIdx : batch, iWin
            indexes of the x blocks
        yIdx : batch, oWin
            indexes of the y blocks

        Returns
        -------
        (xBlocks, yBlocks, xSize, ySize)
            outer(x[i], y[j]) + o[i, j]

        Notes
        -----
        - `batch` is the number of examples in a minibatch (batch size).
        - `xBlocks` is the total number of blocks in x.
        - `xSize` is the size of each of these x blocks.
        - `xWin` is the number of blocks that will be used as x. Which blocks
          will be used is specified in `xIdx`.
        - `yBlocks` is the number or possible y blocks.
        - `ySize` is the size of each of these y blocks.
        - `yWin` is the number of y blocks that will actually be computed.
          Which blocks will be computed is specified in `yIdx`.

        """
        # Default scaling factor is 1.0 (float32).
        one = pytensor.tensor.constant(np.asarray(1.0, dtype="float32"))
        o = pytensor.tensor.as_tensor_variable(o)
        x = pytensor.tensor.as_tensor_variable(x)
        y = pytensor.tensor.as_tensor_variable(y)

        if alpha is None:
            alpha = one

        return Apply(self, [o, x, y, xIdx, yIdx, alpha], [o.type()])

    def infer_shape(self, fgraph, node, input_shapes):
        # Output has exactly the shape of the `o` input.
        return [input_shapes[0]]

    def perform(self, node, inp, out_):
        o, x, y, xIdx, yIdx, alpha = inp[:6]

        if not self.inplace:
            # Work on a copy so the input buffer is left untouched.
            o = o.copy()

        for b in range(x.shape[0]):
            for i in range(xIdx.shape[1]):
                for j in range(yIdx.shape[1]):
                    # BUGFIX: apply `alpha` as documented in the class
                    # docstring; it was previously unpacked but ignored.
                    # Identical results for the default alpha of 1.0.
                    o[xIdx[b, i], yIdx[b, j]] += alpha * np.outer(
                        x[b, i], y[b, j, :]
                    )
        out_[0][0] = o
# Pre-built singleton instances of the block-sparse ops.  The "_inplace"
# variants destroy their first input (`o`) instead of copying it.
sparse_block_gemv = SparseBlockGemv(False)
sparse_block_gemv_inplace = SparseBlockGemv(True)
sparse_block_outer = SparseBlockOuter(False)
sparse_block_outer_inplace = SparseBlockOuter(True)
def sparse_block_dot(W, h, inputIdx, b, outputIdx):
    """
    Compute the dot product (plus bias) of the specified pieces of vectors
    and matrices. See SparseBlockGemv to get more information.

    The parameter types are actually their expected shapes relative to
    each other.

    Parameters
    ----------
    W : iBlocks, oBlocks, iSize, oSize
        weight matrix
    h : batch, iWin, iSize
        input from lower layer (sparse)
    inputIdx : batch, iWin
        indexes of the input blocks
    b : oBlocks, oSize
        bias vector
    outputIdx : batch, oWin
        indexes of the output blocks

    Returns
    -------
    (batch, oWin, oSize)
        dot(W[i, j], h[i]) + b[j] but b[j] is only added once

    Notes
    -----
    - `batch` is the number of examples in a minibatch (batch size).
    - `iBlocks` is the total number of blocks in the input (from lower layer).
    - `iSize` is the size of each of these input blocks.
    - `iWin` is the number of blocks that will be used as inputs. Which blocks
      will be used is specified in `inputIdx`.
    - `oBlocks` is the number or possible output blocks.
    - `oSize` is the size of each of these output blocks.
    - `oWin` is the number of output blocks that will actually be computed.
      Which blocks will be computed is specified in `outputIdx`.

    """
    # Index tensors must have one dimension fewer than the data tensor.
    assert inputIdx.ndim == h.ndim - 1
    assert outputIdx.ndim == inputIdx.ndim

    if h.ndim == 2:
        # Unbatched call: promote everything to a batch of size one.
        h = h.dimshuffle("x", 0, 1)
        inputIdx = inputIdx.dimshuffle("x", 0)
        outputIdx = outputIdx.dimshuffle("x", 0)

    # Seed the accumulator with the bias rows selected by outputIdx, so the
    # bias is added exactly once per computed output block.
    biased_init = b.take(outputIdx, axis=0)
    return SparseBlockGemv()(biased_init, W, h, inputIdx, outputIdx)
#section support_code
/* Per-call state for one warp-ctc invocation: the library's option block
 * plus every heap buffer that must be released afterwards. */
typedef struct ctc_context {
    struct ctcOptions options;
    void * workspace;       /* scratch memory sized by get_workspace_size() */
    int * input_lengths;    /* per-example valid time-step counts */
    int * flat_labels;      /* all labels concatenated, padding removed */
    int * label_lengths;    /* per-example label counts */
} ctc_context_t;
/* Initialize a context for the CPU backend.  All owned pointers start as
 * NULL so ctc_context_destroy() is safe to call at any point afterwards. */
void ctc_context_init(ctc_context_t * context)
{
    struct ctcOptions * options = &(context->options);
    memset(options, 0, sizeof(struct ctcOptions));
    options->loc = CTC_CPU;
#if defined(_OPENMP)
    /* Match the current OpenMP team size; 1 when built without OpenMP. */
    options->num_threads = omp_get_num_threads();
#else
    options->num_threads = 1;
#endif

    context->workspace = NULL;
    context->input_lengths = NULL;
    context->flat_labels = NULL;
    context->label_lengths = NULL;
}
/* Release every buffer owned by the context.  free(NULL) is a no-op, so
 * this is correct regardless of which allocations actually succeeded. */
void ctc_context_destroy(ctc_context_t * context)
{
    free( context->workspace );
    free( context->input_lengths );
    free( context->flat_labels );
    free( context->label_lengths );
}
/* Translate a warp-ctc status code into a Python RuntimeError.
 * Returns 1 with the Python exception set on failure, 0 on success. */
int ctc_check_result(ctcStatus_t retcode, const char * msg)
{
    if( CTC_STATUS_SUCCESS != retcode )
    {
        // Get error message from underlying library
        const char * ctc_msg = ctcGetStatusString( retcode );
        PyErr_Format( PyExc_RuntimeError,
                      "ConnectionistTemporalClassification: %s CTC error: %s",
                      msg,
                      ctc_msg );
        return 1;
    }
    return 0;
}
/* Copy the per-example input lengths from a NumPy array into a freshly
 * allocated plain-int buffer (the layout warp-ctc expects).
 * On allocation failure *input_lengths is left NULL and NO Python error is
 * set here — the caller is responsible for checking and raising.
 * NOTE(review): elements are read as npy_int — assumes the array dtype
 * matches; confirm at the call site. */
void create_contiguous_input_lengths( PyArrayObject * input_lengths_arr,
    int ** input_lengths )
{
    npy_int num_elements = PyArray_DIMS( input_lengths_arr )[0];

    *input_lengths = (int *) calloc( num_elements, sizeof(int) );
    if ( NULL == (*input_lengths) )
        return;

    for( npy_int elem_idx = 0; elem_idx < num_elements; ++elem_idx )
    {
        (*input_lengths)[elem_idx] = *( (npy_int *) PyArray_GETPTR1( input_lengths_arr, elem_idx ) );
    }
}
/* Flatten a (batch, max_len) label matrix into the two arrays warp-ctc
 * expects: all non-negative labels concatenated row by row, plus a
 * per-row count of valid labels.  Negative entries are treated as padding
 * and skipped.  On allocation failure both outputs are left NULL and NO
 * Python error is set here — the caller must check and raise. */
void create_flat_labels( PyArrayObject * label_matrix, int ** flat_labels,
    int ** label_lengths )
{
    npy_int rows = PyArray_DIMS( label_matrix )[0];
    npy_int cols = PyArray_DIMS( label_matrix )[1];

    *flat_labels = (int *) calloc( rows * cols, sizeof(int) );
    if ( NULL == (*flat_labels) )
        return;

    *label_lengths = (int *) calloc( rows, sizeof(int) );
    if ( NULL == (*label_lengths) )
    {
        /* Partial failure: release the first buffer so the caller sees a
         * consistent all-NULL state. */
        free( *flat_labels );
        *flat_labels = NULL;
        return;
    }

    npy_int label_index = 0;
    for( npy_int row_idx = 0; row_idx < rows; ++row_idx )
    {
        npy_int label_length = 0;
        for( npy_int col_idx = 0; col_idx < cols; ++col_idx )
        {
            npy_int label = *( (npy_int *) PyArray_GETPTR2( label_matrix, row_idx, col_idx ) );
            if ( label >= 0 ) // negative values are assumed to be padding
            {
                (*flat_labels)[ label_index++ ] = label;
                ++label_length;
            }
        }
        (*label_lengths)[ row_idx ] = label_length;
    }
}
#section support_code_apply

/* Compute the CTC loss (and optionally its gradient w.r.t. the
 * activations) for a minibatch using warp-ctc's CPU implementation.
 *
 * in_activations   : C-contiguous float32 array; dims 1 and 2 are read as
 *                    (minibatch, alphabet) — dim 0 is presumably time
 *                    steps (warp-ctc convention); confirm at the Op level.
 * in_labels        : integer matrix, negative entries are padding.
 * in_input_lengths : per-example valid time-step counts.
 * out_costs        : (in/out) float32 vector of length minibatch; existing
 *                    storage is reused when correctly shaped.
 * out_gradients    : (in/out) float32 array shaped like the activations,
 *                    or NULL to skip gradient computation entirely.
 *
 * Returns 0 on success; 1 with a Python exception set on any failure.
 * Every error path destroys the context before returning so no buffers
 * leak. */
int APPLY_SPECIFIC(ctc_cost_cpu)(PyArrayObject *  in_activations,
                                 PyArrayObject *  in_labels,
                                 PyArrayObject *  in_input_lengths,
                                 PyArrayObject ** out_costs,
                                 PyArrayObject ** out_gradients)
{
    /* Stack-allocated context; owned heap buffers are freed via
     * ctc_context_destroy() on every exit path after this point. */
    ctc_context_t ctc_object;
    ctc_context_t * context = &ctc_object;
    ctc_context_init( context );

    /* warp-ctc reads the activation buffer directly, so it must be
     * C-contiguous. */
    if ( !PyArray_IS_C_CONTIGUOUS( in_activations ) )
    {
        PyErr_SetString( PyExc_RuntimeError,
            "ConnectionistTemporalClassification: activations array must be C-contiguous." );
        return 1;
    }

    npy_float32 * activations = (npy_float32 *) PyArray_DATA( in_activations );

    create_contiguous_input_lengths( in_input_lengths, &(context->input_lengths) );

    if ( NULL == context->input_lengths )
    {
        // Destroy previous CTC context before returning exception
        ctc_context_destroy( context );
        PyErr_Format( PyExc_MemoryError,
            "ConnectionistTemporalClassification: Could not allocate memory for input lengths" );
        return 1;
    }

    // flatten labels to conform with library memory layout
    create_flat_labels( in_labels, &(context->flat_labels), &(context->label_lengths) );

    if ( ( NULL == context->label_lengths ) || ( NULL == context->flat_labels ) )
    {
        // Destroy previous CTC context before returning exception
        ctc_context_destroy( context );
        PyErr_Format( PyExc_MemoryError,
            "ConnectionistTemporalClassification: Could not allocate memory for labels and their lengths" );
        return 1;
    }

    npy_int minibatch_size = PyArray_DIMS( in_activations )[1];
    npy_int alphabet_size = PyArray_DIMS( in_activations )[2];

    npy_float32 * costs = NULL;
    npy_intp cost_size = minibatch_size;

    /* Reuse the existing output array when possible; otherwise drop it and
     * allocate a correctly-shaped zeroed replacement. */
    if ( (*out_costs) == NULL ||                       // Symbolic variable has no memory backing
         PyArray_NDIM( *out_costs ) != 1 ||            // or, matrix has the wrong size
         PyArray_DIMS( *out_costs )[0] != cost_size )
    {
        Py_XDECREF( *out_costs );
        // Allocate new matrix
        *out_costs = (PyArrayObject *) PyArray_ZEROS( 1, &cost_size, NPY_FLOAT32, 0 );

        if ( NULL == (*out_costs) )
        {
            // Destroy previous CTC context before returning exception
            ctc_context_destroy( context );
            PyErr_Format( PyExc_MemoryError,
                "ConnectionistTemporalClassification: Could not allocate memory for CTC costs" );
            return 1;
        }
    }

    costs = (npy_float32 *) PyArray_DATA( *out_costs );

    npy_float32 * gradients = NULL;

    if ( NULL != out_gradients )  // If gradient computation is not disabled
    {
        /* Same reuse-or-reallocate pattern for the gradient array, which
         * must match the activations' full 3-D shape. */
        if ( NULL == (*out_gradients) ||  // Symbolic variable has no real backing
             PyArray_NDIM( *out_gradients ) != 3 ||
             PyArray_DIMS( *out_gradients )[0] != PyArray_DIMS( in_activations )[0] ||
             PyArray_DIMS( *out_gradients )[1] != PyArray_DIMS( in_activations )[1] ||
             PyArray_DIMS( *out_gradients )[2] != PyArray_DIMS( in_activations )[2] )
        {
            // Existing matrix is the wrong size. Make a new one.
            // Decrement ref counter to existing array
            Py_XDECREF( *out_gradients );
            // Allocate new array
            *out_gradients = (PyArrayObject *) PyArray_ZEROS(3, PyArray_DIMS( in_activations ),
                NPY_FLOAT32, 0);

            if ( NULL == (*out_gradients) )
            {
                // Destroy previous CTC context before returning exception
                ctc_context_destroy( context );
                PyErr_Format( PyExc_MemoryError,
                    "ConnectionistTemporalClassification: Could not allocate memory for CTC gradients!" );
                return 1;
            }
        }
        gradients = (npy_float32 *) PyArray_DATA( *out_gradients );
    }

    size_t cpu_workspace_size;
    int ctc_error;

    /* Ask warp-ctc how much scratch memory this problem needs. */
    ctc_error = ctc_check_result( get_workspace_size( context->label_lengths,
        context->input_lengths, alphabet_size, minibatch_size, context->options,
        &cpu_workspace_size ),
        "Failed to obtain CTC workspace size." );

    if ( ctc_error )  // Exception is set by ctc_check_result, return error here
    {
        // Destroy previous CTC context before returning exception
        ctc_context_destroy( context );

        return 1;
    }

    context->workspace = malloc( cpu_workspace_size );

    if ( NULL == context->workspace )
    {
        // Destroy previous CTC context before returning exception
        ctc_context_destroy( context );
        PyErr_Format( PyExc_MemoryError,
            "ConnectionistTemporalClassification: Failed to allocate memory for CTC workspace." );
        return 1;
    }

    /* Run the actual loss (and, if `gradients` is non-NULL, gradient)
     * computation. */
    ctc_error = ctc_check_result( compute_ctc_loss( activations, gradients,
        context->flat_labels, context->label_lengths, context->input_lengths,
        alphabet_size, minibatch_size, costs, context->workspace,
        context->options ), "Failed to compute CTC loss function." );

    if ( ctc_error )  // Exception is set by ctc_check_result, return error here
    {
        ctc_context_destroy( context );
        return 1;
    }

    ctc_context_destroy( context );

    return 0;
}
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
差异被折叠。
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论