Unverified commit 4669be82, authored by Brandon T. Willard, committed by GitHub

Merge pull request #194 from brandonwillard/update-warnings

Update print warnings to use warnings.warn
import theano
from theano.gof.utils import give_variables_names, remove, unique
def test_give_variables_names():
    """After naming, every variable carries a non-empty, unique name."""
    a = theano.tensor.matrix("x")
    b = a + 1
    c = theano.tensor.dot(a, b)
    all_vars = (a, b, c)

    give_variables_names(all_vars)

    assert all(v.name for v in all_vars)
    assert unique([v.name for v in all_vars])
def test_give_variables_names_idempotence():
    """Calling give_variables_names twice must not change any name."""
    a = theano.tensor.matrix("x")
    b = a + 1
    c = theano.tensor.dot(a, b)
    all_vars = (a, b, c)

    give_variables_names(all_vars)
    first_pass = [v.name for v in all_vars]

    give_variables_names(all_vars)
    second_pass = [v.name for v in all_vars]

    assert first_pass == second_pass
def test_give_variables_names_small():
    """Naming also works on the variables of a FunctionGraph."""
    inp = theano.tensor.matrix("x")
    out = theano.tensor.dot(inp, inp)
    graph = theano.FunctionGraph((inp,), (out,))

    give_variables_names(graph.variables)

    assert all(v.name for v in graph.variables)
    assert unique([v.name for v in graph.variables])
from theano.gof.utils import remove
def test_remove():
......
......@@ -4114,7 +4114,7 @@ def test_make_column_matrix_broadcastable():
assert (f(np.zeros((3, 1))) + np.ones(2) == np.ones((3, 2))).all()
def test_flatten_outdimNone():
def test_flatten_ndim_default():
a = dmatrix()
c = flatten(a)
f = inplace_func([a], c)
......@@ -4178,9 +4178,6 @@ def test_flatten_ndim2_of_3():
flatten_2 = partial(flatten, ndim=2)
utt.verify_grad(flatten_2, [a_val])
# test outdim parameter name
flatten_2 = partial(flatten, outdim=2)
utt.verify_grad(flatten_2, [a_val])
def test_flatten_broadcastable():
......@@ -4219,7 +4216,7 @@ def test_flatten_ndim_invalid():
def test_is_flat():
# tests is_flat method for constant and symbolic variables,
# as well as reshaped constant and symbolic variables on the
# given outdim
# given `ndim`
# Constant variable
assert tt.is_flat(tt.as_tensor_variable(np.zeros(10)))
......@@ -6251,10 +6248,10 @@ class TestInferShape(utt.InferShapeTester):
# Flatten
atens3 = tensor3()
atens3_val = rand(4, 5, 3)
for outdim in (3, 2, 1):
for ndim in (3, 2, 1):
self._compile_and_check(
[atens3],
[flatten(atens3, outdim)],
[flatten(atens3, ndim)],
[atens3_val],
Reshape,
excluding=["local_useless_reshape"],
......@@ -6262,10 +6259,10 @@ class TestInferShape(utt.InferShapeTester):
amat = matrix()
amat_val = rand(4, 5)
for outdim in (2, 1):
for ndim in (2, 1):
self._compile_and_check(
[amat],
[flatten(amat, outdim)],
[flatten(amat, ndim)],
[amat_val],
Reshape,
excluding=["local_useless_reshape"],
......@@ -6273,10 +6270,10 @@ class TestInferShape(utt.InferShapeTester):
avec = vector()
avec_val = rand(4)
outdim = 1
ndim = 1
self._compile_and_check(
[avec],
[flatten(avec, outdim)],
[flatten(avec, ndim)],
[avec_val],
Reshape,
excluding=["local_useless_reshape"],
......
import sys
from warnings import warn
import numpy as np
import pytest
......@@ -197,7 +197,7 @@ class TestCGemv(OptimizationTestMixin):
def test_force_gemv_init(self):
if check_force_gemv_init():
sys.stderr.write(
warn(
"WARNING: The current BLAS requires Theano to initialize"
+ " memory for some GEMV calls which will result in a minor"
+ " degradation in performance for such calls."
......
......@@ -13,6 +13,7 @@ import sys
from io import StringIO
from itertools import chain
from itertools import product as itertools_product
from warnings import warn
import numpy as np
......@@ -934,7 +935,7 @@ def _check_strides_match(a, b, warn_err, op):
if warn_err == 2:
raise e
else:
print("WARNING:", e, file=sys.stderr)
warn(str(e))
def _lessbroken_deepcopy(a):
......@@ -2459,7 +2460,7 @@ class _Maker(FunctionMaker): # inheritance buys a few helper functions
l0 = fgraph0.equivalence_tracker.event_list
if li != l0:
infolog = StringIO()
print("WARNING: Optimization process is unstable...", file=infolog)
print("Optimization process is unstable...", file=infolog)
print(
" (HINT: Ops that the nodes point to must compare " "equal)",
file=infolog,
......
......@@ -123,7 +123,7 @@ class AddDestroyHandler(gof.GlobalOptimizer):
break
if not supervisor_added:
warnings.warn(
"WARNING: Supervisor is not added. Please build a FunctionGraph"
"Supervisor is not added. Please build a FunctionGraph"
"via theano.compile.function.types.std_graph()"
"or add the Supervisor class manually.",
stacklevel=3,
......
......@@ -2014,7 +2014,7 @@ class GCC_compiler(Compiler):
and "icpc" not in theano.config.cxx
):
_logger.warning(
"OPTIMIZATION WARNING: your Theano flag `cxx` seems not to be"
"Your Theano flag `cxx` seems not to be"
" the g++ compiler. So we disable the compiler optimization"
" specific to g++ that tell to compile for a specific CPU."
" At worst, this could cause slow down.\n"
......@@ -2083,7 +2083,7 @@ class GCC_compiler(Compiler):
else:
reported_lines = native_lines
_logger.warning(
"OPTIMIZATION WARNING: Theano was not able to find the"
"Theano was not able to find the"
" g++ parameters that tune the compilation to your "
" specific CPU. This can slow down the execution of Theano"
" functions. Please submit the following lines to"
......@@ -2095,7 +2095,7 @@ class GCC_compiler(Compiler):
_logger.info(f"g++ default lines: {default_lines}")
if len(default_lines) < 1:
_logger.warning(
"OPTIMIZATION WARNING: Theano was not able to find the"
"Theano was not able to find the"
" default g++ parameters. This is needed to tune"
" the compilation to your specific"
" CPU. This can slow down the execution of Theano"
......
......@@ -3,6 +3,7 @@ import traceback
from copy import copy, deepcopy
from io import StringIO
from sys import getsizeof
from warnings import warn
import numpy as np
......@@ -334,7 +335,7 @@ def raise_with_op(fgraph, node, thunk=None, exc_info=None, storage_map=None):
str(exc_value) + detailed_err_msg + "\n" + "\n".join(hints)
)
except TypeError:
print(f"WARNING: {exc_type} error does not allow us to add extra error message")
warn(f"{exc_type} error does not allow us to add extra error message")
# Some exception need extra parameter in inputs. So forget the
# extra long error message in that case.
raise exc_value.with_traceback(exc_trace)
......
......@@ -610,17 +610,15 @@ class ReplaceValidate(History, Validator):
if rm in fgraph.apply_nodes or rm in fgraph.variables:
fgraph.revert(chk)
if warn:
out = sys.stderr
print(
"WARNING: An optimization wanted to replace a Variable"
warn(
"An optimization wanted to replace a Variable"
" in the graph, but the replacement for it doesn't"
" remove it. We disabled the optimization."
" Your function runs correctly, but it would be"
" appreciated if you submit this problem to the"
" mailing list theano-users so that we can fix it.",
file=out,
" mailing list theano-users so that we can fix it."
f"{reason}: {replacements}",
)
print(reason, replacements, file=out)
raise ReplacementDidNotRemoveError()
def __getstate__(self):
......
......@@ -376,36 +376,6 @@ def memoize(f):
return rval
def deprecated(filename, msg=""):
    """
    Decorator that emits a ``DeprecationWarning`` on the first call only.

    Use it like this::

        @deprecated('myfile', 'do something different...')
        def fn_name(...)
            ...

    And the first call will warn::

        myfile.fn_name deprecated. do something different...

    Parameters
    ----------
    filename : str
        Name reported in the warning message (usually the module the
        decorated function lives in).
    msg : str
        Extra guidance appended to the warning message.
    """
    # Local imports keep this fix self-contained within the block.
    import warnings
    from functools import wraps

    def _deprecated(f):
        # Mutable cell so the closure can flip the flag after first use.
        warned = [False]

        @wraps(f)
        def g(*args, **kwargs):
            if not warned[0]:
                # Fix: the original printed the literal "(unknown)" and
                # ignored `filename`; it also used print instead of a
                # filterable DeprecationWarning.
                warnings.warn(
                    f"{filename}.{f.__name__} deprecated. {msg}",
                    category=DeprecationWarning,
                    stacklevel=2,
                )
                warned[0] = True
            return f(*args, **kwargs)

        return g

    return _deprecated
def uniq(seq):
"""
Do not use set, this must always return the same value at the same index.
......@@ -617,10 +587,6 @@ def flatten(a):
return [a]
def unique(x):
    """Return True if and only if no element of `x` occurs twice."""
    distinct = set(x)
    return len(distinct) == len(x)
def hist(coll):
counts = {}
for elem in coll:
......@@ -628,31 +594,6 @@ def hist(coll):
return counts
@deprecated("theano.gof.utils", msg="Use a_theano_variable.auto_name instead")
def give_variables_names(variables):
    """
    Gives unique names to an iterable of variables. Modifies input.

    This function is idempotent.
    """
    name_counts = hist([v.name for v in variables])

    def needs_rename(v):
        # A variable must be renamed when it has no name at all or
        # shares its name with another variable.
        return (not v.name) or name_counts[v.name] > 1

    for idx, v in enumerate(filter(needs_rename, variables)):
        v.name = (v.name or "") + f"_{int(idx)}"

    if not unique([str(v) for v in variables]):
        raise ValueError(
            "Not all variables have unique names. Maybe you've "
            "named some of the variables identically"
        )
    return variables
def remove(predicate, coll):
"""
Return those items of collection for which predicate(item) is true.
......
......@@ -24,11 +24,21 @@ try:
except ImportError:
pygpu = None
from . import ctc, dnn, extra_ops, fft, multinomial, opt, reduction, rng_mrg, sort
from .basic_ops import as_gpuarray_variable
from theano.gpuarray import (
ctc,
dnn,
extra_ops,
fft,
multinomial,
opt,
reduction,
rng_mrg,
sort,
)
from theano.gpuarray.basic_ops import as_gpuarray_variable
# This is for documentation not to depend on the availability of pygpu
from .type import (
from theano.gpuarray.type import (
ContextNotDefined,
GpuArrayConstant,
GpuArraySharedVariable,
......@@ -160,8 +170,8 @@ def init_dev(dev, name=None, preallocate=None):
f" {int(context.free_gmem / MB)} MB are available."
)
elif gmem > context.free_gmem - 50 * MB:
print(
"WARNING: Preallocating too much memory can prevent cudnn and cublas from working properly"
warnings.warn(
"Preallocating too much memory can prevent cudnn and cublas from working properly"
)
# This will allocate and immediately free an object of size gmem
......
......@@ -2157,6 +2157,7 @@ def consider_constant(x):
"consider_constant() is deprecated, use zero_grad() or "
"disconnected_grad() instead."
),
category=DeprecationWarning,
stacklevel=3,
)
......
import warnings
from theano.tensor.nnet.blocksparse import (
SparseBlockGemv,
SparseBlockOuter,
sparse_block_dot,
sparse_block_gemv,
sparse_block_gemv_inplace,
sparse_block_outer,
sparse_block_outer_inplace,
)
# Fix: __all__ must contain the *names* of the public objects as strings;
# listing the objects themselves breaks `from ... import *` and confuses
# static tooling.
__all__ = [
    "SparseBlockGemv",
    "SparseBlockOuter",
    "sparse_block_dot",
    "sparse_block_gemv",
    "sparse_block_gemv_inplace",
    "sparse_block_outer",
    "sparse_block_outer_inplace",
]
warnings.warn(
"DEPRECATION: theano.sandbox.blocksparse does not exist anymore,"
"it has been moved to theano.tensor.nnet.blocksparse.",
category=DeprecationWarning,
)
import sys
import warnings

# Fix: emit a filterable DeprecationWarning instead of printing to stderr,
# matching how the other deprecated sandbox modules announce their move.
warnings.warn(
    "theano.sandbox.conv no longer provides conv. "
    "They have been moved to theano.tensor.nnet.conv",
    category=DeprecationWarning,
    stacklevel=2,
)
from theano.tensor.nnet.nnet import softsign  # noqa

import sys
import warnings

# Fix: emit a filterable DeprecationWarning instead of printing to stderr,
# matching how the other deprecated sandbox modules announce their move.
warnings.warn(
    "softsign was moved from theano.sandbox.softsign to "
    "theano.tensor.nnet.nnet",
    category=DeprecationWarning,
    stacklevel=2,
)
......@@ -377,9 +377,8 @@ def scan(
for i in range(n_outs):
if outs_info[i] is not None:
if isinstance(outs_info[i], dict):
# DEPRECATED :
if outs_info[i].get("return_steps", None) is not None:
raise ValueError(
raise DeprecationWarning(
"Using `return_steps` has been deprecated. "
"Simply select the entries you need using a "
"subtensor. Scan will optimize memory "
......@@ -396,12 +395,9 @@ def scan(
):
# ^ no initial state but taps provided
raise ValueError(
(
"If you are using slices of an output "
"you need to provide a initial state "
"for it"
),
outs_info[i],
"If you are using slices of an output "
"you need to provide a initial state "
f"for it: {outs_info[i]}"
)
elif (
outs_info[i].get("initial", None) is not None
......
......@@ -101,14 +101,6 @@ list_opt_slice = [
]
def warning(*msg):
    """Emit a scan-specific warning through the module logger."""
    joined = " ".join(msg)
    _logger.warning("WARNING theano.scan: " + joined)
def info(*msg):
    """Emit a scan-specific info message through the module logger."""
    joined = " ".join(msg)
    _logger.info("INFO theano.scan: " + joined)
@gof.local_optimizer([Scan])
def remove_constants_and_unused_inputs_scan(fgraph, node):
"""
......
......@@ -173,12 +173,7 @@ def hash_listsDictsTuples(x):
return hash_value
DEPRECATED_ARG = object()
def clone(
output, replace=None, strict=True, share_inputs=True, copy_inputs=DEPRECATED_ARG
):
def clone(output, replace=None, strict=True, share_inputs=True):
"""
Function that allows replacing subgraphs of a computational graph.
......@@ -196,17 +191,8 @@ def clone(
graph. If False, clone them. Note that cloned shared variables still
use the same underlying storage, so they will always have the same
value.
copy_inputs
Deprecated, use share_inputs.
"""
if copy_inputs is not DEPRECATED_ARG:
warnings.warn(
"In `clone()` function, the argument `copy_inputs` has been deprecated and renamed into `share_inputs`"
)
assert share_inputs # since we used `copy_inputs` we should have default value for `share_inputs`
share_inputs = copy_inputs
if isinstance(replace, dict):
items = list(replace.items())
elif isinstance(replace, (list, tuple)):
......
import sys
from warnings import warn
try:
import scipy
scipy_ver = [int(n) for n in scipy.__version__.split(".")[:2]]
enable_sparse = bool(scipy_ver >= [0, 7])
if not enable_sparse:
sys.stderr.write(
f"WARNING: scipy version = {scipy.__version__}."
" We request version >=0.7.0 for the sparse code as it has"
" bugs fixed in the sparse matrix code.\n"
)
enable_sparse = True
except ImportError:
enable_sparse = False
sys.stderr.write(
"WARNING: scipy can't be imported." " We disable the sparse matrix code."
)
warn("SciPy can't be imported. Sparse matrix support is disabled.")
from theano.sparse.type import *
......
......@@ -3,13 +3,11 @@ Classes for handling sparse matrices.
To read about different sparse formats, see
http://www-users.cs.umn.edu/~saad/software/SPARSKIT/paper.ps
"""
# TODO
# Automatic methods for determining best sparse format?
TODO: Automatic methods for determining best sparse format?
import sys
"""
from warnings import warn
import numpy as np
import scipy.sparse
......@@ -907,9 +905,8 @@ class DenseFromSparse(gof.op.Op):
(x,) = inputs
(out,) = outputs
if _is_dense(x):
print(
("WARNING: You just called DenseFromSparse on a dense matrix."),
file=sys.stderr,
warn(
"You just called DenseFromSparse on a dense matrix.",
)
out[0] = x
else:
......@@ -2398,10 +2395,8 @@ class MulSD(gof.op.Op):
z_data[j_idx] *= y[i, j]
out[0] = z
else:
print(
("WARNING: crappy implementation of MulSD"),
x.format,
file=sys.stderr,
warn(
"This implementation of MulSD is deficient: {x.format}",
)
out[0] = type(x)(x.toarray() * y)
......
......@@ -5244,8 +5244,8 @@ class Flatten(Op):
"""
Flatten a tensor.
Flattens a tensor to `outdim` dimensions by preserving the leading
outdim - 1 shape components.
Flattens a tensor to `ndim` dimensions by preserving the leading
ndim - 1 shape components.
.. note:: The interface Flatten(Op) is deprecated, you should use flatten.
"""
......@@ -5253,24 +5253,24 @@ class Flatten(Op):
view_map = {0: [0]}
check_input = False
__props__ = ("outdim",)
__props__ = ("ndim",)
def __init__(self, outdim=1):
def __init__(self, ndim=1):
warnings.warn(
"Flatten class is deprecated, " "please use flatten method instead.",
DeprecationWarning,
stacklevel=4,
)
self.outdim = int(outdim)
self.ndim = int(ndim)
def __str__(self):
return f"{self.__class__.__name__}{{{self.outdim}}}"
return f"{self.__class__.__name__}{{{self.ndim}}}"
def make_node(self, x):
t_x = as_tensor_variable(x)
if self.outdim < 1 or (x.ndim and self.outdim > x.ndim):
if self.ndim < 1 or (x.ndim and self.ndim > x.ndim):
raise ValueError(
f"invalid output ndimensions ({self.outdim}) for tensor of "
f"invalid output ndimensions ({self.ndim}) for tensor of "
f"rank {t_x.ndim}"
)
......@@ -5279,8 +5279,8 @@ class Flatten(Op):
# For the dimension resulting from the collapse of other dimensions,
# it should be broadcastable iff all the collapsed dimensions were
# broadcastable.
bcast_kept_dims = x.broadcastable[: self.outdim - 1]
bcast_new_dim = builtins.all(x.broadcastable[self.outdim - 1 :])
bcast_kept_dims = x.broadcastable[: self.ndim - 1]
bcast_new_dim = builtins.all(x.broadcastable[self.ndim - 1 :])
broadcastable = bcast_kept_dims + (bcast_new_dim,)
return gof.Apply(self, [t_x], [tensor(x.type.dtype, broadcastable)])
......@@ -5288,22 +5288,22 @@ class Flatten(Op):
def perform(self, node, inp, out_):
(x,) = inp
(out,) = out_
outdim = self.outdim
if outdim == 1:
ndim = self.ndim
if ndim == 1:
try:
out[0] = x.reshape(x.size)
except AttributeError:
out[0] = x.reshape((np.prod(x.shape),))
elif outdim == len(x.shape):
elif ndim == len(x.shape):
out[0] = x
else:
newshape = x.shape[: outdim - 1] + (np.prod(x.shape[outdim - 1 :]),)
newshape = x.shape[: ndim - 1] + (np.prod(x.shape[ndim - 1 :]),)
out[0] = x.reshape(newshape)
def infer_shape(self, fgraph, node, in_shapes):
(in_shp,) = in_shapes
part1 = in_shp[: self.outdim - 1]
part2 = in_shp[self.outdim - 1 :]
part1 = in_shp[: self.ndim - 1]
part2 = in_shp[self.ndim - 1 :]
if len(part2) > 1:
part2 = (prod(part2, dtype="int64"),)
......@@ -5311,11 +5311,11 @@ class Flatten(Op):
# We do not want to force an upcast of part2 if its length is 1
pass
else:
if len(in_shp) == 0 and self.outdim == 1:
if len(in_shp) == 0 and self.ndim == 1:
part2 = (1,)
else:
raise ValueError(
f"invalid output ndimensions ({self.outdim}) for tensor "
f"invalid output ndimensions ({self.ndim}) for tensor "
f"of rank {len(in_shp)}"
)
......@@ -5338,11 +5338,11 @@ class Flatten(Op):
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(out,) = outputs
outdim = self.outdim
ndim = self.ndim
fail = sub["fail"]
return (
"""
if (%(outdim)s == PyArray_NDIM(%(x)s))
if (%(ndim)s == PyArray_NDIM(%(x)s))
{
Py_XDECREF(%(out)s);
Py_XINCREF(%(x)s);
......@@ -5352,7 +5352,7 @@ class Flatten(Op):
{
Py_XDECREF(%(out)s);
if (%(outdim)s == 1)
if (%(ndim)s == 1)
{
npy_intp size = PyArray_SIZE(%(x)s);
PyArray_Dims newshape;
......@@ -5365,20 +5365,20 @@ class Flatten(Op):
else
{
npy_intp *oldshape = PyArray_DIMS(%(x)s);
npy_intp newshape_dims[%(outdim)s];
npy_intp newshape_dims[%(ndim)s];
int i;
for (i = 0; i < %(outdim)s - 1; ++i)
for (i = 0; i < %(ndim)s - 1; ++i)
newshape_dims[i] = oldshape[i];
newshape_dims[i] = 1;
for (int j = %(outdim)s - 1; j < PyArray_NDIM(%(x)s); ++j)
for (int j = %(ndim)s - 1; j < PyArray_NDIM(%(x)s); ++j)
newshape_dims[i] *= oldshape[j];
PyArray_Dims newshape;
newshape.ptr = newshape_dims;
newshape.len = %(outdim)s;
newshape.len = %(ndim)s;
%(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
&newshape,
NPY_CORDER);
......@@ -5428,36 +5428,29 @@ def is_flat(var, ndim=None, outdim=None):
return var.ndim == ndim
def flatten(x, ndim=None, outdim=None):
"""
Reshapes the variable x by keeping
the first outdim-1 dimension size(s) of x the same,
and making the last dimension size of x equal to
the multiplication of its remaining dimension size(s).
def flatten(x, ndim=1):
"""Return a copy of the array collapsed into one dimension.
Reshapes the variable `x` by keeping the first outdim-1 dimension size(s)
of `x` the same, and making the last dimension size of `x` equal to the
multiplication of its remaining dimension size(s).
Parameters
----------
x : theano.tensor.var.TensorVariable
the variable that should be reshaped.
x : theano.tensor.var.TensorVariable
The variable to be reshaped.
ndim : int
The number of dimensions of the returned variable
The default value is ``1``.
ndim : int
the number of dimensions of the returned variable
Default 1.
outdim : int
DEPRECATED synonym for ndim
Returns
-------
theano.tensor.var.TensorVariable
the flattend variable with dimensionality of outdim
"""
if outdim is None and ndim is None:
if ndim is None:
ndim = 1
elif outdim is not None and ndim is not None:
raise ValueError("You should only specify ndim")
elif outdim is not None:
warnings.warn("flatten outdim parameter is deprecated, use ndim instead.")
ndim = outdim
# Any input variable can be flattened to have ndim of 1,
# even if it's a scalar. Otherwise, ndim must be positive
# and smaller than x.ndim.
......@@ -5473,32 +5466,11 @@ def flatten(x, ndim=None, outdim=None):
bcast_new_dim = builtins.all(x.broadcastable[ndim - 1 :])
broadcastable = bcast_kept_dims + (bcast_new_dim,)
x_reshaped = theano.tensor.addbroadcast(
x_reshaped, *filter(lambda i: broadcastable[i], range(ndim))
x_reshaped, *[i for i in range(ndim) if broadcastable[i]]
)
return x_reshaped
# class TileGrad(Op):
# """
# Calculates the gradient of the Tile Op.
# """
# # this is so weird, I can't think of how to make this a general thing.
# def make_node(self, x, reps, g_out):
# return gof.Apply(self, [x, reps, g_out], [x.type()])
#
# def perform(self, node, inp, out):
# x, reps, g_out = inp
# gx, = out
# xsh = x.shape
# if len(reps) == 2 and reps[1] == 1 and len(x.shape) == 1:
# gx[0] = numpy.sum(g_out, axis=0)
# else:
# raise NotImplementedError('x.shape, reps combination not '
# 'supported', (x.shape, reps))
#
# tilegrad = TileGrad()
class Tile(Op):
"""
Construct an array by repeating the input x according to reps pattern.
......
......@@ -554,12 +554,12 @@ class ConvOp(OpenMPOp):
new -= 1
warnstr = (
"OPTIMISATION WARNING: in ConvOp.__init__() "
"unroll_batch(%i) must be 0 or a divisor of"
" bsize(%i). We revert it to %i. This"
"In ConvOp.__init__(): "
f"unroll_batch({self.unroll_batch}) must be 0 or a divisor of"
f" bsize({self.bsize}). We revert it to {new}. This"
" won't change the result, but may make it slower."
)
_logger.warning(warnstr, self.unroll_batch, self.bsize, new)
_logger.warning(warnstr)
self.unroll_batch = new
......@@ -580,12 +580,12 @@ class ConvOp(OpenMPOp):
new -= 1
warnstr = (
"OPTIMISATION WARNING: in ConvOp.__init__()"
" unroll_kern(%i) should be 0 or a divisor of"
" nkern(%i). We revert it to %i. This"
"In ConvOp.__init__(): "
f"unroll_kern({self.unroll_kern}) must be 0 or a divisor of"
f" nkern({self.nkern}). We revert it to {new}. This"
" won't change the result, but may make it slower."
)
_logger.warning(warnstr, self.unroll_kern, self.nkern, new)
_logger.warning(warnstr)
self.unroll_kern = new
self.outshp = get_conv_output_shape(
......
......@@ -440,9 +440,10 @@ class Softmax(Op):
raise ValueError(f"x must be 1-d or 2-d tensor of floats. Got {x.type}")
if x.ndim == 1:
warnings.warn(
"DEPRECATION: If x is a vector, Softmax will not automatically pad x "
"If x is a vector, Softmax will not automatically pad x "
"anymore in next releases. If you need it, please do it manually. The "
"vector case is gonna be supported soon and the output will be a vector.",
category=PendingDeprecationWarning,
stacklevel=4,
)
x = tt.shape_padleft(x, n_ones=1)
......@@ -638,9 +639,10 @@ class LogSoftmax(Op):
raise ValueError(f"x must be 1-d or 2-d tensor of floats. Got {x.type}")
if x.ndim == 1:
warnings.warn(
"DEPRECATION: If x is a vector, LogSoftmax will not automatically pad x "
"If x is a vector, LogSoftmax will not automatically pad x "
"anymore in next releases. If you need it, please do it manually. The "
"vector case is gonna be supported soon and the output will be a vector.",
category=PendingDeprecationWarning,
stacklevel=4,
)
x = tt.shape_padleft(x, n_ones=1)
......@@ -1661,8 +1663,7 @@ def local_argmax_pushdown(fgraph, node):
):
if theano.config.warn__argmax_pushdown_bug:
logging.getLogger("theano.tensor.nnet.nnet").warn(
"WARNING: there "
"was a bug in Theano fixed on May 27th, 2010 in this case."
"There was a bug in Theano fixed on May 27th, 2010 in this case."
" I.E. when we take the max of a softplus, softmax, exp, "
"log, tanh, sigmoid, softmax_with_bias op, we were doing "
"the max of the parent of the input. To remove this "
......
......@@ -5821,7 +5821,7 @@ def local_sum_prod_div_dimshuffle(fgraph, node):
if compatible_dims:
_logger.warning(
"WARNING: Your current code is fine, but"
"Your current code is fine, but"
" Theano versions between "
"rev. 3bd9b789f5e8 (2010-06-16) and"
" cfc6322e5ad4 (2010-08-03) would "
......@@ -5876,7 +5876,7 @@ def local_sum_prod_div_dimshuffle(fgraph, node):
node.op, Sum
):
_logger.warning(
"WARNING: Your current code is fine,"
"Your current code is fine,"
" but Theano versions between "
"rev. 3bd9b789f5e8 (2010-06-16) and"
" cfc6322e5ad4 (2010-08-03) would "
......@@ -5986,7 +5986,7 @@ def local_op_of_op(fgraph, node):
and len(newaxis) == len(newaxis_old)
):
_logger.warning(
"WARNING (YOUR CURRENT CODE IS FINE): Theano "
"(YOUR CURRENT CODE IS FINE): Theano "
"versions between version 9923a40c7b7a and August "
"2nd, 2010 generated bugged code in this case. "
"This happens when there are two consecutive sums "
......
"""Define random number Type (`RandomStateType`) and Op (`RandomFunction`)."""
import sys
from copy import copy
from functools import reduce
from warnings import warn
import numpy as np
......@@ -200,10 +199,8 @@ class RandomFunction(gof.Op):
assert shape.type.ndim == 1
assert (shape.type.dtype == "int64") or (shape.type.dtype == "int32")
if not isinstance(r.type, RandomStateType):
print(
"WARNING: RandomState instances should be in RandomStateType",
file=sys.stderr,
)
warn("RandomState instances should be in RandomStateType")
# the following doesn't work because we want to ignore the
# broadcastable flags in shape.type
# assert shape.type == tensor.lvector
......@@ -532,9 +529,9 @@ def binomial(random_state, size=None, n=1, p=0.5, ndim=None, dtype="int64", prob
"""
if prob is not None:
p = prob
print(
"DEPRECATION WARNING: the parameter prob to the binomal fct have been renamed to p to have the same name as np.",
file=sys.stderr,
warn(
"The parameter prob to the binomal fct have been renamed to p to have the same name as np.",
category=DeprecationWarning,
)
n = tensor.as_tensor_variable(n)
p = tensor.as_tensor_variable(p)
......@@ -964,9 +961,9 @@ class RandomStreamsBase:
"""
if prob is not None:
p = prob
print(
"DEPRECATION WARNING: the parameter prob to the binomal fct have been renamed to p to have the same name as numpy.",
file=sys.stderr,
warn(
"The parameter prob to the binomal fct have been renamed to p to have the same name as numpy.",
category=DeprecationWarning,
)
return self.gen(binomial, size, n, p, ndim=ndim, dtype=dtype)
......
......@@ -93,9 +93,10 @@ def pool_2d(
)
else:
warnings.warn(
"DEPRECATION: the 'ds' parameter is not going to exist"
"The 'ds' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'ws'.",
category=DeprecationWarning,
stacklevel=2,
)
ws = ds
......@@ -110,9 +111,10 @@ def pool_2d(
)
else:
warnings.warn(
"DEPRECATION: the 'st' parameter is not going to exist"
"The 'st' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'stride'.",
category=DeprecationWarning,
stacklevel=2,
)
stride = st
......@@ -125,9 +127,10 @@ def pool_2d(
)
else:
warnings.warn(
"DEPRECATION: the 'padding' parameter is not going to exist"
"The 'padding' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'pad'.",
category=DeprecationWarning,
stacklevel=2,
)
pad = padding
......@@ -145,6 +148,7 @@ def pool_2d(
" GPU combination supported is when"
" `ws == stride and pad == (0, 0) and mode == 'max'`."
" Otherwise, the convolution will be executed on CPU.",
category=DeprecationWarning,
stacklevel=2,
)
ignore_border = False
......@@ -210,9 +214,10 @@ def pool_3d(
)
else:
warnings.warn(
"DEPRECATION: the 'ds' parameter is not going to exist"
"The 'ds' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'ws'.",
category=DeprecationWarning,
stacklevel=2,
)
ws = ds
......@@ -227,9 +232,10 @@ def pool_3d(
)
else:
warnings.warn(
"DEPRECATION: the 'st' parameter is not going to exist"
"The 'st' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'stride'.",
category=DeprecationWarning,
stacklevel=2,
)
stride = st
......@@ -242,9 +248,10 @@ def pool_3d(
)
else:
warnings.warn(
"DEPRECATION: the 'padding' parameter is not going to exist"
"The 'padding' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'pad'.",
category=DeprecationWarning,
stacklevel=2,
)
pad = padding
......@@ -262,6 +269,7 @@ def pool_3d(
" GPU combination supported is when"
" `ws == stride and pad == (0, 0, 0) and mode == 'max'`."
" Otherwise, the convolution will be executed on CPU.",
category=DeprecationWarning,
stacklevel=2,
)
ignore_border = False
......@@ -387,9 +395,10 @@ class Pool(OpenMPOp):
)
else:
warnings.warn(
"DEPRECATION: the 'ds' parameter is not going to exist"
"The 'ds' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'ws'.",
category=DeprecationWarning,
stacklevel=2,
)
ws = ds
......@@ -404,9 +413,10 @@ class Pool(OpenMPOp):
)
else:
warnings.warn(
"DEPRECATION: the 'st' parameter is not going to exist"
"The 'st' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'stride'.",
category=DeprecationWarning,
stacklevel=2,
)
stride = st
......@@ -420,9 +430,10 @@ class Pool(OpenMPOp):
)
else:
warnings.warn(
"DEPRECATION: the 'padding' parameter is not going to"
"The 'padding' parameter is not going to"
" exist anymore as it is going to be replaced by the"
" parameter 'pad'.",
category=DeprecationWarning,
stacklevel=2,
)
pad = padding
......@@ -1032,9 +1043,10 @@ class PoolGrad(OpenMPOp):
)
else:
warnings.warn(
"DEPRECATION: the 'ds' parameter in PoolGrad is not going"
"The 'ds' parameter in PoolGrad is not going"
" to exist anymore as it is going to be replaced by the"
" parameter 'ws'.",
category=DeprecationWarning,
stacklevel=2,
)
ws = ds
......@@ -1049,9 +1061,10 @@ class PoolGrad(OpenMPOp):
)
else:
warnings.warn(
"DEPRECATION: the 'st' parameter in PoolGrad is not going"
"The 'st' parameter in PoolGrad is not going"
" to exist anymore as it is going to be replaced by the"
" parameter 'stride'.",
category=DeprecationWarning,
stacklevel=2,
)
stride = st
......@@ -1064,9 +1077,10 @@ class PoolGrad(OpenMPOp):
)
else:
warnings.warn(
"DEPRECATION: the 'padding' parameter in PoolGrad is not"
"The 'padding' parameter in PoolGrad is not"
" going to exist anymore as it is going to be replaced"
" by the parameter 'pad'.",
category=DeprecationWarning,
stacklevel=2,
)
pad = padding
......
......@@ -55,9 +55,10 @@ class TensorType(Type):
self.sparse_grad = sparse_grad
if sparse_grad:
warnings.warn(
"DEPRECATION WARNING: You use an old interface to"
"You use an old interface to"
" AdvancedSubtensor1 sparse_grad. Now use"
" theano.sparse_grad(a_tensor[an_int_vector])."
" theano.sparse_grad(a_tensor[an_int_vector]).",
category=DeprecationWarning,
)
def clone(self, dtype=None, broadcastable=None):
......
Markdown formatting is supported
0%
You are adding 0 people to this discussion. Proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment