Commit a8c334aa authored by Brandon T. Willard, committed by Thomas Wiecki

Move theano.tensor.inc_code into theano.tensor.subtensor

Parent commit: 8a23fb1c
import numpy as np
import pytest
import theano
import theano.tensor as tt
from tests import unittest_tools as utt
from theano.tensor.type import col, dmatrix, dscalar, dtensor3, lscalar, matrix, vector
class TestIncSubtensor:
    """
    Partial testing.

    What could be tested:
    - increment vs set
    - thing incremented: scalar, vector, matrix,
    - increment/set: constant, scalar, vector, matrix
    - indices: scalar vs slice, constant vs variable, out of bound, ...
    - inplace

    NOTE: these are the same tests as test_incsubtensor.py, but using
    the new (read: not deprecated) inc_subtensor, set_subtensor
    functions.
    """

    def setup_method(self):
        # Seed the shared RNG so each test is reproducible.
        utt.seed_rng()

    def test_simple_2d(self):
        # Increments or sets part of a tensor by a scalar using a full slice
        # and a partial slice depending on a scalar.
        a = dmatrix()
        increment = dscalar()
        sl1 = slice(None)
        sl2_end = lscalar()
        sl2 = slice(sl2_end)

        for do_set in [False, True]:
            # Fixed typo: local was misspelled "resut"; renamed to "expr"
            # (the symbolic expression) to avoid clashing with the numeric
            # "result" below.
            if do_set:
                expr = tt.set_subtensor(a[sl1, sl2], increment)
            else:
                expr = tt.inc_subtensor(a[sl1, sl2], increment)

            f = theano.function([a, increment, sl2_end], expr)

            val_a = np.ones((5, 5))
            val_inc = 2.3
            val_sl2_end = 2

            result = f(val_a, val_inc, val_sl2_end)
            expected_result = np.copy(val_a)
            if do_set:
                expected_result[:, :val_sl2_end] = val_inc
            else:
                expected_result[:, :val_sl2_end] += val_inc
            utt.assert_allclose(result, expected_result)

    def test_wrong_dims(self):
        # Setting/incrementing a row of a matrix with a full matrix is a
        # rank mismatch, rejected at graph-construction time.
        a = matrix()
        increment = matrix()
        index = 0

        with pytest.raises(TypeError):
            tt.set_subtensor(a[index], increment)
        with pytest.raises(TypeError):
            tt.inc_subtensor(a[index], increment)

    def test_wrong_broadcast(self):
        a = col()
        increment = vector()

        # These symbolic graphs are legitimate, as long as increment has
        # exactly one element. So it should fail at runtime, not at compile
        # time.
        rng = np.random.RandomState(utt.fetch_seed())

        def rng_randX(*shape):
            return rng.rand(*shape).astype(theano.config.floatX)

        for op in (tt.set_subtensor, tt.inc_subtensor):
            for base in (a[:], a[0]):
                out = op(base, increment)
                f = theano.function([a, increment], out)
                # This one should work
                f(rng_randX(3, 1), rng_randX(1))
                # These ones should not
                with pytest.raises(ValueError):
                    f(rng_randX(3, 1), rng_randX(2))
                with pytest.raises(ValueError):
                    f(rng_randX(3, 1), rng_randX(3))
                with pytest.raises(ValueError):
                    f(rng_randX(3, 1), rng_randX(0))

    def test_simple_3d(self):
        # Increments or sets part of a tensor by a scalar using a full slice
        # and a partial slice depending on a scalar.
        a = dtensor3()
        increment = dscalar()
        sl1 = slice(None)
        sl2_end = lscalar()
        sl2 = slice(sl2_end)
        sl3 = 2

        val_a = np.ones((5, 3, 4))
        val_inc = 2.3
        val_sl2_end = 2

        for method in [tt.set_subtensor, tt.inc_subtensor]:
            # (Removed leftover debug print of the method under test.)
            expr = method(a[sl1, sl3, sl2], increment)
            f = theano.function([a, increment, sl2_end], expr)

            expected_result = np.copy(val_a)
            result = f(val_a, val_inc, val_sl2_end)

            if method is tt.set_subtensor:
                expected_result[:, sl3, :val_sl2_end] = val_inc
            else:
                expected_result[:, sl3, :val_sl2_end] += val_inc

            utt.assert_allclose(result, expected_result)

            # Test when we broadcast the result
            expr = method(a[sl1, sl2], increment)
            f = theano.function([a, increment, sl2_end], expr)

            expected_result = np.copy(val_a)
            result = f(val_a, val_inc, val_sl2_end)

            if method is tt.set_subtensor:
                expected_result[:, :val_sl2_end] = val_inc
            else:
                expected_result[:, :val_sl2_end] += val_inc

            utt.assert_allclose(result, expected_result)

    def test_grad_inc_set(self):
        # Builds slice-closures so verify_grad sees plain (a, b) callables.
        def inc_slice(*s):
            def just_numeric_args(a, b):
                return tt.inc_subtensor(a[s], b)

            return just_numeric_args

        def set_slice(*s):
            def just_numeric_args(a, b):
                return tt.set_subtensor(a[s], b)

            return just_numeric_args

        for f_slice in [inc_slice, set_slice]:
            # vector
            utt.verify_grad(
                f_slice(slice(2, 4, None)),
                (
                    np.asarray([0, 1, 2, 3, 4, 5.0]),
                    np.asarray([9, 9.0]),
                ),
            )
            # matrix
            utt.verify_grad(
                f_slice(slice(1, 2, None), slice(None, None, None)),
                (
                    np.asarray([[0, 1], [2, 3], [4, 5.0]]),
                    np.asarray([[9, 9.0]]),
                ),
            )
            # single element
            utt.verify_grad(
                f_slice(2, 1),
                (
                    np.asarray([[0, 1], [2, 3], [4, 5.0]]),
                    np.asarray(9.0),
                ),
            )
            # broadcast
            utt.verify_grad(
                f_slice(2),
                (
                    np.asarray([[0, 1], [2, 3], [4, 5.0]]),
                    np.asarray(9.0),
                ),
            )
......@@ -43,6 +43,7 @@ from theano.tensor.type import (
ctensor3,
dmatrix,
dscalar,
dtensor3,
dtensor4,
dvector,
fmatrix,
......@@ -52,6 +53,7 @@ from theano.tensor.type import (
iscalar,
lmatrix,
lrow,
lscalar,
lvector,
matrix,
tensor,
......@@ -1442,6 +1444,191 @@ class TestSubtensor(utt.OptimizationTestMixin):
f(np.random.normal(0, 1, (30, 4)))
class TestIncSubtensor:
    """
    Partial testing.

    What could be tested:
    - increment vs set
    - thing incremented: scalar, vector, matrix,
    - increment/set: constant, scalar, vector, matrix
    - indices: scalar vs slice, constant vs variable, out of bound, ...
    - inplace

    NOTE: these are the same tests as test_incsubtensor.py, but using
    the new (read: not deprecated) inc_subtensor, set_subtensor
    functions.
    """

    def setup_method(self):
        # Seed the shared RNG so each test is reproducible.
        utt.seed_rng()

    def test_simple_2d(self):
        # Increments or sets part of a tensor by a scalar using a full slice
        # and a partial slice depending on a scalar.
        a = dmatrix()
        increment = dscalar()
        sl1 = slice(None)
        sl2_end = lscalar()
        sl2 = slice(sl2_end)

        for do_set in [False, True]:
            # Fixed typo: local was misspelled "resut"; renamed to "expr"
            # (the symbolic expression) to avoid clashing with the numeric
            # "result" below.
            if do_set:
                expr = set_subtensor(a[sl1, sl2], increment)
            else:
                expr = inc_subtensor(a[sl1, sl2], increment)

            f = theano.function([a, increment, sl2_end], expr)

            val_a = np.ones((5, 5))
            val_inc = 2.3
            val_sl2_end = 2

            result = f(val_a, val_inc, val_sl2_end)
            expected_result = np.copy(val_a)
            if do_set:
                expected_result[:, :val_sl2_end] = val_inc
            else:
                expected_result[:, :val_sl2_end] += val_inc
            utt.assert_allclose(result, expected_result)

    def test_wrong_dims(self):
        # Setting/incrementing a row of a matrix with a full matrix is a
        # rank mismatch, rejected at graph-construction time.
        a = matrix()
        increment = matrix()
        index = 0

        with pytest.raises(TypeError):
            set_subtensor(a[index], increment)
        with pytest.raises(TypeError):
            inc_subtensor(a[index], increment)

    def test_wrong_broadcast(self):
        a = col()
        increment = vector()

        # These symbolic graphs are legitimate, as long as increment has
        # exactly one element. So it should fail at runtime, not at compile
        # time.
        rng = np.random.RandomState(utt.fetch_seed())

        def rng_randX(*shape):
            return rng.rand(*shape).astype(theano.config.floatX)

        for op in (set_subtensor, inc_subtensor):
            for base in (a[:], a[0]):
                out = op(base, increment)
                f = theano.function([a, increment], out)
                # This one should work
                f(rng_randX(3, 1), rng_randX(1))
                # These ones should not
                with pytest.raises(ValueError):
                    f(rng_randX(3, 1), rng_randX(2))
                with pytest.raises(ValueError):
                    f(rng_randX(3, 1), rng_randX(3))
                with pytest.raises(ValueError):
                    f(rng_randX(3, 1), rng_randX(0))

    def test_simple_3d(self):
        # Increments or sets part of a tensor by a scalar using a full slice
        # and a partial slice depending on a scalar.
        a = dtensor3()
        increment = dscalar()
        sl1 = slice(None)
        sl2_end = lscalar()
        sl2 = slice(sl2_end)
        sl3 = 2

        val_a = np.ones((5, 3, 4))
        val_inc = 2.3
        val_sl2_end = 2

        for method in [set_subtensor, inc_subtensor]:
            # (Removed leftover debug print of the method under test.)
            expr = method(a[sl1, sl3, sl2], increment)
            f = theano.function([a, increment, sl2_end], expr)

            expected_result = np.copy(val_a)
            result = f(val_a, val_inc, val_sl2_end)

            if method is set_subtensor:
                expected_result[:, sl3, :val_sl2_end] = val_inc
            else:
                expected_result[:, sl3, :val_sl2_end] += val_inc

            utt.assert_allclose(result, expected_result)

            # Test when we broadcast the result
            expr = method(a[sl1, sl2], increment)
            f = theano.function([a, increment, sl2_end], expr)

            expected_result = np.copy(val_a)
            result = f(val_a, val_inc, val_sl2_end)

            if method is set_subtensor:
                expected_result[:, :val_sl2_end] = val_inc
            else:
                expected_result[:, :val_sl2_end] += val_inc

            utt.assert_allclose(result, expected_result)

    def test_grad_inc_set(self):
        # Builds slice-closures so verify_grad sees plain (a, b) callables.
        def inc_slice(*s):
            def just_numeric_args(a, b):
                return inc_subtensor(a[s], b)

            return just_numeric_args

        def set_slice(*s):
            def just_numeric_args(a, b):
                return set_subtensor(a[s], b)

            return just_numeric_args

        for f_slice in [inc_slice, set_slice]:
            # vector
            utt.verify_grad(
                f_slice(slice(2, 4, None)),
                (
                    np.asarray([0, 1, 2, 3, 4, 5.0]),
                    np.asarray([9, 9.0]),
                ),
            )
            # matrix
            utt.verify_grad(
                f_slice(slice(1, 2, None), slice(None, None, None)),
                (
                    np.asarray([[0, 1], [2, 3], [4, 5.0]]),
                    np.asarray([[9, 9.0]]),
                ),
            )
            # single element
            utt.verify_grad(
                f_slice(2, 1),
                (
                    np.asarray([[0, 1], [2, 3], [4, 5.0]]),
                    np.asarray(9.0),
                ),
            )
            # broadcast
            utt.verify_grad(
                f_slice(2),
                (
                    np.asarray([[0, 1], [2, 3], [4, 5.0]]),
                    np.asarray(9.0),
                ),
            )
class TestIncSubtensor1:
# test inc_subtensor
# also tests set_subtensor
......
def inc_code():
    """Build the C support code for in-place advanced-index increment/set.

    Returns
    -------
    str
        C source defining one ``<dtype>_inplace_add`` routine per supported
        NumPy dtype, parallel lookup tables (``addition_funcs`` /
        ``type_numbers``), and the ``map_increment`` and
        ``inplace_increment`` driver functions.
    """
    # Real scalar dtypes: a single assignment per element suffices.
    real_types = [
        "npy_" + name
        for name in (
            "int8",
            "int16",
            "int32",
            "int64",
            "uint8",
            "uint16",
            "uint32",
            "uint64",
            "float16",
            "float32",
            "float64",
        )
    ]
    # Complex dtypes: real and imaginary parts are updated separately.
    cplx_types = ["npy_" + name for name in ("complex32", "complex64", "complex128")]

    # One per-dtype function, compiled only when the dtype macro exists on
    # this platform.
    per_type_template = """
#if defined(%(typen)s)
static void %(type)s_inplace_add(PyArrayMapIterObject *mit,
                                 PyArrayIterObject *it, int inc_or_set)
{
    int index = mit->size;
    while (index--) {
        %(op)s
        PyArray_MapIterNext(mit);
        PyArray_ITER_NEXT(it);
    }
}
#endif
"""

    # inc_or_set == 0 means "set": the old value is replaced rather than
    # accumulated.
    real_update = (
        "((%(type)s*)mit->dataptr)[0] = "
        "(inc_or_set ? ((%(type)s*)mit->dataptr)[0] : 0)"
        " + ((%(type)s*)it->dataptr)[0];"
    )
    cplx_update = """
        ((%(type)s*)mit->dataptr)[0].real =
            (inc_or_set ? ((%(type)s*)mit->dataptr)[0].real : 0)
            + ((%(type)s*)it->dataptr)[0].real;
        ((%(type)s*)mit->dataptr)[0].imag =
            (inc_or_set ? ((%(type)s*)mit->dataptr)[0].imag : 0)
            + ((%(type)s*)it->dataptr)[0].imag;
    """

    def render(ctype, update):
        # Instantiate the per-dtype function template for one dtype.
        return per_type_template % {
            "type": ctype,
            "typen": ctype.upper(),
            "op": update % {"type": ctype},
        }

    fns = "".join(render(t, real_update) for t in real_types) + "".join(
        render(t, cplx_update) for t in cplx_types
    )

    def binop_entry(ctype):
        # One guarded function-pointer entry for addition_funcs[].
        typen = ctype.upper()
        return f"""
#if defined({typen})
{ctype}_inplace_add,
#endif
"""

    fn_array = (
        "static inplace_map_binop addition_funcs[] = {"
        + "".join(binop_entry(t) for t in real_types + cplx_types)
        + "NULL};\n"
    )

    def num_entry(ctype):
        # One guarded type-number entry for type_numbers[]; kept in the same
        # order as addition_funcs[] so index i pairs them up.
        typen = ctype.upper()
        return f"""
#if defined({typen})
{typen},
#endif
"""

    type_number_array = (
        "static int type_numbers[] = {"
        + "".join(num_entry(t) for t in real_types + cplx_types)
        + "-1000};"
    )

    return (
        """
typedef void (*inplace_map_binop)(PyArrayMapIterObject *,
                                  PyArrayIterObject *, int inc_or_set);
"""
        + fns
        + fn_array
        + type_number_array
        + """
static int
map_increment(PyArrayMapIterObject *mit, PyArrayObject *op,
              inplace_map_binop add_inplace, int inc_or_set)
{
    PyArrayObject *arr = NULL;
    PyArrayIterObject *it;
    PyArray_Descr *descr;
    if (mit->ait == NULL) {
        return -1;
    }
    descr = PyArray_DESCR(mit->ait->ao);
    Py_INCREF(descr);
    arr = (PyArrayObject *)PyArray_FromAny((PyObject *)op, descr,
                0, 0, NPY_ARRAY_FORCECAST, NULL);
    if (arr == NULL) {
        return -1;
    }
    if ((mit->subspace != NULL) && (mit->consec)) {
        PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);
        if (arr == NULL) {
            return -1;
        }
    }
    it = (PyArrayIterObject*)
            PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);
    if (it == NULL) {
        Py_DECREF(arr);
        return -1;
    }

    (*add_inplace)(mit, it, inc_or_set);

    Py_DECREF(arr);
    Py_DECREF(it);
    return 0;
}

static int
inplace_increment(PyArrayObject *a, PyObject *index, PyArrayObject *inc,
                  int inc_or_set)
{
    inplace_map_binop add_inplace = NULL;
    int type_number = -1;
    int i = 0;
    PyArrayMapIterObject * mit;

    if (PyArray_FailUnlessWriteable(a, "input/output array") < 0) {
        return -1;
    }

    if (PyArray_NDIM(a) == 0) {
        PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed.");
        return -1;
    }
    type_number = PyArray_TYPE(a);

    while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){
        if (type_number == type_numbers[i]) {
            add_inplace = addition_funcs[i];
            break;
        }
        i++ ;
    }

    if (add_inplace == NULL) {
        PyErr_SetString(PyExc_TypeError, "unsupported type for a");
        return -1;
    }
    mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);
    if (mit == NULL) {
        goto fail;
    }
    if (map_increment(mit, inc, add_inplace, inc_or_set) != 0) {
        goto fail;
    }

    Py_DECREF(mit);

    Py_INCREF(Py_None);
    return 0;

fail:
    Py_XDECREF(mit);

    return -1;
}
"""
    )
......@@ -24,7 +24,6 @@ from theano.tensor.exceptions import (
NotScalarConstantError,
ShapeError,
)
from theano.tensor.inc_code import inc_code
from theano.tensor.math import clip
from theano.tensor.shape import Reshape
from theano.tensor.type import (
......@@ -2138,7 +2137,196 @@ class AdvancedIncSubtensor1(COp):
NPY_ARRAY_ENSURECOPY, NULL)"""
def c_support_code(self, **kwargs):
    """Return the C support code for AdvancedIncSubtensor1.

    Defines one ``<dtype>_inplace_add`` routine per supported NumPy dtype,
    lookup tables pairing NumPy type numbers with those routines, and the
    ``map_increment`` / ``inplace_increment`` drivers used by ``c_code``.

    FIX: a stray leftover ``return inc_code()`` (from before this code was
    inlined out of ``theano.tensor.inc_code``) preceded the body below,
    making it unreachable; it has been removed.
    """
    # Real scalar dtypes: a single assignment per element suffices.
    types = [
        "npy_" + t
        for t in [
            "int8",
            "int16",
            "int32",
            "int64",
            "uint8",
            "uint16",
            "uint32",
            "uint64",
            "float16",
            "float32",
            "float64",
        ]
    ]
    # Complex dtypes: real and imaginary parts are updated separately.
    complex_types = ["npy_" + t for t in ["complex32", "complex64", "complex128"]]

    # One per-dtype function, compiled only when the dtype macro exists.
    inplace_map_template = """
#if defined(%(typen)s)
static void %(type)s_inplace_add(PyArrayMapIterObject *mit,
                                 PyArrayIterObject *it, int inc_or_set)
{
    int index = mit->size;
    while (index--) {
        %(op)s
        PyArray_MapIterNext(mit);
        PyArray_ITER_NEXT(it);
    }
}
#endif
"""

    # inc_or_set == 0 means "set": the old value is dropped, not accumulated.
    floatadd = (
        "((%(type)s*)mit->dataptr)[0] = "
        "(inc_or_set ? ((%(type)s*)mit->dataptr)[0] : 0)"
        " + ((%(type)s*)it->dataptr)[0];"
    )
    complexadd = """
        ((%(type)s*)mit->dataptr)[0].real =
            (inc_or_set ? ((%(type)s*)mit->dataptr)[0].real : 0)
            + ((%(type)s*)it->dataptr)[0].real;
        ((%(type)s*)mit->dataptr)[0].imag =
            (inc_or_set ? ((%(type)s*)mit->dataptr)[0].imag : 0)
            + ((%(type)s*)it->dataptr)[0].imag;
    """

    fns = "".join(
        [
            inplace_map_template
            % {"type": t, "typen": t.upper(), "op": floatadd % {"type": t}}
            for t in types
        ]
        + [
            inplace_map_template
            % {"type": t, "typen": t.upper(), "op": complexadd % {"type": t}}
            for t in complex_types
        ]
    )

    def gen_binop(type, typen):
        # One guarded function-pointer entry for addition_funcs[].
        return f"""
#if defined({typen})
{type}_inplace_add,
#endif
"""

    fn_array = (
        "static inplace_map_binop addition_funcs[] = {"
        + "".join(
            [gen_binop(type=t, typen=t.upper()) for t in types + complex_types]
        )
        + "NULL};\n"
    )

    def gen_num(typen):
        # One guarded type-number entry; same order as addition_funcs[]
        # so index i pairs a type number with its routine.
        return f"""
#if defined({typen})
{typen},
#endif
"""

    type_number_array = (
        "static int type_numbers[] = {"
        + "".join([gen_num(typen=t.upper()) for t in types + complex_types])
        + "-1000};"
    )

    code = (
        """
typedef void (*inplace_map_binop)(PyArrayMapIterObject *,
                                  PyArrayIterObject *, int inc_or_set);
"""
        + fns
        + fn_array
        + type_number_array
        + """
static int
map_increment(PyArrayMapIterObject *mit, PyArrayObject *op,
              inplace_map_binop add_inplace, int inc_or_set)
{
    PyArrayObject *arr = NULL;
    PyArrayIterObject *it;
    PyArray_Descr *descr;
    if (mit->ait == NULL) {
        return -1;
    }
    descr = PyArray_DESCR(mit->ait->ao);
    Py_INCREF(descr);
    arr = (PyArrayObject *)PyArray_FromAny((PyObject *)op, descr,
                0, 0, NPY_ARRAY_FORCECAST, NULL);
    if (arr == NULL) {
        return -1;
    }
    if ((mit->subspace != NULL) && (mit->consec)) {
        PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);
        if (arr == NULL) {
            return -1;
        }
    }
    it = (PyArrayIterObject*)
            PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);
    if (it == NULL) {
        Py_DECREF(arr);
        return -1;
    }

    (*add_inplace)(mit, it, inc_or_set);

    Py_DECREF(arr);
    Py_DECREF(it);
    return 0;
}

static int
inplace_increment(PyArrayObject *a, PyObject *index, PyArrayObject *inc,
                  int inc_or_set)
{
    inplace_map_binop add_inplace = NULL;
    int type_number = -1;
    int i = 0;
    PyArrayMapIterObject * mit;

    if (PyArray_FailUnlessWriteable(a, "input/output array") < 0) {
        return -1;
    }

    if (PyArray_NDIM(a) == 0) {
        PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed.");
        return -1;
    }
    type_number = PyArray_TYPE(a);

    while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){
        if (type_number == type_numbers[i]) {
            add_inplace = addition_funcs[i];
            break;
        }
        i++ ;
    }

    if (add_inplace == NULL) {
        PyErr_SetString(PyExc_TypeError, "unsupported type for a");
        return -1;
    }
    mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);
    if (mit == NULL) {
        goto fail;
    }
    if (map_increment(mit, inc, add_inplace, inc_or_set) != 0) {
        goto fail;
    }

    Py_DECREF(mit);

    Py_INCREF(Py_None);
    return 0;

fail:
    Py_XDECREF(mit);

    return -1;
}
"""
    )

    return code
def c_code(self, node, name, input_names, output_names, sub):
numpy_ver = [int(n) for n in np.__version__.split(".")[:2]]
......
Markdown formatting supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Finish editing this comment first!
Register or sign in to comment