testgroup / pytensor · Commits

Commit 348e14dc
Authored Mar 18, 2013 by lamblin
Merge pull request #1279 from jsalvatier/advinc_rebase3
Advinc rebase3
Parents: 517540f5, aed7bd08
Showing 4 changed files with 491 additions and 88 deletions (+491, −88)
theano/gof/cutils.py                +184  −10
theano/scalar/basic.py                +6   −4
theano/tensor/basic.py              +196  −68
theano/tensor/tests/test_basic.py   +105   −6
theano/gof/cutils.py
- import os, sys
+ import os
+ import sys
+ from theano.compat import PY3

from theano.gof.compilelock import get_lock, release_lock
from theano import config

import cmodule

# TODO These two lines may be removed in the future, when we are 100% sure
# noone has an old cutils_ext.so lying around anymore.
...
...
@@ -12,8 +14,66 @@ if os.path.exists(os.path.join(config.compiledir, 'cutils_ext.so')):

def compile_cutils():
    """Do just the compilation of cutils_ext"""
-     code = """
    types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64',
             'int128', 'int256', 'uint8', 'uint16', 'uint32', 'uint64',
             'uint128', 'uint256', 'float16', 'float32', 'float64',
             'float80', 'float96', 'float128', 'float256']]

    complex_types = ['npy_' + t for t in ['complex32', 'complex64',
             'complex128', 'complex160', 'complex192', 'complex512']]

    inplace_map_template = """
    #if defined(%(typen)s)
    static void
    %(type)s_inplace_add(PyArrayMapIterObject *mit, PyArrayIterObject *it)
    {
        int index = mit->size;
        while (index--) {
            %(op)s
            PyArray_MapIterNext(mit);
            PyArray_ITER_NEXT(it);
        }
    }
    #endif
    """

    floatadd = "((%(type)s*)mit->dataptr)[0] = ((%(type)s*)mit->dataptr)[0] + ((%(type)s*)it->dataptr)[0];"
    complexadd = """
    ((%(type)s*)mit->dataptr)[0].real = ((%(type)s*)mit->dataptr)[0].real + ((%(type)s*)it->dataptr)[0].real;
    ((%(type)s*)mit->dataptr)[0].imag = ((%(type)s*)mit->dataptr)[0].imag + ((%(type)s*)it->dataptr)[0].imag;
    """

    fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),
                                           'op': floatadd % {'type': t}}
                   for t in types] +
                  [inplace_map_template % {'type': t, 'typen': t.upper(),
                                           'op': complexadd % {'type': t}}
                   for t in complex_types])

    fn_array = ("inplace_map_binop addition_funcs[] = {" +
                ''.join(["""
    #if defined(%(typen)s)
    %(type)s_inplace_add,
    #endif
    """ % {'type': t, 'typen': t.upper()}
                         for t in types + complex_types]) +
                """NULL};
    """)

    type_number_array = ("int type_numbers[] = {" +
                         ''.join(["""
    #if defined(%(typen)s)
    %(typen)s,
    #endif
    """ % {'type': t, 'typen': t.upper()}
                                  for t in types + complex_types]) +
                         "-1000};")

    code = ("""
        #include <Python.h>
        #include "numpy/arrayobject.h"

        extern "C"{

        static PyObject *
        run_cthunk(PyObject *self, PyObject *args)
...
...
@@ -35,14 +95,130 @@ def compile_cutils():

            return Py_BuildValue("i", failure);
        }

        #if NPY_API_VERSION >= 0x00000008
        typedef void (*inplace_map_binop)(PyArrayMapIterObject *, PyArrayIterObject *);

        """ + fns + fn_array + type_number_array + """

        static int
        map_increment(PyArrayMapIterObject *mit, PyObject *op, inplace_map_binop add_inplace)
        {
            PyArrayObject *arr = NULL;
            PyArrayIterObject *it;
            PyArray_Descr *descr;
            if (mit->ait == NULL) {
                return -1;
            }
            descr = PyArray_DESCR(mit->ait->ao);
            Py_INCREF(descr);
            arr = (PyArrayObject *)PyArray_FromAny(op, descr,
                        0, 0, NPY_ARRAY_FORCECAST, NULL);
            if (arr == NULL) {
                return -1;
            }
            if ((mit->subspace != NULL) && (mit->consec)) {
                if (mit->iteraxes[0] > 0) {
                    PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);
                    if (arr == NULL) {
                        return -1;
                    }
                }
            }
            it = (PyArrayIterObject*)
                    PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);
            if (it == NULL) {
                Py_DECREF(arr);
                return -1;
            }

            (*add_inplace)(mit, it);

            Py_DECREF(arr);
            Py_DECREF(it);
            return 0;
        }

        static PyObject *
        inplace_increment(PyObject *dummy, PyObject *args)
        {
            PyObject *arg_a = NULL, *index = NULL, *inc = NULL;
            PyArrayObject *a;
            inplace_map_binop add_inplace = NULL;
            int type_number = -1;
            int i = 0;
            PyArrayMapIterObject *mit;

            if (!PyArg_ParseTuple(args, "OOO", &arg_a, &index,
                    &inc)) {
                return NULL;
            }
            if (!PyArray_Check(arg_a)) {
                PyErr_SetString(PyExc_ValueError, "needs an ndarray as first argument");
                return NULL;
            }

            a = (PyArrayObject *) arg_a;
            if (PyArray_FailUnlessWriteable(a, "input/output array") < 0) {
                return NULL;
            }

            if (PyArray_NDIM(a) == 0) {
                PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed.");
                return NULL;
            }
            type_number = PyArray_TYPE(a);

            while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){
                if (type_number == type_numbers[i]) {
                    add_inplace = addition_funcs[i];
                    break;
                }
                i++;
            }

            if (add_inplace == NULL) {
                PyErr_SetString(PyExc_TypeError, "unsupported type for a");
                return NULL;
            }
            mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);
            if (mit == NULL) {
                goto fail;
            }
            if (map_increment(mit, inc, add_inplace) != 0) {
                goto fail;
            }

            Py_DECREF(mit);

            Py_INCREF(Py_None);
            return Py_None;

        fail:
            Py_XDECREF(mit);
            return NULL;
        }
        #endif

        static PyMethodDef CutilsExtMethods[] = {
            {"run_cthunk", run_cthunk, METH_VARARGS|METH_KEYWORDS,
             "Run a theano cthunk."},
        #if NPY_API_VERSION >= 0x00000008
            {"inplace_increment", inplace_increment,
             METH_VARARGS,
             "increments a numpy array inplace at the passed indexes."},
        #endif
            {NULL, NULL, 0, NULL}        /* Sentinel */
        };"""
        )

    if PY3:
        # This is not the most efficient code, but it is written this way to
        # highlight the changes needed to make 2.x code compile under python 3.
        code = code.replace("<Python.h>", '"numpy/npy_3kcompat.h"', 1)
        code = code.replace("PyCObject", "NpyCapsule")
        code += """
...
...
@@ -59,15 +235,16 @@ def compile_cutils():

            return PyModule_Create(&moduledef);
        }
        }
        """
    else:
        code += """
        PyMODINIT_FUNC
        initcutils_ext(void)
        {
            import_array();
            (void) Py_InitModule("cutils_ext", CutilsExtMethods);
        }
        } //extern C
        """

    loc = os.path.join(config.compiledir, 'cutils_ext')
...
...
@@ -95,8 +272,6 @@ try:

    try:
        from cutils_ext.cutils_ext import *
    except ImportError:
-         import cmodule

        get_lock()
        # Ensure no-one else is currently modifying the content of the compilation
        # directory. This is important to prevent multiple processes from trying to
...
...
@@ -108,7 +283,6 @@ try:

            # and when we receive the lock
            from cutils_ext.cutils_ext import *
        except ImportError:
-             import cmodule
            compile_cutils()
            from cutils_ext.cutils_ext import *
...
...
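A note on semantics (illustration, not part of the diff): the `inplace_increment(a, index, inc)` function compiled above accumulates the increment at every occurrence of an index, which a plain fancy-indexed `+=` in NumPy does not do, because that update is computed on a buffered copy. A minimal NumPy-only sketch of the difference; `numpy.add.at` needs NumPy 1.8+, the same API level targeted by the `#if NPY_API_VERSION >= 0x00000008` guard:

import numpy as np

x = np.zeros(3)
x[[0, 0, 1]] += 1.0           # buffered: the duplicated index 0 is incremented only once
print(x)                      # [ 1.  1.  0.]

y = np.zeros(3)
np.add.at(y, [0, 0, 1], 1.0)  # unbuffered: every occurrence accumulates,
print(y)                      # [ 2.  1.  0.]   the behaviour inplace_increment provides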
theano/scalar/basic.py
...
...
@@ -23,7 +23,8 @@ import numpy

import theano
from theano.compat import PY3
from theano import gof
- from theano.gof import Op, utils, Variable, Constant, Type, Apply, FunctionGraph
+ from theano.gof import (Op, utils, Variable, Constant, Type, Apply,
+                         FunctionGraph)
from theano.gof.python25 import partial, all, any
from theano.configparser import config
...
...
@@ -1090,7 +1091,7 @@ class UnaryBitOp(UnaryScalarOp):

        return upcast_out(*input_types[0])

    def grad(self, inputs, output_gradients):
-         return [None]
+         return [inputs[0].zeros_like().astype(theano.config.floatX)]


class BinaryBitOp(BinaryScalarOp):
...
...
@@ -1103,7 +1104,8 @@ class BinaryBitOp(BinaryScalarOp):

        return upcast_out(*input_types[0])

    def grad(self, inputs, output_gradients):
-         return [None, None]
+         a, b = inputs
+         return [a.zeros_like().astype(theano.config.floatX),
+                 b.zeros_like().astype(theano.config.floatX)]


class OR(BinaryBitOp):
...
...
@@ -2679,7 +2681,7 @@ class Composite(ScalarOp):

        except AttributeError:
            if 0:
                l = []
-                 for n in fgraph.toposort():
+                 for n in self.fgraph.toposort():
                    if hasattr(n.op, "name") and n.op.name is not None:
                        v = n.op.name
                        if v.startswith("Composite"):
...
...
theano/tensor/basic.py
...
...
@@ -8,7 +8,7 @@ from itertools import izip

from textwrap import dedent

import numpy
- #from copy import copy as python_copy
+ from copy import copy as python_copy

import theano
from theano.compat import PY3
...
...
@@ -24,6 +24,12 @@ from theano import compile, printing

from theano.printing import pprint, min_informative_str
from theano.tensor.utils import hash_from_ndarray

import theano.gof.cutils  # needed to import cutils_ext
try:
    from cutils_ext.cutils_ext import inplace_increment
except ImportError:
    inplace_increment = None

# We use these exceptions as well.
from theano.scalar import ComplexError, IntegerDivisionError
import theano.scalar.sharedvar
...
...
@@ -43,14 +49,14 @@ python_any = any

python_all = all

# Define common subsets of dtypes (as strings).
- int_dtypes = map(str, scal.int_types)
- uint_dtypes = map(str, scal.uint_types)
- float_dtypes = map(str, scal.float_types)
complex_dtypes = map(str, scal.complex_types)
continuous_dtypes = map(str, scal.continuous_types)
+ float_dtypes = map(str, scal.float_types)
discrete_dtypes = map(str, scal.discrete_types)
all_dtypes = map(str, scal.all_types)
+ int_dtypes = map(str, scal.int_types)
+ uint_dtypes = map(str, scal.uint_types)

# Do a lazy import of the sparse module
sparse_module_ref = None
...
...
@@ -4446,7 +4452,8 @@ class Subtensor(Op):

                slice_c = None

            return slice(slice_a, slice_b, slice_c)
-         # There is a bug in numpy that results in isinstance(x, int) returning False for numpy integers.
+         # There is a bug in numpy that results in isinstance(x, int) returning
+         # False for numpy integers.
        # See <http://projects.scipy.org/numpy/ticket/2235>.
        elif isinstance(entry, (numpy.integer, int)):
            return entry
...
...
@@ -5077,6 +5084,7 @@ def inc_subtensor(x, y, inplace=False, set_instead_of_inc=False,

    """
    # First of all, y cannot have a higher dimension than x,
    # nor have non-broadcastable dimensions where x is broadcastable.

    x = as_tensor_variable(x)
    y = as_tensor_variable(y)
...
...
@@ -5117,11 +5125,11 @@ def inc_subtensor(x, y, inplace=False, set_instead_of_inc=False,

        return the_op(real_x, y, ilist)
    elif isinstance(x.owner.op, AdvancedSubtensor):
        real_x = x.owner.inputs[0]
-         coordvec_0 = x.owner.inputs[1]
-         coordvec_1 = x.owner.inputs[2]
+         ilist = x.owner.inputs[1:]

        the_op = AdvancedIncSubtensor(inplace,
                                      set_instead_of_inc=set_instead_of_inc)
-         return the_op(real_x, y, coordvec_0, coordvec_1)
+         return the_op(real_x, y, *ilist)
    elif isinstance(x.owner.op, DimShuffle):
        inner_x = x.owner.inputs[0]
        # In the dimshuffle case, there are in fact two dimshuffles:
...
...
@@ -5147,7 +5155,7 @@ def inc_subtensor(x, y, inplace=False, set_instead_of_inc=False,

        # Try to apply inc_subtensor on inner_x.
        # If it works, there is no need to reshape, as the inc_subtensor
        # will have the same shape as inner_x, which is what we want.
-         inner_incsubtensor = inc_subtensor(inner_x, y,
+         inner_incsubtensor = inc_subtensor(inner_x, y.flatten(),
            inplace=inplace,
            set_instead_of_inc=set_instead_of_inc,
            tolerate_inplace_aliasing=tolerate_inplace_aliasing)
...
...
@@ -7147,19 +7155,27 @@ class AdvancedIncSubtensor1(Op):

        if self.set_instead_of_inc:
            x[idx] = y
        else:
-             # If `y` has as many dimensions as `x`, then we want to iterate
-             # jointly on `x` and `y`. Otherwise, it means `y` should be
-             # broadcasted to fill all relevant rows of `x`.
-             assert y.ndim <= x.ndim  # Should be guaranteed by `make_node`
-             if y.ndim == x.ndim:
-                 assert len(y) == len(idx)
-                 for (j, i) in enumerate(idx):
-                     x[i] += y[j]
-             else:
-                 for i in idx:
-                     x[i] += y
+             increment = inplace_increment
+             if increment is None:
+                 increment = self.inplace_increment1d_slow
+
+             increment(x, idx, y)
+
        out[0] = x

+     def inplace_increment1d_slow(self, x, idx, y):
+         # If `y` has as many dimensions as `x`, then we want to iterate
+         # jointly on `x` and `y`. Otherwise, it means `y` should be
+         # broadcasted to fill all relevant rows of `x`.
+         assert y.ndim <= x.ndim  # Should be guaranteed by `make_node`
+         if y.ndim == x.ndim:
+             assert len(y) == len(idx)
+             for (j, i) in enumerate(idx):
+                 x[i] += y[j]
+         else:
+             for i in idx:
+                 x[i] += y
+
    def infer_shape(self, node, ishapes):
        x, y, ilist = ishapes
        return [x]
...
...
@@ -7186,6 +7202,111 @@ class AdvancedIncSubtensor1(Op):

        return [gx, gy] + [DisconnectedType()()] * len(idx_list)

advanced_inc_subtensor1 = AdvancedIncSubtensor1()


def as_index_variable(idx):
    if idx is None:
        return NoneConst
    if isinstance(idx, slice):
        return make_slice(idx)
    idx = as_tensor_variable(idx)
    if idx.type.dtype[:3] not in ('int', 'uin'):
        raise TypeError('index must be integers')
    return idx


def as_int_none_variable(x):
    if x is None:
        return NoneConst
    x = as_tensor_variable(x, ndim=0)
    if x.type.dtype[:3] not in ('int', 'uin'):
        raise TypeError('index must be integers')
    return x


class MakeSlice(Op):
    def make_node(self, slc):
        return Apply(self,
                     map(as_int_none_variable,
                         [slc.start, slc.stop, slc.step]),
                     [slicetype()])

    def perform(self, node, inp, out_):
        out, = out_
        out[0] = slice(*inp)

    def __str__(self):
        return self.__class__.__name__

    def __eq__(self, other):
        return type(self) == type(other)

    def __hash__(self):
        return hash(type(self))

    def grad(self, inputs, grads):
        return [DisconnectedType()() for i in inputs]

make_slice = MakeSlice()


class SliceType(gof.Type):

    def filter(self, x, strict=False, allow_downcast=None):
        if isinstance(x, slice):
            return x
        else:
            raise TypeError('Expected a slice!')

    def __str__(self):
        return "slice"

slicetype = SliceType()


class NoneTypeT(gof.Type):

    def filter(self, x, strict=False, allow_downcast=None):
        if x is None:
            return x
        else:
            raise TypeError('Expected None!')

    def __str__(self):
        return "None"

NoneConst = Constant(NoneTypeT(), None, name='None')


def adv_index_broadcastable_pattern(a, idx):
    """
    This function is only used to determine the broadcast pattern for
    AdvancedSubtensor output variable.

    For this, we make a fake ndarray and a fake idx and call use ask numpy
    the output. From this, we find the output broadcast pattern.
    """

    def replace_slice(v):
        if isinstance(v, gof.Apply):
            if len(v.outputs) != 1:
                raise ValueError(
                    "It is ambiguous which output of a multi-output Op has"
                    " to be fetched.", v)
            else:
                v = v.outputs[0]

        if NoneConst.equals(v):
            return None
        if isinstance(v.type, SliceType):
            return slice(None, None)

        return numpy.zeros((2,) * v.ndim, int)

    newidx = tuple(map(replace_slice, idx))

    # 2 - True = 1; 2 - False = 2
    fakeshape = [2 - bc for bc in a.broadcastable]
    retshape = numpy.empty(fakeshape)[newidx].shape
    return tuple([dim == 1 for dim in retshape])


class AdvancedSubtensor(Op):
...
...
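The `adv_index_broadcastable_pattern` helper above infers the output broadcast pattern by faking the computation in NumPy: broadcastable input dimensions become size 1, other dimensions size 2, symbolic index arrays become small integer zero arrays, and slices become `slice(None, None)`; the shape of the fake result is then inspected. A plain-NumPy sketch of that trick (illustrative values only, not part of the diff):

import numpy

fakeshape = [2, 1]                                  # second input dimension is broadcastable
newidx = (numpy.zeros((3,), int), slice(None, None))
retshape = numpy.empty(fakeshape)[newidx].shape     # (3, 1)
print(tuple(dim == 1 for dim in retshape))          # (False, True): dim 1 stays broadcastable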
@@ -7204,37 +7325,15 @@ class AdvancedSubtensor(Op):

    def __str__(self):
        return self.__class__.__name__

-     def make_node(self, x, *inputs):
+     def make_node(self, x, *index):
        x = as_tensor_variable(x)

-         # FIXME
-         # Note (9 Jul 2012): what does this 'FIXME' mean? Possibly that the
-         # current implementation must be generalized? Please specify.
-         if x.ndim == 2 and len(inputs) == 2:
-             ind1 = as_tensor_variable(inputs[0])
-             ind2 = as_tensor_variable(inputs[1])
-             if (not (ind1.type.dtype.startswith('int') or
-                      ind1.type.dtype.startswith('uint'))):
-                 raise TypeError(
-                     'the indices into a matrix must be int or uint. It is ',
-                     ind1.type.dtype)
-             if (not (ind2.type.dtype.startswith('int') or
-                      ind2.type.dtype.startswith('uint'))):
-                 raise TypeError(
-                     'the indices into a matrix must be int or uint. It is ',
-                     ind2.type.dtype)
-
-             if ind1.ndim == 1 and ind2.ndim == 1:
-                 return gof.Apply(self,
-                                  (x, ind1, ind2),
-                                  [tensor(dtype=x.type.dtype,
-                                          broadcastable=[False])])
-             raise NotImplementedError(
-                 'Advanced indexing of x (of dimension %i) with these argument'
-                 ' dimensions (%s) not supported yet'
-                 % (x.ndim, ','.join(str(input.ndim) for input in inputs)))
-         raise NotImplementedError(
-             'Advanced indexing of x with arguments (%s) not supported yet'
-             % ','.join(str(input) for input in inputs))
+         index = tuple(map(as_index_variable, index))
+         bcast = adv_index_broadcastable_pattern(x, index)
+         return gof.Apply(self,
+                          (x,) + index,
+                          [tensor(dtype=x.type.dtype,
+                                  broadcastable=bcast)])

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
...
...
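With `make_node` generalized as above, `AdvancedSubtensor` is no longer restricted to a matrix indexed by two 1-d vectors: every index goes through `as_index_variable` (slices wrapped by `MakeSlice`, `None` as `NoneConst`, anything else coerced to an integer tensor), and the output broadcast pattern comes from `adv_index_broadcastable_pattern`. A small sketch of indexing this now supports, mirroring `test_index_into_vec_w_matrix` in the new tests (illustrative, assumes the Theano of this commit):

import theano.tensor as T

v = T.fvector('v')
ix2 = T.lmatrix('ix2')    # matrix of integer indices into v
a = v[ix2]                # advanced indexing; a.dtype == v.dtype
print(a.broadcastable)    # follows ix2.broadcastable, as asserted by the new test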
@@ -7309,6 +7408,8 @@ class AdvancedIncSubtensor(Op):

            raise NotImplementedError('In place computation is not'
                                      ' implemented')

+         self.allow_legacy_perform = False
+
    def __hash__(self):
        return hash((type(self), self.inplace, self.set_instead_of_inc))
...
...
@@ -7326,24 +7427,43 @@ class AdvancedIncSubtensor(Op):

        x = as_tensor_variable(x)
        y = as_tensor_variable(y)

-         if x.ndim == 2 and y.ndim == 1 and len(inputs) == 2:
-             ind1 = as_tensor_variable(inputs[0])
-             ind2 = as_tensor_variable(inputs[1])
-             if ind1.ndim == 1 and ind2.ndim == 1:
-                 return gof.Apply(self,
+         op = self
+         # If we are incrementing, but the increment compiled function is not
+         # available, we need to support legacy cases.
+         if not self.set_instead_of_inc and inplace_increment is None:
+             legacy_conditions = False
+             if x.ndim == 2 and y.ndim == 1 and len(inputs) == 2:
+                 ind1 = as_tensor_variable(inputs[0])
+                 ind2 = as_tensor_variable(inputs[1])
+                 if ind1.ndim == 1 and ind2.ndim == 1:
+                     if ind1.owner and isinstance(ind1.owner.op, ARange):
+                         legacy_conditions = True
+                     elif isinstance(ind1, Constant):
+                         # Make sure no index is duplicated
+                         val = ind1.value
+                         if numpy.unique(val).size == val.size:
+                             legacy_conditions = True
+                     elif ind2.owner and isinstance(ind2.owner.op, ARange):
+                         legacy_conditions = True
+                     elif isinstance(ind2, Constant):
+                         # Make sure no index is duplicated
+                         val = ind2.value
+                         if numpy.unique(val).size == val.size:
+                             legacy_conditions = True
+
+             if legacy_conditions:
+                 op = python_copy(self)
+                 op.allow_legacy_perform = True
+             else:
+                 raise NotImplementedError(
+                     'Could not import inplace_increment, so some advanced '
+                     'indexing features are disabled. They will be '
+                     'available if you update NumPy to version 1.8 or '
+                     'later, or to the latest development version.')
+
+         return gof.Apply(op,
                         (x, y) + inputs,
                         [tensor(dtype=x.type.dtype,
                                 broadcastable=x.type.broadcastable)])
-             raise NotImplementedError(
-                 'Advanced indexing increment/set of x (of dimension %i) by y'
-                 ' (of dimension %i) with these argument dimensions (%s) not'
-                 ' supported yet'
-                 % (x.ndim, y.ndim,
-                    ','.join(str(input.ndim) for input in inputs)))
-         raise NotImplementedError(
-             'Advanced indexing increment/set of x (of dim %i) by y (of dim %i)'
-             ' with arguments (%s) not supported yet'
-             % (x.ndim, y.ndim, ','.join(str(input) for input in inputs)))

    def perform(self, node, inputs, out_):
        # TODO: 1. opt to make this in place 2. generalize as described in
...
...
@@ -7353,12 +7473,20 @@ class AdvancedIncSubtensor(Op):

        if not self.inplace:
            out[0] = inputs[0].copy()
        else:
-             raise NotImplementedError('In place computation is not'
-                                       ' implemented')
+             out[0] = inputs[0]

        if self.set_instead_of_inc:
            out[0][inputs[2:]] = inputs[1]
-         else:
+         elif inplace_increment is not None:
+             inplace_increment(out[0], tuple(inputs[2:]), inputs[1])
+         elif self.allow_legacy_perform:
            out[0][inputs[2:]] += inputs[1]
+         else:
+             raise NotImplementedError(
+                 'Could not import inplace_increment, so some advanced '
+                 'indexing features are disabled. They will be '
+                 'available if you update NumPy to version 1.8 or '
+                 'later, or to the latest development version.')

        if (numpy.__version__ <= '1.6.1' and
                out[0].size != numpy.uint32(out[0].size)):
...
...
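Taken together, the basic.py changes let `inc_subtensor` accept general advanced indexing and route it through `AdvancedIncSubtensor`, using `cutils_ext.inplace_increment` at runtime when it compiled, or the legacy path otherwise. A usage sketch mirroring the new tests (illustrative, not part of the diff; assumes a build where `inplace_increment` imported successfully, otherwise `make_node` raises `NotImplementedError` for indices that may contain duplicates):

import numpy
import theano
import theano.tensor as T

m = T.dmatrix('m')
rows = T.lvector('rows')
cols = T.lvector('cols')

# Builds an AdvancedIncSubtensor node; repeated (row, col) pairs accumulate.
out = T.inc_subtensor(m[rows, cols], 2.1)
f = theano.function([m, rows, cols], out)

print(f(numpy.zeros((3, 3)), [1, 2, 1], [0, 1, 0]))
# element (1, 0) is incremented twice -> 4.2; element (2, 1) once -> 2.1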
theano/tensor/tests/test_basic.py
...
...
@@ -37,13 +37,13 @@ from theano.tensor import (_shared, wvector, bvector, autocast_float_as,

                           tensor_copy, tensordot, TensorType, Tri, tri,
                           tril, triu, unbroadcast, var, Join, shape,
                           MaxAndArgmax, lscalar, zvector, exp,
                           get_scalar_constant_value, ivector, reshape,
                           scalar_from_tensor, scal, iscalars, arange,
                           dscalars, fvector, imatrix, numeric_grad,
                           opt, ComplexError, lvector, lmatrix,
                           true_div, max, min, Split, roll, tile,
                           patternbroadcast, Eye, Shape, Dot,
                           PermuteRowElements, ScalarFromTensor,
                           TensorFromScalar, dtensor4, Rebroadcast, Alloc,
                           dtensor3, SpecifyShape, Mean,
                           IncSubtensor, AdvancedIncSubtensor1, itensor3,
                           Tile, AdvancedIncSubtensor, switch, Diagonal, Diag,
-                            nonzero, flatnonzero, nonzero_values)
+                            nonzero, flatnonzero, nonzero_values,
+                            inplace_increment)

from theano.tests import unittest_tools as utt
...
...
@@ -3131,10 +3131,6 @@ class T_subtensor(unittest.TestCase, utt.TestOptimizationMixin):

        n = self.shared(numpy.asarray(5, dtype=self.dtype))
        self.assertRaises(TypeError, n.__getitem__, [0, 0])

-     def test_err_invalid_not_2d(self):
-         n = self.shared(numpy.ones((3, 3, 3), dtype=self.dtype) * 5)
-         self.assertRaises(NotImplementedError, n.__getitem__,
-                           ([0, 0, 0], [1, 1, 1], [2, 2, 2]))
-
    def test_err_invalid_2list_dtype(self):
        n = self.shared(numpy.ones((3, 3), dtype=self.dtype) * 5)
...
...
@@ -3725,6 +3721,109 @@ class TestIncSubtensor1(unittest.TestCase):

        self.assertRaises(TypeError,
                          lambda: inc_subtensor(self.v[self.adv1q], fmatrix()))


inplace_increment_missing = SkipTest(
    "inc_subtensor with advanced indexing not enabled. "
    "Installing NumPy 1.8 or the latest development version "
    "should make that feature available.")


class TestAdvancedSubtensor(unittest.TestCase):
    # test inc_subtensor
    # also tests set_subtensor

    def setUp(self):
        self.s = iscalar()
        self.v = fvector()
        self.m = dmatrix()
        self.t = ctensor3()

        self.ix1 = lvector()  # advanced 1d query
        self.ix12 = lvector()
        self.ix2 = lmatrix()

    def test_cant_adv_idx_into_scalar(self):
        self.assertRaises(TypeError, lambda: self.s[self.ix1])

    def test_index_into_vec_w_vec(self):
        a = self.v[self.ix1]
        assert a.type == self.v.type, (a.type, self.v.type)

    def test_index_into_vec_w_matrix(self):
        a = self.v[self.ix2]
        assert a.dtype == self.v.dtype, (a.dtype, self.v.dtype)
        assert a.broadcastable == self.ix2.broadcastable, (
            a.broadcastable, self.ix2.broadcastable)

    def test_inc_adv_subtensor_w_matrix(self):
        if inplace_increment is None:
            raise inplace_increment_missing

        subt = self.v[self.ix2]
        a = inc_subtensor(subt, subt)

        assert a.type == self.v.type, (a.type, self.v.type)
        f = theano.function([self.v, self.ix2], a, allow_input_downcast=True)
        aval = f([.4, .9, .1], [[1, 2],
                                [1, 2]])
        assert numpy.allclose(aval, [.4, .9 * 3, .1 * 3])

    def test_inc_adv_subtensor_w_2vec(self):
        if inplace_increment is None:
            raise inplace_increment_missing

        subt = self.m[self.ix1, self.ix12]
        a = inc_subtensor(subt, subt)

        typ = TensorType(self.m.type.dtype, self.ix2.type.broadcastable)
        assert a.type == typ, (a.type, typ)
        f = theano.function([self.m, self.ix1, self.ix12], a,
                            allow_input_downcast=True)
        aval = f([[.4, .9, .1],
                  [5, 6, 7],
                  [.5, .3, .15]],
                 [1, 2, 1],
                 [0, 1, 0])
        assert numpy.allclose(aval,
                              [[.4, .9, .1],
                               [5 * 3, 6, 7],
                               [.5, .3 * 2, .15]]), aval

    def test_inc_adv_subtensor_with_broadcasting(self):
        if inplace_increment is None:
            raise inplace_increment_missing

        a = inc_subtensor(self.m[self.ix1, self.ix12], 2.1)

        assert a.type == self.m.type, (a.type, self.m.type)
        f = theano.function([self.m, self.ix1, self.ix12], a,
                            allow_input_downcast=True)
        aval = f([[.4, .9, .1],
                  [5, 6, 7],
                  [.5, .3, .15]],
                 [1, 2, 1],
                 [0, 1, 0])
        assert numpy.allclose(aval,
                              [[.4, .9, .1],
                               [5 + 2.1 * 2, 6, 7],
                               [.5, .3 + 2.1, .15]]), aval

    def test_inc_adv_subtensor_with_index_broadcasting(self):
        if inplace_increment is None:
            raise inplace_increment_missing

        a = inc_subtensor(self.m[self.ix1, self.ix2], 2.1)

        assert a.type == self.m.type, (a.type, self.m.type)
        f = theano.function([self.m, self.ix1, self.ix2], a,
                            allow_input_downcast=True)
        aval = f([[.4, .9, .1],
                  [5, 6, 7],
                  [.5, .3, .15]],
                 [0, 2, 0],
                 [[0, 1, 0],
                  [2, 2, 2]])
        assert numpy.allclose(aval,
                              [[.4 + 2 * 2.1, .9, .1 + 2 * 2.1],
                               [5, 6, 7],
                               [.5, .3 + 2.1, .15 + 2.1]]), aval


class T_Join_and_Split(unittest.TestCase):
    """
...
...