testgroup / pytensor · Commits

Commit b7547f84
authored Oct 01, 2015 by Sina Honari

applying changes to remove Flatten Op

Parent: eced0049
Showing 3 changed files with 201 additions and 194 deletions:

theano/sandbox/cuda/basic_ops.py   +17  -17
theano/tensor/basic.py             +166 -159
theano/tensor/opt.py               +18  -18
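The deprecation warnings introduced below steer users from the Op classes toward the corresponding functions. A minimal usage sketch of that migration (the variable names are illustrative, not from the commit):

import theano.tensor as T

x = T.tensor3('x')              # e.g. a (batch, rows, cols) input

# Deprecated: instantiating the Op class directly now warns.
y_old = T.Flatten(2)(x)         # emits DeprecationWarning

# Preferred: the flatten function, which lowers to a Reshape.
y_new = T.flatten(x, outdim=2)  # keeps dim 0, collapses the rest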
theano/sandbox/cuda/basic_ops.py

@@ -3322,23 +3322,23 @@ class GpuIncSubtensor(tensor.IncSubtensor, GpuOp):
...
        return ()


class GpuFlatten(gof.HideC, tensor.Reshape, GpuOp):
    """
    Implement Flatten on the gpu.

    """
    def make_node(self, x):
        warnings.warn(
            "GpuFlatten class is deprecated, "
            "please use gpu_flatten method instead.",
            DeprecationWarning,
            stacklevel=4)
        assert isinstance(x.type, CudaNdarrayType)
        rval = tensor.Reshape.make_node(self, x, [tensor.prod(x.shape)])
        host_out_broadcastable = rval.outputs[0].type.broadcastable
        out_type = CudaNdarrayType(broadcastable=host_out_broadcastable)
        return Apply(self, [x], [out_type()])
...
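As this hunk shows, GpuFlatten now defers to tensor.Reshape: flattening is just a reshape whose single target dimension is the product of the input's shape. A NumPy sketch of the host-side equivalent (illustrative only, not part of the commit):

import numpy as np

x = np.arange(24).reshape(2, 3, 4)
flat = x.reshape(int(np.prod(x.shape)))  # one dimension of length 2*3*4
assert flat.shape == (24,)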
theano/tensor/basic.py

@@ -4499,172 +4499,179 @@ def reshape(x, newshape, ndim=None, name=None):
...
    return rval


class Flatten(Op):
    """
    Flatten a tensor.

    Flattens a tensor to `outdim` dimensions by preserving the leading
    outdim - 1 shape components.

    """
    view_map = {0: [0]}

    check_input = False
    __props__ = ("outdim",)

    def __init__(self, outdim=1):
        warnings.warn(
            "Flatten class is deprecated, "
            "please use flatten method instead.",
            DeprecationWarning,
            stacklevel=4)
        self.outdim = int(outdim)

    def __str__(self):
        return '%s{%s}' % (self.__class__.__name__, self.outdim)

    def make_node(self, x):
        t_x = as_tensor_variable(x)
        if self.outdim < 1 or (x.ndim and self.outdim > x.ndim):
            raise ValueError('invalid output ndimensions (%i) for tensor of '
                             'rank %i' % (self.outdim, t_x.ndim))

        # Infer the broadcastable pattern of the output. For every dimension
        # unaffected by the flatten, the broadcast flag should be unchanged.
        # For the dimension resulting from the collapse of other dimensions,
        # it should be broadcastable iff all the collapsed dimensions were
        # broadcastable.
        bcast_kept_dims = x.broadcastable[:self.outdim - 1]
        bcast_new_dim = python_all(x.broadcastable[self.outdim - 1:])
        broadcastable = bcast_kept_dims + (bcast_new_dim,)

        return gof.Apply(self, [t_x], [tensor(x.type.dtype,
                                              broadcastable)])

    def perform(self, node, inp, out_):
        x, = inp
        out, = out_
        outdim = self.outdim
        if outdim == 1:
            try:
                out[0] = x.reshape(x.size)
            except AttributeError:
                out[0] = x.reshape((numpy.prod(x.shape),))
        elif outdim == len(x.shape):
            out[0] = x
        else:
            newshape = (x.shape[:outdim - 1] +
                        (numpy.prod(x.shape[outdim - 1:]),))
            out[0] = x.reshape(newshape)

    def infer_shape(self, node, in_shapes):
        in_shp, = in_shapes
        part1 = in_shp[:self.outdim - 1]
        part2 = in_shp[self.outdim - 1:]

        if len(part2) > 1:
            part2 = (prod(part2, dtype='int64'),)
        elif len(part2) == 1:
            # We do not want to force an upcast of part2 if its length is 1
            pass
        else:
            if len(in_shp) == 0 and self.outdim == 1:
                part2 = (1,)
            else:
                raise ValueError('invalid output ndimensions (%i) for tensor '
                                 'of rank %i' % (self.outdim, len(in_shp)))

        out_shape = (part1 + part2)
        return [out_shape]

    def grad(self, inp, grads):
        x, = inp
        g_out, = grads
        return [reshape(g_out, shape(x), x.ndim)]

    def R_op(self, inputs, eval_points):
        if None in eval_points:
            return [None]
        return self.make_node(*eval_points).outputs

    def c_code_cache_version(self):
        return (1, 1)

    def c_code(self, node, name, inputs, outputs, sub):
        x, = inputs
        out, = outputs
        outdim = self.outdim
        fail = sub['fail']
        return """
        if (%(outdim)s == PyArray_NDIM(%(x)s))
        {
            Py_XDECREF(%(out)s);
            Py_XINCREF(%(x)s);
            %(out)s = %(x)s;
        }
        else
        {
            Py_XDECREF(%(out)s);

            if (%(outdim)s == 1)
            {
                npy_intp size = PyArray_SIZE(%(x)s);
                PyArray_Dims newshape;
                newshape.ptr = &size;
                newshape.len = 1;
                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
                                                           &newshape,
                                                           NPY_CORDER);
            }
            else
            {
                npy_intp *oldshape = PyArray_DIMS(%(x)s);
                npy_intp newshape_dims[%(outdim)s];

                int i;
                for (i = 0; i < %(outdim)s - 1; ++i)
                    newshape_dims[i] = oldshape[i];

                newshape_dims[i] = 1;

                for (int j = %(outdim)s - 1; j < PyArray_NDIM(%(x)s); ++j)
                    newshape_dims[i] *= oldshape[j];

                PyArray_Dims newshape;
                newshape.ptr = newshape_dims;
                newshape.len = %(outdim)s;
                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
                                                           &newshape,
                                                           NPY_CORDER);
            }
        }
        if (!%(out)s)
        {
            //The error message should have been set by
            // PyArray_Newshape
            %(fail)s;
        }
        if (!PyArray_ISALIGNED(%(out)s)) {
            PyErr_Format(
                PyExc_RuntimeError,
                "PyArray_Newshape returned an object that isn't"
                " aligned! NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
                " this problem for some input shape/new shape"
                " combinations. Use another NumPy version.");
            %(fail)s;
        }
        """ % locals()


def is_flatten(node, outdim=1):
-    return isinstance(node.op, theano.tensor.Reshape) and node.inputs[1].ndim == outdim
+    return isinstance(node.op, theano.tensor.Reshape) and \
+        node.inputs[1].ndim == outdim


def flatten(x, outdim=1):
    outdim = int(outdim)
-    if outdim < 1 or outdim > x.ndim:
-        raise ValueError('outdim of flatten must an int in the range [1, %s], recieved %s' % (x.ndim, outdim))
+    # An error is raised if outdim is not positive, or if outdim is greater
+    # than one and also greater than x's dimensionality.
+    # Any input variable can be flattened to have outdim of 1,
+    # even if it's a scalar.
+    if outdim < 1 or (outdim > 1 and outdim > x.ndim):
+        raise ValueError('outdim %s out of bound [1, %d)'
+                         % (outdim, x.ndim + 1))
    if outdim > 1:
-        dims = tuple(x.shape[:outdim - 1]) + (theano.tensor.prod(x.shape[outdim - 1:]),)
+        dims = tuple(x.shape[:outdim - 1]) + \
+            (theano.tensor.prod(x.shape[outdim - 1:]),)
    else:
        dims = (-1,)
    x_reshaped = x.reshape(dims)
...
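The docstring and the broadcastable comment above pin down the shape rule: the leading outdim - 1 dimensions survive, and everything from dimension outdim - 1 onward collapses into one trailing dimension, which is broadcastable iff every collapsed dimension was. A small self-contained sketch of both rules (hypothetical helpers, not part of the commit):

import numpy as np

def flatten_shape(shape, outdim):
    # Keep the leading outdim - 1 components, collapse the rest into one.
    return shape[:outdim - 1] + (int(np.prod(shape[outdim - 1:])),)

def flatten_broadcastable(bcast, outdim):
    # Kept dims keep their flag; the collapsed dim is broadcastable
    # iff all the collapsed dims were broadcastable.
    return bcast[:outdim - 1] + (all(bcast[outdim - 1:]),)

assert flatten_shape((2, 3, 4), outdim=2) == (2, 12)
assert flatten_broadcastable((False, True, True), outdim=2) == (False, True)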
theano/tensor/opt.py

@@ -3877,24 +3877,24 @@ def local_useless_split(node):
...
################
# Flatten Opts #
################


@register_canonicalize
@register_stabilize
@gof.local_optimizer([T.Flatten])
def local_flatten_lift(node):
    """
    Flatten(UnaryElemwise(x)) -> UnaryElemwise(Flatten(x))

    This optimization is needed by optimization
    nnet/sigm.py:log1msigm_to_softplus to get applied when there is a flatten.

    """
    if (isinstance(node.op, T.Flatten) and
            node.inputs[0].owner and
            isinstance(node.inputs[0].owner.op, T.Elemwise) and
            len(node.inputs[0].owner.inputs) == 1):
        f = node.op(node.inputs[0].owner.inputs[0])
        e = node.inputs[0].owner.op(f)
        return [e]


##################
# Reshape opts #
##################
...
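The local_flatten_lift rewrite is valid because flattening commutes with unary elementwise ops: applying the op and then reshaping gives the same values as reshaping first. A quick NumPy check of that identity, with tanh standing in for any unary Elemwise (illustrative only):

import numpy as np

x = np.random.rand(2, 3)
lhs = np.tanh(x).reshape(-1)   # Flatten(UnaryElemwise(x))
rhs = np.tanh(x.reshape(-1))   # UnaryElemwise(Flatten(x))
assert np.allclose(lhs, rhs)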