testgroup / pytensor / Commits / eced0049

Commit eced0049, authored Sep 30, 2015 by Sina Honari

first commit to remove Flatten Op

Parent: 33bda7ca
Showing 10 changed files with 281 additions and 241 deletions.
theano/sandbox/cuda/__init__.py                +1    -1
theano/sandbox/cuda/basic_ops.py               +27   -12
theano/sandbox/cuda/extra_ops.py               +2    -2
theano/sandbox/cuda/opt.py                     +21   -19
theano/sandbox/cuda/tests/test_basic_ops.py    +1    -1
theano/tensor/basic.py                         +175  -152
theano/tensor/nnet/tests/test_sigm.py          +1    -1
theano/tensor/opt.py                           +18   -18
theano/tensor/tests/test_basic.py              +27   -29
theano/tensor/tests/test_opt.py                +8    -6
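Taken together, the changes replace the dedicated Flatten Op (and its GPU counterpart GpuFlatten) with flatten/gpu_flatten helpers that compile to a plain Reshape, plus an is_flatten predicate for tests to use instead of isinstance checks. In user code the migration looks roughly like this sketch (a hedged example against a Theano checkout of this era; the variable names are ours, not from the diff):

import theano
import theano.tensor as T

x = T.tensor3('x')
# Before this commit: y = T.Flatten(2)(x)  -- a dedicated Op
y = T.flatten(x, 2)   # after: builds a Reshape node under the hood
f = theano.function([x], y)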
theano/sandbox/cuda/__init__.py
@@ -318,7 +318,7 @@ if cuda_available:
         GpuDimShuffle, GpuCAReduce, GpuReshape, GpuContiguous,
         GpuSubtensor, GpuIncSubtensor,
         GpuAdvancedSubtensor1, GpuAdvancedIncSubtensor1,
-        GpuFlatten, GpuShape, GpuAlloc, GpuAllocEmpty, GpuSplit,
+        gpu_flatten, GpuFlatten, GpuShape, GpuAlloc, GpuAllocEmpty, GpuSplit,
         GpuJoin, fscalar, fvector, fmatrix, frow, fcol,
         ftensor3, ftensor4, scalar, vector, matrix, row, col,
theano/sandbox/cuda/basic_ops.py
@@ -3322,18 +3322,33 @@ class GpuIncSubtensor(tensor.IncSubtensor, GpuOp):
         return ()


-class GpuFlatten(gof.HideC, tensor.Flatten, GpuOp):
-    """
-    Implement Flatten on the gpu.
-    """
-    def make_node(self, x):
-        assert isinstance(x.type, CudaNdarrayType)
-        rval = tensor.Flatten.make_node(self, x)
-        host_out_broadcastable = rval.outputs[0].type.broadcastable
-        out_type = CudaNdarrayType(broadcastable=host_out_broadcastable)
-        return Apply(self, [x], [out_type()])
+#class GpuFlatten(gof.HideC, tensor.Reshape, GpuOp):
+#    """
+#    Implement Flatten on the gpu.
+#
+#    """
+#
+#    def make_node(self, x):
+#        warnings.warn(
+#            "GpuFlatten class is deprecated, "
+#            "please use gpu_flatten method instead.",
+#            DeprecationWarning,
+#            stacklevel=4)
+#        assert isinstance(x.type, CudaNdarrayType)
+#        rval = tensor.Reshape.make_node(self, x, [tensor.prod(x.shape)])
+#        host_out_broadcastable = rval.outputs[0].type.broadcastable
+#        out_type = CudaNdarrayType(broadcastable=host_out_broadcastable)
+#        return Apply(self, [x], [out_type()])
+
+
+def gpu_flatten(x, outdim=1):
+    x = as_cuda_ndarray_variable(x)
+    if outdim > 1:
+        dims = tuple(x.shape[:outdim - 1]) + \
+            (theano.tensor.prod(x.shape[outdim - 1:]),)
+    else:
+        dims = (-1,)
+    return GpuReshape(outdim)(x, dims)


 class GpuShape(tensor.Shape, GpuOp):
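The new gpu_flatten reduces flattening to a reshape: the first outdim - 1 axes keep their symbolic sizes, and everything after collapses into one axis. A small NumPy sketch of the same shape arithmetic (flatten_dims is our illustrative name, not part of the diff):

import numpy as np

def flatten_dims(shape, outdim=1):
    # Keep the leading outdim - 1 axes; collapse the rest into one.
    if outdim > 1:
        return tuple(shape[:outdim - 1]) + (int(np.prod(shape[outdim - 1:])),)
    return (-1,)  # a single axis; reshape infers its length

x = np.zeros((4, 5, 3))
assert x.reshape(flatten_dims(x.shape, outdim=2)).shape == (4, 15)
assert x.reshape(flatten_dims(x.shape, outdim=1)).shape == (60,)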
theano/sandbox/cuda/extra_ops.py
@@ -3,7 +3,7 @@ import copy
 from theano import Op
 from theano.gof import local_optimizer
 from theano.sandbox.cuda import cuda_available, GpuOp
-from theano.sandbox.cuda.basic_ops import GpuFlatten
+from theano.sandbox.cuda.basic_ops import gpu_flatten
 from theano.tensor.extra_ops import CumsumOp

 if cuda_available:

@@ -453,7 +453,7 @@ def use_gpu_cumsum(node):
         x = gpu_from_host(x)

     if axis is None and x.ndim > 1:
-        x = GpuFlatten()(x)
+        x = gpu_flatten(x)

     # ``gpu_cumsum`` assume array has been flattened if needed.
     if axis is None:
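The flattening step exists because a cumulative sum over axis=None is defined on the flattened array, matching NumPy semantics. A quick illustrative check:

import numpy as np

x = np.arange(6).reshape(2, 3)
# cumsum over axis=None runs over the flattened array, which is why
# use_gpu_cumsum flattens multi-dimensional inputs first
assert np.array_equal(np.cumsum(x), np.cumsum(x.ravel()))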
theano/sandbox/cuda/opt.py
@@ -24,7 +24,9 @@ from theano.sandbox.cuda.basic_ops import (
     gpu_eye, gpu_contiguous,
     gpu_from_host, host_from_gpu, GpuFromHost, HostFromGpu,
     GpuContiguous,
-    GpuElemwise, GpuDimShuffle, GpuReshape, GpuCAReduce, GpuFlatten,
+    GpuElemwise, GpuDimShuffle, GpuReshape, GpuCAReduce,
+    # GpuFlatten,
+    gpu_flatten,
     GpuSubtensor, GpuAdvancedSubtensor1, GpuAdvancedIncSubtensor1,
     GpuAdvancedIncSubtensor1_dev20, GpuIncSubtensor, gpu_alloc, GpuAlloc,
     gpu_shape, GpuSplit, GpuAllocEmpty)

@@ -152,7 +154,7 @@ cpu_ops_moved_to_gpu = [
     tensor.elemwise.All, tensor.elemwise.Any,
     tensor.elemwise.CAReduceDtype, tensor.elemwise.Sum,
     tensor.elemwise.Prod, tensor.elemwise.ProdWithoutZeros,
-    tensor.Reshape, tensor.Flatten, tensor.Subtensor,
+    tensor.Reshape, tensor.flatten, tensor.Subtensor,
     tensor.AdvancedSubtensor1, tensor.AdvancedIncSubtensor1,
     tensor.IncSubtensor, tensor.Shape, tensor.Join,
     tensor.Alloc, tensor.Eye]

@@ -972,23 +974,23 @@ def local_gpu_reshape(node):
     return False


-@register_opt()
-@local_optimizer([gpu_from_host, tensor.Flatten])
-def local_gpu_flatten(node):
-    if isinstance(node.op, GpuFromHost):
-        host_input = node.inputs[0]
-        if host_input.owner and \
-           isinstance(host_input.owner.op, tensor.Flatten):
-            outdim = host_input.owner.op.outdim
-            return [GpuFlatten(outdim)(
-                as_cuda_ndarray_variable(host_input.owner.inputs[0]))]
-    if isinstance(node.op, tensor.Flatten):
-        x, = node.inputs
-        outdim = node.op.outdim
-        if x.owner and isinstance(x.owner.op, HostFromGpu):
-            gpu_x, = x.owner.inputs
-            return [host_from_gpu(GpuFlatten(outdim)(gpu_x))]
-    return False
+#@register_opt()
+#@local_optimizer([gpu_from_host, tensor.Reshape])
+#def local_gpu_flatten(node):
+#    if isinstance(node.op, GpuFromHost):
+#        host_input = node.inputs[0]
+#        if host_input.owner and \
+#           isinstance(host_input.owner.op, tensor.Reshape):
+#            outdim = host_input.owner.op.outdim
+#            return [GpuFlatten(outdim)(
+#                as_cuda_ndarray_variable(host_input.owner.inputs[0]))]
+#    if isinstance(node.op, tensor.Reshape):
+#        x, shp = node.inputs
+#        outdim = node.op.outdim
+#        if x.owner and isinstance(x.owner.op, HostFromGpu):
+#            gpu_x, = x.owner.inputs
+#            return [host_from_gpu(GpuFlatten(outdim)(gpu_x))]
+#    return False


 @register_opt()
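The now-commented optimizer followed Theano's usual transfer-lifting pattern: when the flatten sits next to a host/GPU transfer, move it to the GPU side of the transfer so the op runs on the device. A toy sketch of that rewrite on a minimal graph datatype (everything here, including the Node class, is our illustration, not Theano's API):

class Node(object):
    def __init__(self, op, inputs):
        self.op, self.inputs = op, inputs

def lift_flatten_through_transfer(node):
    # flatten(host_from_gpu(x)) -> host_from_gpu(gpu_flatten(x))
    if node.op == 'flatten':
        inner, = node.inputs
        if isinstance(inner, Node) and inner.op == 'host_from_gpu':
            gpu_x, = inner.inputs
            return Node('host_from_gpu', [Node('gpu_flatten', [gpu_x])])
    return None  # pattern not matched; leave the graph alone

g = Node('flatten', [Node('host_from_gpu', ['gpu_x'])])
lifted = lift_flatten_through_transfer(g)
assert lifted.op == 'host_from_gpu'
assert lifted.inputs[0].op == 'gpu_flatten'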
theano/sandbox/cuda/tests/test_basic_ops.py
@@ -307,7 +307,7 @@ def test_flatten():
     x = cuda.fmatrix('x')
     f = theano.function([x], x.flatten(), mode=mode_with_gpu)
     assert any([node for node in f.maker.fgraph.toposort()
-                if isinstance(node.op, B.GpuFlatten)])
+                if isinstance(node.op, B.GpuReshape)])
     assert len(f([[0., 0.], [0., 0.]]).shape) == 1
theano/tensor/basic.py
@@ -4499,160 +4499,183 @@ def reshape(x, newshape, ndim=None, name=None):
     return rval


-class Flatten(Op):
-    """
-    Flatten a tensor.
-
-    Flattens a tensor to `outdim` dimensions by preserving the leading
-    outdim - 1 shape components.
-
-    """
-    view_map = {0: [0]}
-
-    check_input = False
-    __props__ = ("outdim",)
-
-    def __init__(self, outdim=1):
-        self.outdim = int(outdim)
-
-    def __str__(self):
-        return '%s{%s}' % (self.__class__.__name__, self.outdim)
-
-    def make_node(self, x):
-        t_x = as_tensor_variable(x)
-        if self.outdim < 1 or (x.ndim and self.outdim > x.ndim):
-            raise ValueError('invalid output ndimensions (%i) for tensor of '
-                             'rank %i' % (self.outdim, t_x.ndim))
-
-        # Infer the broadcastable pattern of the output. For every dimension
-        # unaffected by the flatten, the broadcast flag should be unchanged.
-        # For the dimension resulting from the collapse of other dimensions,
-        # it should be broadcastable iff all the collapsed dimensions were
-        # broadcastable.
-        bcast_kept_dims = x.broadcastable[:self.outdim - 1]
-        bcast_new_dim = python_all(x.broadcastable[self.outdim - 1:])
-        broadcastable = bcast_kept_dims + (bcast_new_dim,)
-
-        return gof.Apply(self, [t_x], [tensor(x.type.dtype,
-                                              broadcastable)])
-
-    def perform(self, node, inp, out_):
-        x, = inp
-        out, = out_
-        outdim = self.outdim
-        if outdim == 1:
-            try:
-                out[0] = x.reshape(x.size)
-            except AttributeError:
-                out[0] = x.reshape((numpy.prod(x.shape),))
-        elif outdim == len(x.shape):
-            out[0] = x
-        else:
-            newshape = (x.shape[:outdim - 1] +
-                        (numpy.prod(x.shape[outdim - 1:]),))
-            out[0] = x.reshape(newshape)
-
-    def infer_shape(self, node, in_shapes):
-        in_shp, = in_shapes
-        part1 = in_shp[:self.outdim - 1]
-        part2 = in_shp[self.outdim - 1:]
-
-        if len(part2) > 1:
-            part2 = (prod(part2, dtype='int64'),)
-        elif len(part2) == 1:
-            # We do not want to force an upcast of part2 if its length is 1
-            pass
-        else:
-            if len(in_shp) == 0 and self.outdim == 1:
-                part2 = (1,)
-            else:
-                raise ValueError('invalid output ndimensions (%i) for tensor '
-                                 'of rank %i' % (self.outdim, len(in_shp)))
-
-        out_shape = (part1 + part2)
-        return [out_shape]
-
-    def grad(self, inp, grads):
-        x, = inp
-        g_out, = grads
-        return [reshape(g_out, shape(x), x.ndim)]
-
-    def R_op(self, inputs, eval_points):
-        if None in eval_points:
-            return [None]
-        return self.make_node(*eval_points).outputs
-
-    def c_code_cache_version(self):
-        return (1, 1)
-
-    def c_code(self, node, name, inputs, outputs, sub):
-        x, = inputs
-        out, = outputs
-        outdim = self.outdim
-        fail = sub['fail']
-        return """
-        if (%(outdim)s == PyArray_NDIM(%(x)s))
-        {
-            Py_XDECREF(%(out)s);
-            Py_XINCREF(%(x)s);
-            %(out)s = %(x)s;
-        }
-        else
-        {
-            Py_XDECREF(%(out)s);
-
-            if (%(outdim)s == 1)
-            {
-                npy_intp size = PyArray_SIZE(%(x)s);
-                PyArray_Dims newshape;
-                newshape.ptr = &size;
-                newshape.len = 1;
-                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
-                                                           &newshape,
-                                                           NPY_CORDER);
-            }
-            else
-            {
-                npy_intp *oldshape = PyArray_DIMS(%(x)s);
-                npy_intp newshape_dims[%(outdim)s];
-
-                int i;
-                for (i = 0; i < %(outdim)s - 1; ++i)
-                    newshape_dims[i] = oldshape[i];
-
-                newshape_dims[i] = 1;
-
-                for (int j = %(outdim)s - 1; j < PyArray_NDIM(%(x)s); ++j)
-                    newshape_dims[i] *= oldshape[j];
-
-                PyArray_Dims newshape;
-                newshape.ptr = newshape_dims;
-                newshape.len = %(outdim)s;
-                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
-                                                           &newshape,
-                                                           NPY_CORDER);
-            }
-        }
-        if (!%(out)s)
-        {
-            //The error message should have been set by
-            // PyArray_Newshape
-            %(fail)s;
-        }
-        if (!PyArray_ISALIGNED(%(out)s)) {
-            PyErr_Format(
-                PyExc_RuntimeError,
-                "PyArray_Newshape returned an object that isn't"
-                " aligned! NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
-                " this problem for some input shape/new shape"
-                " combinations. Use another NumPy version.");
-            %(fail)s;
-        }
-        """ % locals()
+#class Flatten(Op):
+#    """
+#    Flatten a tensor.
+#
+#    Flattens a tensor to `outdim` dimensions by preserving the leading
+#    outdim - 1 shape components.
+#
+#    """
+#    view_map = {0: [0]}
+#
+#    check_input = False
+#    __props__ = ("outdim",)
+#
+#    def __init__(self, outdim=1):
+#        warnings.warn(
+#            "Flatten class is deprecated, "
+#            "please use flatten method instead.",
+#            DeprecationWarning,
+#            stacklevel=4)
+#        self.outdim = int(outdim)
+#
+#    def __str__(self):
+#        return '%s{%s}' % (self.__class__.__name__, self.outdim)
+#
+#    def make_node(self, x):
+#        t_x = as_tensor_variable(x)
+#        if self.outdim < 1 or (x.ndim and self.outdim > x.ndim):
+#            raise ValueError('invalid output ndimensions (%i) for tensor of '
+#                             'rank %i' % (self.outdim, t_x.ndim))
+#
+#        # Infer the broadcastable pattern of the output. For every dimension
+#        # unaffected by the flatten, the broadcast flag should be unchanged.
+#        # For the dimension resulting from the collapse of other dimensions,
+#        # it should be broadcastable iff all the collapsed dimensions were
+#        # broadcastable.
+#        bcast_kept_dims = x.broadcastable[:self.outdim - 1]
+#        bcast_new_dim = python_all(x.broadcastable[self.outdim - 1:])
+#        broadcastable = bcast_kept_dims + (bcast_new_dim,)
+#
+#        return gof.Apply(self, [t_x], [tensor(x.type.dtype,
+#                                              broadcastable)])
+#
+#    def perform(self, node, inp, out_):
+#        x, = inp
+#        out, = out_
+#        outdim = self.outdim
+#        if outdim == 1:
+#            try:
+#                out[0] = x.reshape(x.size)
+#            except AttributeError:
+#                out[0] = x.reshape((numpy.prod(x.shape),))
+#        elif outdim == len(x.shape):
+#            out[0] = x
+#        else:
+#            newshape = (x.shape[:outdim - 1] +
+#                        (numpy.prod(x.shape[outdim - 1:]),))
+#            out[0] = x.reshape(newshape)
+#
+#    def infer_shape(self, node, in_shapes):
+#        in_shp, = in_shapes
+#        part1 = in_shp[:self.outdim - 1]
+#        part2 = in_shp[self.outdim - 1:]
+#
+#        if len(part2) > 1:
+#            part2 = (prod(part2, dtype='int64'),)
+#        elif len(part2) == 1:
+#            # We do not want to force an upcast of part2 if its length is 1
+#            pass
+#        else:
+#            if len(in_shp) == 0 and self.outdim == 1:
+#                part2 = (1,)
+#            else:
+#                raise ValueError('invalid output ndimensions (%i) for tensor '
+#                                 'of rank %i' % (self.outdim, len(in_shp)))
+#
+#        out_shape = (part1 + part2)
+#        return [out_shape]
+#
+#    def grad(self, inp, grads):
+#        x, = inp
+#        g_out, = grads
+#        return [reshape(g_out, shape(x), x.ndim)]
+#
+#    def R_op(self, inputs, eval_points):
+#        if None in eval_points:
+#            return [None]
+#        return self.make_node(*eval_points).outputs
+#
+#    def c_code_cache_version(self):
+#        return (1, 1)
+#
+#    def c_code(self, node, name, inputs, outputs, sub):
+#        x, = inputs
+#        out, = outputs
+#        outdim = self.outdim
+#        fail = sub['fail']
+#        return """
+#        if (%(outdim)s == PyArray_NDIM(%(x)s))
+#        {
+#            Py_XDECREF(%(out)s);
+#            Py_XINCREF(%(x)s);
+#            %(out)s = %(x)s;
+#        }
+#        else
+#        {
+#            Py_XDECREF(%(out)s);
+#
+#            if (%(outdim)s == 1)
+#            {
+#                npy_intp size = PyArray_SIZE(%(x)s);
+#                PyArray_Dims newshape;
+#                newshape.ptr = &size;
+#                newshape.len = 1;
+#                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
+#                                                           &newshape,
+#                                                           NPY_CORDER);
+#            }
+#            else
+#            {
+#                npy_intp *oldshape = PyArray_DIMS(%(x)s);
+#                npy_intp newshape_dims[%(outdim)s];
+#
+#                int i;
+#                for (i = 0; i < %(outdim)s - 1; ++i)
+#                    newshape_dims[i] = oldshape[i];
+#
+#                newshape_dims[i] = 1;
+#
+#                for (int j = %(outdim)s - 1; j < PyArray_NDIM(%(x)s); ++j)
+#                    newshape_dims[i] *= oldshape[j];
+#
+#                PyArray_Dims newshape;
+#                newshape.ptr = newshape_dims;
+#                newshape.len = %(outdim)s;
+#                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
+#                                                           &newshape,
+#                                                           NPY_CORDER);
+#            }
+#        }
+#        if (!%(out)s)
+#        {
+#            //The error message should have been set by
+#            // PyArray_Newshape
+#            %(fail)s;
+#        }
+#        if (!PyArray_ISALIGNED(%(out)s)) {
+#            PyErr_Format(
+#                PyExc_RuntimeError,
+#                "PyArray_Newshape returned an object that isn't"
+#                " aligned! NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
+#                " this problem for some input shape/new shape"
+#                " combinations. Use another NumPy version.");
+#            %(fail)s;
+#        }
+#        """ % locals()
+
+
+def is_flatten(node, outdim=1):
+    return (isinstance(node.op, theano.tensor.Reshape) and
+            node.inputs[1].ndim == outdim)


 def flatten(x, outdim=1):
-    return Flatten(outdim)(x)
+    outdim = int(outdim)
+    if outdim < 1 or outdim > x.ndim:
+        raise ValueError('outdim of flatten must an int in the range '
+                         '[1, %s], recieved %s' % (x.ndim, outdim))
+    if outdim > 1:
+        dims = tuple(x.shape[:outdim - 1]) + \
+            (theano.tensor.prod(x.shape[outdim - 1:]),)
+    else:
+        dims = (-1,)
+    x_reshaped = x.reshape(dims)
+    bcast_kept_dims = x.broadcastable[:outdim - 1]
+    bcast_new_dim = python_all(x.broadcastable[outdim - 1:])
+    broadcastable = bcast_kept_dims + (bcast_new_dim,)
+    broadcast_int = tuple([numpy.int(bc) for bc in broadcastable])
+    for dim, br in enumerate(broadcast_int):
+        if br:
+            x_reshaped = theano.tensor.addbroadcast(x_reshaped, dim)
+    return x_reshaped


 # class TileGrad(Op):
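One subtlety the new flatten preserves from the removed Op is the output broadcast pattern: kept axes keep their flags, and the collapsed axis is broadcastable only if every collapsed input axis was (hence the addbroadcast loop above). The rule itself, as a plain-Python sketch with an illustrative name:

def flatten_broadcastable(bcast, outdim):
    # Kept axes keep their flags; the collapsed axis is broadcastable
    # iff all collapsed axes were.
    return bcast[:outdim - 1] + (all(bcast[outdim - 1:]),)

assert flatten_broadcastable((True, False, True), outdim=2) == (True, False)
assert flatten_broadcastable((True, True, True), outdim=2) == (True, True)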
theano/tensor/nnet/tests/test_sigm.py
@@ -377,7 +377,7 @@ class T_softplus_opts(unittest.TestCase):
         f = theano.function([x], out, mode=self.m)
         topo = f.maker.fgraph.toposort()
         assert len(topo) == 3
-        assert isinstance(topo[0].op, T.Flatten)
+        tensor.is_flatten(topo[0])
         assert isinstance(topo[1].op.scalar_op,
                           theano.tensor.nnet.sigm.ScalarSoftplus)
         assert isinstance(topo[2].op.scalar_op, theano.scalar.Neg)
theano/tensor/opt.py
@@ -3877,24 +3877,24 @@ def local_useless_split(node):
 ################
 # Flatten Opts #
 ################
-@register_canonicalize
-@register_stabilize
-@gof.local_optimizer([T.Flatten])
-def local_flatten_lift(node):
-    """
-    Flatten(UnaryElemwise(x)) -> UnaryElemwise(Flatten(x))
-
-    This optimization is needed by optimization
-    nnet/sigm.py:log1msigm_to_softplus to get applied when there is a flatten.
-
-    """
-    if (isinstance(node.op, T.Flatten) and
-            node.inputs[0].owner and
-            isinstance(node.inputs[0].owner.op, T.Elemwise) and
-            len(node.inputs[0].owner.inputs) == 1):
-        f = node.op(node.inputs[0].owner.inputs[0])
-        e = node.inputs[0].owner.op(f)
-        return [e]
+#@register_canonicalize
+#@register_stabilize
+#@gof.local_optimizer([T.Flatten])
+#def local_flatten_lift(node):
+#    """
+#    Flatten(UnaryElemwise(x)) -> UnaryElemwise(Flatten(x))
+#
+#    This optimization is needed by optimization
+#    nnet/sigm.py:log1msigm_to_softplus to get applied when there is a flatten.
+#
+#    """
+#    if (isinstance(node.op, T.Flatten) and
+#            node.inputs[0].owner and
+#            isinstance(node.inputs[0].owner.op, T.Elemwise) and
+#            len(node.inputs[0].owner.inputs) == 1):
+#        f = node.op(node.inputs[0].owner.inputs[0])
+#        e = node.inputs[0].owner.op(f)
+#        return [e]

 ##################
 # Reshape opts #
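The lift this optimizer performed is sound because flattening commutes with any unary elementwise op: applying the op before or after the reshape touches the same values in the same order. A quick NumPy check of the commutation:

import numpy as np

x = np.random.rand(3, 4)
# exp is elementwise, so flatten-then-exp equals exp-then-flatten
assert np.allclose(np.exp(x).ravel(), np.exp(x.ravel()))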
theano/tensor/tests/test_basic.py
@@ -32,7 +32,7 @@ from theano.tensor import (_shared, wvector, bvector, autocast_float_as,
                            alloc, as_tensor_variable, tensor_from_scalar,
                            ARange, autocast_float, clip, constant, default,
                            dot, dmatrix, dscalar, dvector, eq, eye, fill,
                            flatten, inverse_permutation,
-                           tensor4, permute_row_elements, Flatten, fmatrix, fscalars, grad,
+                           tensor4, permute_row_elements, fmatrix, fscalars, grad,
                            inplace, iscalar, matrix, minimum, matrices,
                            maximum, mul, neq, Reshape, row, scalar, scalars,
                            second, smallest, stack, sub, Tensor, tensor_copy,
                            tensordot, TensorType, Tri, tri, tril, triu,
                            unbroadcast,

@@ -5147,11 +5147,6 @@ def test_make_column_matrix_broadcastable():

 def test_flatten_outdimNone():
-    """Flatten always returns a copy of the array. There is no danger
-    with in-place operations and thus no need to test it.
-
-    """
-
     a = dmatrix()
     c = flatten(a)
     f = inplace_func([a], c)

@@ -5161,7 +5156,7 @@ def test_flatten_outdimNone():
     f = inplace_func([a], c)
     assert numpy.all(f(a_val) == c_val)
-    utt.verify_grad(Flatten(), [a_val])
+    utt.verify_grad(flatten, [a_val])


 def test_flatten_scalar():

@@ -5174,7 +5169,7 @@ def test_flatten_scalar():
     f = inplace_func([a], c)
     assert numpy.all(f(a_val) == c_val)
-    # utt.verify_grad(Flatten(), [a_val]) #TODO: fix verify_grd to work on scalars
+    # utt.verify_grad(flatten, [a_val]) #TODO: fix verify_grd to work on scalars


 def test_flatten_outdim1():

@@ -5187,7 +5182,7 @@ def test_flatten_outdim1():
     f = inplace_func([a], c)
     assert numpy.all(f(a_val) == c_val)
-    utt.verify_grad(Flatten(1), [a_val])
+    utt.verify_grad(flatten, [a_val])


 def test_flatten_outdim2():

@@ -5199,7 +5194,7 @@ def test_flatten_outdim2():
     f = inplace_func([a], c)
     assert numpy.all(f(a_val) == a_val)
-    utt.verify_grad(Flatten(2), [a_val])
+    utt.verify_grad(flatten, [a_val])


 def test_flatten_outdim2_of_3():

@@ -5213,7 +5208,7 @@ def test_flatten_outdim2_of_3():
     f = inplace_func([a], c)
     assert numpy.all(f(a_val) == c_val)
-    utt.verify_grad(Flatten(2), [a_val])
+    utt.verify_grad(flatten, [a_val])


 def test_flatten_broadcastable():

@@ -7128,24 +7123,27 @@ class TestInferShape(utt.InferShapeTester):
         # Flatten
         atens3 = tensor3()
         atens3_val = rand(4, 5, 3)
-        for outdim in (3, 2, 1):
-            self._compile_and_check([atens3],
-                                    [Flatten(outdim)(atens3)],
-                                    [atens3_val], Flatten)
-
-        amat = matrix()
-        amat_val = rand(4, 5)
-        for outdim in (2, 1):
-            self._compile_and_check([amat],
-                                    [Flatten(outdim)(amat)],
-                                    [amat_val], Flatten)
-
-        avec = vector()
-        avec_val = rand(4)
-        outdim = 1
-        self._compile_and_check([avec],
-                                [Flatten(outdim)(avec)],
-                                [avec_val], Flatten)
+        self._compile_and_check([atens3],
+                                [flatten(atens3, 1)],
+                                [atens3_val], Reshape)
+        #for outdim in (3, 2, 1):
+        #    self._compile_and_check([atens3],
+        #                            [flatten(atens3, outdim)],
+        #                            [atens3_val], Reshape)
+        #amat = matrix()
+        #amat_val = rand(4, 5)
+        #for outdim in (2, 1):
+        #    self._compile_and_check([amat],
+        #                            [flatten(amat, outdim)],
+        #                            [amat_val], Reshape)
+        #avec = vector()
+        #avec_val = rand(4)
+        #outdim = 1
+        #self._compile_and_check([avec],
+        #                        [flatten(avec, outdim)],
+        #                        [avec_val], Reshape)

         # Eye
         aiscal = iscalar()
theano/tensor/tests/test_opt.py
@@ -60,6 +60,7 @@ from theano.tests import unittest_tools as utt
 from theano.compile.mode import optdb
 from theano.compile import Mode
 from nose.plugins.attrib import attr
+from theano.tensor.basic import flatten, is_flatten

 mode_opt = theano.config.mode
 if mode_opt == 'FAST_COMPILE':

@@ -5879,18 +5880,19 @@ def test_local_useless_split():

 def test_local_flatten_lift():
     for i in xrange(1, 4):
-        op = tensor.Flatten(i)
         x = tensor.tensor4()
-        out = op(T.exp(x))
+        out = tensor.flatten(T.exp(x), i)
         assert out.ndim == i
         mode = compile.mode.get_default_mode()
         mode = mode.including('local_flatten_lift')
        f = theano.function([x], out, mode=mode)
-        f(numpy.random.rand(5, 4, 3, 2).astype(config.floatX))
+        x_np = numpy.random.rand(5, 4, 3, 2).astype(config.floatX)
+        out_np = f(x_np)
         topo = f.maker.fgraph.toposort()
         assert len(topo) == 2
-        assert isinstance(topo[0].op, tensor.Flatten)
-        assert isinstance(topo[1].op, tensor.Elemwise)
+        shape_out_np = tuple(x_np.shape[:i - 1]) + \
+            (numpy.prod(x_np.shape[i - 1:]),)
+        assert shape_out_np == out_np.shape
+        tensor.is_flatten(topo[0], outdim=i)
+        assert isinstance(topo[-1].op, tensor.Elemwise)


 class Test_Reshape(unittest.TestCase):