Commit e521b20e
Authored Dec 05, 2015 by Pascal Lamblin

Merge pull request #3480 from SinaHonari/issue950

Refactor Flatten

Parents: 91f08497, cf53977b

Showing 9 changed files with 183 additions and 37 deletions (+183 −37)
theano/sandbox/cuda/__init__.py              +2   −1
theano/sandbox/cuda/basic_ops.py             +37  −0
theano/sandbox/cuda/extra_ops.py             +2   −2
theano/sandbox/cuda/opt.py                   +6   −5
theano/sandbox/cuda/tests/test_basic_ops.py  +2   −1
theano/tensor/basic.py                       +70  −2
theano/tensor/nnet/tests/test_sigm.py        +1   −1
theano/tensor/tests/test_basic.py            +52  −18
theano/tensor/tests/test_opt.py              +11  −7
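In short, this merge deprecates the Flatten and GpuFlatten Op classes in favor of the flatten and gpu_flatten helper functions, which lower to Reshape/GpuReshape nodes. A minimal sketch of the user-facing change (the theano.tensor-as-T alias is assumed for illustration; the names themselves come from this diff):

    import theano.tensor as T

    x = T.tensor3('x')

    y_old = T.Flatten(2)(x)          # old interface, now emits a DeprecationWarning
    y_new = T.flatten(x, outdim=2)   # new interface, builds a Reshape node

    assert T.is_flat(y_new, outdim=2)  # is_flat is added by this commit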
theano/sandbox/cuda/__init__.py

@@ -6,6 +6,7 @@ import os
 import shutil
 import stat
 import sys
+import warnings
 import theano
 from theano.compat import get_unbound_function
@@ -318,7 +319,7 @@ if cuda_available:
        GpuDimShuffle, GpuCAReduce, GpuReshape, GpuContiguous,
        GpuSubtensor, GpuIncSubtensor,
        GpuAdvancedSubtensor1, GpuAdvancedIncSubtensor1,
-       GpuFlatten, GpuShape, GpuAlloc, GpuAllocEmpty, GpuSplit,
+       gpu_flatten, GpuFlatten, GpuShape, GpuAlloc, GpuAllocEmpty, GpuSplit,
        GpuJoin, fscalar, fvector, fmatrix, frow, fcol,
        ftensor3, ftensor4,
        scalar, vector, matrix, row, col,
theano/sandbox/cuda/basic_ops.py

@@ -3326,7 +3326,14 @@ class GpuFlatten(gof.HideC, tensor.Flatten, GpuOp):
     """
     Implement Flatten on the gpu.
+    .. note:: The interface GpuFlatten is deprecated, you should use gpu_flatten.
     """
+    def __init__(self):
+        warnings.warn("GpuFlatten class is deprecated, "
+                      "please use gpu_flatten method instead.",
+                      DeprecationWarning,
+                      stacklevel=4)
+
     def make_node(self, x):
         assert isinstance(x.type, CudaNdarrayType)
@@ -3336,6 +3343,36 @@ class GpuFlatten(gof.HideC, tensor.Flatten, GpuOp):
         return Apply(self, [x], [out_type()])


+def gpu_flatten(x, outdim=1):
+    """
+    Implement flatten on the gpu.
+    Reshapes the variable x by keeping
+    the first outdim-1 dimension size(s) of x the same,
+    and making the last dimension size of x equal to
+    the multiplication of its remaining dimension size(s).
+
+    Parameters
+    ----------
+    x : theano.tensor.var.TensorVariable
+        the variable that should be reshaped.
+    outdim : int
+        the number of dimensions of the returned variable
+
+    Returns
+    -------
+    theano.tensor.var.TensorVariable
+        the flattened variable with dimensionality of outdim
+    """
+    x = as_cuda_ndarray_variable(x)
+    if outdim > 1:
+        dims = tuple(x.shape[:outdim - 1]) + (-1,)
+    else:
+        dims = (-1,)
+    return GpuReshape(outdim)(x, dims)
+
+
 class GpuShape(tensor.Shape, GpuOp):
     """
     Implement Shape on the gpu.
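The dims tuple built by gpu_flatten is ordinary reshape arithmetic: keep the first outdim − 1 sizes and collapse everything else into one trailing −1 axis. A NumPy sketch of the same computation (NumPy stands in for the GPU array type here, purely as an illustration):

    import numpy as np

    def flatten_dims(shape, outdim=1):
        # Keep the leading outdim-1 sizes, collapse the rest into -1.
        return tuple(shape[:outdim - 1]) + (-1,) if outdim > 1 else (-1,)

    x = np.zeros((4, 5, 3))
    assert x.reshape(flatten_dims(x.shape, 2)).shape == (4, 15)
    assert x.reshape(flatten_dims(x.shape, 1)).shape == (60,)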
theano/sandbox/cuda/extra_ops.py

@@ -3,7 +3,7 @@ import copy
 from theano import Op
 from theano.gof import local_optimizer
 from theano.sandbox.cuda import cuda_available, GpuOp
-from theano.sandbox.cuda.basic_ops import GpuFlatten
+from theano.sandbox.cuda.basic_ops import gpu_flatten
 from theano.tensor.extra_ops import CumsumOp

 if cuda_available:
@@ -453,7 +453,7 @@ def use_gpu_cumsum(node):
         x = gpu_from_host(x)

     if axis is None and x.ndim > 1:
-        x = GpuFlatten()(x)
+        x = gpu_flatten(x)

     # ``gpu_cumsum`` assume array has been flattened if needed.
     if axis is None:
theano/sandbox/cuda/opt.py

@@ -24,7 +24,8 @@ from theano.sandbox.cuda.basic_ops import (
     gpu_eye, gpu_contiguous,
     gpu_from_host, host_from_gpu, GpuFromHost, HostFromGpu,
     GpuContiguous,
     GpuElemwise, GpuDimShuffle, GpuReshape, GpuCAReduce, GpuFlatten,
+    gpu_flatten,
     GpuSubtensor, GpuAdvancedSubtensor1,
     GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20,
     GpuIncSubtensor, gpu_alloc, GpuAlloc, gpu_shape, GpuSplit, GpuAllocEmpty)
@@ -152,7 +153,7 @@ cpu_ops_moved_to_gpu = [
     tensor.elemwise.All, tensor.elemwise.Any,
     tensor.elemwise.CAReduceDtype, tensor.elemwise.Sum,
     tensor.elemwise.Prod, tensor.elemwise.ProdWithoutZeros,
-    tensor.Reshape, tensor.Flatten, tensor.Subtensor,
+    tensor.Reshape, tensor.flatten, tensor.Subtensor,
     tensor.AdvancedSubtensor1, tensor.AdvancedIncSubtensor1,
     tensor.IncSubtensor, tensor.Shape, tensor.Join,
     tensor.Alloc, tensor.Eye]
@@ -980,14 +981,14 @@ def local_gpu_flatten(node):
         if host_input.owner and \
            isinstance(host_input.owner.op, tensor.Flatten):
             outdim = host_input.owner.op.outdim
-            return [GpuFlatten(outdim)(
-                as_cuda_ndarray_variable(host_input.owner.inputs[0]))]
+            return [gpu_flatten(
+                as_cuda_ndarray_variable(host_input.owner.inputs[0]), outdim)]
     if isinstance(node.op, tensor.Flatten):
         x, = node.inputs
         outdim = node.op.outdim
         if x.owner and isinstance(x.owner.op, HostFromGpu):
             gpu_x, = x.owner.inputs
-            return [host_from_gpu(GpuFlatten(outdim)(gpu_x))]
+            return [host_from_gpu(gpu_flatten(gpu_x, outdim))]
     return False
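Note the call-style change inside the optimizer: GpuFlatten(outdim) constructed an Op that was then applied to a variable, whereas gpu_flatten(x, outdim) is a plain function that already returns the output variable (a GpuReshape application). Schematically, under a CUDA-enabled setup:

    y = GpuFlatten(outdim)(gpu_x)   # old: build the Op, then apply it (now warns)
    y = gpu_flatten(gpu_x, outdim)  # new: returns the GpuReshape output directly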
theano/sandbox/cuda/tests/test_basic_ops.py

@@ -307,7 +307,8 @@ def test_flatten():
     x = cuda.fmatrix('x')
     f = theano.function([x], x.flatten(), mode=mode_with_gpu)
     assert any([node for node in f.maker.fgraph.toposort()
-                if isinstance(node.op, B.GpuFlatten)])
+                if isinstance(node.op, B.GpuReshape)])
+    assert theano.tensor.is_flat(x.flatten())
     assert len(f([[0., 0.], [0., 0.]]).shape) == 1
theano/tensor/basic.py

@@ -4417,7 +4417,7 @@ class Reshape(Op):
             if ele == -1:
                 requ[i] = missing
         elif crit == 1:  # we reshape to -1
-            requ = [mul(*ishapes[0])]
+            requ = [mul(*ishapes[0])] if ishapes[0] else [1]
         elif crit > 1:
             raise ValueError('shape argument to Reshape.perform'
                              ' must have at most one entry equal to -1')
@@ -4511,6 +4511,7 @@ class Flatten(Op):
     Flattens a tensor to `outdim` dimensions by preserving the leading
     outdim - 1 shape components.
+    .. note:: The interface Flatten(Op) is deprecated, you should use flatten.
     """
     view_map = {0: [0]}
@@ -4518,6 +4519,11 @@ class Flatten(Op):
     __props__ = ("outdim",)

     def __init__(self, outdim=1):
+        warnings.warn("Flatten class is deprecated, "
+                      "please use flatten method instead.",
+                      DeprecationWarning,
+                      stacklevel=4)
+
         self.outdim = int(outdim)

     def __str__(self):
@@ -4656,8 +4662,70 @@ class Flatten(Op):
        """ % locals()


+def is_flat(var, outdim=1):
+    """
+    Verifies the dimensionality of the var is equal to
+    outdim. This method is usually called after the flatten method on a
+    variable, where the first outdim-1 dimension size(s) of the variable
+    are kept intact, and the last dimension size of the variable is made
+    equal to the multiplication of its remaining dimension size(s), such that
+    the variable would end up with as many dimensions as outdim.
+
+    Parameters
+    ----------
+    var : theano.tensor.var.TensorVariable
+        the theano var on which the dimensionality is checked.
+    outdim : int
+        the expected dimensionality of var.
+
+    Returns
+    -------
+    bool
+        the comparison result of var's dim
+        and the expected outdim.
+    """
+    return var.ndim == outdim
+
+
 def flatten(x, outdim=1):
-    return Flatten(outdim)(x)
+    """
+    Reshapes the variable x by keeping
+    the first outdim-1 dimension size(s) of x the same,
+    and making the last dimension size of x equal to
+    the multiplication of its remaining dimension size(s).
+
+    Parameters
+    ----------
+    x : theano.tensor.var.TensorVariable
+        the variable that should be reshaped.
+    outdim : int
+        the number of dimensions of the returned variable
+
+    Returns
+    -------
+    theano.tensor.var.TensorVariable
+        the flattened variable with dimensionality of outdim
+    """
+    # Any input variable can be flattened to have outdim of 1,
+    # even if it's a scalar. Otherwise, outdim must be positive
+    # and smaller than x.ndim.
+    if outdim < 1 or (outdim > 1 and outdim > x.ndim):
+        raise ValueError('outdim %s out of bound [1, %d)'
+                         % (outdim, x.ndim + 1))
+
+    if outdim > 1:
+        dims = tuple(x.shape[:outdim - 1]) + (-1,)
+    else:
+        dims = (-1,)
+    x_reshaped = x.reshape(dims)
+    bcast_kept_dims = x.broadcastable[:outdim - 1]
+    bcast_new_dim = python_all(x.broadcastable[outdim - 1:])
+    broadcastable = bcast_kept_dims + (bcast_new_dim,)
+    x_reshaped = theano.tensor.addbroadcast(
+        x_reshaped, *filter(lambda i: broadcastable[i], range(outdim)))
+    return x_reshaped


 # class TileGrad(Op):
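Taken together, flatten keeps the leading outdim − 1 axes, multiplies the remaining sizes into the last axis, and restores any broadcastable flags; is_flat then only compares ndim against outdim. A small usage sketch (shapes chosen for illustration):

    import numpy
    import theano
    import theano.tensor as T

    x = T.tensor3('x')             # e.g. shape (4, 5, 3)
    y = T.flatten(x, outdim=2)     # symbolic shape (4, 15)

    f = theano.function([x], y)
    out = f(numpy.zeros((4, 5, 3), dtype=theano.config.floatX))
    assert out.shape == (4, 15)
    assert T.is_flat(y, outdim=2)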
theano/tensor/nnet/tests/test_sigm.py

@@ -377,7 +377,7 @@ class T_softplus_opts(unittest.TestCase):
         f = theano.function([x], out, mode=self.m)
         topo = f.maker.fgraph.toposort()
         assert len(topo) == 3
-        assert isinstance(topo[0].op, T.Flatten)
+        assert tensor.is_flat(topo[0].outputs[0])
         assert isinstance(topo[1].op.scalar_op,
                           theano.tensor.nnet.sigm.ScalarSoftplus)
         assert isinstance(topo[2].op.scalar_op, theano.scalar.Neg)
theano/tensor/tests/test_basic.py

@@ -18,6 +18,7 @@ from nose.plugins.skip import SkipTest
 import numpy
 from numpy.testing import dec, assert_array_equal, assert_allclose
 from distutils.version import LooseVersion
+from functools import partial

 import theano
 from theano.compat import PY3, exc_message, operator_div
@@ -31,8 +32,8 @@ from theano.tensor import (_shared, wvector, bvector, autocast_float_as,
     fscalar, zeros_like, sum, tensor3, vector, add, addbroadcast,
     alloc, as_tensor_variable, tensor_from_scalar, ARange, autocast_float,
     clip, constant, default, dot,
     dmatrix, dscalar, dvector, eq, eye, fill, flatten, inverse_permutation,
-    Flatten, tensor4, permute_row_elements, Flatten, fmatrix, fscalars, grad,
+    tensor4, permute_row_elements, fmatrix, fscalars, grad,
     inplace, iscalar, matrix, minimum, matrices, maximum, mul, neq,
     Reshape, row, scalar, scalars, second, smallest, stack, sub, Tensor,
     tensor_copy, tensordot, TensorType, Tri, tri, tril, triu, unbroadcast,
@@ -5147,11 +5148,6 @@ def test_make_column_matrix_broadcastable():

 def test_flatten_outdimNone():
-    """Flatten always returns a copy of the array. There is no danger
-    with in-place operations and thus no need to test it.
-    """
     a = dmatrix()
     c = flatten(a)
     f = inplace_func([a], c)
@@ -5161,7 +5157,7 @@ def test_flatten_outdimNone():
     f = inplace_func([a], c)
     assert numpy.all(f(a_val) == c_val)
-    utt.verify_grad(Flatten(), [a_val])
+    utt.verify_grad(flatten, [a_val])

 def test_flatten_scalar():
@@ -5174,7 +5170,7 @@ def test_flatten_scalar():
     f = inplace_func([a], c)
     assert numpy.all(f(a_val) == c_val)
-    # utt.verify_grad(Flatten(), [a_val]) #TODO: fix verify_grd to work on scalars
+    # utt.verify_grad(flatten, [a_val]) #TODO: fix verify_grd to work on scalars

 def test_flatten_outdim1():
@@ -5187,7 +5183,7 @@ def test_flatten_outdim1():
     f = inplace_func([a], c)
     assert numpy.all(f(a_val) == c_val)
-    utt.verify_grad(Flatten(1), [a_val])
+    utt.verify_grad(flatten, [a_val])

 def test_flatten_outdim2():
@@ -5199,7 +5195,8 @@ def test_flatten_outdim2():
     f = inplace_func([a], c)
     assert numpy.all(f(a_val) == a_val)
-    utt.verify_grad(Flatten(2), [a_val])
+    flatten_2 = partial(flatten, outdim=2)
+    utt.verify_grad(flatten_2, [a_val])

 def test_flatten_outdim2_of_3():
@@ -5213,7 +5210,8 @@ def test_flatten_outdim2_of_3():
     f = inplace_func([a], c)
     assert numpy.all(f(a_val) == c_val)
-    utt.verify_grad(Flatten(2), [a_val])
+    flatten_2 = partial(flatten, outdim=2)
+    utt.verify_grad(flatten_2, [a_val])

 def test_flatten_broadcastable():
@@ -5255,6 +5253,37 @@ def test_flatten_outdim_invalid():
     pass


+def test_is_flat():
+    """
+    tests is_flat method for constant and symbolic variables,
+    as well as reshaped constant and symbolic variables on the
+    given outdim
+    """
+    # Constant variable
+    assert tensor.is_flat(tensor.as_tensor_variable(numpy.zeros((10))))
+    assert tensor.is_flat(
+        tensor.as_tensor_variable(numpy.zeros((10, 10, 10))), outdim=3)
+    assert not tensor.is_flat(
+        tensor.as_tensor_variable(numpy.zeros((10, 10, 10))))
+
+    # Symbolic variable
+    assert tensor.is_flat(tensor.vector())
+    assert tensor.is_flat(tensor.tensor3(), outdim=3)
+    assert not tensor.is_flat(tensor.tensor3())
+
+    # Reshape with constant shape
+    X = tensor.tensor4()
+    assert tensor.is_flat(X.reshape((-1, )))
+    assert tensor.is_flat(X.reshape((10, 10, -1)), outdim=3)
+    assert not tensor.is_flat(X.reshape((10, 10, -1)))
+
+    # Reshape with symbolic shape
+    X = tensor.tensor4()
+    assert tensor.is_flat(X.reshape((tensor.iscalar(), )))
+    assert tensor.is_flat(X.reshape((tensor.iscalar(), ) * 3), outdim=3)
+    assert not tensor.is_flat(X.reshape((tensor.iscalar(), ) * 3))
+
+
 def test_tile():
     def run_tile(x, x_, reps, use_symbolic_reps):
         if use_symbolic_reps:
@@ -7128,24 +7157,29 @@ class TestInferShape(utt.InferShapeTester):
         # Flatten
         atens3 = tensor3()
         atens3_val = rand(4, 5, 3)
+        self._compile_and_check([atens3],
+                                [flatten(atens3, 1)],
+                                [atens3_val], Reshape)
         for outdim in (3, 2, 1):
             self._compile_and_check([atens3],
-                                    [Flatten(outdim)(atens3)],
-                                    [atens3_val], Flatten)
+                                    [flatten(atens3, outdim)],
+                                    [atens3_val], Reshape)

         amat = matrix()
         amat_val = rand(4, 5)
         for outdim in (2, 1):
             self._compile_and_check([amat],
-                                    [Flatten(outdim)(amat)],
-                                    [amat_val], Flatten)
+                                    [flatten(amat, outdim)],
+                                    [amat_val], Reshape)

         avec = vector()
         avec_val = rand(4)
         outdim = 1
         self._compile_and_check([avec],
-                                [Flatten(outdim)(avec)],
-                                [avec_val], Flatten)
+                                [flatten(avec, outdim)],
+                                [avec_val], Reshape,
+                                excluding=['local_useless_reshape'])

         # Eye
         aiscal = iscalar()
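utt.verify_grad expects a callable that takes just the test value; the deprecated Flatten(2) instance was such a callable, so the tests now bind the extra argument with functools.partial instead. The same binding trick in isolation, with a stand-in function:

    from functools import partial

    def scale(x, factor=1):   # stand-in for flatten(x, outdim=...)
        return x * factor

    scale_2 = partial(scale, factor=2)  # one-argument callable, like Flatten(2) was
    assert scale_2(3) == 6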
theano/tensor/tests/test_opt.py

@@ -5879,18 +5879,22 @@ def test_local_useless_split():

 def test_local_flatten_lift():
     for i in xrange(1, 4):
-        op = tensor.Flatten(i)
         x = tensor.tensor4()
-        out = op(T.exp(x))
+        out = tensor.flatten(T.exp(x), i)
         assert out.ndim == i
         mode = compile.mode.get_default_mode()
-        mode = mode.including('local_flatten_lift')
+        mode = mode.including('local_reshape_lift')
         f = theano.function([x], out, mode=mode)
-        f(numpy.random.rand(5, 4, 3, 2).astype(config.floatX))
+        x_np = numpy.random.rand(5, 4, 3, 2).astype(config.floatX)
+        out_np = f(x_np)
         topo = f.maker.fgraph.toposort()
-        assert len(topo) == 2
-        assert isinstance(topo[0].op, tensor.Flatten)
-        assert isinstance(topo[1].op, tensor.Elemwise)
+        shape_out_np = tuple(x_np.shape[:i - 1]) + (numpy.prod(x_np.shape[i - 1:]),)
+        assert shape_out_np == out_np.shape
+
+        reshape_nodes = [n for n in topo if isinstance(n.op, tensor.Reshape)]
+        assert (len(reshape_nodes) == 1 and
+                tensor.is_flat(reshape_nodes[0].outputs[0], outdim=i))
+        assert isinstance(topo[-1].op, tensor.Elemwise)


 class Test_Reshape(unittest.TestCase):
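The renamed local_reshape_lift optimization moves the reshape above the elementwise op, so flatten(exp(x), i) compiles to exp(reshape(x)); that is why the test now expects exactly one Reshape node and an Elemwise as the last node. The identity the lift relies on, checked numerically with NumPy as an illustration:

    import numpy as np

    x = np.random.rand(5, 4, 3, 2)
    i = 2  # target outdim

    lhs = np.exp(x).reshape(x.shape[:i - 1] + (-1,))   # flatten(exp(x), i)
    rhs = np.exp(x.reshape(x.shape[:i - 1] + (-1,)))   # exp(flatten(x, i)) after the lift
    assert np.allclose(lhs, rhs)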