Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
eced0049
提交
eced0049
authored
9月 30, 2015
作者:
Sina Honari
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
first commit to remove Flatten Op
上级
33bda7ca
隐藏空白字符变更
内嵌
并排
正在显示
10 个修改的文件
包含
281 行增加
和
241 行删除
+281
-241
__init__.py
theano/sandbox/cuda/__init__.py
+1
-1
basic_ops.py
theano/sandbox/cuda/basic_ops.py
+27
-12
extra_ops.py
theano/sandbox/cuda/extra_ops.py
+2
-2
opt.py
theano/sandbox/cuda/opt.py
+21
-19
test_basic_ops.py
theano/sandbox/cuda/tests/test_basic_ops.py
+1
-1
basic.py
theano/tensor/basic.py
+175
-152
test_sigm.py
theano/tensor/nnet/tests/test_sigm.py
+1
-1
opt.py
theano/tensor/opt.py
+18
-18
test_basic.py
theano/tensor/tests/test_basic.py
+27
-29
test_opt.py
theano/tensor/tests/test_opt.py
+8
-6
没有找到文件。
theano/sandbox/cuda/__init__.py
浏览文件 @
eced0049
...
@@ -318,7 +318,7 @@ if cuda_available:
...
@@ -318,7 +318,7 @@ if cuda_available:
GpuDimShuffle
,
GpuCAReduce
,
GpuReshape
,
GpuContiguous
,
GpuDimShuffle
,
GpuCAReduce
,
GpuReshape
,
GpuContiguous
,
GpuSubtensor
,
GpuIncSubtensor
,
GpuSubtensor
,
GpuIncSubtensor
,
GpuAdvancedSubtensor1
,
GpuAdvancedIncSubtensor1
,
GpuAdvancedSubtensor1
,
GpuAdvancedIncSubtensor1
,
GpuFlatten
,
GpuShape
,
GpuAlloc
,
GpuAllocEmpty
,
GpuSplit
,
gpu_flatten
,
GpuFlatten
,
GpuShape
,
GpuAlloc
,
GpuAllocEmpty
,
GpuSplit
,
GpuJoin
,
fscalar
,
fvector
,
fmatrix
,
frow
,
fcol
,
GpuJoin
,
fscalar
,
fvector
,
fmatrix
,
frow
,
fcol
,
ftensor3
,
ftensor4
,
ftensor3
,
ftensor4
,
scalar
,
vector
,
matrix
,
row
,
col
,
scalar
,
vector
,
matrix
,
row
,
col
,
...
...
theano/sandbox/cuda/basic_ops.py
浏览文件 @
eced0049
...
@@ -3322,18 +3322,33 @@ class GpuIncSubtensor(tensor.IncSubtensor, GpuOp):
...
@@ -3322,18 +3322,33 @@ class GpuIncSubtensor(tensor.IncSubtensor, GpuOp):
return
()
return
()
class
GpuFlatten
(
gof
.
HideC
,
tensor
.
Flatten
,
GpuOp
):
#class GpuFlatten(gof.HideC, tensor.Reshape, GpuOp):
"""
# """
Implement Flatten on the gpu.
# Implement Flatten on the gpu.
#
"""
# """
#
def
make_node
(
self
,
x
):
# def make_node(self, x):
assert
isinstance
(
x
.
type
,
CudaNdarrayType
)
# warnings.warn(
rval
=
tensor
.
Flatten
.
make_node
(
self
,
x
)
# "GpuFlatten class is deprecated, "
host_out_broadcastable
=
rval
.
outputs
[
0
]
.
type
.
broadcastable
# "please use gpu_flatten method instead.",
out_type
=
CudaNdarrayType
(
broadcastable
=
host_out_broadcastable
)
# DeprecationWarning,
return
Apply
(
self
,
[
x
],
[
out_type
()])
# stacklevel=4)
# assert isinstance(x.type, CudaNdarrayType)
# rval = tensor.Reshape.make_node(self, x, [tensor.prod(x.shape)])
# host_out_broadcastable = rval.outputs[0].type.broadcastable
# out_type = CudaNdarrayType(broadcastable=host_out_broadcastable)
# return Apply(self, [x], [out_type()])
def
gpu_flatten
(
x
,
outdim
=
1
):
x
=
as_cuda_ndarray_variable
(
x
)
if
outdim
>
1
:
dims
=
tuple
(
x
.
shape
[:
outdim
-
1
])
+
(
theano
.
tensor
.
prod
(
x
.
shape
[
outdim
-
1
:]),)
else
:
dims
=
(
-
1
,)
return
GpuReshape
(
outdim
)(
x
,
dims
)
class
GpuShape
(
tensor
.
Shape
,
GpuOp
):
class
GpuShape
(
tensor
.
Shape
,
GpuOp
):
...
...
theano/sandbox/cuda/extra_ops.py
浏览文件 @
eced0049
...
@@ -3,7 +3,7 @@ import copy
...
@@ -3,7 +3,7 @@ import copy
from
theano
import
Op
from
theano
import
Op
from
theano.gof
import
local_optimizer
from
theano.gof
import
local_optimizer
from
theano.sandbox.cuda
import
cuda_available
,
GpuOp
from
theano.sandbox.cuda
import
cuda_available
,
GpuOp
from
theano.sandbox.cuda.basic_ops
import
GpuF
latten
from
theano.sandbox.cuda.basic_ops
import
gpu_f
latten
from
theano.tensor.extra_ops
import
CumsumOp
from
theano.tensor.extra_ops
import
CumsumOp
if
cuda_available
:
if
cuda_available
:
...
@@ -453,7 +453,7 @@ def use_gpu_cumsum(node):
...
@@ -453,7 +453,7 @@ def use_gpu_cumsum(node):
x
=
gpu_from_host
(
x
)
x
=
gpu_from_host
(
x
)
if
axis
is
None
and
x
.
ndim
>
1
:
if
axis
is
None
and
x
.
ndim
>
1
:
x
=
GpuFlatten
()
(
x
)
x
=
gpu_flatten
(
x
)
# ``gpu_cumsum`` assume array has been flattened if needed.
# ``gpu_cumsum`` assume array has been flattened if needed.
if
axis
is
None
:
if
axis
is
None
:
...
...
theano/sandbox/cuda/opt.py
浏览文件 @
eced0049
...
@@ -24,7 +24,9 @@ from theano.sandbox.cuda.basic_ops import (
...
@@ -24,7 +24,9 @@ from theano.sandbox.cuda.basic_ops import (
gpu_eye
,
gpu_contiguous
,
gpu_eye
,
gpu_contiguous
,
gpu_from_host
,
host_from_gpu
,
GpuFromHost
,
HostFromGpu
,
gpu_from_host
,
host_from_gpu
,
GpuFromHost
,
HostFromGpu
,
GpuContiguous
,
GpuContiguous
,
GpuElemwise
,
GpuDimShuffle
,
GpuReshape
,
GpuCAReduce
,
GpuFlatten
,
GpuElemwise
,
GpuDimShuffle
,
GpuReshape
,
GpuCAReduce
,
# GpuFlatten,
gpu_flatten
,
GpuSubtensor
,
GpuAdvancedSubtensor1
,
GpuSubtensor
,
GpuAdvancedSubtensor1
,
GpuAdvancedIncSubtensor1
,
GpuAdvancedIncSubtensor1_dev20
,
GpuAdvancedIncSubtensor1
,
GpuAdvancedIncSubtensor1_dev20
,
GpuIncSubtensor
,
gpu_alloc
,
GpuAlloc
,
gpu_shape
,
GpuSplit
,
GpuAllocEmpty
)
GpuIncSubtensor
,
gpu_alloc
,
GpuAlloc
,
gpu_shape
,
GpuSplit
,
GpuAllocEmpty
)
...
@@ -152,7 +154,7 @@ cpu_ops_moved_to_gpu = [
...
@@ -152,7 +154,7 @@ cpu_ops_moved_to_gpu = [
tensor
.
elemwise
.
All
,
tensor
.
elemwise
.
Any
,
tensor
.
elemwise
.
All
,
tensor
.
elemwise
.
Any
,
tensor
.
elemwise
.
CAReduceDtype
,
tensor
.
elemwise
.
Sum
,
tensor
.
elemwise
.
CAReduceDtype
,
tensor
.
elemwise
.
Sum
,
tensor
.
elemwise
.
Prod
,
tensor
.
elemwise
.
ProdWithoutZeros
,
tensor
.
elemwise
.
Prod
,
tensor
.
elemwise
.
ProdWithoutZeros
,
tensor
.
Reshape
,
tensor
.
F
latten
,
tensor
.
Subtensor
,
tensor
.
Reshape
,
tensor
.
f
latten
,
tensor
.
Subtensor
,
tensor
.
AdvancedSubtensor1
,
tensor
.
AdvancedIncSubtensor1
,
tensor
.
AdvancedSubtensor1
,
tensor
.
AdvancedIncSubtensor1
,
tensor
.
IncSubtensor
,
tensor
.
Shape
,
tensor
.
Join
,
tensor
.
IncSubtensor
,
tensor
.
Shape
,
tensor
.
Join
,
tensor
.
Alloc
,
tensor
.
Eye
]
tensor
.
Alloc
,
tensor
.
Eye
]
...
@@ -972,23 +974,23 @@ def local_gpu_reshape(node):
...
@@ -972,23 +974,23 @@ def local_gpu_reshape(node):
return
False
return
False
@register_opt
()
#
@register_opt()
@local_optimizer
([
gpu_from_host
,
tensor
.
Flatten
])
#@local_optimizer([gpu_from_host, tensor.Reshape
])
def
local_gpu_flatten
(
node
):
#
def local_gpu_flatten(node):
if
isinstance
(
node
.
op
,
GpuFromHost
):
#
if isinstance(node.op, GpuFromHost):
host_input
=
node
.
inputs
[
0
]
#
host_input = node.inputs[0]
if
host_input
.
owner
and
\
#
if host_input.owner and \
isinstance
(
host_input
.
owner
.
op
,
tensor
.
Flatten
):
# isinstance(host_input.owner.op, tensor.Reshape
):
outdim
=
host_input
.
owner
.
op
.
outdim
#
outdim = host_input.owner.op.outdim
return
[
GpuFlatten
(
outdim
)(
#
return [GpuFlatten(outdim)(
as_cuda_ndarray_variable
(
host_input
.
owner
.
inputs
[
0
]))]
#
as_cuda_ndarray_variable(host_input.owner.inputs[0]))]
if
isinstance
(
node
.
op
,
tensor
.
Flatten
):
# if isinstance(node.op, tensor.Reshape
):
x
,
=
node
.
inputs
# x, shp
= node.inputs
outdim
=
node
.
op
.
outdim
#
outdim = node.op.outdim
if
x
.
owner
and
isinstance
(
x
.
owner
.
op
,
HostFromGpu
):
#
if x.owner and isinstance(x.owner.op, HostFromGpu):
gpu_x
,
=
x
.
owner
.
inputs
#
gpu_x, = x.owner.inputs
return
[
host_from_gpu
(
GpuFlatten
(
outdim
)(
gpu_x
))]
#
return [host_from_gpu(GpuFlatten(outdim)(gpu_x))]
return
False
#
return False
@register_opt
()
@register_opt
()
...
...
theano/sandbox/cuda/tests/test_basic_ops.py
浏览文件 @
eced0049
...
@@ -307,7 +307,7 @@ def test_flatten():
...
@@ -307,7 +307,7 @@ def test_flatten():
x
=
cuda
.
fmatrix
(
'x'
)
x
=
cuda
.
fmatrix
(
'x'
)
f
=
theano
.
function
([
x
],
x
.
flatten
(),
mode
=
mode_with_gpu
)
f
=
theano
.
function
([
x
],
x
.
flatten
(),
mode
=
mode_with_gpu
)
assert
any
([
node
for
node
in
f
.
maker
.
fgraph
.
toposort
()
assert
any
([
node
for
node
in
f
.
maker
.
fgraph
.
toposort
()
if
isinstance
(
node
.
op
,
B
.
Gpu
Flatten
)])
if
isinstance
(
node
.
op
,
B
.
Gpu
Reshape
)])
assert
len
(
f
([[
0.
,
0.
],
[
0.
,
0.
]])
.
shape
)
==
1
assert
len
(
f
([[
0.
,
0.
],
[
0.
,
0.
]])
.
shape
)
==
1
...
...
theano/tensor/basic.py
浏览文件 @
eced0049
...
@@ -4499,160 +4499,183 @@ def reshape(x, newshape, ndim=None, name=None):
...
@@ -4499,160 +4499,183 @@ def reshape(x, newshape, ndim=None, name=None):
return
rval
return
rval
class
Flatten
(
Op
):
#class Flatten(Op):
"""
# """
Flatten a tensor.
# Flatten a tensor.
#
Flattens a tensor to `outdim` dimensions by preserving the leading
# Flattens a tensor to `outdim` dimensions by preserving the leading
outdim - 1 shape components.
# outdim - 1 shape components.
#
"""
# """
view_map
=
{
0
:
[
0
]}
# view_map = {0: [0]}
#
check_input
=
False
# check_input = False
__props__
=
(
"outdim"
,)
# __props__ = ("outdim",)
#
def
__init__
(
self
,
outdim
=
1
):
# def __init__(self, outdim=1):
self
.
outdim
=
int
(
outdim
)
# warnings.warn(
# "Flatten class is deprecated, "
def
__str__
(
self
):
# "please use flatten method instead.",
return
'
%
s{
%
s}'
%
(
self
.
__class__
.
__name__
,
self
.
outdim
)
# DeprecationWarning,
# stacklevel=4)
def
make_node
(
self
,
x
):
# self.outdim = int(outdim)
t_x
=
as_tensor_variable
(
x
)
#
if
self
.
outdim
<
1
or
(
x
.
ndim
and
self
.
outdim
>
x
.
ndim
):
# def __str__(self):
raise
ValueError
(
'invalid output ndimensions (
%
i) for tensor of '
# return '%s{%s}' % (self.__class__.__name__, self.outdim)
'rank
%
i'
%
(
self
.
outdim
,
t_x
.
ndim
))
#
# def make_node(self, x):
# Infer the broadcastable pattern of the output. For every dimension
# t_x = as_tensor_variable(x)
# unaffected by the flatten, the broadcast flag should be unchanged.
# if self.outdim < 1 or (x.ndim and self.outdim > x.ndim):
# For the dimension resulting from the collapse of other dimensions,
# raise ValueError('invalid output ndimensions (%i) for tensor of '
# it should be broadcastable iff all the collapsed dimensions were
# 'rank %i' % (self.outdim, t_x.ndim))
# broadcastable.
#
bcast_kept_dims
=
x
.
broadcastable
[:
self
.
outdim
-
1
]
# # Infer the broadcastable pattern of the output. For every dimension
bcast_new_dim
=
python_all
(
x
.
broadcastable
[
self
.
outdim
-
1
:])
# # unaffected by the flatten, the broadcast flag should be unchanged.
broadcastable
=
bcast_kept_dims
+
(
bcast_new_dim
,)
# # For the dimension resulting from the collapse of other dimensions,
# # it should be broadcastable iff all the collapsed dimensions were
return
gof
.
Apply
(
self
,
[
t_x
],
[
tensor
(
x
.
type
.
dtype
,
# # broadcastable.
broadcastable
)])
# bcast_kept_dims = x.broadcastable[:self.outdim - 1]
# bcast_new_dim = python_all(x.broadcastable[self.outdim - 1:])
def
perform
(
self
,
node
,
inp
,
out_
):
# broadcastable = bcast_kept_dims + (bcast_new_dim,)
x
,
=
inp
#
out
,
=
out_
# return gof.Apply(self, [t_x], [tensor(x.type.dtype,
outdim
=
self
.
outdim
# broadcastable)])
if
outdim
==
1
:
#
try
:
# def perform(self, node, inp, out_):
out
[
0
]
=
x
.
reshape
(
x
.
size
)
# x, = inp
except
AttributeError
:
# out, = out_
out
[
0
]
=
x
.
reshape
((
numpy
.
prod
(
x
.
shape
),))
# outdim = self.outdim
elif
outdim
==
len
(
x
.
shape
):
# if outdim == 1:
out
[
0
]
=
x
# try:
else
:
# out[0] = x.reshape(x.size)
newshape
=
(
x
.
shape
[:
outdim
-
1
]
+
# except AttributeError:
(
numpy
.
prod
(
x
.
shape
[
outdim
-
1
:]),))
# out[0] = x.reshape((numpy.prod(x.shape),))
out
[
0
]
=
x
.
reshape
(
newshape
)
# elif outdim == len(x.shape):
# out[0] = x
def
infer_shape
(
self
,
node
,
in_shapes
):
# else:
in_shp
,
=
in_shapes
# newshape = (x.shape[:outdim - 1] +
part1
=
in_shp
[:
self
.
outdim
-
1
]
# (numpy.prod(x.shape[outdim - 1:]),))
part2
=
in_shp
[
self
.
outdim
-
1
:]
# out[0] = x.reshape(newshape)
#
if
len
(
part2
)
>
1
:
# def infer_shape(self, node, in_shapes):
part2
=
(
prod
(
part2
,
dtype
=
'int64'
),)
# in_shp, = in_shapes
elif
len
(
part2
)
==
1
:
# part1 = in_shp[:self.outdim - 1]
# We do not want to force an upcast of part2 if its length is 1
# part2 = in_shp[self.outdim - 1:]
pass
#
else
:
# if len(part2) > 1:
if
len
(
in_shp
)
==
0
and
self
.
outdim
==
1
:
# part2 = (prod(part2, dtype='int64'),)
part2
=
(
1
,)
# elif len(part2) == 1:
else
:
# # We do not want to force an upcast of part2 if its length is 1
raise
ValueError
(
'invalid output ndimensions (
%
i) for tensor '
# pass
'of rank
%
i'
%
(
self
.
outdim
,
len
(
in_shp
)))
# else:
# if len(in_shp) == 0 and self.outdim == 1:
out_shape
=
(
part1
+
part2
)
# part2 = (1,)
return
[
out_shape
]
# else:
# raise ValueError('invalid output ndimensions (%i) for tensor '
def
grad
(
self
,
inp
,
grads
):
# 'of rank %i' % (self.outdim, len(in_shp)))
x
,
=
inp
#
g_out
,
=
grads
# out_shape = (part1 + part2)
return
[
reshape
(
g_out
,
shape
(
x
),
x
.
ndim
)]
# return [out_shape]
#
def
R_op
(
self
,
inputs
,
eval_points
):
# def grad(self, inp, grads):
if
None
in
eval_points
:
# x, = inp
return
[
None
]
# g_out, = grads
return
self
.
make_node
(
*
eval_points
)
.
outputs
# return [reshape(g_out, shape(x), x.ndim)]
#
def
c_code_cache_version
(
self
):
# def R_op(self, inputs, eval_points):
return
(
1
,
1
)
# if None in eval_points:
# return [None]
def
c_code
(
self
,
node
,
name
,
inputs
,
outputs
,
sub
):
# return self.make_node(*eval_points).outputs
x
,
=
inputs
#
out
,
=
outputs
# def c_code_cache_version(self):
outdim
=
self
.
outdim
# return (1, 1)
fail
=
sub
[
'fail'
]
#
return
"""
# def c_code(self, node, name, inputs, outputs, sub):
if (
%(outdim)
s == PyArray_NDIM(
%(x)
s))
# x, = inputs
{
# out, = outputs
Py_XDECREF(
%(out)
s);
# outdim = self.outdim
Py_XINCREF(
%(x)
s);
# fail = sub['fail']
%(out)
s =
%(x)
s;
# return """
}
# if (%(outdim)s == PyArray_NDIM(%(x)s))
else
# {
{
# Py_XDECREF(%(out)s);
Py_XDECREF(
%(out)
s);
# Py_XINCREF(%(x)s);
# %(out)s = %(x)s;
if (
%(outdim)
s == 1)
# }
{
# else
npy_intp size = PyArray_SIZE(
%(x)
s);
# {
PyArray_Dims newshape;
# Py_XDECREF(%(out)s);
newshape.ptr = &size;
#
newshape.len = 1;
# if (%(outdim)s == 1)
%(out)
s = (PyArrayObject*)PyArray_Newshape(
%(x)
s,
# {
&newshape,
# npy_intp size = PyArray_SIZE(%(x)s);
NPY_CORDER);
# PyArray_Dims newshape;
}
# newshape.ptr = &size;
else
# newshape.len = 1;
{
# %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
npy_intp *oldshape = PyArray_DIMS(
%(x)
s);
# &newshape,
npy_intp newshape_dims[
%(outdim)
s];
# NPY_CORDER);
# }
int i;
# else
for (i = 0; i <
%(outdim)
s - 1; ++i)
# {
newshape_dims[i] = oldshape[i];
# npy_intp *oldshape = PyArray_DIMS(%(x)s);
# npy_intp newshape_dims[%(outdim)s];
newshape_dims[i] = 1;
#
# int i;
for (int j =
%(outdim)
s - 1; j < PyArray_NDIM(
%(x)
s); ++j)
# for (i = 0; i < %(outdim)s - 1; ++i)
newshape_dims[i] *= oldshape[j];
# newshape_dims[i] = oldshape[i];
#
PyArray_Dims newshape;
# newshape_dims[i] = 1;
newshape.ptr = newshape_dims;
#
newshape.len =
%(outdim)
s;
# for (int j = %(outdim)s - 1; j < PyArray_NDIM(%(x)s); ++j)
%(out)
s = (PyArrayObject*)PyArray_Newshape(
%(x)
s,
# newshape_dims[i] *= oldshape[j];
&newshape,
#
NPY_CORDER);
# PyArray_Dims newshape;
}
# newshape.ptr = newshape_dims;
}
# newshape.len = %(outdim)s;
if (!
%(out)
s)
# %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
{
# &newshape,
//The error message should have been set by
# NPY_CORDER);
// PyArray_Newshape
# }
%(fail)
s;
# }
}
# if (!%(out)s)
if (!PyArray_ISALIGNED(
%(out)
s)) {
# {
PyErr_Format(
# //The error message should have been set by
PyExc_RuntimeError,
# // PyArray_Newshape
"PyArray_Newshape returned an object that isn't"
# %(fail)s;
" aligned! NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
# }
" this problem for some input shape/new shape"
# if (!PyArray_ISALIGNED(%(out)s)) {
" combinations. Use another NumPy version.");
# PyErr_Format(
%(fail)
s;
# PyExc_RuntimeError,
}
# "PyArray_Newshape returned an object that isn't"
"""
%
locals
()
# " aligned! NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
# " this problem for some input shape/new shape"
# " combinations. Use another NumPy version.");
# %(fail)s;
# }
# """ % locals()
def
is_flatten
(
node
,
outdim
=
1
):
return
isinstance
(
node
.
op
,
theano
.
tensor
.
Reshape
)
and
node
.
inputs
[
1
]
.
ndim
==
outdim
def
flatten
(
x
,
outdim
=
1
):
def
flatten
(
x
,
outdim
=
1
):
return
Flatten
(
outdim
)(
x
)
outdim
=
int
(
outdim
)
if
outdim
<
1
or
outdim
>
x
.
ndim
:
raise
ValueError
(
'outdim of flatten must an int in the range [1,
%
s], recieved
%
s'
%
(
x
.
ndim
,
outdim
))
if
outdim
>
1
:
dims
=
tuple
(
x
.
shape
[:
outdim
-
1
])
+
(
theano
.
tensor
.
prod
(
x
.
shape
[
outdim
-
1
:]),)
else
:
dims
=
(
-
1
,)
x_reshaped
=
x
.
reshape
(
dims
)
bcast_kept_dims
=
x
.
broadcastable
[:
outdim
-
1
]
bcast_new_dim
=
python_all
(
x
.
broadcastable
[
outdim
-
1
:])
broadcastable
=
bcast_kept_dims
+
(
bcast_new_dim
,)
broadcast_int
=
tuple
([
numpy
.
int
(
bc
)
for
bc
in
broadcastable
])
for
dim
,
br
in
enumerate
(
broadcast_int
):
if
br
:
x_reshaped
=
theano
.
tensor
.
addbroadcast
(
x_reshaped
,
dim
)
return
x_reshaped
# class TileGrad(Op):
# class TileGrad(Op):
...
...
theano/tensor/nnet/tests/test_sigm.py
浏览文件 @
eced0049
...
@@ -377,7 +377,7 @@ class T_softplus_opts(unittest.TestCase):
...
@@ -377,7 +377,7 @@ class T_softplus_opts(unittest.TestCase):
f
=
theano
.
function
([
x
],
out
,
mode
=
self
.
m
)
f
=
theano
.
function
([
x
],
out
,
mode
=
self
.
m
)
topo
=
f
.
maker
.
fgraph
.
toposort
()
topo
=
f
.
maker
.
fgraph
.
toposort
()
assert
len
(
topo
)
==
3
assert
len
(
topo
)
==
3
assert
isinstance
(
topo
[
0
]
.
op
,
T
.
Flatten
)
tensor
.
is_flatten
(
topo
[
0
]
)
assert
isinstance
(
topo
[
1
]
.
op
.
scalar_op
,
assert
isinstance
(
topo
[
1
]
.
op
.
scalar_op
,
theano
.
tensor
.
nnet
.
sigm
.
ScalarSoftplus
)
theano
.
tensor
.
nnet
.
sigm
.
ScalarSoftplus
)
assert
isinstance
(
topo
[
2
]
.
op
.
scalar_op
,
theano
.
scalar
.
Neg
)
assert
isinstance
(
topo
[
2
]
.
op
.
scalar_op
,
theano
.
scalar
.
Neg
)
...
...
theano/tensor/opt.py
浏览文件 @
eced0049
...
@@ -3877,24 +3877,24 @@ def local_useless_split(node):
...
@@ -3877,24 +3877,24 @@ def local_useless_split(node):
################
################
# Flatten Opts #
# Flatten Opts #
################
################
@register_canonicalize
#
@register_canonicalize
@register_stabilize
#
@register_stabilize
@gof.local_optimizer
([
T
.
Flatten
])
#
@gof.local_optimizer([T.Flatten])
def
local_flatten_lift
(
node
):
#
def local_flatten_lift(node):
"
""
#
""
Flatten(UnaryElemwise(x)) -> UnaryElemwise(Flatten(x))
#
Flatten(UnaryElemwise(x)) -> UnaryElemwise(Flatten(x))
#
This optimization is needed by optimization
#
This optimization is needed by optimization
nnet/sigm.py:log1msigm_to_softplus to get applied when there is a flatten.
#
nnet/sigm.py:log1msigm_to_softplus to get applied when there is a flatten.
#
"
""
#
""
if
(
isinstance
(
node
.
op
,
T
.
Flatten
)
and
#
if (isinstance(node.op, T.Flatten) and
node
.
inputs
[
0
]
.
owner
and
#
node.inputs[0].owner and
isinstance
(
node
.
inputs
[
0
]
.
owner
.
op
,
T
.
Elemwise
)
and
#
isinstance(node.inputs[0].owner.op, T.Elemwise) and
len
(
node
.
inputs
[
0
]
.
owner
.
inputs
)
==
1
):
#
len(node.inputs[0].owner.inputs) == 1):
f
=
node
.
op
(
node
.
inputs
[
0
]
.
owner
.
inputs
[
0
])
#
f = node.op(node.inputs[0].owner.inputs[0])
e
=
node
.
inputs
[
0
]
.
owner
.
op
(
f
)
#
e = node.inputs[0].owner.op(f)
return
[
e
]
#
return [e]
##################
##################
# Reshape opts #
# Reshape opts #
...
...
theano/tensor/tests/test_basic.py
浏览文件 @
eced0049
...
@@ -32,7 +32,7 @@ from theano.tensor import (_shared, wvector, bvector, autocast_float_as,
...
@@ -32,7 +32,7 @@ from theano.tensor import (_shared, wvector, bvector, autocast_float_as,
alloc
,
as_tensor_variable
,
tensor_from_scalar
,
ARange
,
autocast_float
,
alloc
,
as_tensor_variable
,
tensor_from_scalar
,
ARange
,
autocast_float
,
clip
,
constant
,
default
,
dot
,
clip
,
constant
,
default
,
dot
,
dmatrix
,
dscalar
,
dvector
,
eq
,
eye
,
fill
,
flatten
,
inverse_permutation
,
dmatrix
,
dscalar
,
dvector
,
eq
,
eye
,
fill
,
flatten
,
inverse_permutation
,
tensor4
,
permute_row_elements
,
Flatten
,
fmatrix
,
fscalars
,
grad
,
tensor4
,
permute_row_elements
,
fmatrix
,
fscalars
,
grad
,
inplace
,
iscalar
,
matrix
,
minimum
,
matrices
,
maximum
,
mul
,
neq
,
inplace
,
iscalar
,
matrix
,
minimum
,
matrices
,
maximum
,
mul
,
neq
,
Reshape
,
row
,
scalar
,
scalars
,
second
,
smallest
,
stack
,
sub
,
Tensor
,
Reshape
,
row
,
scalar
,
scalars
,
second
,
smallest
,
stack
,
sub
,
Tensor
,
tensor_copy
,
tensordot
,
TensorType
,
Tri
,
tri
,
tril
,
triu
,
unbroadcast
,
tensor_copy
,
tensordot
,
TensorType
,
Tri
,
tri
,
tril
,
triu
,
unbroadcast
,
...
@@ -5147,11 +5147,6 @@ def test_make_column_matrix_broadcastable():
...
@@ -5147,11 +5147,6 @@ def test_make_column_matrix_broadcastable():
def
test_flatten_outdimNone
():
def
test_flatten_outdimNone
():
"""Flatten always returns a copy of the array. There is no danger
with in-place operations and thus no need to test it.
"""
a
=
dmatrix
()
a
=
dmatrix
()
c
=
flatten
(
a
)
c
=
flatten
(
a
)
f
=
inplace_func
([
a
],
c
)
f
=
inplace_func
([
a
],
c
)
...
@@ -5161,7 +5156,7 @@ def test_flatten_outdimNone():
...
@@ -5161,7 +5156,7 @@ def test_flatten_outdimNone():
f
=
inplace_func
([
a
],
c
)
f
=
inplace_func
([
a
],
c
)
assert
numpy
.
all
(
f
(
a_val
)
==
c_val
)
assert
numpy
.
all
(
f
(
a_val
)
==
c_val
)
utt
.
verify_grad
(
Flatten
()
,
[
a_val
])
utt
.
verify_grad
(
flatten
,
[
a_val
])
def
test_flatten_scalar
():
def
test_flatten_scalar
():
...
@@ -5174,7 +5169,7 @@ def test_flatten_scalar():
...
@@ -5174,7 +5169,7 @@ def test_flatten_scalar():
f
=
inplace_func
([
a
],
c
)
f
=
inplace_func
([
a
],
c
)
assert
numpy
.
all
(
f
(
a_val
)
==
c_val
)
assert
numpy
.
all
(
f
(
a_val
)
==
c_val
)
# utt.verify_grad(
Flatten()
, [a_val]) #TODO: fix verify_grd to work on scalars
# utt.verify_grad(
flatten
, [a_val]) #TODO: fix verify_grd to work on scalars
def
test_flatten_outdim1
():
def
test_flatten_outdim1
():
...
@@ -5187,7 +5182,7 @@ def test_flatten_outdim1():
...
@@ -5187,7 +5182,7 @@ def test_flatten_outdim1():
f
=
inplace_func
([
a
],
c
)
f
=
inplace_func
([
a
],
c
)
assert
numpy
.
all
(
f
(
a_val
)
==
c_val
)
assert
numpy
.
all
(
f
(
a_val
)
==
c_val
)
utt
.
verify_grad
(
Flatten
(
1
)
,
[
a_val
])
utt
.
verify_grad
(
flatten
,
[
a_val
])
def
test_flatten_outdim2
():
def
test_flatten_outdim2
():
...
@@ -5199,7 +5194,7 @@ def test_flatten_outdim2():
...
@@ -5199,7 +5194,7 @@ def test_flatten_outdim2():
f
=
inplace_func
([
a
],
c
)
f
=
inplace_func
([
a
],
c
)
assert
numpy
.
all
(
f
(
a_val
)
==
a_val
)
assert
numpy
.
all
(
f
(
a_val
)
==
a_val
)
utt
.
verify_grad
(
Flatten
(
2
)
,
[
a_val
])
utt
.
verify_grad
(
flatten
,
[
a_val
])
def
test_flatten_outdim2_of_3
():
def
test_flatten_outdim2_of_3
():
...
@@ -5213,7 +5208,7 @@ def test_flatten_outdim2_of_3():
...
@@ -5213,7 +5208,7 @@ def test_flatten_outdim2_of_3():
f
=
inplace_func
([
a
],
c
)
f
=
inplace_func
([
a
],
c
)
assert
numpy
.
all
(
f
(
a_val
)
==
c_val
)
assert
numpy
.
all
(
f
(
a_val
)
==
c_val
)
utt
.
verify_grad
(
Flatten
(
2
)
,
[
a_val
])
utt
.
verify_grad
(
flatten
,
[
a_val
])
def
test_flatten_broadcastable
():
def
test_flatten_broadcastable
():
...
@@ -7128,24 +7123,27 @@ class TestInferShape(utt.InferShapeTester):
...
@@ -7128,24 +7123,27 @@ class TestInferShape(utt.InferShapeTester):
# Flatten
# Flatten
atens3
=
tensor3
()
atens3
=
tensor3
()
atens3_val
=
rand
(
4
,
5
,
3
)
atens3_val
=
rand
(
4
,
5
,
3
)
for
outdim
in
(
3
,
2
,
1
):
self
.
_compile_and_check
([
atens3
],
self
.
_compile_and_check
([
atens3
],
[
flatten
(
atens3
,
1
)],
[
Flatten
(
outdim
)(
atens3
)],
[
atens3_val
],
Reshape
)
[
atens3_val
],
Flatten
)
#for outdim in (3, 2, 1):
# self._compile_and_check([atens3],
amat
=
matrix
()
# [flatten(atens3, outdim)],
amat_val
=
rand
(
4
,
5
)
# [atens3_val], Reshape)
for
outdim
in
(
2
,
1
):
self
.
_compile_and_check
([
amat
],
#amat = matrix()
[
Flatten
(
outdim
)(
amat
)],
#amat_val = rand(4, 5)
[
amat_val
],
Flatten
)
#for outdim in (2, 1):
# self._compile_and_check([amat],
avec
=
vector
()
# [flatten(amat, outdim)],
avec_val
=
rand
(
4
)
# [amat_val], Reshape)
outdim
=
1
self
.
_compile_and_check
([
avec
],
#avec = vector()
[
Flatten
(
outdim
)(
avec
)],
#avec_val = rand(4)
[
avec_val
],
Flatten
)
#outdim = 1
#self._compile_and_check([avec],
# [flatten(avec, outdim)],
# [avec_val], Reshape)
# Eye
# Eye
aiscal
=
iscalar
()
aiscal
=
iscalar
()
...
...
theano/tensor/tests/test_opt.py
浏览文件 @
eced0049
...
@@ -60,6 +60,7 @@ from theano.tests import unittest_tools as utt
...
@@ -60,6 +60,7 @@ from theano.tests import unittest_tools as utt
from
theano.compile.mode
import
optdb
from
theano.compile.mode
import
optdb
from
theano.compile
import
Mode
from
theano.compile
import
Mode
from
nose.plugins.attrib
import
attr
from
nose.plugins.attrib
import
attr
from
theano.tensor.basic
import
flatten
,
is_flatten
mode_opt
=
theano
.
config
.
mode
mode_opt
=
theano
.
config
.
mode
if
mode_opt
==
'FAST_COMPILE'
:
if
mode_opt
==
'FAST_COMPILE'
:
...
@@ -5879,18 +5880,19 @@ def test_local_useless_split():
...
@@ -5879,18 +5880,19 @@ def test_local_useless_split():
def
test_local_flatten_lift
():
def
test_local_flatten_lift
():
for
i
in
xrange
(
1
,
4
):
for
i
in
xrange
(
1
,
4
):
op
=
tensor
.
Flatten
(
i
)
x
=
tensor
.
tensor4
()
x
=
tensor
.
tensor4
()
out
=
op
(
T
.
exp
(
x
)
)
out
=
tensor
.
flatten
(
T
.
exp
(
x
),
i
)
assert
out
.
ndim
==
i
assert
out
.
ndim
==
i
mode
=
compile
.
mode
.
get_default_mode
()
mode
=
compile
.
mode
.
get_default_mode
()
mode
=
mode
.
including
(
'local_flatten_lift'
)
mode
=
mode
.
including
(
'local_flatten_lift'
)
f
=
theano
.
function
([
x
],
out
,
mode
=
mode
)
f
=
theano
.
function
([
x
],
out
,
mode
=
mode
)
f
(
numpy
.
random
.
rand
(
5
,
4
,
3
,
2
)
.
astype
(
config
.
floatX
))
x_np
=
numpy
.
random
.
rand
(
5
,
4
,
3
,
2
)
.
astype
(
config
.
floatX
)
out_np
=
f
(
x_np
)
topo
=
f
.
maker
.
fgraph
.
toposort
()
topo
=
f
.
maker
.
fgraph
.
toposort
()
assert
len
(
topo
)
==
2
shape_out_np
=
tuple
(
x_np
.
shape
[:
i
-
1
])
+
(
numpy
.
prod
(
x_np
.
shape
[
i
-
1
:]),)
assert
isinstance
(
topo
[
0
]
.
op
,
tensor
.
Flatten
)
assert
shape_out_np
==
out_np
.
shape
assert
isinstance
(
topo
[
1
]
.
op
,
tensor
.
Elemwise
)
tensor
.
is_flatten
(
topo
[
0
],
outdim
=
i
)
assert
isinstance
(
topo
[
-
1
]
.
op
,
tensor
.
Elemwise
)
class
Test_Reshape
(
unittest
.
TestCase
):
class
Test_Reshape
(
unittest
.
TestCase
):
...
...
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论