Commit c26f8cbf authored by Sina Honari

applying requested changes

Parent commit: 3a264877
......@@ -324,11 +324,6 @@ if cuda_available:
ftensor3, ftensor4,
scalar, vector, matrix, row, col,
tensor3, tensor4)
warnings.warn(
"GpuFlatten class is deprecated, "
"please use gpu_flatten method instead.",
DeprecationWarning,
stacklevel=4)
from .basic_ops import (host_from_gpu, gpu_from_host,
as_cuda_array, as_cuda_ndarray_variable)
import cuda_ndarray
......
......@@ -3322,16 +3322,22 @@ class GpuIncSubtensor(tensor.IncSubtensor, GpuOp):
return ()
class GpuFlatten(gof.HideC, tensor.Reshape, GpuOp):
class GpuFlatten(gof.HideC, tensor.Flatten, GpuOp):
"""
Implement Flatten on the gpu.
Note: The interface GpuFlatten is deprecated, you should use gpu_flatten
.. note:: The interface GpuFlatten is deprecated, you should use gpu_flatten.
"""
def __init__(self):
    # Constructing a GpuFlatten instance directly is deprecated; steer
    # callers toward the gpu_flatten() helper function instead.
    # NOTE(review): stacklevel=4 presumably makes the warning point at the
    # user's call site through the op-construction layers -- confirm.
    # NOTE(review): there is no super().__init__() call here, so any state
    # the parent Flatten op sets (e.g. outdim) may be left uninitialized --
    # confirm this is intentional.
    warnings.warn(
        "GpuFlatten class is deprecated, "
        "please use gpu_flatten method instead.",
        DeprecationWarning,
        stacklevel=4)
def make_node(self, x):
    """
    Build the Apply node for flattening `x` on the GPU.

    Parameters
    ----------
    x : Variable with CudaNdarrayType
        The GPU variable to flatten.

    Returns
    -------
    Apply
        An apply node whose single output is a CudaNdarrayType variable
        with the broadcastable pattern the host Flatten op would produce.
    """
    # The input must already live on the GPU.
    assert isinstance(x.type, CudaNdarrayType)
    # Diff artifact fixed: the superseded `rval = flatten(x)` line from the
    # pre-commit version was interleaved here; only the updated call to the
    # host op's shape inference is kept.
    rval = tensor.Flatten.make_node(self, x)
    # Mirror the host output's broadcastable pattern onto a GPU type.
    host_out_broadcastable = rval.outputs[0].type.broadcastable
    out_type = CudaNdarrayType(broadcastable=host_out_broadcastable)
    return Apply(self, [x], [out_type()])
......@@ -3341,14 +3347,23 @@ class GpuFlatten(gof.HideC, tensor.Reshape, GpuOp):
def gpu_flatten(x, outdim=1):
"""
Implement flatten on the gpu.
Reshapes the variable x by keeping
the first outdim-1 dimension size(s) of x the same,
and making the last dimension size of x equal to
the multiplication of its remaining dimension size(s).
:param x: the variable that should be reshaped.
:type x: theano.tensor.var.TensorVariable
Parameters
----------
x : theano.tensor.var.TensorVariable
the variable that should be reshaped.
:param outdim: the number of dimensions of the returned variable
:type outdim: int
outdim : int
the number of dimensions of the returned variable
:returns: the flattend variable with dimensionality of outdim
Returns
-------
theano.tensor.var.TensorVariable
the flattend variable with dimensionality of outdim
"""
x = as_cuda_ndarray_variable(x)
if outdim > 1:
......
......@@ -981,14 +981,14 @@ def local_gpu_flatten(node):
if host_input.owner and \
isinstance(host_input.owner.op, tensor.Flatten):
outdim = host_input.owner.op.outdim
return [gpu_flatten(outdim)(
return [gpu_flatten(host_input.owner.inputs[0], outdim)(
as_cuda_ndarray_variable(host_input.owner.inputs[0]))]
if isinstance(node.op, tensor.Flatten):
x, shp= node.inputs
outdim = node.op.outdim
if x.owner and isinstance(x.owner.op, HostFromGpu):
gpu_x, = x.owner.inputs
return [host_from_gpu(gpu_flatten(outdim)(gpu_x))]
return [host_from_gpu(gpu_flatten(host_input.owner.inputs[0], outdim)(gpu_x))]
return False
......
......@@ -308,6 +308,7 @@ def test_flatten():
f = theano.function([x], x.flatten(), mode=mode_with_gpu)
assert any([node for node in f.maker.fgraph.toposort()
if isinstance(node.op, B.GpuReshape)])
assert theano.tensor.is_flat(x.flatten())
assert len(f([[0., 0.], [0., 0.]]).shape) == 1
......
......@@ -4506,7 +4506,7 @@ class Flatten(Op):
Flattens a tensor to `outdim` dimensions by preserving the leading
outdim - 1 shape components.
Note: The interface Flatten(Op) is deprecated, you should use flatten
.. note:: The interface Flatten(Op) is deprecated, you should use flatten.
"""
view_map = {0: [0]}
......@@ -4659,16 +4659,26 @@ class Flatten(Op):
def is_flat(node, outdim=1):
    """
    Verify that the dimensionality of `node` equals `outdim`.

    This is usually called after `flatten` has been applied to a
    variable: flattening keeps the first outdim-1 dimension size(s)
    intact and collapses the remaining dimensions into the last one,
    so the result should have exactly `outdim` dimensions.

    Parameters
    ----------
    node : theano.tensor.var.TensorVariable
        The variable whose dimensionality is checked.
    outdim : int
        The expected number of dimensions.

    Returns
    -------
    bool
        True if `node.ndim` equals `outdim`, False otherwise.
    """
    # Diff artifact fixed: the docstring above replaces the interleaved
    # pre/post-commit docstring lines; the logic itself is unchanged.
    return node.ndim == outdim
......@@ -4676,19 +4686,23 @@ def is_flat(node, outdim=1):
def flatten(x, outdim=1):
"""
Reshapes the variable x by keeping
the first outdim-1 dimension(s) of x the same,
and making the last dimension of x equal to
the multiplication of its remaining dimensions.
the first outdim-1 dimension size(s) of x the same,
and making the last dimension size of x equal to
the multiplication of its remaining dimension size(s).
:param x: the theano variable that should be reshaped.
:type x: theano.tensor.var.TensorVariable
Parameters
----------
x : theano.tensor.var.TensorVariable
the variable that should be reshaped.
:param outdim: the number of dimensions of the returned variable
:type outdim: int
outdim : int
the number of dimensions of the returned variable
:returns: the flattend variable with dimensionality of outdim
Returns
-------
theano.tensor.var.TensorVariable
the flattend variable with dimensionality of outdim
"""
outdim = int(outdim)
# Any input variable can be flattened to have outdim of 1,
# even if it's a scalar. Otherwise, outdim must be positive
# and smaller than x.ndim.
......
......@@ -377,7 +377,7 @@ class T_softplus_opts(unittest.TestCase):
f = theano.function([x], out, mode=self.m)
topo = f.maker.fgraph.toposort()
assert len(topo) == 3
assert tensor.is_flat(topo[0])
assert tensor.is_flat(topo[0].outputs[0])
assert isinstance(topo[1].op.scalar_op,
theano.tensor.nnet.sigm.ScalarSoftplus)
assert isinstance(topo[2].op.scalar_op, theano.scalar.Neg)
......
......@@ -18,6 +18,7 @@ from nose.plugins.skip import SkipTest
import numpy
from numpy.testing import dec, assert_array_equal, assert_allclose
from distutils.version import LooseVersion
from functools import partial
import theano
from theano.compat import PY3, exc_message, operator_div
......@@ -5194,7 +5195,6 @@ def test_flatten_outdim2():
f = inplace_func([a], c)
assert numpy.all(f(a_val) == a_val)
from functools import partial
flatten_2 = partial(flatten, outdim=2)
utt.verify_grad(flatten_2, [a_val])
......@@ -5210,7 +5210,6 @@ def test_flatten_outdim2_of_3():
f = inplace_func([a], c)
assert numpy.all(f(a_val) == c_val)
from functools import partial
flatten_2 = partial(flatten, outdim=2)
utt.verify_grad(flatten_2, [a_val])
......
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment