Commit d5944c96 authored by nke001, committed by abergeron

Ccw4057 (#4592)

* flake8 changes * flake8 changes * add missing imports * code cleanup * code clean up * flake8 changes * code cleanup * code cleanup * code cleanup * code cleanup * add missing import
Parent 74b4c807
from __future__ import absolute_import, print_function, division
......@@ -11,7 +11,7 @@ from six.moves import xrange
import theano
from theano.gof.opt import check_stack_trace
from theano.tensor.nnet.conv3d2d import *
from theano.tensor.nnet.conv3d2d import conv3d, get_diagonal_subtensor_view, DiagonalSubtensor, IncDiagonalSubtensor
import theano.tests.unittest_tools as utt
......@@ -57,11 +57,11 @@ def pyconv3d(signals, filters):
Ns, Ts, C, Hs, Ws = signals.shape
Nf, Tf, C, Hf, Wf = filters.shape
Tf2 = Tf//2
Hf2 = Hf//2
Wf2 = Wf//2
Tf2 = Tf // 2
Hf2 = Hf // 2
Wf2 = Wf // 2
rval = numpy.zeros((Ns, Ts-Tf+1, Nf, Hs-Hf+1, Ws-Wf+1))
rval = numpy.zeros((Ns, Ts - Tf + 1, Nf, Hs - Hf + 1, Ws - Wf + 1))
for ns in xrange(Ns):
for nf in xrange(Nf):
for c in xrange(C):
......@@ -71,7 +71,7 @@ def pyconv3d(signals, filters):
o_i = ndimage.convolve(s_i, f_i, mode='constant', cval=1)
o_i_sh0 = o_i.shape[0]
# print s_i.shape, f_i.shape, r_i.shape, o_i.shape
r_i += o_i[Tf2:o_i_sh0-Tf2, Hf2:-Hf2, Wf2:-Wf2]
r_i += o_i[Tf2:o_i_sh0 - Tf2, Hf2:-Hf2, Wf2:-Wf2]
return rval
......@@ -85,10 +85,10 @@ def test_conv3d(mode=mode_without_gpu, shared=theano.tensor._shared):
raise SkipTest("conv3d2d tests need SciPy")
Ns, Ts, C, Hs, Ws = 3, 10, 3, 32, 32
Nf, Tf, C, Hf, Wf = 32, 5 , 3, 5 , 5
Nf, Tf, C, Hf, Wf = 32, 5, 3, 5, 5
signals = numpy.arange(Ns*Ts*C*Hs*Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
filters = numpy.arange(Nf*Tf*C*Hf*Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
signals = numpy.arange(Ns * Ts * C * Hs * Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
filters = numpy.arange(Nf * Tf * C * Hf * Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
t0 = time.time()
pyres = pyconv3d(signals, filters)
......@@ -96,7 +96,7 @@ def test_conv3d(mode=mode_without_gpu, shared=theano.tensor._shared):
s_signals = shared(signals)
s_filters = shared(filters)
s_output = shared(signals*0)
s_output = shared(signals * 0)
out = conv3d(s_signals, s_filters,
signals_shape=signals.shape,
......@@ -130,12 +130,12 @@ def test_conv3d(mode=mode_without_gpu, shared=theano.tensor._shared):
filters = numpy.random.rand(Nf, Tf, C, Hf, Wf).astype('float32')
utt.verify_grad(conv3d, [signals, filters], eps=1e-1, mode=mode)
### Additional Test that covers the case of patched implementation for filter with Tf=1
# Additional Test that covers the case of patched implementation for filter with Tf=1
Ns, Ts, C, Hs, Ws = 3, 10, 3, 32, 32
Nf, Tf, C, Hf, Wf = 32, 1 , 3, 5 , 5
Nf, Tf, C, Hf, Wf = 32, 1, 3, 5, 5
signals = numpy.arange(Ns*Ts*C*Hs*Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
filters = numpy.arange(Nf*Tf*C*Hf*Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
signals = numpy.arange(Ns * Ts * C * Hs * Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
filters = numpy.arange(Nf * Tf * C * Hf * Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
t0 = time.time()
pyres = pyconv3d(signals, filters)
......@@ -143,7 +143,7 @@ def test_conv3d(mode=mode_without_gpu, shared=theano.tensor._shared):
s_signals = shared(signals)
s_filters = shared(filters)
s_output = shared(signals*0)
s_output = shared(signals * 0)
out = conv3d(s_signals, s_filters,
signals_shape=signals.shape,
......
......@@ -16,7 +16,7 @@ from theano.tensor.nnet.sigm import (
compute_mul, is_1pexp, parse_mul_tree, perform_sigm_times_exp,
register_local_1msigmoid, simplify_mul,
)
from theano.tensor.tests.test_basic import (makeBroadcastTester, rand,
from theano.tensor.tests.test_basic import (makeBroadcastTester,
check_floatX, upcast_int8_nfunc,
_good_broadcast_unary_normal_no_complex)
......@@ -45,7 +45,7 @@ UltraFastSigmoidTester = makeBroadcastTester(
good=_good_broadcast_unary_normal_no_complex,
# grad=_grad_broadcast_unary_normal,
name='UltraFastSigmoidTester',
# This is an approx of the sigmoid. That is why we raise eps
# This is an approx of the sigmoid. That is why we raise eps
eps=5e-2)
HardSigmoidTester = makeBroadcastTester(
......@@ -55,7 +55,7 @@ HardSigmoidTester = makeBroadcastTester(
good=_good_broadcast_unary_normal_no_complex,
# grad=_grad_broadcast_unary_normal,
name='HardSigmoidTester',
# This is an approx of the sigmoid. That is why we raise eps
# This is an approx of the sigmoid. That is why we raise eps
eps=1e-1)
......@@ -146,20 +146,20 @@ class T_sigmoid_opts(unittest.TestCase):
# todo: solve issue #4589 first
# assert check_stack_trace(
# f, ops_to_check=[sigmoid, theano.tensor.inplace.neg_inplace])
assert [node.op for node in f.maker.fgraph.toposort()] == [sigmoid,
theano.tensor.inplace.neg_inplace]
assert ([node.op for node in f.maker.fgraph.toposort()] ==
[sigmoid, theano.tensor.inplace.neg_inplace])
f(data)
f = theano.function([x], T.fill(x, -1.0) / (1 - T.exp(-x)), mode=m)
assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
theano.tensor.inplace.neg_inplace]
assert ([node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
theano.tensor.inplace.neg_inplace])
f(data)
f = theano.function([x], T.fill(x, -1.0) / (2 + T.exp(-x)), mode=m)
assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
theano.tensor.inplace.neg_inplace]
assert ([node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
theano.tensor.inplace.neg_inplace])
f(data)
f = theano.function([x], T.fill(x, -1.1) / (1 + T.exp(-x)), mode=m)
assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
theano.tensor.inplace.neg_inplace]
assert ([node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
theano.tensor.inplace.neg_inplace])
f(data)
# tests double inv_1_plus_exp with neg
......@@ -170,33 +170,33 @@ class T_sigmoid_opts(unittest.TestCase):
((1 + T.exp(x)) * (1 + T.exp(-x))), mode=m)
# todo: solve issue #4589 first
# assert check_stack_trace(f, ops_to_check=[sigmoid, T.mul])
assert [node.op for node in f.maker.fgraph.toposort()] == [sigmoid,
T.mul]
assert ([node.op for node in f.maker.fgraph.toposort()] == [sigmoid,
T.mul])
f(data)
f = theano.function([x], (T.fill(x, -1.1) * T.exp(x)) /
((1 + T.exp(x)) * (1 + T.exp(-x))), mode=m)
assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
T.mul, theano.tensor.inplace.neg_inplace]
assert ([node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
T.mul, theano.tensor.inplace.neg_inplace])
f(data)
f = theano.function([x], (T.fill(x, -1.0) * T.exp(x)) /
((2 + T.exp(x)) * (1 + T.exp(-x))), mode=m)
assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
T.mul, theano.tensor.inplace.neg_inplace]
assert ([node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
T.mul, theano.tensor.inplace.neg_inplace])
f(data)
f = theano.function([x], (T.fill(x, -1.0) * T.exp(x)) /
((1 + T.exp(x)) * (2 + T.exp(-x))), mode=m)
assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
T.mul, theano.tensor.inplace.neg_inplace]
assert ([node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
T.mul, theano.tensor.inplace.neg_inplace])
f(data)
f = theano.function([x], (T.fill(x, -1.0) * T.exp(x)) /
((1 + T.exp(x)) * (1 + T.exp(x))), mode=m)
assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
T.mul, theano.tensor.inplace.neg_inplace]
assert ([node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
T.mul, theano.tensor.inplace.neg_inplace])
f(data)
f = theano.function([x], (T.fill(x, -1.0) * T.exp(x)) /
((1 + T.exp(x)) * (2 + T.exp(-x))), mode=m)
assert [node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
T.mul, theano.tensor.inplace.neg_inplace]
assert ([node.op for node in f.maker.fgraph.toposort()] != [sigmoid,
T.mul, theano.tensor.inplace.neg_inplace])
f(data)
finally:
......@@ -219,8 +219,8 @@ class T_sigmoid_opts(unittest.TestCase):
# tests inv_1_plus_exp
f = theano.function([x], 1 - T.fill(x, 1.0) / (1 + T.exp(-x)), mode=m)
assert check_stack_trace(f, ops_to_check=[tensor.neg, sigmoid_inplace])
assert [node.op for node in f.maker.fgraph.toposort()] == [tensor.neg,
sigmoid_inplace]
assert ([node.op for node in f.maker.fgraph.toposort()] == [tensor.neg,
sigmoid_inplace])
def test_local_sigm_times_exp(self):
"""
......@@ -247,10 +247,9 @@ class T_sigmoid_opts(unittest.TestCase):
# assert check_stack_trace(f, ops_to_check=sigmoid)
f = theano.function(
[x, y],
(sigmoid(x) * sigmoid(-y) * -tensor.exp(-x) *
tensor.exp(x * y) * tensor.exp(y)),
mode=m)
[x, y],
(sigmoid(x) * sigmoid(-y) * -tensor.exp(-x) *
tensor.exp(x * y) * tensor.exp(y)), mode=m)
match(f, [sigmoid, tensor.mul, tensor.neg, tensor.exp, sigmoid,
tensor.mul])
# assert check_stack_trace(f, ops_to_check=[sigmoid, tensor.mul,
......@@ -271,8 +270,8 @@ class T_sigmoid_opts(unittest.TestCase):
perform_sigm_times_exp(trees[0])
trees[0] = simplify_mul(trees[0])
good = theano.gof.graph.is_same_graph(
compute_mul(trees[0]),
compute_mul(trees[1]))
compute_mul(trees[0]),
compute_mul(trees[1]))
if not good:
print(trees[0])
print(trees[1])
......@@ -286,11 +285,11 @@ class T_sigmoid_opts(unittest.TestCase):
-x * sigmoid(-x) * (y * (-1 * z)))
ok(-sigmoid(-x) *
(exp(y) * (-exp(-z) * 3 * -exp(x)) *
(y * 2 * (-sigmoid(-y) * (z + t) * exp(z)) * sigmoid(z))) *
-sigmoid(x),
(y * 2 * (-sigmoid(-y) * (z + t) * exp(z)) * sigmoid(z))) * -
sigmoid(x),
sigmoid(x) *
(-sigmoid(y) * (-sigmoid(-z) * 3) * (y * 2 * ((z + t) * exp(z)))) *
-sigmoid(x))
(-sigmoid(x)))
ok(exp(-x) * -exp(-x) * (-sigmoid(x) * -sigmoid(x)),
-sigmoid(-x) * sigmoid(-x))
ok(-exp(x) * -sigmoid(-x) * -exp(-x),
......@@ -333,7 +332,7 @@ class T_sigmoid_opts(unittest.TestCase):
topo = f.maker.fgraph.toposort()
assert topo[0].op == ultra_fast_sigmoid
assert len(topo) == 1
ux_v = f([[-50, -10, -4, -1, 0, 1, 4, 10, 50]])
f([[-50, -10, -4, -1, 0, 1, 4, 10, 50]])
def test_local_hard_sigmoid(self):
x = tensor.matrix('x')
......@@ -350,7 +349,7 @@ class T_sigmoid_opts(unittest.TestCase):
f = theano.function([x], s, mode=mode)
topo = f.maker.fgraph.toposort()
assert not any([n.op == sigmoid for n in topo])
ux_v = f([[-50, -10, -4, -1, 0, 1, 4, 10, 50]])
f([[-50, -10, -4, -1, 0, 1, 4, 10, 50]])
mode2 = mode.excluding('fusion').excluding('inplace')
f2 = theano.function([x], s, mode=mode2)
......@@ -416,7 +415,7 @@ class T_softplus_opts(unittest.TestCase):
out = T.log(1 - sigmoid(x).reshape([x.size]))
f = theano.function([x], out, mode=self.m)
topo = f.maker.fgraph.toposort()
#assert len(topo) == 3
# assert len(topo) == 3
assert any(isinstance(node.op, T.Reshape) for node in topo)
assert any(isinstance(getattr(node.op, 'scalar_op', None),
theano.tensor.nnet.sigm.ScalarSoftplus)
......@@ -454,7 +453,7 @@ class T_sigmoid_utils(unittest.TestCase):
mul_tree = parse_mul_tree(tree)
assert parse_mul_tree(compute_mul(mul_tree)) == mul_tree
assert theano.gof.graph.is_same_graph(
compute_mul(parse_mul_tree(tree)), tree)
compute_mul(parse_mul_tree(tree)), tree)
def test_parse_mul_tree(self):
x, y, z = tensor.vectors('x', 'y', 'z')
......@@ -463,7 +462,7 @@ class T_sigmoid_utils(unittest.TestCase):
assert parse_mul_tree(-x * y) == [False, [[True, x], [False, y]]]
assert parse_mul_tree(-x) == [True, x]
assert parse_mul_tree((x * y) * -z) == [
False, [[False, [[False, x], [False, y]]], [True, z]]]
False, [[False, [[False, x], [False, y]]], [True, z]]]
def test_is_1pexp(self):
backup = config.warn.identify_1pexp_bug
......
......@@ -72,14 +72,6 @@ whitelist_flake8 = [
"tensor/tests/test_blas_scipy.py",
"tensor/tests/test_mpi.py",
"tensor/nnet/__init__.py",
"tensor/nnet/tests/__init__.py",
"tensor/nnet/tests/test_conv.py",
"tensor/nnet/tests/test_neighbours.py",
"tensor/nnet/tests/test_nnet.py",
"tensor/nnet/tests/test_conv3d2d.py",
"tensor/nnet/tests/test_conv3d.py",
"tensor/nnet/tests/speed_test_conv.py",
"tensor/nnet/tests/test_sigm.py",
"tensor/signal/__init__.py",
"tensor/signal/tests/__init__.py",
"scalar/__init__.py",
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论