提交 b34bc31d authored 作者: Bart van Merrienboer

Fix E251: unexpected spaces around keyword / parameter equals

上级 83c0cd06
......@@ -1624,8 +1624,8 @@ class _Linker(gof.link.LocalLinker):
self.no_recycling = no_recycling
return self
def make_all(self, profiler = None, input_storage = None
, output_storage = None):
def make_all(self, profiler=None, input_storage=None
, output_storage=None):
if 1:
# can't import at toplevel because of circular import TODO:
......@@ -2176,8 +2176,8 @@ class _Maker(FunctionMaker): # inheritance buys a few helper functions
0: silent)"""
def __init__(self, inputs, outputs, optimizer, mode,
accept_inplace = False,
function_builder = Function,
accept_inplace=False,
function_builder=Function,
profile=None,
on_unused_input=None):
"""
......
......@@ -744,7 +744,7 @@ def _pickle_Function(f):
def _constructor_Function(maker, input_storage, inputs_data):
if not theano.config.unpickle_function:
return None
f = maker.create(input_storage, trustme = True)
f = maker.create(input_storage, trustme=True)
assert len(f.input_storage) == len(inputs_data)
for container, x in zip(f.input_storage, inputs_data):
assert (container.data is x) or \
......
......@@ -293,7 +293,7 @@ class T_function(unittest.TestCase):
x, s = T.scalars('xs')
inc = function([x, In(s, update=(s+x), value=10.0)], [])
dec = function([x, In(s, update=(s-x), value=inc.container[s],
implicit = False)], [])
implicit=False)], [])
self.assertTrue(dec[s] is inc[s])
inc[s] = 2
self.assertTrue(dec[s] == 2)
......@@ -467,16 +467,16 @@ class T_picklefunction(unittest.TestCase):
# Ensure that shared containers remain shared after a deep copy.
a, x = T.scalars('ax')
h = function([In(a, value = 0.0)], a)
f = function([x, In(a, value=h.container[a], implicit = True)], x + a)
h = function([In(a, value=0.0)], a)
f = function([x, In(a, value=h.container[a], implicit=True)], x + a)
try:
memo = {}
ac = copy.deepcopy(a)
memo.update({id(a): ac})
hc = copy.deepcopy(h, memo = memo)
hc = copy.deepcopy(h, memo=memo)
memo.update({id(h): hc})
fc = copy.deepcopy(f, memo = memo)
fc = copy.deepcopy(f, memo=memo)
except NotImplementedError, e:
if e[0].startswith('DebugMode is not picklable'):
return
......
......@@ -8,8 +8,8 @@ from theano.tensor.nnet import sigmoid
class NNet(object):
def __init__(self,
input = tensor.dvector('input'),
target = tensor.dvector('target'),
input=tensor.dvector('input'),
target=tensor.dvector('target'),
n_input=1, n_hidden=1, n_output=1, lr=1e-3, **kw):
super(NNet, self).__init__(**kw)
......@@ -29,9 +29,9 @@ class NNet(object):
self.w2: self.w2 - self.lr * tensor.grad(self.cost, self.w2)}
self.sgd_step = pfunc(
params = [self.input, self.target],
outputs = [self.output, self.cost],
updates = self.sgd_updates)
params=[self.input, self.target],
outputs=[self.output, self.cost],
updates=self.sgd_updates)
self.compute_output = pfunc([self.input], self.output)
......@@ -42,7 +42,7 @@ class TestNnet(unittest.TestCase):
def test_nnet(self):
rng = numpy.random.RandomState(1827)
data = rng.rand(10, 4)
nnet = NNet(n_input = 3, n_hidden = 10)
nnet = NNet(n_input=3, n_hidden=10)
for epoch in range(3):
mean_cost = 0
for x in data:
......
......@@ -4,14 +4,14 @@ if 0:
def __init__(self,
local_optimizers,
failure_callback = None,
max_depth = None,
max_use_ratio = None):
failure_callback=None,
max_depth=None,
max_use_ratio=None):
super(EquilibriumOptimizer, self).__init__(
None,
ignore_newtrees = False,
failure_callback = failure_callback)
ignore_newtrees=False,
failure_callback=failure_callback)
self.local_optimizers = local_optimizers
self.max_depth = max_depth
......
......@@ -14,7 +14,7 @@ from theano.gof.toolbox import ReplaceValidate
from copy import copy
PatternOptimizer = lambda p1, p2, ign=True: OpKeyOptimizer(PatternSub(p1, p2), ignore_newtrees=ign)
OpSubOptimizer = lambda op1, op2, fail=NavigatorOptimizer.warn_ignore, ign=True: TopoOptimizer(OpSub(op1, op2), ignore_newtrees=ign, failure_callback = fail)
OpSubOptimizer = lambda op1, op2, fail=NavigatorOptimizer.warn_ignore, ign=True: TopoOptimizer(OpSub(op1, op2), ignore_newtrees=ign, failure_callback=fail)
def as_variable(x):
......
......@@ -92,8 +92,8 @@ class X:
def str(self, inputs, outputs):
return as_string(inputs, outputs,
leaf_formatter = self.leaf_formatter,
node_formatter = self.node_formatter)
leaf_formatter=self.leaf_formatter,
node_formatter=self.node_formatter)
class TestStr(X):
......
......@@ -26,7 +26,7 @@ class MyType(Type):
def MyVariable(name):
return Variable(MyType(), None, None, name = name)
return Variable(MyType(), None, None, name=name)
class MyOp(Op):
......@@ -73,8 +73,8 @@ op5 = MyOp('Op5')
op6 = MyOp('Op6')
op_d = MyOp('OpD', {0: [0]})
op_y = MyOp('OpY', x = 1)
op_z = MyOp('OpZ', x = 1)
op_y = MyOp('OpY', x=1)
op_z = MyOp('OpZ', x=1)
......@@ -192,9 +192,9 @@ class TestPatternOptimizer:
assert str(g) == "[Op1(x)]"
def test_constant_unification(self):
x = Constant(MyType(), 2, name = 'x')
x = Constant(MyType(), 2, name='x')
y = MyVariable('y')
z = Constant(MyType(), 2, name = 'z')
z = Constant(MyType(), 2, name='z')
e = op1(op1(x, y), y)
g = Env([y], [e])
PatternOptimizer((op1, z, '1'),
......@@ -294,8 +294,8 @@ class TestMergeOptimizer:
def test_constant_merging(self):
x = MyVariable('x')
y = Constant(MyType(), 2, name = 'y')
z = Constant(MyType(), 2, name = 'z')
y = Constant(MyType(), 2, name='y')
z = Constant(MyType(), 2, name='z')
e = op1(op2(x, y), op2(x, y), op2(x, z))
g = Env([x, y, z], [e])
MergeOptimizer().optimize(g)
......@@ -340,8 +340,8 @@ class TestMergeOptimizer:
def test_identical_constant_args(self):
x = MyVariable('x')
y = Constant(MyType(), 2, name = 'y')
z = Constant(MyType(), 2, name = 'z')
y = Constant(MyType(), 2, name='y')
z = Constant(MyType(), 2, name='z')
ctv_backup = config.compute_test_value
config.compute_test_value = 'off'
try:
......@@ -366,7 +366,7 @@ class TestEquilibrium(object):
PatternSub((op4, 'x', 'y'), (op1, 'x', 'y')),
PatternSub((op3, (op2, 'x', 'y')), (op4, 'x', 'y'))
],
max_use_ratio = 10)
max_use_ratio=10)
opt.optimize(g)
# print g
assert str(g) == '[Op2(x, y)]'
......@@ -383,7 +383,7 @@ class TestEquilibrium(object):
PatternSub((op5, 'x', 'y'), (op6, 'x', 'y')),
PatternSub((op6, 'x', 'y'), (op2, 'x', 'y'))
],
max_use_ratio = 10)
max_use_ratio=10)
opt.optimize(g)
assert str(g) == '[Op2(x, y)]'
......@@ -403,7 +403,7 @@ class TestEquilibrium(object):
PatternSub((op4, 'x', 'y'), (op1, 'x', 'y')),
PatternSub((op3, (op2, 'x', 'y')), (op4, 'x', 'y'))
],
max_use_ratio = 1. / len(g.apply_nodes)) # each opt can only be applied once
max_use_ratio=1. / len(g.apply_nodes)) # each opt can only be applied once
opt.optimize(g)
finally:
_logger.setLevel(oldlevel)
......
......@@ -294,7 +294,7 @@ class PureType(object):
"""Optional: return a message explaining the output of is_valid_value"""
return "none"
def make_variable(self, name = None):
def make_variable(self, name=None):
"""Return a new `Variable` instance of Type `self`.
:Parameters:
......@@ -302,7 +302,7 @@ class PureType(object):
A pretty string for printing and debugging.
"""
return self.Variable(self, name = name)
return self.Variable(self, name=name)
def make_constant(self, value, name=None):
return self.Constant(type=self, data=value, name=name)
......
......@@ -32,7 +32,7 @@ class Variable:
If that doesn't sound like what you're doing, the Variable class you
want is probably theano.gof.graph.Variable
"""
def __init__(self, name = "?"):
def __init__(self, name="?"):
self.name = name
def __str__(self):
return self.__class__.__name__ + "(" + ", ".join(["%s=%s" % (key, value) for key, value in self.__dict__.items()]) + ")"
......@@ -111,7 +111,7 @@ class Unification:
with each other or with tangible values.
"""
def __init__(self, inplace = False):
def __init__(self, inplace=False):
"""
If inplace is False, the merge method will return a new Unification
that is independent from the previous one (which allows backtracking).
......
......@@ -2069,7 +2069,7 @@ class GpuCAReduce(GpuOp):
# TODO: This kernel is pretty inefficient in terms of reading, because if A is
# c_contiguous (typical case) then each warp is accessing non-contigous
# memory (a segment of a column).
reducebuf = self._k_reduce_buf('Z[blockIdx.x * sZ0]', node, nodename, sub = {})
reducebuf = self._k_reduce_buf('Z[blockIdx.x * sZ0]', node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
"A[i0 * sA0 + i1 * sA1 + blockIdx.x * sA2]",
{}, True)
......
......@@ -253,9 +253,9 @@ class NaiveAlgo(object):
# TODO: What if the scalar_op needs support_code??
task_code = self.scalar_op.c_code(
Apply(self.scalar_op,
[scalar.Scalar(dtype = input.type.dtype).make_variable()
[scalar.Scalar(dtype=input.type.dtype).make_variable()
for input in node.inputs],
[scalar.Scalar(dtype = output.type.dtype).make_variable()
[scalar.Scalar(dtype=output.type.dtype).make_variable()
for output in node.outputs])
, nodename + '_scalar_'
, get_str_list_logical_scalar(node, value_str='value0[%i]')
......@@ -391,9 +391,9 @@ class NaiveAlgo(object):
def task_code(d):
print >> sio, self.scalar_op.c_code(
Apply(self.scalar_op,
[scalar.Scalar(dtype = input.type.dtype).make_variable()
[scalar.Scalar(dtype=input.type.dtype).make_variable()
for input in node.inputs],
[scalar.Scalar(dtype = output.type.dtype).make_variable()
[scalar.Scalar(dtype=output.type.dtype).make_variable()
for output in node.outputs])
, nodename + '_scalar_'
, ['i%i_data_%i[0]'%(ipos, d) for ipos, i in enumerate(node.inputs)]
......@@ -465,9 +465,9 @@ class NaiveAlgo(object):
# TODO: What if the scalar_op needs support_code??
task_code = self.scalar_op.c_code(
Apply(self.scalar_op,
[scalar.Scalar(dtype = input.type.dtype).make_variable()
[scalar.Scalar(dtype=input.type.dtype).make_variable()
for input in node.inputs],
[scalar.Scalar(dtype = output.type.dtype).make_variable()
[scalar.Scalar(dtype=output.type.dtype).make_variable()
for output in node.outputs])
, nodename + '_scalar_'
#, ['i%i_data[i]'%ipos for ipos, i in enumerate(node.inputs)]
......
......@@ -105,11 +105,11 @@ class Kouh2008(object):
if use_softmax_w:
rval = cls(w_list, x_list, p, q, r, k,
params = [p_unbounded, q_unbounded, r_unbounded, k_unbounded, w] + params,
params=[p_unbounded, q_unbounded, r_unbounded, k_unbounded, w] + params,
updates=updates)
else:
rval = cls(w_list, x_list, p, q, r, k,
params = [p_unbounded, q_unbounded, r_unbounded, k_unbounded] + w_list + params,
params=[p_unbounded, q_unbounded, r_unbounded, k_unbounded] + w_list + params,
updates=updates)
rval.p_unbounded = p_unbounded
rval.q_unbounded = q_unbounded
......
......@@ -21,8 +21,8 @@ class DebugLinker(gof.WrapLinker):
if debug_post is None:
debug_post = []
gof.WrapLinker.__init__(self,
linkers = linkers,
wrapper = self.wrapper)
linkers=linkers,
wrapper=self.wrapper)
self.fgraph = None
......@@ -65,7 +65,7 @@ class DebugLinker(gof.WrapLinker):
for thunk, linker in zip(thunks, self.linkers):
for r in node.outputs:
try:
r.type.filter(r.value, strict = True)
r.type.filter(r.value, strict=True)
except TypeError, e:
exc_type, exc_value, exc_trace = sys.exc_info()
exc = DebugException(e, "The output %s was filled with data with the wrong type using linker " \
......@@ -170,7 +170,7 @@ def print_sep(i, node, *thunks):
print "==================================="
import numpy
def numpy_compare(a, b, tolerance = 1e-6):
def numpy_compare(a, b, tolerance=1e-6):
if isinstance(a, numpy.ndarray):
return (abs(a - b) <= tolerance).all()
else:
......@@ -183,6 +183,6 @@ def numpy_debug_linker(pre, post=None):
return DebugLinker([gof.OpWiseCLinker],
pre,
post,
compare_fn = numpy_compare)
compare_fn=numpy_compare)
......@@ -2199,7 +2199,7 @@ class GpuCAReduceCuda(HideC, CAReduceDtype):
# TODO: This kernel is pretty inefficient in terms of reading, because if A is
# c_contiguous (typical case) then each warp is accessing non-contigous
# memory (a segment of a column).
reducebuf = self._k_reduce_buf('Z[blockIdx.x * sZ0]', node, nodename, sub = {})
reducebuf = self._k_reduce_buf('Z[blockIdx.x * sZ0]', node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
"A[i0 * sA0 + i1 * sA1 + blockIdx.x * sA2]",
{}, True)
......
......@@ -314,7 +314,7 @@ if 0:
y=T.dmatrix(), #our targets
v=T.dmatrix(), #first layer weights
c=T.dvector(), #first layer bias
l2_coef = T.dscalar()
l2_coef=T.dscalar()
):
pred = T.dot(x, v) + c
sse = T.sum((pred - y) * (pred - y))
......@@ -415,8 +415,8 @@ if 0:
@SymbolicModule_fromFn
def PCA(
x = T.dmatrix(),
var_thresh = T.dscalar()
x=T.dmatrix(),
var_thresh=T.dscalar()
):
# naive version, yes
s, v, d = T.svd(x)
......
......@@ -8,8 +8,8 @@ def test_001():
state = theano.tensor.unbroadcast(
theano.tensor.shape_padleft(x0), 0)
out, _ = scan.scan(lambda x: x+numpy.float32(1),
states = state,
n_steps = 5)
states=state,
n_steps=5)
fn = theano.function([x0], out[0])
val_x0 = numpy.float32([1, 2, 3])
assert numpy.all(fn(val_x0) == val_x0 +5)
......@@ -24,8 +24,8 @@ def test_002():
state = theano.tensor.set_subtensor(state[0], x0)
out, _ = scan.scan(lambda x: x+numpy.float32(1),
states = state,
n_steps = 5)
states=state,
n_steps=5)
fn = theano.function([x0], out)
val_x0 = numpy.float32([1, 2, 3])
assert numpy.all(fn(val_x0)[-1] == val_x0 +5)
......@@ -43,8 +43,8 @@ def test_003():
out, _ = scan.scan(lambda s, x: x+s,
sequences=sq,
states = state,
n_steps = 5)
states=state,
n_steps=5)
fn = theano.function([sq, x0], out)
val_x0 = numpy.float32([1, 2, 3])
val_sq = numpy.float32([1, 2, 3, 4, 5])
......@@ -56,8 +56,8 @@ def test_004():
nst = theano.tensor.iscalar('nst')
out, _ = scan.scan(lambda s: s+numpy.float32(1),
sequences=sq,
states = [],
n_steps = nst)
states=[],
n_steps=nst)
fn = theano.function([sq, nst], out)
val_sq = numpy.float32([1, 2, 3, 4, 5])
assert numpy.all(fn(val_sq, 5) == val_sq +1)
......@@ -67,8 +67,8 @@ def test_005():
nst = theano.tensor.iscalar('nst')
out, _ = scan.scan(lambda s: s+numpy.float32(1),
sequences=sq,
states = [None],
n_steps = nst)
states=[None],
n_steps=nst)
fn = theano.function([sq, nst], out)
val_sq = numpy.float32([1, 2, 3, 4, 5])
assert numpy.all(fn(val_sq, 5) == val_sq +1)
......
......@@ -213,7 +213,7 @@ class TheanoObject(object):
v = tensor.lscalar(name)
v._theanoclass_container = \
theano.gof.Container(v,
storage = [theano._asarray(ival, dtype='int64')],
storage=[theano._asarray(ival, dtype='int64')],
readonly=False)
assert not hasattr(v, 'set')
assert not hasattr(v, 'get')
......
......@@ -128,7 +128,7 @@ class test_composite(unittest.TestCase):
e = mul(add(70.0, y), div_proxy(x, y))
C = Composite([x, y], [e])
c = C.make_node(x, y)
assert "70.0" in c.op.c_code(c, 'dummy', ['x', 'y'], ['z'], dict(id = 0))
assert "70.0" in c.op.c_code(c, 'dummy', ['x', 'y'], ['z'], dict(id=0))
# print c.c_code(['x', 'y'], ['z'], dict(id = 0))
g = FunctionGraph([x, y], [c.out])
fn = gof.DualLinker().accept(g).make_function()
......@@ -417,8 +417,8 @@ class test_div(unittest.TestCase):
assert isinstance((a/c).owner.op, TrueDiv)
def test_grad_gt():
x = float32(name = 'x')
y = float32(name = 'y')
x = float32(name='x')
y = float32(name='y')
z = x > y
g = theano.gradient.grad(z, y)
assert g.eval({ y : 1. }) == 0.
......
......@@ -1946,10 +1946,10 @@ class T_Scan(unittest.TestCase):
# hidden and outputs of the entire sequence
[h, y], _ = theano.scan(
fn=one_step,
sequences = dict(input=x),
sequences=dict(input=x),
# corresponds to the return type of one_step
outputs_info = [dict(initial=h0, taps=[-2, -1]), None],
non_sequences = [W_ih, W_hh, b_h, W_ho, b_o])
outputs_info=[dict(initial=h0, taps=[-2, -1]), None],
non_sequences=[W_ih, W_hh, b_h, W_ho, b_o])
# target values
t = tensor.matrix()
......@@ -3784,7 +3784,7 @@ class T_Scan(unittest.TestCase):
A = tensor.matrix('A')
B = tensor.matrix('B')
S, _ = theano.scan(lambda x1, x2, u: u + tensor.dot(x1, x2),
sequences = [A.dimshuffle(0, 1, 'x'),
sequences=[A.dimshuffle(0, 1, 'x'),
B.dimshuffle(0, 'x', 1)],
outputs_info=[tensor.zeros_like(A)])
f = theano.function([A, B], S.owner.inputs[0][-1])
......@@ -3925,14 +3925,14 @@ class T_Scan(unittest.TestCase):
inps = tensor.vector()
state = tensor.scalar()
y1, _ = theano.scan(lambda x, y: x*y,
sequences = inps,
outputs_info = state,
n_steps = 5)
sequences=inps,
outputs_info=state,
n_steps=5)
y2, _ = theano.scan(lambda x, y : (x+y, theano.scan_module.until(x>0)),
sequences = inps,
outputs_info = state,
n_steps = 5)
sequences=inps,
outputs_info=state,
n_steps=5)
scan_node1 = y1.owner.inputs[0].owner
assert isinstance(scan_node1.op, theano.scan_module.scan_op.Scan)
scan_node2 = y2.owner.inputs[0].owner
......
......@@ -315,8 +315,8 @@ def make_reordered_loop(init_loop_orders, olv_index, dtypes, inner_task, sub, op
declare_totals = """
int init_totals[%(nnested)s] = {%(totals)s};
""" % dict(
nnested = nnested,
totals = ', '.join(totals)
nnested=nnested,
totals=', '.join(totals)
)
# Sort totals to match the new order that was computed by sorting
......@@ -354,9 +354,9 @@ def make_reordered_loop(init_loop_orders, olv_index, dtypes, inner_task, sub, op
int init_strides[%(nvars)i][%(nnested)i] = {
%(strides)s
};""" % dict(
nvars = nvars,
nnested = nnested,
strides = ', \n'.join(
nvars=nvars,
nnested=nnested,
strides=', \n'.join(
', '.join(get_loop_strides(lo, i))
for i, lo in enumerate(init_loop_orders)
if len(lo)>0))
......
......@@ -26,7 +26,7 @@ class ConvGrad3D(theano.Op):
WShape_ = T.as_tensor_variable(WShape)
dCdH_ = T.as_tensor_variable(dCdH)
return theano.Apply(self, inputs=[V_, d_, WShape_, dCdH_], outputs = [ T.TensorType(V_.dtype, (False, False, False, False, False))() ] )
return theano.Apply(self, inputs=[V_, d_, WShape_, dCdH_], outputs=[ T.TensorType(V_.dtype, (False, False, False, False, False))() ] )
def infer_shape(self, node, input_shapes):
V, d, W_shape, dCdH = node.inputs
......
......@@ -33,7 +33,7 @@ class ConvTransp3D(theano.Op):
else:
RShape_ = T.as_tensor_variable([-1, -1, -1])
return theano.Apply(self, inputs=[W_, b_, d_, H_, RShape_], outputs = [ T.TensorType(H_.dtype, (False, False, False, False, False))() ] )
return theano.Apply(self, inputs=[W_, b_, d_, H_, RShape_], outputs=[ T.TensorType(H_.dtype, (False, False, False, False, False))() ] )
def infer_shape(self, node, input_shapes):
W, b, d, H, RShape = node.inputs
......
......@@ -225,7 +225,7 @@ def conv3d(signals, filters,
filters.reshape(_filters_shape_4d),
image_shape=conv2d_signal_shape,
filter_shape=conv2d_filter_shape,
border_mode = border_mode[1]) # ignoring border_mode[2]
border_mode=border_mode[1]) # ignoring border_mode[2]
# reshape the output to restore its original size
# shape = Ns, Ts, Nf, Tf, W-Wf+1, H-Hf+1
......
......@@ -161,8 +161,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
# DownsampleFactorMax op
maxpool_op = DownsampleFactorMax(maxpoolshp,
ignore_border=
ignore_border)(images)
ignore_border=ignore_border)(images)
f = function([images], maxpool_op)
output_val = f(imval)
utt.assert_allclose(output_val, numpy_output_val)
......@@ -298,8 +297,7 @@ class TestDownsampleFactorMax(utt.InferShapeTester):
# print 'ignore_border =', ignore_border
def mp(input):
return DownsampleFactorMax(maxpoolshp,
ignore_border=
ignore_border)(input)
ignore_border=ignore_border)(input)
utt.verify_grad(mp, [imval], rng=rng)
def test_DownsampleFactorMax_grad_st(self):
......
......@@ -78,7 +78,7 @@ class LogisticRegression(object):
"""
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(value=numpy.zeros((n_in, n_out), dtype = theano.config.floatX),
self.W = theano.shared(value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX),
name=name_prefix+'W')
# compute vector of class-membership probabilities in symbolic form
......@@ -122,7 +122,7 @@ class LogisticRegression(object):
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, activation = T.tanh, name_prefix=''):
def __init__(self, rng, input, n_in, n_out, activation=T.tanh, name_prefix=''):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
......@@ -155,10 +155,10 @@ class HiddenLayer(object):
# the output of uniform if converted using asarray to dtype
# theano.config.floatX so that the code is runable on GPU
W_values = numpy.asarray( rng.uniform( \
low = -numpy.sqrt(6./(n_in+n_out)), \
high = numpy.sqrt(6./(n_in+n_out)), \
size = (n_in, n_out)), dtype = theano.config.floatX)
self.W = theano.shared(value = W_values, name=name_prefix+'W')
low=-numpy.sqrt(6./(n_in+n_out)), \
high=numpy.sqrt(6./(n_in+n_out)), \
size=(n_in, n_out)), dtype=theano.config.floatX)
self.W = theano.shared(value=W_values, name=name_prefix+'W')
self.output = T.dot(input, self.W)
# parameters of the model
......@@ -205,16 +205,16 @@ class MLP(object):
# translate into a TanhLayer connected to the LogisticRegression
# layer; this can be replaced by a SigmoidalLayer, or a layer
# implementing any other nonlinearity
self.hiddenLayer = HiddenLayer(rng = rng, input = input,
n_in = n_in, n_out = n_hidden,
activation = T.tanh, name_prefix='hid_')
self.hiddenLayer = HiddenLayer(rng=rng, input=input,
n_in=n_in, n_out=n_hidden,
activation=T.tanh, name_prefix='hid_')
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input = self.hiddenLayer.output,
n_in = n_hidden,
n_out = n_out, name_prefix='log_')
input=self.hiddenLayer.output,
n_in=n_hidden,
n_out=n_out, name_prefix='log_')
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
......@@ -275,7 +275,7 @@ def test_mlp():
rng = numpy.random.RandomState(1234)
# construct the MLP class
classifier = MLP( rng = rng, input=x, n_in=28*28, n_hidden = 500, n_out=10)
classifier = MLP( rng=rng, input=x, n_in=28*28, n_hidden=500, n_out=10)
# the cost we minimize during training is the negative log likelihood of
# the model.
......@@ -296,8 +296,8 @@ def test_mlp():
updates2 = OrderedDict()
updates2[classifier.hiddenLayer.params[0]]=T.grad(cost, classifier.hiddenLayer.params[0])
train_model =theano.function( inputs = [index],
updates = updates2,
train_model =theano.function( inputs=[index],
updates=updates2,
givens={
x: train_set_x[index*batch_size:(index+1)*batch_size],
y: train_set_y[index*batch_size:(index+1)*batch_size]},
......@@ -307,8 +307,8 @@ def test_mlp():
assert any([isinstance(i.op, T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])
# Even without FeatureShape
train_model =theano.function( inputs = [index],
updates = updates2,
train_model =theano.function( inputs=[index],
updates=updates2,
mode=mode.excluding('ShapeOpt'),
givens={
x: train_set_x[index*batch_size:(index+1)*batch_size],
......
......@@ -252,13 +252,11 @@ class test_canonize(unittest.TestCase):
fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')
fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')
fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')
fvv = theano._asarray(numpy.random.rand(shp[0]), dtype=
'float32').reshape(1, shp[0])
fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])
dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')
dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')
dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')
dvv = theano._asarray(numpy.random.rand(shp[0]), dtype=
'float64').reshape(1, shp[0])
dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])
cases = [
(fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),
(fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),
......
......@@ -23,7 +23,7 @@ def makeSharedTester(shared_constructor_,
test_internal_type_,
theano_fct_,
ref_fct_,
cast_value_ = numpy.asarray,
cast_value_=numpy.asarray,
op_by_matrix_=False,
name=None,
):
......@@ -86,7 +86,7 @@ def makeSharedTester(shared_constructor_,
x = self.cast_value(x)
x_ref = self.ref_fct(x)
x_shared = self.shared_constructor(x, borrow = False)
x_shared = self.shared_constructor(x, borrow=False)
total = self.theano_fct(x_shared)
total_func = theano.function([], total)
......@@ -105,7 +105,7 @@ def makeSharedTester(shared_constructor_,
# value used to construct should not alias with internal
assert numpy.allclose(total_val, total_val_2)
x = x_shared.get_value(borrow = False)
x = x_shared.get_value(borrow=False)
x /= values_to_div
......@@ -115,7 +115,7 @@ def makeSharedTester(shared_constructor_,
assert numpy.allclose(total_val, total_val_3)
# in this case we can alias
x = x_shared.get_value(borrow = True)
x = x_shared.get_value(borrow=True)
x /= values_to_div
# this is not required by the contract but it is a feature we've
......@@ -135,7 +135,7 @@ def makeSharedTester(shared_constructor_,
x = self.cast_value(x)
x_ref = self.ref_fct(x)
x_shared = self.shared_constructor(x, borrow = False)
x_shared = self.shared_constructor(x, borrow=False)
total = self.theano_fct(x_shared)
f = theano.function([], x_shared.shape)
......@@ -158,7 +158,7 @@ def makeSharedTester(shared_constructor_,
x = self.cast_value(x)
x_ref = self.ref_fct(x)
x_shared = self.shared_constructor(x, borrow = False)
x_shared = self.shared_constructor(x, borrow=False)
total = self.theano_fct(x_shared)
f = theano.function([], x_shared.shape[1])
......@@ -179,13 +179,13 @@ def makeSharedTester(shared_constructor_,
x = self.cast_value(x)
x_ref = self.ref_fct(x)
x_shared = self.shared_constructor(x, borrow = False)
x_shared = self.shared_constructor(x, borrow=False)
total = self.theano_fct(x_shared)
total_func = theano.function([], total)
# in this case we can alias with the internal value
x = x_shared.get_value(borrow = True, return_internal_type = True)
x = x_shared.get_value(borrow=True, return_internal_type=True)
assert self.test_internal_type(x)
values_to_div = .5
......@@ -199,7 +199,7 @@ def makeSharedTester(shared_constructor_,
# implement for some type of SharedVariable.
assert numpy.allclose(self.ref_fct(x), total_func())
x = x_shared.get_value(borrow = False, return_internal_type = True)
x = x_shared.get_value(borrow=False, return_internal_type=True)
assert self.test_internal_type(x)
assert x is not x_shared.container.value
x /= values_to_div#supported by ndarray and CudaNdarray
......@@ -219,10 +219,10 @@ def makeSharedTester(shared_constructor_,
x_orig = numpy.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
x_cast = self.cast_value(x_orig)
if self.shared_constructor_accept_ndarray:
x_shared = self.shared_constructor(x_orig, borrow = False)
x_shared = self.shared_constructor(x_orig, borrow=False)
assert isinstance(x_shared.get_value(), x_orig.__class__)
x_shared = self.shared_constructor(x_cast, borrow = False)
x_shared = self.shared_constructor(x_cast, borrow=False)
assert isinstance(x_shared.get_value(), x_cast.__class__)
def test_set_value(self):
......@@ -237,7 +237,7 @@ def makeSharedTester(shared_constructor_,
x_orig = x
x_orig_copy = x.copy()
x_ref = self.ref_fct(x)
x_shared = self.shared_constructor(x, borrow = False)
x_shared = self.shared_constructor(x, borrow=False)
total = self.theano_fct(x_shared)
total_func = theano.function([], total)
......@@ -284,7 +284,7 @@ def makeSharedTester(shared_constructor_,
x = self.cast_value(x)
x_ref = self.ref_fct(x)
x_shared = self.shared_constructor(x, borrow = True)
x_shared = self.shared_constructor(x, borrow=True)
total = self.theano_fct(x_shared)
......@@ -601,18 +601,18 @@ def makeSharedTester(shared_constructor_,
return SharedTester
test_shared_options=makeSharedTester(
shared_constructor_ = tensor._shared,
dtype_ = theano.config.floatX,
get_value_borrow_true_alias_ = True,
shared_borrow_true_alias_ = True,
set_value_borrow_true_alias_ = True,
set_value_inplace_ = False,
set_cast_value_inplace_ = False,
shared_constructor_accept_ndarray_ = True,
internal_type_ = numpy.ndarray,
test_internal_type_ = lambda a: isinstance(a, numpy.ndarray),
theano_fct_ = lambda a: a*2,
ref_fct_ = lambda a: numpy.asarray((a*2)),
cast_value_ = numpy.asarray,
op_by_matrix_ = False,
shared_constructor_=tensor._shared,
dtype_=theano.config.floatX,
get_value_borrow_true_alias_=True,
shared_borrow_true_alias_=True,
set_value_borrow_true_alias_=True,
set_value_inplace_=False,
set_cast_value_inplace_=False,
shared_constructor_accept_ndarray_=True,
internal_type_=numpy.ndarray,
test_internal_type_=lambda a: isinstance(a, numpy.ndarray),
theano_fct_=lambda a: a*2,
ref_fct_=lambda a: numpy.asarray((a*2)),
cast_value_=numpy.asarray,
op_by_matrix_=False,
name='test_shared_options')
......@@ -158,7 +158,7 @@ class RecordMode(Mode):
self.record = record
self.known_fgraphs = set([])
def __init__(self, record = None, **kwargs):
def __init__(self, record=None, **kwargs):
"""
Takes either a Record object or the keyword arguments to make one.
......
......@@ -533,7 +533,7 @@ def test_undefined_cost_grad():
cost = x + y
assert cost.dtype in theano.tensor.discrete_dtypes
try:
grads = theano.tensor.grad(cost, [x, y], known_grads = {cost: NullType()() })
grads = theano.tensor.grad(cost, [x, y], known_grads={cost: NullType()() })
except theano.gradient.NullTypeGradError:
return
raise AssertionError("An undefined gradient has been ignored.")
......@@ -551,7 +551,7 @@ def test_disconnected_cost_grad():
cost = x + y
assert cost.dtype in theano.tensor.discrete_dtypes
try:
grads = theano.tensor.grad(cost, [x, y], known_grads = {cost: gradient.DisconnectedType()() },
grads = theano.tensor.grad(cost, [x, y], known_grads={cost: gradient.DisconnectedType()() },
disconnected_inputs='raise')
except theano.gradient.DisconnectedInputError:
return
......
......@@ -293,7 +293,7 @@ class test_RopLop(RopLop_checker):
lambda i, y, x1, x2, v1, v2:
(tensor.grad(y[i], x1) * v1).sum() + \
(tensor.grad(y[i], x2) * v2).sum(),
sequences = tensor.arange(output.shape[0]),
sequences=tensor.arange(output.shape[0]),
non_sequences=[output, input, filters,
ev_input, ev_filters])
scan_f = function([input, filters, ev_input, ev_filters], sy,
......
......@@ -261,7 +261,7 @@ class T_extending(unittest.TestCase):
def c_init(name, sub):
return """
%(name)s = 0.0;
""" % dict(name = name)
""" % dict(name=name)
double.c_init = c_init
def c_extract(name, sub, check_input=True):
......@@ -270,12 +270,12 @@ class T_extending(unittest.TestCase):
if (!PyFloat_Check(py_%(name)s)) {
PyErr_SetString(PyExc_TypeError, "expected a float");
%(fail)s
}""" % dict(name = name, fail = sub['fail'])
}""" % dict(name=name, fail=sub['fail'])
else:
pre = ""
return pre + """
%(name)s = PyFloat_AsDouble(py_%(name)s);
""" % dict(name = name, fail = sub['fail'])
""" % dict(name=name, fail=sub['fail'])
double.c_extract = c_extract
def c_sync( name, sub):
......@@ -287,7 +287,7 @@ class T_extending(unittest.TestCase):
Py_XINCREF(Py_None);
py_%(name)s = Py_None;
}
""" % dict(name = name)
""" % dict(name=name)
double.c_sync = c_sync
def c_cleanup(name, sub):
......@@ -321,12 +321,12 @@ class T_extending(unittest.TestCase):
def c_declare(self, name, sub, check_input=True):
return """
double %(name)s;
""" % dict(name = name)
""" % dict(name=name)
def c_init(self, name, sub):
return """
%(name)s = 0.0;
""" % dict(name = name)
""" % dict(name=name)
def c_extract(self, name, sub, check_input=True):
if(check_input):
......@@ -351,7 +351,7 @@ class T_extending(unittest.TestCase):
Py_XINCREF(Py_None);
py_%(name)s = Py_None;
}
""" % dict(name = name)
""" % dict(name=name)
def c_cleanup(self, name, sub):
return ""
......@@ -1452,7 +1452,7 @@ class T_scan(unittest.TestCase):
compute_with_bnoise = theano.function(inputs=[X, W, b_sym],
outputs=[results],
updates=updates,
allow_input_downcast = True)
allow_input_downcast=True)
x = numpy.eye(10, 2)
w = numpy.ones((2, 2))
b = numpy.ones((2))
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论