提交 29b9b721 authored 作者: fsavard's avatar fsavard

Merge

......@@ -601,7 +601,7 @@ dimensions, see :meth:`_tensor_py_operators.dimshuffle`
Create a matrix by filling the shape of `a` with `b`
.. function:: eye(n, m = None, k = 0, dtype='float64')
.. function:: eye(n, m = None, k = 0, dtype=theano.config.floatX)
:param n: number of rows in output (value or theano scalar)
:param m: number of columns in output (value or theano scalar)
......@@ -628,6 +628,7 @@ Reductions
:Parameter: *x* - symbolic Tensor (or compatible)
:Parameter: *axis* - axis along which to compute the maximum
:Returns: the maximum value along a given axis
:note: see maximum for elemwise max
If axis=None, then axis is assumed to be ndim(x)-1
......@@ -636,6 +637,7 @@ Reductions
:Parameter: *x* - symbolic Tensor (or compatible)
:Parameter: *axis* - axis along which to compute the minimum
:Returns: the minimum value along a given axis
:note: see minimum for elemwise min
if axis=None, then axis is assumed to be ndim(x)-1
......@@ -920,6 +922,14 @@ Mathematical
Returns a variable representing the exponential of a, ie e^a.
.. function:: maximum(a,b)
Returns a variable representing the maximum element by element of a and b
.. function:: minimum(a,b)
Returns a variable representing the minimum element by element of a and b
.. function:: neg(a)
Returns a variable representing the negation of `a` (also ``-a``).
......
......@@ -393,16 +393,17 @@ class T_picklefunction(unittest.TestCase):
old_default_opt = config.optimizer
old_default_link = config.linker
try:
str_f = cPickle.dumps(f)
config.mode = 'Mode'
config.linker = 'py'
config.optimizer = 'None'
g = cPickle.loads(str_f)
#print g.maker.mode
#print compile.mode.default_mode
except NotImplementedError, e:
if e[0].startswith('DebugMode is not pickl'):
g = 'ok'
try:
str_f = cPickle.dumps(f)
config.mode = 'Mode'
config.linker = 'py'
config.optimizer = 'None'
g = cPickle.loads(str_f)
#print g.maker.mode
#print compile.mode.default_mode
except NotImplementedError, e:
if e[0].startswith('DebugMode is not pickl'):
g = 'ok'
finally:
config.mode = old_default_mode
config.optimizer = old_default_opt
......
......@@ -46,7 +46,7 @@ def raise_with_op(op, exc_info = None):
except AttributeError:
trace = ()
exc_value.__thunk_trace__ = trace
exc_value.args = exc_value.args + (op, )
exc_value.args = exc_value.args + (op, ) + ('Sequence id of Apply node='+str(op.env.toposort().index(op)),)
raise exc_type, exc_value, exc_trace
......
......@@ -7,7 +7,9 @@ def run(TF):
if TF and RUN_TESTS:
print 'running test', f.__name__
f()
return f if RUN_TESTS else None
if RUN_TESTS:
return f
else: return None
return deco
......
......@@ -726,6 +726,42 @@ invert = Invert()
# Arithmetic
##############
class Maximum(BinaryScalarOp):
    """Elemwise scalar maximum of two inputs.

    Declared commutative and associative so graph optimizations may
    reorder/regroup chains of maximums.
    """
    commutative = True
    associative = True
    def impl(self, *inputs):
        # Python-side implementation: builtin max over the two inputs.
        return max(inputs)
    def c_code(self, node, name, (x,y), (z, ), sub):
        # C-side implementation: a ternary comparison mirroring impl().
        # NOTE(review): on a tie this picks y, while grad() treats both
        # inputs as attaining the max — presumably intentional; confirm.
        return "%(z)s = ((%(y)s)>(%(x)s)? (%(y)s):(%(x)s));" %locals()
    def grad(self, (x, y), (gz, )):
        # Gradient flows to whichever input equals the maximum
        # (both inputs on a tie); None for non-differentiable types.
        gx, gy = None, None
        if x.type in grad_types:
            gx = eq(maximum(x,y), x)*gz
        if y.type in grad_types:
            gy = eq(maximum(x,y), y)*gz
        return (gx,gy)
# Module-level singleton instance; output dtype is the upcast of the inputs.
maximum = Maximum(upcast_out, name = 'maximum')
class Minimum(BinaryScalarOp):
    """Elemwise scalar minimum of two inputs.

    Declared commutative and associative so graph optimizations may
    reorder/regroup chains of minimums.
    """
    commutative = True
    associative = True
    def impl(self, *inputs):
        # Python-side implementation: builtin min over the two inputs.
        return min(inputs)
    def c_code(self, node, name, (x,y), (z, ), sub):
        # C-side implementation: a ternary comparison mirroring impl().
        # NOTE(review): on a tie this picks y, while grad() treats both
        # inputs as attaining the min — presumably intentional; confirm.
        return "%(z)s = ((%(y)s)<(%(x)s)? (%(y)s):(%(x)s));" %locals()
    def grad(self, (x, y), (gz, )):
        # Gradient flows to whichever input equals the minimum
        # (both inputs on a tie); None for non-differentiable types.
        gx, gy = None, None
        if x.type in grad_types:
            gx = eq(minimum(x,y), x)*gz
        if y.type in grad_types:
            gy = eq(minimum(x,y), y)*gz
        return (gx,gy)
# Module-level singleton instance; output dtype is the upcast of the inputs.
minimum = Minimum(upcast_out, name = 'minimum')
class Add(ScalarOp):
identity = 0
commutative = True
......
......@@ -1703,7 +1703,7 @@ def zeros_like(model):
return fill(model, constant(0.0, dtype=model.type.dtype))
class Eye(gof.Op):
def __init__(self, dtype='float64'):
def __init__(self, dtype=config.floatX):
self.dtype = dtype
def make_node(self,n,m,k):
n = as_tensor_variable(n)
......@@ -1724,7 +1724,7 @@ class Eye(gof.Op):
return hash(self.dtype) ^ hash(type(self))
def eye(n, m=None, k = 0, dtype = 'float64'):
def eye(n, m=None, k = 0, dtype = config.floatX):
if m == None:
m = n
localop = Eye(dtype)
......@@ -2029,6 +2029,17 @@ setdefault = default # legacy
##########################
# Arithmetics
##########################
@_scal_elemwise
def maximum(x,y):
    """elemwise maximum. See max for the maximum in one tensor

    Returns a variable representing the element-by-element maximum of
    `x` and `y`.
    """
    # see decorator for function body
@_scal_elemwise
def minimum(x,y):
    """elemwise minimum. See min for the minimum in one tensor

    Returns a variable representing the element-by-element minimum of
    `x` and `y`.
    """
    # see decorator for function body
def div_proxy(x, y):
"""Proxy for either true_div or int_div, depending on types of x, y.
......@@ -3172,9 +3183,10 @@ class ARange(Op):
if is_constant_value(start, 0):
return [(cast(stop, 'int64'),)]
else:
return [(theano.tensor.max([cast(stop-start, 'int64'),0]),)]
return [(maximum(cast(stop-start, 'int64'),0),)]
else:
return [(theano.tensor.max([cast(ceil(cast((stop-start),'float64')/step),'int64'),0]),)]
return [(maximum(cast(ceil(cast((stop-start),'float64')
/step),'int64'),0),)]
def perform(self, node, (start, stop, step), (out,)):
start = start.item()
......
......@@ -168,6 +168,14 @@ fill_inplace = second_inplace
pprint.assign(fill_inplace, printing.FunctionPrinter('fill='))
@_scal_inplace
def maximum_inplace(a, b):
    # Docstring fixed: previously said "addition" (copy-paste from add_inplace).
    """elementwise maximum (inplace on `a`)"""
@_scal_inplace
def minimum_inplace(a, b):
    # Docstring fixed: previously said "addition" (copy-paste from add_inplace).
    """elementwise minimum (inplace on `a`)"""
@_scal_inplace
def add_inplace(a, b):
    """elementwise addition (inplace on `a`)"""
    # body presumably generated by the _scal_inplace decorator — confirm
......
......@@ -43,17 +43,24 @@ class T_sigmoid_opts(unittest.TestCase):
# tests inv_1_plus_exp with neg
f = theano.function([x], T.fill(x,-1.0) / (1+T.exp(-x)), mode=m)
#theano.printing.debugprint(f)
assert [node.op for node in f.maker.env.toposort()] == [sigmoid,
T.inplace.neg_inplace]
assert len(f.maker.env.toposort())==1
assert str(f.maker.env.toposort()[0].op)=='Elemwise{Composite{scalar_sigmoid,neg}}'
#without fusion
#assert [node.op for node in f.maker.env.toposort()] == [sigmoid,
# T.inplace.neg_inplace]
# tests double inv_1_plus_exp with neg
# (-1)(exp(x)) / (1+exp(x))(1+exp(-x))
# = (-1)/(1+exp(-x)) * exp(x)/(1+exp(x))
# = - (sigm(x) * sigm(x))
f = theano.function([x], (T.fill(x,-1.0)*T.exp(x)) / ((1+T.exp(x))*(1+T.exp(-x))), mode=m)
theano.printing.debugprint(f)
assert [node.op for node in f.maker.env.toposort()] == [sigmoid,
T.mul, T.inplace.neg_inplace]
assert len(f.maker.env.toposort())==2
assert f.maker.env.toposort()[0].op == sigmoid
assert str(f.maker.env.toposort()[1].op)=='Elemwise{Composite{mul,neg}}'
#without fusion
#assert [node.op for node in f.maker.env.toposort()] == [sigmoid,
# T.mul, T.inplace.neg_inplace]
def test_1msigmoid(self):
if not register_local_1msigmoid:
......
import traceback
import numpy
import theano.tensor.basic
from basic import TensorType, _tensor_py_operators
......
......@@ -9,6 +9,7 @@ import numpy
import theano
import theano.tensor as T
from theano.gof.python25 import any
def gen_data():
......@@ -301,7 +302,7 @@ def test_mlp():
y:train_set_y[index*batch_size:(index+1)*batch_size]},
mode=mode)
for i in train_model.maker.env.toposort(): print i
theano.printing.pydotprint(train_model)
#theano.printing.pydotprint(train_model)
assert any( [isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.env.toposort()])
train_model =theano.function( inputs = [index],
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论