提交 eb6c4c4b authored 作者: James Bergstra's avatar James Bergstra

Merge (did I miss something in sandbox/downsample?)

......@@ -65,25 +65,27 @@ class ProfileMode(Mode):
"""
local_time = self.local_time[0]
apply_time = self.apply_time
apply_call = self.apply_call
op_time = self.op_time
op_call = self.op_call
print ''
print 'ProfileMode.print_summary()'
print '---------------------------'
print ''
print 'local_time %fs (Time spent running thunks)'% local_time
print 'Apply-wise summary: <% of local_time spent at this position> <total of local_time spent at this position> (<Apply position>, <Apply Op name>)'
atimes = [(t/local_time, t, (a[0], str(a[1]))) for a, t in apply_time.items()]
print 'Apply-wise summary: <% of local_time spent at this position> <total of local_time spent at this position> <nb_call> <Apply position> <Apply Op name>'
atimes = [(t/local_time, t, (a[0], str(a[1])), apply_call[a]) for a, t in apply_time.items()]
atimes.sort()
atimes.reverse()
tot=0
for f,t,a in atimes[:n_apply_to_print]:
for f,t,a,nb_call in atimes[:n_apply_to_print]:
tot+=t
print ' %4.1f%% %.3fs %.3fs %i %s' % (f*100, tot, t, a[0], a[1])
print ' %4.1f%% %.3fs %.3fs %i %i %s' % (f*100, tot, t, nb_call, a[0], a[1])
print ' ... (remaining %i Apply instances account for %.2f%%(%.2fs) of the runtime)'\
%(max(0, len(atimes)-n_apply_to_print),
sum(f for f, t, a in atimes[n_apply_to_print:])*100,
sum(t for f, t, a in atimes[n_apply_to_print:]))
sum(f for f, t, a, nb_call in atimes[n_apply_to_print:])*100,
sum(t for f, t, a, nb_call in atimes[n_apply_to_print:]))
flops=False
flops_msg=''
......@@ -94,13 +96,13 @@ class ProfileMode(Mode):
print '\nHACK WARNING: we print the flops for some OP, but the logic don\' always work. You need to know the internal of Theano to make it work correctly. Otherwise don\'t use!'
break
print '\nOp-wise summary: < of local_time spent on this kind of Op> <cumulative seconds> <self seconds>%s <Op name>'%(flops_msg)
print '\nOp-wise summary: < of local_time spent on this kind of Op> <cumulative seconds> <self seconds>%s <nb_call> <Op name>'%(flops_msg)
otimes = [(t/local_time, t, a, self.op_cimpl[a]) for a, t in op_time.items()]
otimes = [(t/local_time, t, a, self.op_cimpl[a], op_call[a]) for a, t in op_time.items()]
otimes.sort()
otimes.reverse()
tot=0
for f,t,a,ci in otimes[:n_ops_to_print]:
for f,t,a,ci,nb_call in otimes[:n_ops_to_print]:
tot+=t
if ci:
msg = '*'
......@@ -110,39 +112,41 @@ class ProfileMode(Mode):
if hasattr(a,'flops'):
m=a.flops*self.op_call[a]/t/1e6
if flops:
print ' %4.1f%% %.3fs %.3fs %s %7.1f %s' % (f*100, tot, t, msg, m,a)
print ' %4.1f%% %.3fs %.3fs %s %7.1f %d %s' % (f*100, tot, t, msg, m, nb_call, a)
else:
print ' %4.1f%% %.3fs %.3fs %s %s' % (f*100, tot, t, msg, a)
print ' ... (remaining %i Ops account for %6.2f%%(%.2fs) of the runtime)'\
%(max(0, len(otimes)-n_ops_to_print),
sum(f for f, t, a, ci in otimes[n_ops_to_print:])*100,
sum(t for f, t, a, ci in otimes[n_ops_to_print:]))
sum(f for f, t, a, ci, nb_call in otimes[n_ops_to_print:])*100,
sum(t for f, t, a, ci, nb_call in otimes[n_ops_to_print:]))
print '(*) Op is running a c implementation'
sop_time={}
sop_call={}
sop_c={} #map each op class to Bool. True iff all applies were done in c.
for a,t in op_time.items():
sop_time.setdefault(type(a),0)
sop_time[type(a)]+=t
sop_c.setdefault(type(a),True)
sop_c[type(a)]=sop_c[type(a)] and self.op_cimpl[a]
print '\nSingle Op-wise summary: <% of local_time spent on this kind of Op> <cumulative seconds> <self seconds> <Op name>'
sotimes = [(t/local_time, t, a, sop_c[a]) for a, t in sop_time.items()]
sop_call[type(a)]=sop_call.get(type(a),0)+op_call[a]
print '\nSingle Op-wise summary: <% of local_time spent on this kind of Op> <cumulative seconds> <self seconds> <nb_call> <Op name>'
sotimes = [(t/local_time, t, a, sop_c[a], sop_call[a]) for a, t in sop_time.items()]
sotimes.sort()
sotimes.reverse()
tot=0
for f,t,a,ci in sotimes[:n_ops_to_print]:
for f,t,a,ci, nb_call in sotimes[:n_ops_to_print]:
tot+=t
if ci:
msg = '*'
else:
msg = ' '
print ' %4.1f%% %.3fs %.3fs %s %s' % (f*100, tot, t, msg, a)
print ' %4.1f%% %.3fs %.3fs %s %d %s' % (f*100, tot, t, msg, nb_call, a)
print ' ... (remaining %i Ops account for %.2f%%(%.2fs) of the runtime)'\
%(max(0, len(sotimes)-n_ops_to_print),
sum(f for f, t, a in sotimes[n_ops_to_print:])*100,
sum(t for f, t, a in sotimes[n_ops_to_print:]))
sum(f for f, t, a, nb_call in sotimes[n_ops_to_print:])*100,
sum(t for f, t, a, nb_call in sotimes[n_ops_to_print:]))
print '(*) Op is running a c implementation'
print 'compile time: %.3fs'%self.compile_time
......
......@@ -144,9 +144,9 @@ class Container(object):
self.storage = storage
self.readonly = readonly
self.strict = strict
def __get(self):
def __get__(self):
return self.storage[0]
def __set(self, value):
def __set__(self, value):
if self.readonly:
raise Exception("Cannot set readonly storage: %s" % self.name)
try:
......@@ -160,8 +160,8 @@ class Container(object):
except Exception, e:
e.args = e.args + (('Container name "%s"' % self.name),)
raise
data = property(__get, __set)
value = property(__get, __set)
data = property(__get__, __set__)
value = property(__get__, __set__)
def __str__(self):
return "<" + str(self.storage[0]) + ">"
def __repr__(self):
......
......@@ -405,9 +405,11 @@ class TensorType(Type):
def c_extract(self, name, sub):
"""Override `CLinkerOp.c_extract` """
# TODO: make the error message print out the dtype of the
# input received.
return """
%(name)s = NULL;
type_num_%(name)s = %(type_num)s;
type_num_%(name)s = ((PyArrayObject*)py_%(name)s)->descr->type_num; //we expect %(type_num)s
if (py_%(name)s == Py_None) {
// We can either fail here or set %(name)s to NULL and rely on Ops using
// tensors to handle the NULL case, but if they fail to do so they'll end up
......@@ -419,7 +421,7 @@ class TensorType(Type):
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
%(fail)s
}
else if (((PyArrayObject*)py_%(name)s)->descr->type_num != %(type_num)s) {
else if (type_num_%(name)s != %(type_num)s) {
PyErr_SetString(PyExc_ValueError, "expected %(type_num)s");
%(fail)s
}
......@@ -1394,6 +1396,15 @@ class Repeat(gof.Op):
repeat = Repeat()
class SetDefault(gof.Op):
view_map = {0: [1]}
def make_node(self, x, default):
assert x.type == default.type
return gof.Apply(self, [x, default], [default.type()])
def perform(self, node, (x, default), (out, )):
out[0] = default.copy() if x is None else x
setdefault = SetDefault()
##########################
......@@ -1879,7 +1890,6 @@ class Split(Op):
return [join(axis, *g_outputs), None, None]
class Rebroadcast(Op):
"""
Change the input's broadcastable fields in
......@@ -1919,6 +1929,7 @@ def unbroadcast(x, *axes):
return Rebroadcast(*[(axis, False) for axis in axes])(x)
class Join(Op):
"""
Concatenate multiple `TensorVariable`s along some axis.
......@@ -1970,6 +1981,7 @@ class Join(Op):
# for the axis dimension.
# All concatenated elements must also have the same broadcastable
# dimensions.
orig = as_tensor_variable_args
if isinstance(axis, int):
bcasts = [x.type.broadcastable[0:axis] + \
x.type.broadcastable[axis + 1:] for x in as_tensor_variable_args]
......@@ -1991,7 +2003,9 @@ class Join(Op):
outputs = [tensor(dtype = out_dtype,
broadcastable = bcastable)]
return Apply(self, inputs, outputs)
node = Apply(self, inputs, outputs)
node.tag.shape_zero = None if any(not x.type.broadcastable[0] for x in orig) else len(orig)
return node
def perform(self, node, axis_and_tensors, (out, )):
axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
......@@ -2031,13 +2045,10 @@ class Join(Op):
assert isinstance(node.owner.op, Join)
if node.ndim != 1:
raise TypeError('argument must be symbolic vector')
inputs = node.owner.inputs
axis, tensors = inputs[0], inputs[1:]
# if v is a vector, then axis must be 0
# the question is whether all the inputs are broadcastable.
if all(i.broadcastable[0] for i in tensors):
return len(tensors)
raise ValueError("could not determine vector length")
if node.owner.tag.shape_zero is None:
raise ValueError("could not determine vector length")
else:
return node.owner.tag.shape_zero
@_redefine_asRoutine(Join())
def join(axis, *tensors):
......@@ -2137,7 +2148,7 @@ def get_vector_length(v):
if v.owner and isinstance(v.owner.op, Join):
try:
return join.vec_length(v)
except:
except ValueError:
pass
if v.owner and v.owner.op == shape:
return v.owner.inputs[0].type.ndim
......@@ -2554,6 +2565,7 @@ class Outer(Op):
return "outer"
outer = Outer()
#########################
# Gradient
#########################
......
......@@ -401,10 +401,12 @@ def local_softmax_with_bias(node):
vectors = []
non_vectors = []
for x_in in x.owner.inputs:
if list(x_in.type.broadcastable) == [True, False] \
and isinstance(x_in.owner.op, tensor.DimShuffle):
assert len(x_in.owner.inputs)==1
vectors.append(x_in.owner.inputs[0])
if list(x_in.type.broadcastable) == [True, False]:
if x_in.owner and isinstance(x_in.owner.op, tensor.DimShuffle):
assert len(x_in.owner.inputs)==1
vectors.append(x_in.owner.inputs[0])
else:
vectors.append(tensor.DimShuffle((True, False), (1,))(x_in))
else:
non_vectors.append(x_in)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论