Commit 9944efc5, authored by: abergeron

Merge pull request #2119 from nouiz/warning

Warning
...@@ -84,7 +84,9 @@ def _atexit_print_fn(): ...@@ -84,7 +84,9 @@ def _atexit_print_fn():
if len(to_sum) > 1: if len(to_sum) > 1:
# Make a global profile # Make a global profile
cum = copy.copy(to_sum[0]) cum = copy.copy(to_sum[0])
cum.message = "Sum of all printed profiles at exit excluding Scan op profile." msg = ("Sum of all(%d) printed profiles at exit excluding Scan op"
" profile." % len(to_sum))
cum.message = msg
for ps in to_sum[1:]: for ps in to_sum[1:]:
for attr in ["compile_time", "fct_call_time", "fct_callcount", for attr in ["compile_time", "fct_call_time", "fct_callcount",
"vm_call_time", "optimizer_time", "linker_time", "vm_call_time", "optimizer_time", "linker_time",
...@@ -655,6 +657,7 @@ class ProfileStats(object): ...@@ -655,6 +657,7 @@ class ProfileStats(object):
# track min peak memory usage # track min peak memory usage
min_max_peak = 0 min_max_peak = 0
min_peak_time = 0
def count_running_memory(order, fgraph, nodes_mem): def count_running_memory(order, fgraph, nodes_mem):
""" """
...@@ -981,7 +984,9 @@ class ProfileStats(object): ...@@ -981,7 +984,9 @@ class ProfileStats(object):
# Config: whether print min memory peak # Config: whether print min memory peak
if config.profiling.min_peak_memory: if config.profiling.min_peak_memory:
node_list = fgraph.apply_nodes node_list = fgraph.apply_nodes
ttt = time.time()
min_peak = count_minimum_peak(node_list, fgraph, nodes_mem) min_peak = count_minimum_peak(node_list, fgraph, nodes_mem)
min_peak_time += time.time() - ttt
min_max_peak = max(min_max_peak, min_peak) min_max_peak = max(min_max_peak, min_peak)
del fgraph, nodes_mem del fgraph, nodes_mem
...@@ -1006,8 +1011,8 @@ class ProfileStats(object): ...@@ -1006,8 +1011,8 @@ class ProfileStats(object):
new_max_running_max_memory_size / 1024.)), int(round( new_max_running_max_memory_size / 1024.)), int(round(
max_running_max_memory_size / 1024.))) max_running_max_memory_size / 1024.)))
if min_max_peak: if min_max_peak:
print >> file, " Minimum peak from all valid apply node order is %dKB" % int(round( print >> file, " Minimum peak from all valid apply node order is %dKB(took %f.2s to compute)" % (int(round(
min_max_peak / 1024.)) min_max_peak / 1024.)), min_peak_time)
print >> file, " Memory saved if views are used: %dKB (%dKB)" % (int( print >> file, " Memory saved if views are used: %dKB (%dKB)" % (int(
round(new_max_node_memory_saved_by_view / 1024.)), int( round(new_max_node_memory_saved_by_view / 1024.)), int(
round(max_node_memory_saved_by_view / 1024.))) round(max_node_memory_saved_by_view / 1024.)))
......
...@@ -10,9 +10,10 @@ http://www-users.cs.umn.edu/~saad/software/SPARSKIT/paper.ps ...@@ -10,9 +10,10 @@ http://www-users.cs.umn.edu/~saad/software/SPARSKIT/paper.ps
import sys import sys
import numpy import numpy
import theano from numpy.lib.stride_tricks import as_strided
import scipy.sparse import scipy.sparse
import theano
from theano import gof, tensor, compile, scalar, config from theano import gof, tensor, compile, scalar, config
from theano.gof.python25 import all from theano.gof.python25 import all
from theano.gradient import DisconnectedType from theano.gradient import DisconnectedType
...@@ -20,7 +21,6 @@ from theano.sparse.utils import hash_from_sparse ...@@ -20,7 +21,6 @@ from theano.sparse.utils import hash_from_sparse
import theano.tests.unittest_tools as utt import theano.tests.unittest_tools as utt
from theano.gradient import grad_not_implemented, grad_undefined from theano.gradient import grad_not_implemented, grad_undefined
from theano.sparse.type import SparseType, _is_sparse from theano.sparse.type import SparseType, _is_sparse
from numpy.lib.stride_tricks import as_strided
sparse_formats = ['csc', 'csr'] sparse_formats = ['csc', 'csr']
...@@ -689,7 +689,7 @@ class CSM(gof.Op): ...@@ -689,7 +689,7 @@ class CSM(gof.Op):
# node.inputs[3] is of length 2, as we only support sparse matrices. # node.inputs[3] is of length 2, as we only support sparse matrices.
return [(node.inputs[3][0], node.inputs[3][1])] return [(node.inputs[3][0], node.inputs[3][1])]
else: else:
return node.fgraph.shape_feature.default_infer_shape(node, shapes) raise theano.tensor.basic.ShapeError("case not implemented")
CSC = CSM('csc') CSC = CSM('csc')
......
...@@ -1890,7 +1890,7 @@ class AdvancedSubtensor(Op): ...@@ -1890,7 +1890,7 @@ class AdvancedSubtensor(Op):
else: else:
return [ind1shp] return [ind1shp]
# Default case, we don't know # Default case, we don't know
return node.fgraph.shape_feature.default_infer_shape(node, ishapes) raise theano.tensor.basic.ShapeError("case not implemented")
def perform(self, node, inputs, out_): def perform(self, node, inputs, out_):
out, = out_ out, = out_
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论