提交 62bbc9b0 authored 作者: Pascal Lamblin's avatar Pascal Lamblin

Merge pull request #3870 from Saizheng/master

move print_tips to profiling.py
...@@ -31,7 +31,6 @@ from six import iteritems ...@@ -31,7 +31,6 @@ from six import iteritems
from theano.gof import graph from theano.gof import graph
from theano.configparser import AddConfigVar, BoolParam, IntParam, StrParam from theano.configparser import AddConfigVar, BoolParam, IntParam, StrParam
theano_imported_time = time.time() theano_imported_time = time.time()
config = theano.config config = theano.config
...@@ -1302,6 +1301,122 @@ class ProfileStats(object): ...@@ -1302,6 +1301,122 @@ class ProfileStats(object):
print("-----------------", file=file) print("-----------------", file=file)
self.optimizer_profile[0].print_profile(file, self.optimizer_profile[0].print_profile(file,
self.optimizer_profile[1]) self.optimizer_profile[1])
self.print_tips(file)
def print_tips(self, file):
    """Print heuristics-based performance tips for the profiled function.

    Scans ``self.apply_time`` (the profiled Apply nodes) and the Theano
    configuration for known slow patterns, and writes one suggestion per
    detected pattern to *file*.  If no pattern matches, a "no tip" line
    is printed instead.

    Patterns checked:
      1. ``floatX=float64``            -> suggest float32.
      2. Elemwise ops amdlibm helps    -> suggest ``lib.amdlibm=True``.
      3. float32 ``exp`` w/o amdlibm   -> warn it is slower than float64.
      4. ``Dot`` on 2-d inputs not rewritten to the faster ``dot22``.
      5. ``RandomFunction`` nodes      -> suggest ``MRG_RandomStreams``.

    :param file: an open, writable file-like object the tips go to.
    """
    print("""Here are tips to potentially make your code run faster
(if you think of new ones, suggest them on the mailing list).
Test them first, as they are not guaranteed to always provide a speedup.""",
          file=file)

    RandomFunction = theano.tensor.raw_random.RandomFunction
    scal = theano.scalar
    T = theano.tensor

    # Scalar ops for which linking against amdlibm brings no speedup.
    scalar_op_amdlibm_no_speed_up = [scal.LT, scal.GT, scal.LE, scal.GE,
                                     scal.EQ, scal.NEQ, scal.InRange,
                                     scal.Switch, scal.OR, scal.XOR,
                                     scal.AND, scal.Invert, scal.Maximum,
                                     scal.Minimum, scal.Add, scal.Mul,
                                     scal.Sub, scal.TrueDiv, scal.IntDiv,
                                     scal.Clip, scal.Second, scal.Identity,
                                     scal.Cast, scal.Sgn, scal.Neg,
                                     scal.Inv, scal.Sqr]
    # Scalar ops that amdlibm is known to accelerate.
    scalar_op_amdlibm_speed_up = [scal.Mod, scal.Pow, scal.Ceil,
                                  scal.Floor, scal.RoundHalfToEven,
                                  scal.RoundHalfAwayFromZero, scal.Log,
                                  scal.Log2, scal.Log10, scal.Log1p,
                                  scal.Exp, scal.Sqrt, scal.Abs, scal.Cos,
                                  scal.Sin, scal.Tan, scal.Tanh,
                                  scal.Cosh, scal.Sinh,
                                  T.nnet.sigm.ScalarSigmoid,
                                  T.nnet.sigm.ScalarSoftplus]

    def get_scalar_ops(s):
        # Recursively flatten a Composite scalar op into its leaf ops.
        if isinstance(s, theano.scalar.Composite):
            l = []
            for node in s.fgraph.toposort():
                l += get_scalar_ops(node.op)
            return l
        else:
            return [s]

    def list_scalar_op(op):
        # Scalar ops performed by an Elemwise op (Composite flattened).
        if isinstance(op.scalar_op, theano.scalar.Composite):
            return get_scalar_ops(op.scalar_op)
        else:
            return [op.scalar_op]

    def amdlibm_speed_up(op):
        # True iff amdlibm would accelerate at least one scalar op of `op`.
        if not isinstance(op, T.Elemwise):
            return False
        else:
            l = list_scalar_op(op)
            for s_op in l:
                if s_op.__class__ in scalar_op_amdlibm_speed_up:
                    return True
                elif s_op.__class__ not in scalar_op_amdlibm_no_speed_up:
                    # Unknown scalar op: report it, but don't claim a speedup.
                    print("We don't know if amdlibm will accelerate "
                          "this scalar op.", s_op, file=file)
            return False

    def exp_float32_op(op):
        # True iff `op` is an Elemwise applying the scalar Exp op.
        if not isinstance(op, T.Elemwise):
            return False
        else:
            l = list_scalar_op(op)
            return any(s_op.__class__ in [scal.Exp] for s_op in l)

    printed_tip = False
    # tip 1: float64 is the default but often unnecessary.
    if config.floatX == 'float64':
        print(" - Try the Theano flag floatX=float32", file=file)
        printed_tip = True

    # tip 2: amdlibm can accelerate some Elemwise scalar ops.
    if not config.lib.amdlibm and any(amdlibm_speed_up(a.op)
                                      for a in self.apply_time):
        print(" - Try installing amdlibm and set the Theano flag "
              "lib.amdlibm=True. This speeds up only some Elemwise "
              "operation.", file=file)
        printed_tip = True

    # tip 3: gcc libm's float32 exp is slower than the float64 one.
    if not config.lib.amdlibm and any(exp_float32_op(a.op) and
                                      a.inputs[0].dtype == 'float32'
                                      for a in self.apply_time):
        print(" - With the default gcc libm, exp in float32 is slower "
              "than in float64! Try Theano flag floatX=float64, or "
              "install amdlibm and set the theano flags lib.amdlibm=True",
              file=file)
        printed_tip = True

    # tip 4: a 2-d x 2-d Dot should have been rewritten to dot22.
    for node in self.apply_time:
        if (isinstance(node.op, T.Dot) and
                all(len(i.type.broadcastable) == 2
                    for i in node.inputs)):
            print(" - You have a dot operation that was not optimized to"
                  " dot22 (which is faster). Make sure the inputs are "
                  "float32 or float64, and are the same for both inputs. "
                  "Currently they are: %s" %
                  [i.type for i in node.inputs], file=file)
            printed_tip = True

    # tip 5: the legacy RandomFunction generator is slow.
    for node in self.apply_time:
        if isinstance(node.op, RandomFunction):
            printed_tip = True
            # Fixed duplicated word in the original message ("is is").
            print(" - Replace the default random number generator by "
                  "'from theano.sandbox.rng_mrg import MRG_RandomStreams "
                  "as RandomStreams', as this is faster. It is still "
                  "experimental, but seems to work correctly.", file=file)
            if config.device.startswith("gpu"):
                print(" - MRG_RandomStreams is the only random number"
                      " generator supported on the GPU.", file=file)
            break

    if not printed_tip:
        print(" Sorry, no tip for today.", file=file)
if False: # old code still to be ported from ProfileMode if False: # old code still to be ported from ProfileMode
......
This source diff could not be displayed because it is too large. You can view the blob instead.
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论