提交 8598dab6 authored 作者: Olivier Delalleau's avatar Olivier Delalleau

Merged

......@@ -17,10 +17,10 @@ Isolating the problem/Testing Theano compiler
You can run your Theano function in a DebugMode (:ref:`using_debugmode`). This tests the Theano optimizations and helps to find where NaN, inf and other problems come from.
Can I get Theano to test my graph incrementally, as it's being built ?
-----------------------------------------------------------------------
Interactive Debugger
--------------------
Yes ! As of v.0.4.0, Theano has a new mechanism by which graphs are executed
As of v.0.4.0, Theano has a new mechanism by which graphs are executed
on-the-fly, before a theano.function is ever compiled. Since optimizations
haven't been applied at this stage, it is easier for the user to locate the
source of the bug. This functionality is enabled through the config flag
......
......@@ -411,7 +411,10 @@ class ProfileMode(Mode):
apply_time, op_cimpl, message, outputs_size,
other_time)
if outputs_size:
if not outputs_size:
print """\nProfile of Theano intermediate memory disabled.
To enabled, put the Theano flag ProfileMode.profile_memory to True."""
else:
fct_memory={}#env->dict(node->(outputs size))
var_mem = {}
for node,val in outputs_size.items():
......@@ -421,6 +424,7 @@ class ProfileMode(Mode):
var_mem[out]=v
print
print "Profile of Theano functions memory:"
print "(This check only the output of each apply node. It don't check the temporary memory used by the op in the apply node.)"
nb_skipped = 0
for env,nodes_mem in fct_memory.iteritems():
size_sum=sum([sum(val) for key,val in nodes_mem.iteritems()])
......
......@@ -401,7 +401,7 @@ class PerformLinker(LocalLinker):
for node in order:
# Make sure we don't use the C version of the code, but rather only
# the Python version
node._op_use_c_code = False
node.op._op_use_c_code = False
thunks += [node.op.make_thunk(node,
storage_map,
compute_map,
......
......@@ -10,7 +10,10 @@ ls ${COMPILEDIR}|wc -l
FLAGS=warn.argmax_pushdown_bug=False,warn.gpusum_01_011_0111_bug=False,warn.sum_sum_bug=False,warn.sum_div_dimshuffle_bug=False,compiledir=${COMPILEDIR}
export PYTHONPATH=${ROOT_CWD}:$PYTHONPATH
cd ${ROOT_CWD}
cd ${ROOT_CWD}/Theano
hg summary
cd ..
echo "executing nosetests with mode=FAST_COMPILE"
THEANO_FLAGS=${FLAGS},mode=FAST_COMPILE ${NOSETESTS} Theano
echo "nb element in the compiledir:"
......
......@@ -591,4 +591,3 @@ def use_gpu_images2neibs(node):
if cuda_available:
register_gpu_opt()(use_gpu_images2neibs)
......@@ -1126,26 +1126,23 @@ int_div = IntDiv(upcast_out, name = 'int_div')
floor_div = int_div
def raise_complex_error():
raise ComplexError(
"Theano does not support the mod operator (%) on "
"complex numbers, since numpy deprecated it.")
def mod_check(x, y):
if (as_scalar(x).type in complex_types or
as_scalar(y).type in complex_types):
# Currently forbidden.
raise_complex_error()
raise Mod.complex_error
else:
return mod(x, y)
class Mod(BinaryScalarOp):
complex_error = ComplexError(
"Theano does not support the mod operator (%) on "
"complex numbers, since numpy deprecated it.")
def impl(self, x, y):
if isinstance(x, numpy.complex) or isinstance(y, numpy.complex):
raise_complex_error()
raise self.complex_error
return x % y
def c_code_cache_version(self):
......@@ -1184,7 +1181,7 @@ class Mod(BinaryScalarOp):
x_mod_ypm = "fmod(%(x)s,-%(y)s)"%locals()
x_mod_ymp = "fmod(-%(x)s,%(y)s)"%locals()
elif str(t) in imap(str, complex_types):
raise_complex_error()
raise self.complex_error
else:
raise NotImplementedError('type not supported', type)
......
......@@ -2630,7 +2630,7 @@ def mod_check(x, y):
if (as_tensor_variable(x).dtype in complex_dtypes or
as_tensor_variable(y).dtype in complex_dtypes):
# Currently forbidden.
scal.raise_complex_error()
raise scal.Mod.complex_error
else:
return mod(x, y)
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论