提交 8598dab6 authored 作者: Olivier Delalleau's avatar Olivier Delalleau

Merged

...@@ -17,10 +17,10 @@ Isolating the problem/Testing Theano compiler ...@@ -17,10 +17,10 @@ Isolating the problem/Testing Theano compiler
You can run your Theano function in a DebugMode (:ref:`using_debugmode`). This tests the Theano optimizations and helps to find where NaN, inf and other problems come from. You can run your Theano function in a DebugMode (:ref:`using_debugmode`). This tests the Theano optimizations and helps to find where NaN, inf and other problems come from.
Can I get Theano to test my graph incrementally, as it's being built ? Interactive Debugger
----------------------------------------------------------------------- --------------------
Yes ! As of v.0.4.0, Theano has a new mechanism by which graphs are executed As of v.0.4.0, Theano has a new mechanism by which graphs are executed
on-the-fly, before a theano.function is ever compiled. Since optimizations on-the-fly, before a theano.function is ever compiled. Since optimizations
haven't been applied at this stage, it is easy for the user to locate the haven't been applied at this stage, it is easy for the user to locate the
source of this bug. This functionality is enabled through the config flag source of this bug. This functionality is enabled through the config flag
......
...@@ -411,7 +411,10 @@ class ProfileMode(Mode): ...@@ -411,7 +411,10 @@ class ProfileMode(Mode):
apply_time, op_cimpl, message, outputs_size, apply_time, op_cimpl, message, outputs_size,
other_time) other_time)
if outputs_size: if not outputs_size:
print """\nProfile of Theano intermediate memory disabled.
To enabled, put the Theano flag ProfileMode.profile_memory to True."""
else:
fct_memory={}#env->dict(node->(outputs size)) fct_memory={}#env->dict(node->(outputs size))
var_mem = {} var_mem = {}
for node,val in outputs_size.items(): for node,val in outputs_size.items():
...@@ -421,6 +424,7 @@ class ProfileMode(Mode): ...@@ -421,6 +424,7 @@ class ProfileMode(Mode):
var_mem[out]=v var_mem[out]=v
print print
print "Profile of Theano functions memory:" print "Profile of Theano functions memory:"
print "(This check only the output of each apply node. It don't check the temporary memory used by the op in the apply node.)"
nb_skipped = 0 nb_skipped = 0
for env,nodes_mem in fct_memory.iteritems(): for env,nodes_mem in fct_memory.iteritems():
size_sum=sum([sum(val) for key,val in nodes_mem.iteritems()]) size_sum=sum([sum(val) for key,val in nodes_mem.iteritems()])
......
...@@ -401,7 +401,7 @@ class PerformLinker(LocalLinker): ...@@ -401,7 +401,7 @@ class PerformLinker(LocalLinker):
for node in order: for node in order:
# Make sure we don't use the C version of the code, but rather only # Make sure we don't use the C version of the code, but rather only
# the python version # the python version
node._op_use_c_code = False node.op._op_use_c_code = False
thunks += [node.op.make_thunk(node, thunks += [node.op.make_thunk(node,
storage_map, storage_map,
compute_map, compute_map,
......
...@@ -10,7 +10,10 @@ ls ${COMPILEDIR}|wc -l ...@@ -10,7 +10,10 @@ ls ${COMPILEDIR}|wc -l
FLAGS=warn.argmax_pushdown_bug=False,warn.gpusum_01_011_0111_bug=False,warn.sum_sum_bug=False,warn.sum_div_dimshuffle_bug=False,compiledir=${COMPILEDIR} FLAGS=warn.argmax_pushdown_bug=False,warn.gpusum_01_011_0111_bug=False,warn.sum_sum_bug=False,warn.sum_div_dimshuffle_bug=False,compiledir=${COMPILEDIR}
export PYTHONPATH=${ROOT_CWD}:$PYTHONPATH export PYTHONPATH=${ROOT_CWD}:$PYTHONPATH
cd ${ROOT_CWD} cd ${ROOT_CWD}/Theano
hg summary
cd ..
echo "executing nosetests with mode=FAST_COMPILE" echo "executing nosetests with mode=FAST_COMPILE"
THEANO_FLAGS=${FLAGS},mode=FAST_COMPILE ${NOSETESTS} Theano THEANO_FLAGS=${FLAGS},mode=FAST_COMPILE ${NOSETESTS} Theano
echo "nb element in the compiledir:" echo "nb element in the compiledir:"
......
...@@ -14,7 +14,7 @@ class Images2Neibs(Op): ...@@ -14,7 +14,7 @@ class Images2Neibs(Op):
def __init__(self, mode='valid'): def __init__(self, mode='valid'):
""" """
Modes: Modes:
valid : Reshapes the input as a 2D tensor where each row is a pooling example. valid : Reshapes the input as a 2D tensor where each row is a pooling example.
Requires an input that is a multiple of the pooling factor (in each direction) Requires an input that is a multiple of the pooling factor (in each direction)
ignore_borders : Same as valid, but will ignore the borders if the shape(s) of the input ignore_borders : Same as valid, but will ignore the borders if the shape(s) of the input
is not a multiple of the pooling factor(s) is not a multiple of the pooling factor(s)
...@@ -248,7 +248,7 @@ def neibs2images(neibs, neib_shape, original_shape, mode='valid'): ...@@ -248,7 +248,7 @@ def neibs2images(neibs, neib_shape, original_shape, mode='valid'):
new_neib_shape = T.stack(original_shape[-1] // neib_shape[1], neib_shape[1]) new_neib_shape = T.stack(original_shape[-1] // neib_shape[1], neib_shape[1])
output_2d = images2neibs(neibs.dimshuffle('x','x',0,1), new_neib_shape, mode=mode) output_2d = images2neibs(neibs.dimshuffle('x','x',0,1), new_neib_shape, mode=mode)
if mode == 'ignore_borders': if mode == 'ignore_borders':
valid_shape = list(original_shape) valid_shape = list(original_shape)
valid_shape[2] = (valid_shape[2] // neib_shape[0]) * neib_shape[0] valid_shape[2] = (valid_shape[2] // neib_shape[0]) * neib_shape[0]
...@@ -261,7 +261,7 @@ def neibs2images(neibs, neib_shape, original_shape, mode='valid'): ...@@ -261,7 +261,7 @@ def neibs2images(neibs, neib_shape, original_shape, mode='valid'):
output_4d = T.concatenate([output_4d,T.zeros(pad_shape)],axis=d) output_4d = T.concatenate([output_4d,T.zeros(pad_shape)],axis=d)
else: else:
output_4d = output_2d.reshape(original_shape) output_4d = output_2d.reshape(original_shape)
return output_4d return output_4d
...@@ -539,7 +539,7 @@ class GpuImages2Neibs(Images2Neibs): ...@@ -539,7 +539,7 @@ class GpuImages2Neibs(Images2Neibs):
dim3 n_blocks(std::min(32*1024,nb_block)); dim3 n_blocks(std::min(32*1024,nb_block));
int n_shared = 0; int n_shared = 0;
void (*f)(int, int, int ,int, void (*f)(int, int, int ,int,
int, int, int ,int, int, int, int ,int,
int, int, int, int,
int, int, int, int, int, int, int, int,
...@@ -591,4 +591,3 @@ def use_gpu_images2neibs(node): ...@@ -591,4 +591,3 @@ def use_gpu_images2neibs(node):
if cuda_available: if cuda_available:
register_gpu_opt()(use_gpu_images2neibs) register_gpu_opt()(use_gpu_images2neibs)
...@@ -1126,26 +1126,23 @@ int_div = IntDiv(upcast_out, name = 'int_div') ...@@ -1126,26 +1126,23 @@ int_div = IntDiv(upcast_out, name = 'int_div')
floor_div = int_div floor_div = int_div
def raise_complex_error():
raise ComplexError(
"Theano does not support the mod operator (%) on "
"complex numbers, since numpy deprecated it.")
def mod_check(x, y): def mod_check(x, y):
if (as_scalar(x).type in complex_types or if (as_scalar(x).type in complex_types or
as_scalar(y).type in complex_types): as_scalar(y).type in complex_types):
# Currently forbidden. # Currently forbidden.
raise_complex_error() raise Mod.complex_error
else: else:
return mod(x, y) return mod(x, y)
class Mod(BinaryScalarOp): class Mod(BinaryScalarOp):
complex_error = ComplexError(
"Theano does not support the mod operator (%) on "
"complex numbers, since numpy deprecated it.")
def impl(self, x, y): def impl(self, x, y):
if isinstance(x, numpy.complex) or isinstance(y, numpy.complex): if isinstance(x, numpy.complex) or isinstance(y, numpy.complex):
raise_complex_error() raise self.complex_error
return x % y return x % y
def c_code_cache_version(self): def c_code_cache_version(self):
...@@ -1184,7 +1181,7 @@ class Mod(BinaryScalarOp): ...@@ -1184,7 +1181,7 @@ class Mod(BinaryScalarOp):
x_mod_ypm = "fmod(%(x)s,-%(y)s)"%locals() x_mod_ypm = "fmod(%(x)s,-%(y)s)"%locals()
x_mod_ymp = "fmod(-%(x)s,%(y)s)"%locals() x_mod_ymp = "fmod(-%(x)s,%(y)s)"%locals()
elif str(t) in imap(str, complex_types): elif str(t) in imap(str, complex_types):
raise_complex_error() raise self.complex_error
else: else:
raise NotImplementedError('type not supported', type) raise NotImplementedError('type not supported', type)
......
...@@ -2630,7 +2630,7 @@ def mod_check(x, y): ...@@ -2630,7 +2630,7 @@ def mod_check(x, y):
if (as_tensor_variable(x).dtype in complex_dtypes or if (as_tensor_variable(x).dtype in complex_dtypes or
as_tensor_variable(y).dtype in complex_dtypes): as_tensor_variable(y).dtype in complex_dtypes):
# Currently forbidden. # Currently forbidden.
scal.raise_complex_error() raise scal.Mod.complex_error
else: else:
return mod(x, y) return mod(x, y)
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论