提交 d8433b0c · 作者: Razvan Pascanu

merge

......@@ -233,7 +233,8 @@ predefined_modes = {'FAST_COMPILE': FAST_COMPILE,
'SANITY_CHECK': SANITY_CHECK}
def get_mode(string):
if string is None: string = config.mode
if string is None:
return get_default_mode()
if not isinstance(string, str): return string #it is hopefully already a mode...
if not predefined_modes.has_key(string):
raise Exception("No predefixed mode exist for string: %s"%string)
......
......@@ -830,25 +830,8 @@ class Dot22Scalar(GemmRelated):
def __str__(self):
    """Return the printable name of this Op, as shown in graph dumps."""
    op_name = "_dot22scalar"
    return op_name
setup_z_Nz_Sz = """
if ((NULL == %(_z)s)
|| (%(_z)s->dimensions[0] != %(_x)s->dimensions[0])
|| (%(_z)s->dimensions[1] != %(_y)s->dimensions[1]))
{
if (NULL != %(_z)s) Py_XDECREF(%(_z)s);
npy_intp dims[2];
dims[0] = %(_x)s->dimensions[0];
dims[1] = %(_y)s->dimensions[1];
%(_z)s = (PyArrayObject*)PyArray_SimpleNew(2, dims, type_num_%(_x)s);
if(!%(_z)s) {
PyErr_SetString(PyExc_MemoryError, "failed to alloc dot22scalar output");
%(fail)s
}
}
Nz = %(_z)s->dimensions;
Sz = %(_z)s->strides;
setup_z_Nz_Sz = Dot22.setup_z_Nz_Sz
"""
check_ab_double_or_float = """
if ((%(_a)s->descr->type_num != PyArray_DOUBLE)
&& (%(_a)s->descr->type_num != PyArray_FLOAT))
......@@ -871,13 +854,13 @@ class Dot22Scalar(GemmRelated):
#undef REAL
double b = 0.0;
"""
def c_code(self, node, name, (_x, _y, _a), (_z, ), sub): #DEBUG
def c_code(self, node, name, (_x, _y, _a), (_zout, ), sub): #DEBUG
if len(self.c_libraries())<=0:
return super(Dot22Scalar, self).c_code(node, name, (_x, _y), (_z, ), sub)
return super(Dot22Scalar, self).c_code(node, name, (_x, _y), (_zout, ), sub)
full_code = self.build_gemm_call() % dict(locals(), **sub)
return full_code
def c_code_cache_version(self):
return (1,) + self.build_gemm_version()
return (2,) + self.build_gemm_version()
_dot22scalar = Dot22Scalar()
......
......@@ -465,6 +465,33 @@ class ShapeOptimizer(Optimizer):
# -1 should make it run right before the first merge
theano.compile.mode.optdb.register('ShapeOpt', ShapeOptimizer(), -1, 'fast_run', 'fast_compile')
# This is a bug fix: ShapeFeature should do this, but it doesn't in the
# deep learning tutorial.  -- TODO confirm "deep teaching" meant the DLT.
@gof.local_optimizer([None, None])
def local_shape_i_lift(node):
    """Replace ``Shape_i`` applied to the output of a unary ``Elemwise``
    with the symbolic shape already cached by the env's ShapeFeature.

    If ``node`` is a ``Shape_i`` whose input was produced by an
    ``Elemwise`` op taking a single input (``scalar_op.nin == 1``),
    return the shape information that ``node.env.shape_feature`` has
    recorded for that input variable; otherwise return ``False`` (or
    ``None`` implicitly) so no replacement happens.

    NOTE(review): the original docstring here was copy-pasted from the
    DimShuffle-lift optimizer and described an unrelated transformation;
    it has been rewritten from the code.  Also note the code returns
    ``shape_of[input]`` -- the full cached shape record -- rather than
    selecting the ``op.i``-th entry; confirm this matches the
    ``local_optimizer`` replacement contract.
    """
    op = node.op
    # Guard clause: this optimizer only rewrites Shape_i nodes.
    if not isinstance(op, Shape_i):
        return False
    input = node.inputs[0]
    inode = input.owner
    # Only fire when the producer is a unary Elemwise, whose output
    # shape the ShapeFeature already tracks.
    if inode and isinstance(inode.op, Elemwise) and inode.op.scalar_op.nin==1:
        return node.env.shape_feature.shape_of[input]
register_canonicalize(local_shape_i_lift)
register_specialize(local_shape_i_lift)
@register_specialize
@register_canonicalize
@gof.local_optimizer([T.fill])
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论