Commit ec1caa4d authored by Pascal Lamblin

More pep8

Parent 043c3eef
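The hunks below are mechanical style cleanups. Two patterns recur: spacing around binary operators, and continuation-line alignment (roughly pep8's E225/E127-style complaints, as an approximation). Illustrated with a pair taken from the first hunk:

# before
(fx+fy+tensor.cos(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv+fyv+numpy.cos(fzv), 'float32'),
# after
(fx + fy + tensor.cos(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv + fyv + numpy.cos(fzv), 'float32'),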
@@ -964,144 +964,133 @@ class test_fusion(unittest.TestCase):
fwv + fxv + fyv + fzv, 'float32'),
(fw + (fx + (fy + fz)), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,
fwv + fxv + fyv + fzv, 'float32'),
((fw+fx)+(fy+fz), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,
fwv+fxv+fyv+fzv, 'float32'), # 10
(fw*fx*fy*fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,
((fw + fx) + (fy + fz), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,
fwv + fxv + fyv + fzv, 'float32'), # 10
(fw * fx * fy * fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,
fwv * fxv * fyv * fzv, 'float32'),
(fw+fx*fy*fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,
(fw + fx * fy * fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,
fwv + fxv * fyv * fzv, 'float32'),
(fx+fy*fz*fx, (fx, fy, fz), (fxv, fyv, fzv), 1,
(fx + fy * fz * fx, (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv + fyv * fzv * fxv, 'float32'),
(fx*fy+fz+fy, (fx, fy, fz), (fxv, fyv, fzv), 1,
(fx * fy + fz + fy, (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv * fyv + fzv + fyv, 'float32'),
(fx*fy*fz*fw+fx+fy+fz+fw, (fw, fx, fy, fz), (fwv, fxv,
fyv, fzv), 1, fxv*fyv*fzv*fwv+fxv+fyv+fzv+fwv, 'float32'), # 15
(fx * fy * fz * fw + fx + fy + fz + fw, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv), 1,
fxv * fyv * fzv * fwv + fxv + fyv + fzv + fwv, 'float32'), # 15
# test with constant
((fw+fx)+(fy+fz) + 2., (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
1, fwv+fxv+fyv+fzv+2, 'float32'),
(((fw+fx)+2.+fy)+fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
1, fwv+fxv+fyv+fzv+2, 'float32'),
((fw+(fx+2.+fy))+fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
1, fwv+fxv+fyv+fzv+2, 'float32'),
((fw+(fx+fy)+2+fz), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
1, fwv+fxv+fyv+fzv+2, 'float32'),
(fw+(fx+(fy+fz)+2.), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
1, fwv+fxv+fyv+fzv+2, 'float32'), # 20
(2+(fw+fx)+(fy+fz), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
1, fwv+fxv+fyv+fzv+2, 'float32'),
((fw + fx) + (fy + fz) + 2., (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
1, fwv + fxv + fyv + fzv + 2, 'float32'),
(((fw + fx) + 2. + fy) + fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
1, fwv + fxv + fyv + fzv + 2, 'float32'),
((fw + (fx + 2. + fy)) + fz, (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
1, fwv + fxv + fyv + fzv + 2, 'float32'),
((fw + (fx + fy) + 2 + fz), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
1, fwv + fxv + fyv + fzv + 2, 'float32'),
(fw + (fx + (fy + fz) + 2.), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
1, fwv + fxv + fyv + fzv + 2, 'float32'), # 20
(2 + (fw + fx) + (fy + fz), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
1, fwv + fxv + fyv + fzv + 2, 'float32'),
# mix float32 and float64
(2+(dw+fx)+(fy+fz), (dw, fx, fy, fz), (dwv, fxv, fyv, fzv),
1, dwv+fxv+fyv+fzv+2, 'float64'),
(2+(fw+dw)+(fy+fz), (fw, dw, fy, fz), (fwv, dwv, fyv, fzv),
1, fwv+dwv+fyv+fzv+2, 'float64'),
(2+(fw+fx)+(dw+fz), (fw, fx, dw, fz), (fwv, fxv, dwv, fzv),
1, fwv+fxv+dwv+fzv+2, 'float64'),
(2+(fw+fx)+(fy+dw), (fw, fx, fy, dw), (fwv, fxv, fyv, dwv),
1, fwv+fxv+fyv+dwv+2, 'float64'), # 25
(2 + (dw + fx) + (fy + fz), (dw, fx, fy, fz), (dwv, fxv, fyv, fzv),
1, dwv + fxv + fyv + fzv + 2, 'float64'),
(2 + (fw + dw) + (fy + fz), (fw, dw, fy, fz), (fwv, dwv, fyv, fzv),
1, fwv + dwv + fyv + fzv + 2, 'float64'),
(2 + (fw + fx) + (dw + fz), (fw, fx, dw, fz), (fwv, fxv, dwv, fzv),
1, fwv + fxv + dwv + fzv + 2, 'float64'),
(2 + (fw + fx) + (fy + dw), (fw, fx, fy, dw), (fwv, fxv, fyv, dwv),
1, fwv + fxv + fyv + dwv + 2, 'float64'), # 25
# test when there is an op other than elemwise.
# the expected output for the next test:
# (Pdb) p f.maker.fgraph.toposort()
#[Elemwise{add,no_inplace}(w, x), Sum(Elemwise{add,no_inplace}.0), InplaceDimShuffle{x,x}(Sum.0), Elemwise{Composite{_impls=[<function <lambda> at 0x2c5c8c0>], nin=4, _c_code={
# npy_float32 V%(id)s_tmp1;
# V%(id)s_tmp1 = %(i2)s + %(i3)s;
# npy_float32 V%(id)s_tmp2;
# V%(id)s_tmp2 = %(i0)s + %(i1)s;
#%(o0)s = V%(id)s_tmp2 + V%(id)s_tmp1;
#}
#, nout=1, fgraph=[add(add(<float32>, <float32>), add(<float32>, <float32>))]}}(InplaceDimShuffle{x,x}.0, Elemwise{add,no_inplace}.0, y, z)]
((fwx.sum())+(fwx)+(fy+fz), (fw, fx, fy, fz), (fwv, fxv,
fyv, fzv), 4, (fwv+fxv).sum()+fwv+fxv+fyv+fzv, 'float32'),
((fwx.sum()) + (fwx) + (fy + fz), (fw, fx, fy, fz), (fwv, fxv, fyv, fzv),
4, (fwv + fxv).sum() + fwv + fxv + fyv + fzv, 'float32'),
# test other elemwise op
(fx+fy+tensor.cos(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv+fyv+numpy.cos(fzv), 'float32'),
(fx+fy+tensor.cosh(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv+fyv+numpy.cosh(fzv), 'float32'),
(fx+fy+abs(fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv+fyv +
(fx + fy + tensor.cos(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv + fyv + numpy.cos(fzv), 'float32'),
(fx + fy + tensor.cosh(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv + fyv + numpy.cosh(fzv), 'float32'),
(fx + fy + abs(fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv + fyv +
numpy.absolute(fzv), 'float32'),
(ix+iy+abs(iz), (ix, iy, iz), (ixv, iyv, izv), 1, ixv+iyv +
(ix + iy + abs(iz), (ix, iy, iz), (ixv, iyv, izv), 1, ixv + iyv +
numpy.absolute(izv), 'int32'), # 30
(fx+fy+theano.tensor.log(fz), (fx, fy, fz), (fxv, fyv,
fzv), 1, fxv+fyv+numpy.log(fzv), 'float32'),
(fx+fy+theano.tensor.log2(fz), (fx, fy, fz), (fxv, fyv,
fzv), 1, fxv+fyv+numpy.log2(fzv), 'float32'),
(fx+fy+theano.tensor.log10(fz), (fx, fy, fz), (fxv, fyv,
fzv), 1, fxv+fyv+numpy.log10(fzv), 'float32'),
(fx+fy**fz, (fx, fy, fz), (fxv, fyv, fzv), 1, fxv+fyv**fzv,
(fx + fy + theano.tensor.log(fz), (fx, fy, fz), (fxv, fyv, fzv),
1, fxv + fyv + numpy.log(fzv), 'float32'),
(fx + fy + theano.tensor.log2(fz), (fx, fy, fz), (fxv, fyv, fzv),
1, fxv + fyv + numpy.log2(fzv), 'float32'),
(fx + fy + theano.tensor.log10(fz), (fx, fy, fz), (fxv, fyv, fzv),
1, fxv + fyv + numpy.log10(fzv), 'float32'),
(fx + fy ** fz, (fx, fy, fz), (fxv, fyv, fzv), 1, fxv + fyv ** fzv,
'float32'), # pow
(fx+fy+theano.tensor.exp(fz), (fx, fy, fz), (fxv, fyv,
fzv), 1, fxv+fyv+numpy.exp(fzv), 'float32'), # 35
(fx-fy-fz, (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-fyv-fzv, 'float32'),
(fx-(fy/fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(fyv/fzv), 'float32'),
(fx-theano.tensor.true_div(fy, 2), (fx, fy), (fxv, fyv),
1, fxv-(fyv/2), 'float32'),
(fx-theano.tensor.true_div(fy, fz), (fx, fy, fz), (fxv,
fyv, fzv), 1, fxv-(fyv/fzv), 'float32'),
(fx-theano.tensor.int_div(ix*100, iy*1000), (fx, ix,
iy), (fxv, ixv, iyv), 1, fxv-((ixv*100)//(iyv*1000)), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}), # 40
(fx-(fy/2), (fx, fy), (fxv, fyv), 1, fxv-(fyv/2), 'float32'),
(fx-(fy%fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(fyv%fzv), 'float32'),
(fx-(fy > fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(fyv > fzv), 'float32'),
(fx-(fy >= fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(fyv >= fzv), 'float32'),
(fx-(fy < fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(fyv < fzv), 'float32'), # 45
(fx-(fy <= fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(fyv <= fzv), 'float32'),
(fx-T.eq(fy, fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(
fyv == fzv), 'float32'),
(fx-T.neq(fy, fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv-(
(fx + fy + theano.tensor.exp(fz), (fx, fy, fz), (fxv, fyv, fzv),
1, fxv + fyv + numpy.exp(fzv), 'float32'), # 35
(fx - fy - fz, (fx, fy, fz), (fxv, fyv, fzv), 1, fxv - fyv - fzv, 'float32'),
(fx - (fy / fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv - (fyv / fzv), 'float32'),
(fx - theano.tensor.true_div(fy, 2), (fx, fy), (fxv, fyv),
1, fxv - (fyv / 2), 'float32'),
(fx - theano.tensor.true_div(fy, fz), (fx, fy, fz), (fxv, fyv, fzv),
1, fxv - (fyv / fzv), 'float32'),
(fx - theano.tensor.int_div(ix * 100, iy * 1000), (fx, ix, iy), (fxv, ixv, iyv),
1, fxv - ((ixv * 100) // (iyv * 1000)), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}), # 40
(fx - (fy / 2), (fx, fy), (fxv, fyv), 1, fxv - (fyv / 2), 'float32'),
(fx - (fy % fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv - (fyv % fzv), 'float32'),
(fx - (fy > fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv - (fyv > fzv), 'float32'),
(fx - (fy >= fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv - (fyv >= fzv), 'float32'),
(fx - (fy < fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv - (fyv < fzv), 'float32'), # 45
(fx - (fy <= fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv - (fyv <= fzv), 'float32'),
(fx - T.eq(fy, fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv - (fyv == fzv), 'float32'),
(fx - T.neq(fy, fz), (fx, fy, fz), (fxv, fyv, fzv), 1, fxv - (
fyv != fzv), 'float32'),
(fx-fy+tensor.tan(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv-fyv+numpy.tan(fzv), 'float32'),
(fx-fy+tensor.tanh(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv-fyv+numpy.tanh(fzv), 'float32'), # 50
(fx-fy+tensor.sin(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv-fyv+numpy.sin(fzv), 'float32'),
(fx-fy+tensor.sinh(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv-fyv+numpy.sinh(fzv), 'float32'),
(fx-fy+theano.tensor.sqr(fz), (fx, fy, fz), (fxv, fyv,
fzv), 1, fxv-fyv+(fzv*fzv), 'float32'),
(fx-fy+theano.tensor.sqrt(fz), (fx, fy, fz), (fxv, fyv,
fzv), 1, fxv-fyv+numpy.sqrt(fzv), 'float32'),
(fx-fy+theano.tensor.inv(fz), (fx, fy, fz), (fxv, fyv,
fzv), 1, fxv-fyv+(1/fzv), 'float32'), # 55
(fx-fy+theano.tensor.neg(fz), (fx, fy, fz), (fxv, fyv,
fzv), 1, fxv-fyv+(-fzv), 'float32'),
(fx-fy+theano.tensor.round(fz), (fx, fy, fz), (fxv, fyv,
fzv), 1, fxv-fyv+numpy.round(fzv), 'float32'),
(ix-iy+theano.tensor.iround(fz), (ix, iy, fz), (ixv,
iyv, fzv), 1, ixv-iyv+numpy.round(fzv), 'int64'),
(fx - fy + tensor.tan(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv - fyv + numpy.tan(fzv), 'float32'),
(fx - fy + tensor.tanh(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv - fyv + numpy.tanh(fzv), 'float32'), # 50
(fx - fy + tensor.sin(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv - fyv + numpy.sin(fzv), 'float32'),
(fx - fy + tensor.sinh(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv - fyv + numpy.sinh(fzv), 'float32'),
(fx - fy + theano.tensor.sqr(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv - fyv + (fzv * fzv), 'float32'),
(fx - fy + theano.tensor.sqrt(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv - fyv + numpy.sqrt(fzv), 'float32'),
(fx - fy + theano.tensor.inv(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv - fyv + (1 / fzv), 'float32'), # 55
(fx - fy + theano.tensor.neg(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv - fyv + (-fzv), 'float32'),
(fx - fy + theano.tensor.round(fz), (fx, fy, fz), (fxv, fyv, fzv), 1,
fxv - fyv + numpy.round(fzv), 'float32'),
(ix - iy + theano.tensor.iround(fz), (ix, iy, fz), (ixv, iyv, fzv), 1,
ixv - iyv + numpy.round(fzv), 'int64'),
# Bit op
(fx-theano.tensor.or_(iy, iz), (fx, iy, iz), (fxv, iyv,
izv), 1, fxv-(iyv|izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx-theano.tensor.xor(iy, iz), (fx, iy, iz), (fxv, iyv,
izv), 1, fxv-(iyv^izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}), # 60
(fx-theano.tensor.and_(iy, iz), (fx, iy, iz), (fxv, iyv,
izv), 1, fxv-(iyv&izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx-theano.tensor.invert(iy), (fx, iy), (fxv, iyv), 1,
fxv-(~iyv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx-theano.tensor.cast(fy, dtype='float64'), (fx, fy), (fxv, fyv), 1,
fxv-numpy.asarray(fyv, 'float64'), 'float64'),
(theano.tensor.pow(fx*fy+fz, fx*fy), (fx, fy, fz), (fxv,
fyv, fzv), 1, numpy.power(fxv*fyv+fzv, fxv*fyv), 'float32'),
(fv+fy**fz, (fv, fy, fz), (fvv, fyv, fzv), 2, fvv+fyv**fzv,
'float32'), # fused with a dimshuffle #65
(fv-fy+tensor.tanh(fz), (fv, fy, fz), (fvv, fyv, fzv), 2,
fvv-fyv+numpy.tanh(fzv), 'float32'), # fused with a dimshuffle
(fx - theano.tensor.or_(iy, iz), (fx, iy, iz), (fxv, iyv, izv), 1,
fxv - (iyv | izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx - theano.tensor.xor(iy, iz), (fx, iy, iz), (fxv, iyv, izv), 1,
fxv - (iyv ^ izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}), # 60
(fx - theano.tensor.and_(iy, iz), (fx, iy, iz), (fxv, iyv, izv), 1,
fxv - (iyv & izv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx - theano.tensor.invert(iy), (fx, iy), (fxv, iyv), 1,
fxv - (~iyv), {'custom': 'float64', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),
(fx - theano.tensor.cast(fy, dtype='float64'), (fx, fy), (fxv, fyv), 1,
fxv - numpy.asarray(fyv, 'float64'), 'float64'),
(theano.tensor.pow(fx * fy + fz, fx * fy), (fx, fy, fz), (fxv, fyv, fzv), 1,
numpy.power(fxv * fyv + fzv, fxv * fyv), 'float32'),
(fv + fy ** fz, (fv, fy, fz), (fvv, fyv, fzv), 2, fvv + fyv ** fzv, 'float32'), # fused with a dimshuffle #65
(fv - fy + tensor.tanh(fz), (fv, fy, fz), (fvv, fyv, fzv), 2,
fvv - fyv + numpy.tanh(fzv), 'float32'), # fused with a dimshuffle
# Cases where the same input is reused many times.
(theano.tensor.mul(fx, fx, fx, fx), (fx,), (fxv,), 1, fxv *
fxv*fxv*fxv, 'float32'),
fxv * fxv * fxv, 'float32'),
(theano.tensor.mul(fx, ftanx, ftanx), (fx,), (fxv,), 1,
fxv*numpy.tan(fxv)*numpy.tan(fxv), 'float32'),
fxv * numpy.tan(fxv) * numpy.tan(fxv), 'float32'),
(theano.tensor.mul(fx, ftanx, ftanx, fx), (fx,), (fxv,),
1, fxv*numpy.tan(fxv)*numpy.tan(fxv)*fxv, 'float32'),
(theano.tensor.mul(ftanx, ftanx, fx+fy), (fx, fy), (fxv,
fyv), 1, numpy.tan(fxv)*numpy.tan(fxv)*(fxv+fyv), 'float32'), # 70
1, fxv * numpy.tan(fxv) * numpy.tan(fxv) * fxv, 'float32'),
(theano.tensor.mul(ftanx, ftanx, fx + fy), (fx, fy), (fxv, fyv),
1, numpy.tan(fxv) * numpy.tan(fxv) * (fxv + fyv), 'float32'), # 70
# Cases with different broadcast patterns. They should not
# be merged, as this would duplicate computation.
# The graph should have 2 elemwise and 1 dimshuffle.
(fx*theano.tensor.sin(fs), (fx, fs), (fxv,
fsv), 3, fxv*numpy.sin(fsv), 'float32'),
(fx * theano.tensor.sin(fs), (fx, fs), (fxv, fsv), 3,
fxv * numpy.sin(fsv), 'float32'),
]
if slice:
cases = cases[slice]
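# Editor's sketch, not part of the commit: each tuple in `cases` above is
# (expression, symbolic inputs, input values, expected node count after
# optimization, expected result, expected dtype). A minimal standalone
# version of the same check, assuming a standard Theano install with the
# default FAST_RUN mode:
import theano
import theano.tensor as T
x, y, z = T.fvectors('x', 'y', 'z')
fuse_mode = theano.compile.get_default_mode().including(
    'local_elemwise_fusion', 'composite_elemwise_fusion', 'canonicalize')
f = theano.function([x, y, z], x + y + T.cos(z), mode=fuse_mode)
# everything should be fused into a single Composite Elemwise node
assert len(f.maker.fgraph.toposort()) == 1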
@@ -1149,15 +1138,14 @@ class test_fusion(unittest.TestCase):
print(val_inputs)
print(out)
print(answer * nb_repeat)
#assert 0
topo = f.maker.fgraph.toposort()
if gpu:
import theano.sandbox.cuda as cuda
topo_ = [x for x in topo if not isinstance(
x.op, (cuda.basic_ops.GpuFromHost, cuda.basic_ops.HostFromGpu))]
gpu_ = [x for x in topo if isinstance(x.op,
cuda.basic_ops.GpuFromHost)]
gpu_ = [x for x in topo
if isinstance(x.op, cuda.basic_ops.GpuFromHost)]
if not len(gpu_) == len(sym_inputs):
fail2.append((id, gpu_, sym_inputs))
else:
@@ -1173,7 +1161,7 @@ class test_fusion(unittest.TestCase):
if len(set(g.owner.inputs)) == len(g.owner.inputs):
expected_len_sym_inputs = numpy.sum(
[not isinstance(x, theano.gof.Constant)
for x in topo_[0].inputs])
for x in topo_[0].inputs])
assert expected_len_sym_inputs == len(sym_inputs)
if not out_dtype == out.dtype:
@@ -1183,7 +1171,7 @@ class test_fusion(unittest.TestCase):
print("Executed", len(cases), "cases", "failed", failed)
if failed > 0:
raise Exception("Failed %d cases" % failed, fail1,
fail2, fail3, fail4)
fail2, fail3, fail4)
return times
@@ -1213,11 +1201,11 @@ class test_fusion(unittest.TestCase):
# we need the optimisation enabled; debug mode already does this.
if theano.config.mode == "FAST_COMPILE":
mode = theano.compile.mode.get_mode("FAST_RUN").including(
'local_elemwise_fusion', 'composite_elemwise_fusion',
'local_elemwise_fusion', 'composite_elemwise_fusion',
'canonicalize', 'gpu')
else:
mode = theano.compile.mode.get_default_mode().including(
'local_elemwise_fusion', 'composite_elemwise_fusion',
'local_elemwise_fusion', 'composite_elemwise_fusion',
'canonicalize', 'gpu')
import theano.sandbox.cuda as cuda
if not cuda.cuda_available:
@@ -1230,11 +1218,11 @@ class test_fusion(unittest.TestCase):
# we need the optimisation enabled; debug mode already does this.
if theano.config.mode == "FAST_COMPILE":
mode = theano.compile.mode.get_mode("FAST_RUN").including(
'local_elemwise_fusion', 'composite_elemwise_fusion',
'local_elemwise_fusion', 'composite_elemwise_fusion',
'canonicalize', 'gpu')
else:
mode = theano.compile.mode.get_default_mode().including(
'local_elemwise_fusion', 'composite_elemwise_fusion',
'local_elemwise_fusion', 'composite_elemwise_fusion',
'canonicalize', 'gpu')
import theano.sandbox.cuda as cuda
if not cuda.cuda_available:
@@ -1301,8 +1289,8 @@ class test_fusion(unittest.TestCase):
shp = (3000, 3000)
shp = (1000, 1000)
nb_repeat = 50
# linker=gof.CLinker
# linker=gof.OpWiseCLinker
# linker=gof.CLinker
# linker=gof.OpWiseCLinker
mode1 = copy.copy(compile.get_default_mode())
mode1._optimizer = mode1._optimizer.including('local_elemwise_fusion')
@@ -1325,15 +1313,15 @@ class test_fusion(unittest.TestCase):
print("times2/times1")
print(d)
print("min", d.min(), "argmin", d.argmin(), "max", d.max(), \
"mean", d.mean(), "std", d.std())
print("min", d.min(), "argmin", d.argmin(), "max", d.max(),
"mean", d.mean(), "std", d.std())
def test_fusion_inplace(self):
mode = copy.copy(compile.mode.get_default_mode())
# we need the optimisation enabled, including canonicalize;
# canonicalize is needed to merge multiplication/addition by a constant.
mode._optimizer = mode._optimizer.including(
'local_elemwise_fusion', 'composite_elemwise_fusion',
'local_elemwise_fusion', 'composite_elemwise_fusion',
'canonicalize', 'inplace')
x, y, z = dmatrices('xyz')
@@ -1346,8 +1334,8 @@ class test_fusion(unittest.TestCase):
def speed_fusion_gpu(self):
import theano.sandbox.cuda as cuda
self.speed_fusion(shared_fn=cuda.
float32_shared_constructor, gpu=True, s=slice(0, 15))
self.speed_fusion(shared_fn=cuda.float32_shared_constructor,
gpu=True, s=slice(0, 15))
def speed_log_exp(self):
s = slice(31, 36)
@@ -1385,13 +1373,15 @@ class test_fusion(unittest.TestCase):
gc.collect()
gc.collect()
gc.collect()
# print 'v1',v1
v1 = weakref.ref(v)
v1 = weakref.ref(v) # noqa
pdb.set_trace()
# no memory leak
# f = orig_function([compile.In(fx),compile.In(variable=fy, value=None)],
# [fy+fx],mode=mode)#no memory leak
f = orig_function([compile.In(fx), compile.In(variable=fy, value=v)],
[fy + fx], mode=mode) # memory leak
# [fy+fx],mode=mode)
# memory leak
f = orig_function( # noqa
[compile.In(fx), compile.In(variable=fy, value=v)],
[fy + fx], mode=mode)
del v
gc.collect()
gc.collect()
@@ -1428,8 +1418,7 @@ class test_fusion(unittest.TestCase):
for x in ndl:
cmp = not isinstance(x, list)
if not cmp and x:
cmp = x[0].__class__.__name__ != \
'array_converter'
cmp = (x[0].__class__.__name__ != 'array_converter')
if cmp:
cmp = x[0] != 'Option'
if cmp:
@@ -1497,13 +1486,13 @@ class TestCompositeCodegen(unittest.TestCase):
self.scal_times_2 = TimesN(2, upgrade_to_float, name='times_2')
self.times_2 = theano.tensor.elemwise.Elemwise(
self.scal_times_2,
name='times_2')
self.scal_times_2,
name='times_2')
self.scal_times_3 = TimesN(3, upgrade_to_float, name='times_3')
self.times_3 = theano.tensor.elemwise.Elemwise(
self.scal_times_3,
name='times_3')
self.scal_times_3,
name='times_3')
self.x = fvector()
@@ -1525,8 +1514,9 @@ class TestCompositeCodegen(unittest.TestCase):
y = self.times_2(self.x)
z = self.times_3(y)
f = theano.function([self.x], cuda.gpu_from_host(z),
mode=theano.compile.mode.get_default_mode().including('gpu'))
f = theano.function(
[self.x], cuda.gpu_from_host(z),
mode=theano.compile.mode.get_default_mode().including('gpu'))
topo = f.maker.fgraph.toposort()
if config.mode != "FAST_COMPILE":
assert len(topo) == 2
@@ -1564,8 +1554,8 @@ def test_log1p():
assert [node.op for node in f.maker.fgraph.toposort()][3:] == [
T.log1p, tensor.alloc]
f = function([x, y], T.log(2 + (x) - tensor.fill(y, 1.0)), mode=m)
assert [node.op for node in f.maker.fgraph.toposort()][3:] \
== [T.log1p, tensor.alloc]
assert ([node.op for node in f.maker.fgraph.toposort()][3:] ==
[T.log1p, tensor.alloc])
f([1e-7, 10], [[0, 0], [0, 0]]) # debugmode will verify values
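# Editor's note, not part of the commit: the rewrite tested here,
# log(1 + x) -> log1p(x), exists because forming 1 + x rounds away the
# low-order bits of a tiny x before the log is taken. Quick check with
# plain numpy:
import numpy
tiny = 1e-7
print(repr(numpy.log(1 + tiny)))   # goes through the rounded sum 1 + tiny
print(repr(numpy.log1p(tiny)))     # avoids forming 1 + tiny; more accurate
# the two values differ in their trailing digits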
@@ -1636,8 +1626,8 @@ def test_local_useless_slice():
f_unopt = theano.function([x], o, mode=mode_unopt)
f_opt = theano.function([x], o, mode=mode_opt)
test_inp = numpy.random.randint(-10, 10, (4, 4)).astype('float32')
assert all(f_opt(test_inp) == f_unopt(test_inp)),\
"The optimization caused a mismatch in the result"
assert all(f_opt(test_inp) == f_unopt(test_inp)),\
"The optimization caused a mismatch in the result"
# test to see if the slice is truly gone
apply_node = f_opt.maker.fgraph.toposort()[0]
subtens = apply_node.op
@@ -1669,6 +1659,7 @@ def test_local_useless_slice():
assert check_stack_trace(f_opt_check, ops_to_check=Subtensor)
assert check_stack_trace(f_opt_check_apply, ops_to_check=Subtensor)
def test_local_useless_inc_subtensor():
x = tensor.matrix('x')
y = tensor.matrix('y')
@@ -1943,7 +1934,7 @@ class test_local_subtensor_make_vector(unittest.TestCase):
v = make_vector(x, y, z)
mode = theano.compile.mode.get_default_mode().including(
"local_subtensor_make_vector")
"local_subtensor_make_vector")
# list of subtensor cases, where local_subtensor_make_vector
# inserts a new MakeVector node
@@ -1977,7 +1968,7 @@ class test_local_subtensor_lift(unittest.TestCase):
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check=[
Subtensor, tensor.Elemwise]))
Subtensor, tensor.Elemwise]))
prog = f.maker.fgraph.toposort()
assert prog[0].op == tensor.exp
@@ -1995,7 +1986,7 @@ class test_local_subtensor_lift(unittest.TestCase):
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check=[
Subtensor, tensor.DimShuffle]))
Subtensor, tensor.DimShuffle]))
prog = f.maker.fgraph.toposort()
assert isinstance(prog[0].op, tensor.Subtensor)
@@ -2004,8 +1995,8 @@ class test_local_subtensor_lift(unittest.TestCase):
assert isinstance(prog[3].op.scalar_op, theano.scalar.
Composite) # Composite{add,add}
assert len(prog) == 4
# let debugmode test something
f([[0, 1], [2, 3]], 4, [[4, 5], [6, 7]])
# let debugmode test something
def test2(self):
# as 1, but take a slice
@@ -2016,7 +2007,7 @@ class test_local_subtensor_lift(unittest.TestCase):
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check=[
Subtensor, tensor.DimShuffle]))
Subtensor, tensor.DimShuffle]))
prog = f.maker.fgraph.toposort()
assert isinstance(prog[0].op, tensor.Subtensor)
@@ -2025,8 +2016,8 @@ class test_local_subtensor_lift(unittest.TestCase):
assert isinstance(prog[3].op.scalar_op, theano.scalar.
Composite) # Composite{add,add}
assert len(prog) == 4
# let debugmode test something
f([[0, 1], [2, 3]], 4, [[4, 5], [6, 7]])
# let debugmode test something
def test3(self):
# basic test that the optimization does work with broadcasting
@@ -2142,7 +2133,7 @@ class test_local_subtensor_lift(unittest.TestCase):
f3 = function([y], newy[:, 3, 0], mode=mode_opt)
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f3, ops_to_check=[
Subtensor, tensor.Rebroadcast]))
Subtensor, tensor.Rebroadcast]))
prog = f3.maker.fgraph.toposort()
assert isinstance(prog[0].op, tensor.Subtensor)
assert isinstance(prog[1].op, tensor.Rebroadcast)
@@ -2155,11 +2146,10 @@ class test_local_subtensor_lift(unittest.TestCase):
newz = tensor.Rebroadcast((3, True))(z)
assert newz.broadcastable == (False, False, False, True)
out = newz[:, 3, 0]
f4 = function([z], newz[:, 3, 0], mode=mode_opt)
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f4, ops_to_check=[
Subtensor, tensor.Rebroadcast]))
Subtensor, tensor.Rebroadcast]))
prog = f4.maker.fgraph.toposort()
assert isinstance(prog[0].op, tensor.Subtensor)
assert isinstance(prog[1].op, tensor.Rebroadcast)
@@ -2208,7 +2198,7 @@ class test_local_subtensor_merge(unittest.TestCase):
f = function([x, y], x[y::][-1], mode=mode_opt)
g = function([x, y], x[y::][-1],
mode=mode_opt.excluding('local_subtensor_merge'))
#theano.printing.debugprint(f, print_type=True)
# theano.printing.debugprint(f, print_type=True)
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))
@@ -2243,7 +2233,7 @@ class test_local_subtensor_merge(unittest.TestCase):
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))
#theano.printing.debugprint(f, print_type=True)
# theano.printing.debugprint(f, print_type=True)
topo = f.maker.fgraph.toposort()
# print [t for t in topo if isinstance(t.op, tensor.Subtensor)]
assert len([t for t in topo
@@ -2269,7 +2259,7 @@ class test_local_subtensor_merge(unittest.TestCase):
f = function([x, y], x[::-1][y], mode=mode_opt)
g = function([x, y], x[::-1][y],
mode=mode_opt.excluding('local_subtensor_merge'))
#theano.printing.debugprint(f, print_type=True)
# theano.printing.debugprint(f, print_type=True)
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))
@@ -2299,7 +2289,7 @@ class test_local_subtensor_merge(unittest.TestCase):
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))
#theano.printing.debugprint(f, print_type=True)
# theano.printing.debugprint(f, print_type=True)
topo = f.maker.fgraph.toposort()
# print [t for t in topo if isinstance(t.op, tensor.Subtensor)]
assert len([t for t in topo
@@ -2320,7 +2310,7 @@ class test_local_subtensor_merge(unittest.TestCase):
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))
#theano.printing.debugprint(f, print_type=True)
# theano.printing.debugprint(f, print_type=True)
topo = f.maker.fgraph.toposort()
# print [t for t in topo if isinstance(t.op, tensor.Subtensor)]
@@ -2344,7 +2334,7 @@ class test_local_subtensor_merge(unittest.TestCase):
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check='all'))
#theano.printing.debugprint(f, print_type=True)
# theano.printing.debugprint(f, print_type=True)
topo = f.maker.fgraph.toposort()
# print [t for t in topo if isinstance(t.op, tensor.Subtensor)]
assert len([t for t in topo
@@ -2366,7 +2356,7 @@ class test_local_subtensor_merge(unittest.TestCase):
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))
#theano.printing.debugprint(f, print_type=True)
# theano.printing.debugprint(f, print_type=True)
topo = f.maker.fgraph.toposort()
# print [t for t in topo if isinstance(t.op, tensor.Subtensor)]
@@ -2398,7 +2388,6 @@ class test_local_subtensor_merge(unittest.TestCase):
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))
x_val = self.rng.uniform(size=shape).astype(config.floatX)
f(x_val)
@@ -2418,7 +2407,7 @@ class test_local_subtensor_merge(unittest.TestCase):
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))
#theano.printing.debugprint(f, print_type=True)
# theano.printing.debugprint(f, print_type=True)
topo = f.maker.fgraph.toposort()
# print [t for t in topo if isinstance(t.op, tensor.Subtensor)]
@@ -2447,7 +2436,7 @@ class test_local_subtensor_merge(unittest.TestCase):
for s2 in s2r:
f(x_val, b1, e1, s1, b2, e2, s2)
def test_const4(self):
def test_const5(self):
# Bug reported by Razvan
data = numpy.asarray(numpy.arange(8),
dtype=theano.config.floatX)
@@ -2460,7 +2449,7 @@ class test_local_subtensor_merge(unittest.TestCase):
val = fun(data)
assert val == data[7:1:-1][0]
def test_const5(self):
def test_const6(self):
# Bug reported by Graham
data = self.rng.uniform(size=(8, 8, 8)).astype(theano.config.floatX)
x = theano.tensor.tensor3('x')
@@ -2506,7 +2495,7 @@ class test_local_subtensor_merge(unittest.TestCase):
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))
#theano.printing.debugprint(f, print_type=True)
# theano.printing.debugprint(f, print_type=True)
topo = f.maker.fgraph.toposort()
# print [t for t in topo if isinstance(t.op, tensor.Subtensor)]
@@ -2762,7 +2751,6 @@ class test_local_adv_sub1_adv_inc_sub1(unittest.TestCase):
dx = numpy.random.rand(4, 5).astype(config.floatX)
dy = numpy.random.rand(2, 5).astype(config.floatX)
didx = numpy.asarray([1, 3], "int32")
# set_subtensor
inc = tensor.set_subtensor(x[idx], y)
@@ -2809,8 +2797,8 @@ class Test_alloc_zero(unittest.TestCase):
y0 = tensor.zeros_like(y)
z = tensor.set_subtensor(x0[:4], y0)
f = theano.function([x, y], z, mode=self.mode)
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in
f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor)
for n in f.maker.fgraph.toposort()])
def test_setsubtensor_allocs1(self):
y = tensor.matrix()
@@ -2819,8 +2807,8 @@ class Test_alloc_zero(unittest.TestCase):
y0 = tensor.zeros_like(y)
z = tensor.set_subtensor(x0[:4], y0)
f = theano.function([y], z, mode=self.mode)
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in
f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor)
for n in f.maker.fgraph.toposort()])
def test_setsubtensor_allocs1t(self):
y = tensor.matrix()
@@ -2829,8 +2817,8 @@ class Test_alloc_zero(unittest.TestCase):
y0 = tensor.zeros_like(y)
z = tensor.set_subtensor(x0[:4], y0.T)
f = theano.function([y], z, mode=mode_opt)
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in
f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor)
for n in f.maker.fgraph.toposort()])
def test_setsubtensor_allocs2(self):
x = tensor.matrix()
@@ -2839,8 +2827,8 @@ class Test_alloc_zero(unittest.TestCase):
x0 = tensor.zeros_like(x)
z = tensor.set_subtensor(x0[:4], y0)
f = theano.function([x], z, mode=self.mode)
assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(x.op, tensor.IncSubtensor)
for x in f.maker.fgraph.toposort()])
def test_incsubtensor_allocs0(self):
x = tensor.matrix()
@@ -2848,8 +2836,8 @@ class Test_alloc_zero(unittest.TestCase):
y0 = tensor.zeros_like(y)
z = tensor.inc_subtensor(x[:4], y0)
f = theano.function([x, y], z, mode=self.mode)
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in
f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor)
for n in f.maker.fgraph.toposort()])
def test_incsubtensor_allocs0t(self):
x = tensor.matrix()
@@ -2857,8 +2845,8 @@ class Test_alloc_zero(unittest.TestCase):
y0 = tensor.zeros_like(y)
z = tensor.inc_subtensor(x[:4], y0.T)
f = theano.function([x, y], z, mode=mode_opt)
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in
f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(n.op, tensor.IncSubtensor)
for n in f.maker.fgraph.toposort()])
def test_incsubtensor_allocs1(self):
x = tensor.matrix()
@@ -2866,8 +2854,8 @@ class Test_alloc_zero(unittest.TestCase):
dtype=config.floatX))
z = tensor.inc_subtensor(x[:4], y0)
f = theano.function([x], z, mode=self.mode)
assert numpy.all([not isinstance(x.op, tensor.IncSubtensor) for x in
f.maker.fgraph.toposort()])
assert numpy.all([not isinstance(x.op, tensor.IncSubtensor)
for x in f.maker.fgraph.toposort()])
def test_advancedincsubtensor1_allocs0(self):
x = tensor.matrix()
@@ -3001,6 +2989,7 @@ def test_local_IncSubtensor_serialize():
tensor.IncSubtensor, tensor.AdvancedIncSubtensor,
tensor.AdvancedIncSubtensor1])
def test_local_set_to_inc_subtensor():
v = theano.tensor.fmatrix()
s = v[[2, 1]]
@@ -3475,8 +3464,6 @@ class Test_local_useless_elemwise_comparison(unittest.TestCase):
x_val = 10
assert f(x_val) == x_val
#def assert_returns
def test_inequality_with_self(self):
x = T.scalar('x', dtype=config.floatX)
mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison')
@@ -3501,10 +3488,11 @@ class Test_local_useless_elemwise_comparison(unittest.TestCase):
def test_shape_inequality_with_self(self):
x = T.vector('x', dtype=config.floatX)
mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison',
'local_shape_to_shape_i',
'local_track_shape_i',
'local_subtensor_make_vector')
mode = theano.compile.get_default_mode().including(
'local_useless_elemwise_comparison',
'local_shape_to_shape_i',
'local_track_shape_i',
'local_subtensor_make_vector')
f = theano.function([x], T.lt(x.shape[0], 0), mode=mode)
self.assert_eqs_const(f, 0)
@@ -3534,22 +3522,23 @@ class Test_local_useless_elemwise_comparison(unittest.TestCase):
assert f(x_val) == 0
f = theano.function([x], T.minimum([0, 0], x.shape[0]), mode=mode)
# This case isn't optimized.
# self.assert_eqs_const(f, 0)
# self.assert_eqs_const(f, 0)
utt.assert_allclose(f(x_val), [0, 0])
def test_shape_add_inequality(self):
x = T.vector('x', dtype=config.floatX)
mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison',
'local_shape_to_shape_i',
'local_track_shape_i',
'local_subtensor_make_vector')
mode = theano.compile.get_default_mode().including(
'local_useless_elemwise_comparison',
'local_shape_to_shape_i',
'local_track_shape_i',
'local_subtensor_make_vector')
y = T.vector('y', dtype=config.floatX)
f = theano.function([x, y], T.lt(x.shape[0]+y.shape[0], 0), mode=mode)
f = theano.function([x, y], T.lt(x.shape[0] + y.shape[0], 0), mode=mode)
self.assert_eqs_const(f, 0)
f = theano.function([x, y], T.ge(x.shape[0]+y.shape[0], 0), mode=mode)
f = theano.function([x, y], T.ge(x.shape[0] + y.shape[0], 0), mode=mode)
self.assert_eqs_const(f, 1)
def test_equality_shapes(self):
@@ -3640,8 +3629,8 @@ class Test_local_canonicalize_alloc(unittest.TestCase):
f = function([], a, mode=mode_opt)
# The optimization should then be applied, and remove Alloc
assert ([node.op for node in f.maker.fgraph.toposort()]
== [deep_copy_op])
assert ([node.op for node in f.maker.fgraph.toposort()] ==
[deep_copy_op])
# In DebugMode, the shape mismatch should be detected
if isinstance(mode_opt, compile.DebugMode):
@@ -3671,7 +3660,7 @@ class Test_local_canonicalize_alloc(unittest.TestCase):
mode = mode_opt.excluding('local_canonicalize_alloc')
x = tensor.matrix('x')
y = tensor.tile(x, (1,)*2)
y = tensor.tile(x, (1,) * 2)
f = function([x], [y], mode=mode)
op_classes = [node.op.__class__ for node in f.maker.fgraph.toposort()]
@@ -3781,7 +3770,6 @@ class Test_local_useless_inc_subtensor_alloc(unittest.TestCase):
self.assertTrue(check_stack_trace(f1, ops_to_check=tensor.AdvancedIncSubtensor))
self.assertTrue(check_stack_trace(f2, ops_to_check=tensor.AdvancedIncSubtensor))
def test_advanced_inc_subtensor1(self):
if tensor.inplace_increment is None:
raise SkipTest('NumPy version >= 1.8 not available')
@@ -3813,7 +3801,7 @@ class Test_local_useless_inc_subtensor_alloc(unittest.TestCase):
# Check stacktrace was copied over correctly after opt was applied
self.assertTrue(check_stack_trace(
f1, ops_to_check=tensor.AdvancedIncSubtensor1))
f1, ops_to_check=tensor.AdvancedIncSubtensor1))
self.assertTrue(check_stack_trace(f2, ops_to_check='all'))
def test_incsubtensor(self):
@@ -4156,6 +4144,7 @@ class test_assert(utt.InferShapeTester):
self._compile_and_check([admat, adscal, bdscal], [out],
[admat_val, adscal_val, bdscal_val], Assert)
def test_local_mul_specialize():
mode = theano.config.mode
if mode == 'FAST_COMPILE':
@@ -4492,7 +4481,6 @@ class T_func_inverse(unittest.TestCase):
mode = theano.compile.get_default_mode()
self.mode = mode.including('local_func_inv')
def assert_func_pair_optimized(self, func1, func2, data,
should_copy=True, is_complex=False):
"""
@@ -4528,7 +4516,7 @@ class T_func_inverse(unittest.TestCase):
dx = numpy.random.rand(5, 4).astype("float32")
self.assert_func_pair_optimized(T.deg2rad, T.rad2deg, dx)
dx = numpy.random.rand(5, 4).astype("float32")*180
dx = numpy.random.rand(5, 4).astype("float32") * 180
self.assert_func_pair_optimized(T.rad2deg, T.deg2rad, dx)
# Test the other functional inverses
@@ -4538,13 +4526,13 @@ class T_func_inverse(unittest.TestCase):
self.assert_func_pair_optimized(T.arctanh, T.tanh, dx)
self.assert_func_pair_optimized(T.inv, T.inv, dx)
self.assert_func_pair_optimized(T.neg, T.neg, dx)
cx = dx + complex(0, 1)*(dx + 0.01)
cx = dx + complex(0, 1) * (dx + 0.01)
self.assert_func_pair_optimized(T.conj, T.conj, cx, is_complex=True)
# Test that non-inverse functions are ran normally
self.assert_func_pair_optimized(T.conj, T.neg, cx,
should_copy=False, is_complex=True)
dx = numpy.random.rand(5, 4).astype("float32")+0.01
dx = numpy.random.rand(5, 4).astype("float32") + 0.01
self.assert_func_pair_optimized(T.rad2deg, T.rad2deg, dx,
should_copy=False)
self.assert_func_pair_optimized(T.rad2deg, T.cosh, dx,
@@ -4590,8 +4578,8 @@ def test_constant_get_stabilized():
f2 = theano.function([x2], y2, mode=mode)
try:
assert len(f2.maker.fgraph.toposort()) == 1
assert f2.maker.fgraph.toposort()[0].op == \
theano.tensor.nnet.sigm.softplus
assert (f2.maker.fgraph.toposort()[0].op ==
theano.tensor.nnet.sigm.softplus)
assert f2(800) == 800
x = T.as_tensor_variable(800)
@@ -4624,14 +4612,16 @@ class T_local_switch_sink(unittest.TestCase):
self.xs = 1.
# expected results
self.resm = [numpy.asarray([[1, 0, 1, 0], [0, 0, 0, 0], [1, 1, 1, 1]])]*3 + [numpy.asarray([[1, 0, 1, 0], [1, 0, 1, 0], [1, 0, 1, 0]])] + \
2*[numpy.asarray([[1, 0, 1, 0]])] + [[numpy.ones((3, 4)), numpy.zeros((3, 4)), numpy.ones((3, 4)), numpy.zeros((3, 4))]] + \
[[numpy.ones((4,)), numpy.zeros((4,)), numpy.ones((4,)), numpy.zeros((4,))]] + \
[[numpy.asarray(1.0), numpy.asarray(
0.0), numpy.asarray(1.0), numpy.asarray(0.0)]]
self.resm = (
[numpy.asarray([[1, 0, 1, 0], [0, 0, 0, 0], [1, 1, 1, 1]])] * 3 +
[numpy.asarray([[1, 0, 1, 0], [1, 0, 1, 0], [1, 0, 1, 0]])] +
2 * [numpy.asarray([[1, 0, 1, 0]])] +
[[numpy.ones((3, 4)), numpy.zeros((3, 4)), numpy.ones((3, 4)), numpy.zeros((3, 4))]] +
[[numpy.ones((4,)), numpy.zeros((4,)), numpy.ones((4,)), numpy.zeros((4,))]] +
[[numpy.asarray(1.0), numpy.asarray(0.0), numpy.asarray(1.0), numpy.asarray(0.0)]])
self.mode = theano.compile.mode.get_default_mode().including(
'canonicalize', 'fast_run').excluding('gpu', 'fusion')
'canonicalize', 'fast_run').excluding('gpu', 'fusion')
self.mode = copy.copy(self.mode)
self.mode.check_isfinite = False
@@ -4640,7 +4630,7 @@ class T_local_switch_sink(unittest.TestCase):
It disables checking
for NaN removed by optimizations in DebugMode (it has false
positives in that case.
positives in that case).
"""
f = theano.function(*args, **kwargs)
@@ -4677,8 +4667,8 @@ class T_local_switch_sink(unittest.TestCase):
self.resm[idx][i])).sum() == self.resm[idx][i].size
else:
res = f(condition[1], x[1], -1)
assert (res == numpy.asarray(self.
resm[idx])).sum() == self.resm[idx].size
assert ((res == numpy.asarray(self.resm[idx])).sum() ==
self.resm[idx].size)
idx += 1
# This case caused a missed optimization in the past.
@@ -4693,33 +4683,34 @@ class T_local_switch_sink(unittest.TestCase):
idx = 0
for condition in [(T.dmatrix('cond'), self.condm), (T.dvector('cond'), self.condv), (T.dscalar('cond'), self.conds)]:
for x in [(T.dmatrix('x'), self.xm), (T.dvector('x'), self.xv), (T.dscalar('x'), self.xs)]:
y = T.true_div(T.switch(condition[0] > 0, 1. *
x[0], 0.*x[0]), T.switch(condition[0] > 0, 1.*x[0], T.log(c)*x[0]))
y = T.true_div(
T.switch(condition[0] > 0, 1. * x[0], 0. * x[0]),
T.switch(condition[0] > 0, 1. * x[0], T.log(c) * x[0]))
f = self.function_remove_nan([condition[0], x[0], c],
[y], mode=self.mode)
if type(condition[1]) is list:
for i in xrange(len(condition[1])):
res = f(condition[1][i], x[1], -1)
assert (res == numpy.
asarray(self.resm[idx][i])).sum() == self.resm[idx][i].size
assert ((res == numpy.asarray(self.resm[idx][i])).sum() ==
self.resm[idx][i].size)
else:
res = f(condition[1], x[1], -1)
assert (res == numpy.asarray(self.
resm[idx])).sum() == self.resm[idx].size
assert ((res == numpy.asarray(self.resm[idx])).sum() ==
self.resm[idx].size)
idx += 1
class T_local_erf(unittest.TestCase):
def setUp(self):
self.mode = theano.compile.mode.get_default_mode().including(
'canonicalize', 'fast_run').excluding('gpu', 'fusion')
'canonicalize', 'fast_run').excluding('gpu', 'fusion')
self.mode._optimizer.position_cutoff = 1.50001
if theano.config.cxx == '' and not theano.scalar.basic_scipy.imported_scipy_special:
raise SkipTest("erf need a c++ compiler or scipy")
def test_local_one_plus_erf(self):
val = numpy.asarray([-30, -3, -2, -1, 0, 1, 2, 3, 30],
dtype=config.floatX)
dtype=config.floatX)
x = T.vector()
f = theano.function([x], 1 + T.erf(x), mode=self.mode)
@@ -4746,18 +4737,18 @@ class T_local_erf(unittest.TestCase):
x = T.vector()
f = theano.function([x], 1 - T.erf(x), mode=self.mode)
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc]\
, f.maker.fgraph.toposort()
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc],\
f.maker.fgraph.toposort()
print(f(val))
f = theano.function([x], 1 + (-T.erf(x)), mode=self.mode)
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc]\
, f.maker.fgraph.toposort()
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc],\
f.maker.fgraph.toposort()
print(f(val))
f = theano.function([x], (-T.erf(x)) + 1, mode=self.mode)
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc]\
, f.maker.fgraph.toposort()
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc],\
f.maker.fgraph.toposort()
print(f(val))
f = theano.function([x], 2 - T.erf(x), mode=self.mode)
@@ -4771,7 +4762,7 @@ class T_local_erf(unittest.TestCase):
def test_local_erf_minus_one(self):
val = numpy.asarray([-30, -3, -2, -1, 0, 1, 2, 3, 30],
dtype=config.floatX)
dtype=config.floatX)
x = T.vector()
f = theano.function([x], T.erf(x) - 1, mode=self.mode)
@@ -4803,24 +4794,24 @@ class T_local_erfc(unittest.TestCase):
self.mode = self.mode_fusion.excluding('fusion')
self.mode._optimizer.position_cutoff = 1.50001
if (theano.config.cxx == '' and
not theano.scalar.basic_scipy.imported_scipy_special):
not theano.scalar.basic_scipy.imported_scipy_special):
raise SkipTest("erfc need a c++ compiler or scipy")
def test_local_one_minus_erfc(self):
""" test opt: 1-erfc(x) => erf(x) and -erfc(x)+1 => erf(x)
"""
val = numpy.asarray([-30, -3, -2, -1, 0, 1, 2, 3, 30],
dtype=config.floatX)
dtype=config.floatX)
x = T.vector('x')
f = theano.function([x], 1 - T.erfc(x), mode=self.mode)
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\
, f.maker.fgraph.toposort()
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf],\
f.maker.fgraph.toposort()
print(f(val))
f = theano.function([x], (-T.erfc(x)) + 1, mode=self.mode)
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\
, f.maker.fgraph.toposort()
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf],\
f.maker.fgraph.toposort()
print(f(val))
f = theano.function([x], 2 - T.erfc(x), mode=self.mode)
@@ -4828,34 +4819,34 @@ class T_local_erfc(unittest.TestCase):
assert len(topo) == 2, f.maker.fgraph.toposort()
assert topo[0].op == T.erfc, f.maker.fgraph.toposort()
assert isinstance(topo[1].op, T.Elemwise), f.maker.fgraph.toposort()
assert isinstance(topo[1].op.scalar_op, scal.Sub)\
, f.maker.fgraph.toposort()
assert isinstance(topo[1].op.scalar_op, scal.Sub),\
f.maker.fgraph.toposort()
print(f(val))
def test_local_erf_neg_minus_one(self):
""" test opt: (-1)+erfc(-x)=>erf(x)"""
val = numpy.asarray([-30, -3, -2, -1, 0, 1, 2, 3, 30],
dtype=config.floatX)
dtype=config.floatX)
x = T.vector('x')
f = theano.function([x], -1 + T.erfc(-x), mode=self.mode)
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\
, f.maker.fgraph.toposort()
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf],\
f.maker.fgraph.toposort()
print(f(val))
f = theano.function([x], T.erfc(-x) - 1, mode=self.mode)
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\
, f.maker.fgraph.toposort()
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf],\
f.maker.fgraph.toposort()
print(f(val))
f = theano.function([x], T.erfc(-x) + (-1), mode=self.mode)
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\
, f.maker.fgraph.toposort()
assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf],\
f.maker.fgraph.toposort()
print(f(val))
def test_local_log_erfc(self):
val = [-30, -27, -26, -11, -10, -3, -2, -1, 0, 1, 2, 3, 10,
11, 26, 27, 28, 30]
11, 26, 27, 28, 30]
if theano.config.mode in ["DebugMode", "DEBUG_MODE", "FAST_COMPILE"]:
# python mode doesn't like the inv(0)
val.remove(0)
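# Editor's sketch, not part of the commit: why log(erfc(x)) needs a
# stabilization rewrite at all. erfc(x) ~ exp(-x**2) / (x * sqrt(pi)), so
# it underflows float64 around x ~ 27-28, exactly the region the `val`
# list above probes. Assuming scipy is available:
from scipy.special import erfc
print(erfc(26.0))  # ~1e-295: still representable, log() stays finite
print(erfc(28.0))  # 0.0 after underflow; an unstabilized log() gives -inf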
@@ -4894,9 +4885,11 @@ class T_local_erfc(unittest.TestCase):
def test_local_grad_log_erfc_neg(self):
val = [-100, -30, -27, -26.4, -26.2, -26, -11, -10, -9, -3, -2, -1, 0,
1, 2, 3, 9, 10, 11, 27, 26.4, 26.2, 26, 28, 30, 100]
1, 2, 3, 9, 10, 11, 27, 26.4, 26.2, 26, 28, 30, 100]
if theano.config.mode in ["DebugMode", "DEBUG_MODE", "FAST_COMPILE"]:
# python mode don't like the inv(0) in computation, but the switch don't select this value. So it is computed for no good reason.
# python mode doesn't like the inv(0) in the computation,
# but the switch doesn't select this value,
# so it is computed for no good reason.
val.remove(0)
if theano.config.mode in ["DebugMode", "DEBUG_MODE"] and theano.config.floatX == 'float32':
# In float32 there is a range of values close to 10 that we stabilize, as the unstabilized form gives a bigger error than the stabilized version.
@@ -4918,8 +4911,10 @@ class T_local_erfc(unittest.TestCase):
assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX
# test with a different mul constant
f = theano.function([x], T.mul(T.exp(T.neg(T.sqr(x))), -
10.12837917) / T.erfc(x), mode=mode)
f = theano.function(
[x],
T.mul(T.exp(T.neg(T.sqr(x))), - 10.12837917) / T.erfc(x),
mode=mode)
assert len(f.maker.fgraph.apply_nodes) == 23, len(f.maker.fgraph.apply_nodes)
assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX
assert all(numpy.isfinite(f(val)))
@@ -4944,14 +4939,12 @@ class T_local_erfc(unittest.TestCase):
assert all(numpy.isfinite(f(val)))
# test that it works correctly if x is x*2 in the graph.
f = theano.function([x], T.grad(T.log(T.erfc(2 * x)).sum(),
x), mode=mode)
f = theano.function([x], T.grad(T.log(T.erfc(2 * x)).sum(), x), mode=mode)
assert len(f.maker.fgraph.apply_nodes) == 23, len(f.maker.fgraph.apply_nodes)
assert numpy.isfinite(f(val)).all()
assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX
f = theano.function([x], T.grad(T.log(T.erfc(x)).sum(), x),
mode=mode_fusion)
f = theano.function([x], T.grad(T.log(T.erfc(x)).sum(), x), mode=mode_fusion)
assert len(f.maker.fgraph.apply_nodes) == 1, len(f.maker.fgraph.apply_nodes)
assert f.maker.fgraph.outputs[0].dtype == theano.config.floatX
@@ -4973,8 +4966,8 @@ class T_local_erfc(unittest.TestCase):
val = numpy.random.rand(1e6)
x = T.vector()
mode = theano.compile.mode.get_mode("FAST_RUN")
f1 = theano.function([x], T.log(T.erfc(x)), mode=mode.
excluding("local_log_erfc"))
f1 = theano.function([x], T.log(T.erfc(x)),
mode=mode.excluding("local_log_erfc"))
f2 = theano.function([x], T.log(T.erfc(x)), mode=mode)
print(f1.maker.fgraph.toposort())
print(f2.maker.fgraph.toposort())
@@ -4999,9 +4992,9 @@ class test_local_useless_switch(unittest.TestCase):
z = theano.tensor.switch(0, x, y)
f = theano.function([x, y], z, mode=self.mode)
assert len([node.op for node in f.maker.fgraph.toposort() if
(isinstance(node.op, theano.tensor.Elemwise)
and isinstance(node.op.scalar_op,
theano.scalar.basic.Switch))]) == 0
(isinstance(node.op, theano.tensor.Elemwise) and
isinstance(node.op.scalar_op,
theano.scalar.basic.Switch))]) == 0
vx = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype1)
vy = numpy.array([[7, 8, 9], [10, 11, 12]], dtype=dtype2)
assert numpy.all(f(vx, vy) == vy)
@@ -5015,9 +5008,9 @@ class test_local_useless_switch(unittest.TestCase):
z = theano.tensor.switch(1, x, y)
f = theano.function([x, y], z, mode=self.mode)
assert len([node.op for node in f.maker.fgraph.toposort() if
(isinstance(node.op, theano.tensor.Elemwise)
and isinstance(node.op.scalar_op,
theano.scalar.basic.Switch))]) == 0
(isinstance(node.op, theano.tensor.Elemwise) and
isinstance(node.op.scalar_op,
theano.scalar.basic.Switch))]) == 0
vx = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype1)
vy = numpy.array([[7, 8, 9], [10, 11, 12]], dtype=dtype2)
assert numpy.all(f(vx, vy) == vx)
@@ -5032,7 +5025,7 @@ class test_local_useless_switch(unittest.TestCase):
z2 = theano.tensor.switch(varc, x, x)
f1 = theano.function([x], z1, mode=self.mode)
f0 = theano.function([x], z0, mode=self.mode)
f2 = theano.function([x,varc], z2, mode=self.mode)
f2 = theano.function([x, varc], z2, mode=self.mode)
topo = f1.maker.fgraph.toposort()
assert len(topo) == 1
@@ -5050,7 +5043,7 @@ class test_local_useless_switch(unittest.TestCase):
vc = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype1)
assert numpy.all(f1(vx) == vx)
assert numpy.all(f0(vx) == vx)
assert numpy.all(f2(vx,vc) == vx)
assert numpy.all(f2(vx, vc) == vx)
def test_shape_le_0(self):
@@ -5064,11 +5057,10 @@ class test_local_useless_switch(unittest.TestCase):
f1 = theano.function([x], z1, mode=self.mode)
assert isinstance(f1.maker.fgraph.toposort()[0].op, Shape_i)
vx = numpy.random.randn(0,5).astype(dtype1)
vx = numpy.random.randn(0, 5).astype(dtype1)
assert f0(vx) == 0
assert f1(vx) == 5
def test_broadcast1(self):
# test switch(cst, matrix, row)
x = theano.tensor.matrix('x', dtype='int32')
@@ -5197,7 +5189,7 @@ class T_local_sum_prod(unittest.TestCase):
# ensuring that the optimized graph contains the expected number
# of apply nodes for the sum op
prod_nodes = [n for n in f.maker.fgraph.toposort()
if isinstance(n.op, reduction_op)]
if isinstance(n.op, reduction_op)]
assert len(prod_nodes) == nb_expected_sum_nodes
# Test sum
@@ -5391,9 +5383,9 @@ class T_local_sum_prod(unittest.TestCase):
dtype='float64')
mode = self.mode.including('specialize').excluding('fusion')
for t_like, n_like, nb_nodes in [(tensor.zeros_like, numpy.zeros_like, (1, 3, 3, 2)),
(tensor.ones_like, numpy.ones_like, (5, 5, 5, 6))]:
for t_like, n_like, nb_nodes in [
(tensor.zeros_like, numpy.zeros_like, (1, 3, 3, 2)),
(tensor.ones_like, numpy.ones_like, (5, 5, 5, 6))]:
# test sum
f = theano.function([a], t_like(a).sum(None), mode=mode)
utt.assert_allclose(f(input), n_like(input).sum())
@@ -5421,23 +5413,23 @@ class T_local_sum_prod(unittest.TestCase):
# test prod
f = theano.function([a], t_like(a).prod(None), mode=mode)
utt.assert_allclose(f(input), n_like(input).prod())
#assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0]
# assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0]
f = theano.function([a], t_like(a).prod([0, 1, 2]), mode=mode)
utt.assert_allclose(f(input), n_like(input).prod())
#assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0]
# assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0]
for d in range(3):
f = theano.function([a], t_like(a).prod(d), mode=mode)
utt.assert_allclose(f(input), n_like(input).prod(d))
#assert len(f.maker.fgraph.apply_nodes) == nb_nodes[1]
# assert len(f.maker.fgraph.apply_nodes) == nb_nodes[1]
topo = f.maker.fgraph.toposort()
assert topo[-1].op == T.alloc
assert not any([isinstance(node.op, T.elemwise.Prod) for node in topo])
for i in range(3):
f = theano.function([a], t_like(a).prod(i), mode=mode)
utt.assert_allclose(f(input), n_like(input).prod(i))
#assert len(f.maker.fgraph.apply_nodes) == nb_nodes[2]
# assert len(f.maker.fgraph.apply_nodes) == nb_nodes[2]
topo = f.maker.fgraph.toposort()
assert topo[-1].op == T.alloc
assert not any([isinstance(node.op, T.elemwise.Prod) for node in topo])
@@ -5449,7 +5441,7 @@ class T_local_sum_prod(unittest.TestCase):
f = theano.function([a], t_like(a).
sum(d).sum(dd), mode=mode)
utt.assert_allclose(f(input),
n_like(input).sum(d).sum(dd))
n_like(input).sum(d).sum(dd))
assert len(f.maker.fgraph.apply_nodes) == nb_nodes[3]
topo = f.maker.fgraph.toposort()
assert topo[-1].op == T.alloc
@@ -5470,7 +5462,7 @@ class T_local_sum_prod(unittest.TestCase):
config.on_opt_error = 'raise'
try:
# This compilation would fail prior to fix.
f = theano.function([x], y)
theano.function([x], y)
finally:
config.on_opt_error = backup
@@ -5484,7 +5476,7 @@ class T_local_sum_prod(unittest.TestCase):
config.on_opt_error = 'raise'
try:
# This compilation would fail prior to fix.
f = theano.function([x], y)
theano.function([x], y)
finally:
config.on_opt_error = backup
@@ -5698,14 +5690,14 @@ class T_local_sum_prod_dimshuffle(unittest.TestCase):
for i, s in enumerate(sums):
print(i)
f = theano.function([a, b, c, d], s, mode=self.mode,
on_unused_input='ignore')
on_unused_input='ignore')
g = f.maker.fgraph.toposort()
assert isinstance(g[-1].op.scalar_op,
theano.scalar.basic.TrueDiv)
f(a_val, b_val, c_val, d_val)
finally:
config.warn.sum_sum_bug, config.warn.sum_div_dimshuffle_bug =\
backup
backup
def test_local_prod_div_dimshuffle(self):
a = T.matrix('a')
@@ -5827,19 +5819,18 @@ class TestMakeVector(utt.InferShapeTester):
# Should work
for (dtype, inputs) in [("int8", (b, b)),
("int32", (i, b)),
("int32", (b, i)),
("float64", (b, i)),
("float64", (b, d)),
("float64", (d, i)),
("float64", ()),
("int64", ()),
]:
("int32", (i, b)),
("int32", (b, i)),
("float64", (b, i)),
("float64", (b, d)),
("float64", (d, i)),
("float64", ()),
("int64", ()),
]:
mv = opt.MakeVector(dtype=dtype)(*inputs)
assert mv.dtype == dtype
f = theano.function([b, i, d], mv, on_unused_input='ignore')
f_val = f(val[b], val[i], val[d])
# print 'f_val =', f_val
f(val[b], val[i], val[d])
s = mv.sum()
gb = T.grad(s, b, disconnected_inputs='ignore')
......@@ -5888,13 +5879,13 @@ class TestMakeVector(utt.InferShapeTester):
# should fail
for (dtype, inputs) in [("int8", (b, i)),
("int8", (i, b)),
("int8", (b, d)),
("int8", (i, i)),
("int32", (d, i)),
("int32", (i, d)),
("float32", (i, d)),
]:
("int8", (i, b)),
("int8", (b, d)),
("int8", (i, i)),
("int32", (d, i)),
("int32", (i, d)),
("float32", (i, d)),
]:
try:
opt.MakeVector(dtype=dtype)(*inputs)
raise Exception("Theano should have raised an error")
@@ -5915,17 +5906,17 @@ class TestMakeVector(utt.InferShapeTester):
ciscal_val = numpy.random.randint(10)
discal_val = numpy.random.randint(10)
self._compile_and_check([adscal, aiscal],
[MakeVector('float64')(adscal, aiscal)],
[adscal_val, aiscal_val], MakeVector)
[MakeVector('float64')(adscal, aiscal)],
[adscal_val, aiscal_val], MakeVector)
self._compile_and_check([adscal, bdscal, aiscal],
[MakeVector('float64')(adscal, bdscal, aiscal)],
[adscal_val, bdscal_val, aiscal_val], MakeVector)
[MakeVector('float64')(adscal, bdscal, aiscal)],
[adscal_val, bdscal_val, aiscal_val], MakeVector)
self._compile_and_check([aiscal, biscal, ciscal, discal],
[MakeVector('int32')(aiscal, biscal, ciscal, discal)],
[aiscal_val, biscal_val, ciscal_val, discal_val],
MakeVector)
[MakeVector('int32')(aiscal, biscal, ciscal, discal)],
[aiscal_val, biscal_val, ciscal_val, discal_val],
MakeVector)
def test_local_join_1():
@@ -5982,7 +5973,6 @@ def test_local_join_empty():
for n in e if isinstance(n.op, Join)])
assert f.maker.fgraph.outputs[0].dtype == config.floatX
# test for matrix join(1,a)
empty_mat = numpy.asarray([[]], dtype=config.floatX)
m = tensor.matrix('m')
@@ -6059,10 +6049,10 @@ def test_local_add_specialize():
def test_local_tensor_scalar_tensor():
dtypes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64',
'complex64', 'complex128'
]
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64',
'complex64', 'complex128'
]
for dtype in dtypes:
t_type = TensorType(dtype=dtype, broadcastable=())
@@ -6073,18 +6063,18 @@ def test_local_tensor_scalar_tensor():
f = function([t], t2, mode=mode_opt)
e = f.maker.fgraph.toposort()
cast_nodes = [n for n in e
if isinstance(n.op, (tensor.TensorFromScalar,
tensor.ScalarFromTensor))]
if isinstance(n.op, (tensor.TensorFromScalar,
tensor.ScalarFromTensor))]
assert len(cast_nodes) == 0
f(0)
def test_local_scalar_tensor_scalar():
dtypes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64',
'complex64', 'complex128'
]
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64',
'complex64', 'complex128'
]
for dtype in dtypes:
s_type = theano.scalar.Scalar(dtype=dtype)
@@ -6095,8 +6085,8 @@ def test_local_scalar_tensor_scalar():
f = function([s], s2, mode=mode_opt)
e = f.maker.fgraph.toposort()
cast_nodes = [n for n in e
if isinstance(n.op, (tensor.TensorFromScalar,
tensor.ScalarFromTensor))]
if isinstance(n.op, (tensor.TensorFromScalar,
tensor.ScalarFromTensor))]
assert len(cast_nodes) == 0
f(0)
@@ -6127,13 +6117,13 @@ def test_local_useless_split():
f_opt = theano.function([x, splits], opt, mode=mode)
f_nonopt = theano.function([x, splits], nonopt, mode=mode)
f_opt(numpy.random.rand(4,4).astype(config.floatX), [4])
f_nonopt(numpy.random.rand(4,4).astype(config.floatX), [1,2,1])
f_opt(numpy.random.rand(4, 4).astype(config.floatX), [4])
f_nonopt(numpy.random.rand(4, 4).astype(config.floatX), [1, 2, 1])
graph_opt = f_opt.maker.fgraph.toposort()
graph_nonopt = f_nonopt.maker.fgraph.toposort()
assert isinstance(graph_opt[-1].op, DeepCopyOp)
assert len(graph_nonopt)==1
assert len(graph_nonopt) == 1
assert isinstance(graph_nonopt[0].op, tensor.Split)
assert check_stack_trace(f_opt, ops_to_check=[Assert])
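# Editor's sketch, not part of the commit: what local_useless_split does.
# A Split with n_splits=1 returns its whole input, so the rewrite drops the
# Split and leaves an Assert on the split sizes plus a deep copy, which is
# what the assertions above check. A standalone version, assuming a
# standard Theano install:
import numpy
import theano
from theano import tensor
from theano.compile.ops import DeepCopyOp
x = tensor.matrix('x')
s = tensor.ivector('s')
out = tensor.split(x, s, n_splits=1)
split_mode = theano.compile.get_default_mode().including('local_useless_split')
f = theano.function([x, s], out, mode=split_mode)
f(numpy.random.rand(4, 3).astype(theano.config.floatX), [4])
# mirrors the assertion above: the optimized graph ends in a DeepCopyOp
assert isinstance(f.maker.fgraph.toposort()[-1].op, DeepCopyOp)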
@@ -6151,12 +6141,12 @@ def test_local_flatten_lift():
x_np = numpy.random.rand(5, 4, 3, 2).astype(config.floatX)
out_np = f(x_np)
topo = f.maker.fgraph.toposort()
shape_out_np = tuple(x_np.shape[:i-1])+(numpy.prod(x_np.shape[i-1:]),)
shape_out_np = tuple(x_np.shape[:i - 1]) + (numpy.prod(x_np.shape[i - 1:]),)
assert shape_out_np == out_np.shape
reshape_nodes = [n for n in topo if isinstance(n.op, tensor.Reshape)]
assert (len(reshape_nodes) == 1 and
tensor.is_flat(reshape_nodes[0].outputs[0], outdim=i))
tensor.is_flat(reshape_nodes[0].outputs[0], outdim=i))
assert isinstance(topo[-1].op, tensor.Elemwise)
@@ -6182,7 +6172,7 @@ class Test_local_useless_reshape(unittest.TestCase):
mode = theano.compile.get_default_mode().including(
'local_useless_reshape')
i = T.iscalar('i')
m = theano.tensor.mgrid[0:i,]
m = theano.tensor.mgrid[0:i, ]
f = theano.function([i], m, mode=mode)
topo = f.maker.fgraph.toposort()
assert not any(isinstance(n.op, tensor.basic.Reshape) for n in topo)
@@ -6343,10 +6333,10 @@ class TestShape_i(utt.InferShapeTester):
admat = matrix()
admat_val = numpy.random.rand(3, 4).astype(config.floatX)
self._compile_and_check([admat], [Shape_i(0)(admat)],
[admat_val], Shape_i)
[admat_val], Shape_i)
self._compile_and_check([admat], [Shape_i(1)(admat)],
[admat_val], Shape_i)
[admat_val], Shape_i)
class TestShapeFeature(unittest.TestCase):
@@ -6435,7 +6425,7 @@ class TestIntDivByOne(unittest.TestCase):
"""Simple test case for removing dividing by 1"""
y = T.tensor4('y')
z = y // 1
f = theano.function([y], z, mode = self.mode)
f = theano.function([y], z, mode=self.mode)
graph = f.maker.fgraph.toposort()
divs = [node for node in graph
if isinstance(node.op, T.elemwise.Elemwise) and
@@ -6445,7 +6435,7 @@ class TestIntDivByOne(unittest.TestCase):
def test3(self):
"""Simple test case for removing dividing by a tensor of ones"""
y = T.tensor4('y')
z = y // numpy.ones((2,2,2,2))
z = y // numpy.ones((2, 2, 2, 2))
f = theano.function([y], z, mode=self.mode)
graph = f.maker.fgraph.toposort()
divs = [node for node in graph
@@ -6456,7 +6446,6 @@ class TestIntDivByOne(unittest.TestCase):
def test_local_zero_div():
"""Tests 0/x -> 0"""
mode = theano.compile.mode.get_default_mode().including("local_zero_div")
for t in (T.scalar, T.ivector, T.ftensor4):
x = t('x')
for op in (T.int_div, T.true_div):