Commit 6df579b2 authored by lamblin

Merge pull request #1379 from nouiz/fix_opt_crash

Fix an optimization error when trying to index a Python list with a Type object.
......@@ -64,13 +64,13 @@ def check_equal(x, y):
# Mode, it will be used as the key to retrieve the real linker in this
# dictionary
predefined_linkers = {
'py': gof.PerformLinker(),
'c': gof.CLinker(),
'c|py': gof.OpWiseCLinker(),
'py': gof.PerformLinker(), # Use allow_gc Theano flag
'c': gof.CLinker(), # Don't support gc. so don't check allow_gc
'c|py': gof.OpWiseCLinker(), # Use allow_gc Theano flag
'c|py_nogc': gof.OpWiseCLinker(allow_gc=False),
'c&py': gof.DualLinker(checker=check_equal),
'vm': gof.vm.VM_Linker(use_cloop=False),
'cvm': gof.vm.VM_Linker(use_cloop=True),
'c&py': gof.DualLinker(checker=check_equal), # Deprecated
'vm': gof.vm.VM_Linker(use_cloop=False), # Use allow_gc Theano flag
'cvm': gof.vm.VM_Linker(use_cloop=True), # Use allow_gc Theano flag
'vm_nogc': gof.vm.VM_Linker(allow_gc=False, use_cloop=False),
'cvm_nogc': gof.vm.VM_Linker(allow_gc=False, use_cloop=True),
}
......
......@@ -1502,8 +1502,7 @@ class OpWiseCLinker(link.LocalLinker):
finally:
node.op._op_use_c_code = old_value
for node_idx, node in enumerate(order):
for node in order:
if self.allow_gc:
post_thunk_old_storage.append([storage_map[input]
for input in node.inputs
......
"""WRITEME"""
import theano
from theano.gof import utils
from theano.gof import graph
from theano.gof.type import Type
......@@ -419,8 +420,9 @@ class PerformLinker(LocalLinker):
the L{FunctionGraph} in the order given by L{Linker.schedule}.
"""
def __init__(self, allow_gc=True, schedule=None):
#TODO: set allow_gc = True by default, when it works with the OpWiseCLinker
def __init__(self, allow_gc=None, schedule=None):
if allow_gc is None:
allow_gc = theano.config.allow_gc
self.fgraph = None
self.allow_gc = allow_gc
if schedule:
......
......@@ -609,6 +609,7 @@ def get_scalar_constant_value(v):
ret = get_scalar_constant_value(ret)
# join can cast implicitly its input in some case.
return theano._asarray(ret, dtype=v.type.dtype)
if (v.owner.inputs[0].owner and
isinstance(v.owner.inputs[0].owner.op,
theano.tensor.opt.MakeVector) and
......@@ -616,8 +617,10 @@ def get_scalar_constant_value(v):
# We put this check in case there is change in the future
python_all(var.ndim == 0 for var in
v.owner.inputs[0].owner.inputs) and
len(v.owner.op.idx_list) == 1):
len(v.owner.op.idx_list) == 1 and
#idx_list can contain Scalar Type object.
isinstance(v.owner.op.idx_list[0], (int, long,
numpy.integer))):
ret = v.owner.inputs[0].owner.inputs[v.owner.op.idx_list[0]]
ret = get_scalar_constant_value(ret)
# MakeVector can cast implicitly its input in some case.
......
......@@ -4330,59 +4330,122 @@ class T_Join_and_Split(unittest.TestCase):
class test_comparison(unittest.TestCase):
    """Test <, >, <=, >=, == and !=.

    Test that we can do the comparison with different combinations of
    tensor (shared and constant variable) with ndarray.  ndarray cmp
    tensor was crashing.  In a NumPy PR (should be in the NumPy 1.8
    release), it will work.  So we assert that it works (future
    behavior) or raises an error (current NumPy release).
    """
    def _test_cmp(self, theano_op, numpy_op):
        """Shared driver for all the comparison tests.

        :param theano_op: callable (a, b) -> symbolic comparison result.
        :param numpy_op: callable (a, b) -> expected ndarray result.
        """
        for dtype in ['float64', 'float32', 'complex64', 'complex128']:
            x, y = vector(dtype=dtype), vector(dtype=dtype)
            fn = inplace_func([x, y], theano_op(x, y))
            l = numpy.asarray([0., -1., 1.], dtype=dtype)
            r = numpy.asarray([0., 1., -1.], dtype=dtype)
            expected = numpy_op(l, r)
            v = fn(l, r)
            self.assertTrue(numpy.all(v == expected), (v, expected))

            # Mixing a raw ndarray on the left with a Theano variable on
            # the right can raise TypeError with the current NumPy
            # release; `err` flags the combinations where that is the
            # accepted outcome.
            for xx, yy, err in [
                (shared(l.astype(dtype)), shared(r.astype(dtype)), False),
                (l, shared(r.astype(dtype)), True),
                (tensor.constant(l), shared(r.astype(dtype)), False),
                (shared(l.astype(dtype)), r, False),
                (shared(l.astype(dtype)), tensor.constant(r), False),
            ]:
                try:
                    fn = inplace_func([], theano_op(xx, yy))
                    v = fn()
                    self.assertTrue(numpy.all(v == expected), (v, expected))
                except TypeError:
                    assert err

    def test_gt(self):
        self._test_cmp(lambda a, b: a > b, lambda a, b: a > b)

    def test_lt(self):
        self._test_cmp(lambda a, b: a < b, lambda a, b: a < b)

    def test_le(self):
        self._test_cmp(lambda a, b: a <= b, lambda a, b: a <= b)

    def test_ge(self):
        self._test_cmp(lambda a, b: a >= b, lambda a, b: a >= b)

    def test_eq(self):
        self._test_cmp(eq, lambda a, b: a == b)

    def test_neq(self):
        self._test_cmp(neq, lambda a, b: a != b)
class test_bitwise(unittest.TestCase):
......@@ -6668,6 +6731,24 @@ class T_get_scalar_constant_value(unittest.TestCase):
get_scalar_constant_value,
numpy.array([]))
def test_make_vector(self):
    """Indexing a constant MakeVector with a constant integer index
    must resolve to the scalar constant; other cases must raise
    NotScalarConstantError."""
    mv = opt.make_vector(1, 2, 3)

    # The MakeVector output itself is a vector, not a scalar constant.
    self.assertRaises(
        tensor.NotScalarConstantError,
        get_scalar_constant_value,
        mv)

    # Constant indices — plain Python ints and NumPy integer types —
    # must all be looked through to the underlying scalar.
    for idx, expected in [(0, 1),
                          (1, 2),
                          (2, 3),
                          (numpy.int8(0), 1),
                          (numpy.int64(1), 2),
                          (numpy.uint(2), 3)]:
        assert get_scalar_constant_value(mv[idx]) == expected

    # A symbolic (Scalar Type) index is not a constant: this must
    # raise NotScalarConstantError instead of crashing.
    t = theano.scalar.Scalar('int64')
    self.assertRaises(
        tensor.NotScalarConstantError,
        get_scalar_constant_value,
        mv[t()])
class T_as_tensor_variable(unittest.TestCase):
"""
......
Markdown format supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment