Commit 00b55dc2, authored by Arnaud Bergeron

Fix test errors.

Parent commit: 213e07c8
from __future__ import absolute_import, print_function, division
from .config import test_ctx_name, mode_with_gpu
from ..basic_ops import (HostFromGpu, GpuFromHost)
from ..type import (get_context, GpuArrayType, GpuArraySharedVariable,
gpuarray_shared_constructor)
......@@ -10,12 +11,13 @@ import numpy as np
from theano.misc.tests.test_may_share_memory import may_share_memory_core
from theano.misc.pkl_utils import dump, load
from theano.tensor.tests.test_opt import test_fusion as t_fusion
from theano.tensor.tests import test_opt
class test_fusion(t_fusion):
class test_fusion(test_opt.test_fusion):
mode = mode_with_gpu
shared = gpuarray_shared_constructor
_shared = staticmethod(gpuarray_shared_constructor)
topo_exclude = (GpuFromHost, HostFromGpu)
def test_may_share_memory():
......
......@@ -94,8 +94,8 @@ test_shared_options = makeSharedTester(
test_internal_type_=lambda a: isinstance(a, pygpu.gpuarray.GpuArray),
theano_fct_=theano.tensor.exp,
ref_fct_=np.exp,
cast_value_=lambda v: pygpu.asarray(v, context=get_context(test_ctx_name),
cls=pygpu._array.ndgpuarray),
cast_value_=lambda v: pygpu.array(v, context=get_context(test_ctx_name),
cls=pygpu._array.ndgpuarray),
name='test_shared_options')
......@@ -113,6 +113,6 @@ test_shared_options2 = makeSharedTester(
test_internal_type_=lambda a: isinstance(a, pygpu.gpuarray.GpuArray),
theano_fct_=theano.tensor.exp,
ref_fct_=np.exp,
cast_value_=lambda v: pygpu.asarray(v, context=get_context(test_ctx_name),
cls=pygpu._array.ndgpuarray),
cast_value_=lambda v: pygpu.array(v, context=get_context(test_ctx_name),
cls=pygpu._array.ndgpuarray),
name='test_shared_options2')
......@@ -27,11 +27,6 @@ from theano.compat import PY3
from six import string_types
from theano.compile.sharedvalue import SharedVariable
try:
import pygpu
except ImportError:
pygpu = None
__docformat__ = "restructuredtext en"
__authors__ = "Pascal Lamblin"
__copyright__ = "Copyright 2013, Universite de Montreal"
......@@ -204,10 +199,17 @@ class PersistentNdarrayID(object):
class PersistentGpuArrayID(PersistentNdarrayID):
    """Persistent-ID callable that serializes pygpu ``GpuArray`` objects.

    Extends :class:`PersistentNdarrayID` so that GpuArray values are written
    into the zip file as the pickled context name followed by the raw array
    data (matching what the load side reads back with ``pickle.load`` and
    then ``np.lib.format.read_array``).
    """

    def __call__(self, obj):
        # Imported lazily so merely loading this module does not require a
        # working GPU backend.
        from theano.gpuarray.type import _name_for_ctx
        try:
            import pygpu
        except ImportError:
            # pygpu is optional; without it we simply never match GpuArrays.
            pygpu = None
        if (pygpu and
                isinstance(obj, pygpu.gpuarray.GpuArray)):
            if id(obj) not in self.seen:
                def write_array(f):
                    # Write the context name first, then the array payload,
                    # so the loader can do pickle.load(f) followed by
                    # np.lib.format.read_array(f).
                    # BUGFIX: pickle.dump takes (obj, file, protocol); the
                    # original passed the file object first, which raises at
                    # runtime (a str has no .write method).
                    pickle.dump(_name_for_ctx(obj.context), f, 2)
                    # Transfer to host memory before writing in npy format.
                    np.lib.format.write_array(f, np.asarray(obj))
                name = self._resolve_name(obj)
                zipadd(write_array, self.zip_file, name)
......@@ -282,24 +284,28 @@ class PersistentNdarrayLoad(object):
self.cache = {}
def __call__(self, persid):
from theano.gpuarray.type import get_context
array_type, name = persid.split('.')
if name in self.cache:
return self.cache[name]
ret = None
array = np.lib.format.read_array(self.zip_file.open(name))
if array_type == 'gpuarray':
with self.zip_file.open(name) as f:
ctx_name = pickle.load(f)
array = np.lib.format.read_array(f)
if config.experimental.unpickle_gpu_on_cpu:
# directly return numpy array
warnings.warn("config.experimental.unpickle_gpu_on_cpu is set "
"to True. Unpickling GpuArray as numpy.ndarray")
ret = array
elif pygpu:
ret = pygpu.array(array)
ret = pygpu.array(array, context=get_context(ctx_name))
else:
raise ImportError("pygpu not found. Cannot unpickle GpuArray")
else:
ret = array
with self.zip_file.open(name) as f:
ret = np.lib.format.read_array(f)
self.cache[name] = ret
return ret
......
......@@ -1651,7 +1651,7 @@ class ScanMerge(gof.Optimizer):
info['truncate_gradient'] = nodes[0].op.truncate_gradient
info['name'] = '&'.join([nd.op.name for nd in nodes])
info['mode'] = nodes[0].op.mode
info['gpu'] = False
info['gpua'] = False
info['as_while'] = as_while
info['profile'] = nodes[0].op.profile
info['allow_gc'] = nodes[0].op.allow_gc
......
......@@ -44,12 +44,10 @@ if theano.config.mode == 'FAST_COMPILE':
mode_with_opt = theano.compile.mode.get_mode('FAST_RUN')
else:
mode_with_opt = theano.compile.mode.get_default_mode()
mode_with_gpu = mode_with_opt.including('gpu', 'scan')
if theano.config.mode in ('DEBUG_MODE', 'DebugMode'):
mode_nodebug = theano.compile.mode.get_mode('FAST_RUN')
else:
mode_nodebug = mode_with_opt
mode_with_gpu_nodebug = mode_nodebug.including('gpu', 'scan')
type_eps = {'float64': 1e-7,
......
......@@ -908,7 +908,8 @@ def test_const_type_in_mul_canonizer():
class test_fusion(unittest.TestCase):
mode = copy.copy(compile.mode.get_default_mode())
_shared = shared
_shared = staticmethod(shared)
topo_exclude = ()
def do(self, mode, shared_fn, shp, nb_repeat=1, assert_len_topo=True, slice=None):
"""
......@@ -1121,10 +1122,6 @@ class test_fusion(unittest.TestCase):
t1 = time.time()
out = out.get_value()
# print "CASE2/3", f.maker.fgraph.toposort()
# print 'CASE2/3', f.maker.fgraph
# print 'CASE2/3', f.maker.fgraph.toposort()[3].op.scalar_op.fgraph
times[id] = t1 - t0
atol = 1e-8
if out_dtype == 'float32':
......@@ -1135,7 +1132,8 @@ class test_fusion(unittest.TestCase):
print(out)
print(answer * nb_repeat)
topo = f.maker.fgraph.toposort()
topo_ = topo
topo_ = [n for n in topo
if not isinstance(n.op, self.topo_exclude)]
if assert_len_topo:
if not len(topo_) == nb_elemwise:
fail3.append((id, topo_, nb_elemwise))
......@@ -1274,9 +1272,10 @@ class test_fusion(unittest.TestCase):
x, y, z = dmatrices('xyz')
f = theano.function([x, y, z], tensor.dot(x, y) + x + y + z, mode=mode)
topo = f.maker.fgraph.toposort()
topo = [n for n in f.maker.fgraph.toposort()
if not isinstance(n.op, self.topo_exclude)]
assert len(topo) == 2
assert f.maker.fgraph.toposort()[-1].op.inplace_pattern
assert topo[-1].op.inplace_pattern
f(np.random.random((5, 5)), np.random.random((5, 5)),
np.random.random((5, 5)))
......
Markdown is supported
0%
You are about to add 0 people to this discussion. Please proceed with caution.
Finish editing this comment first!
Register or sign in to comment