提交 0d99258d authored 作者: abergeron's avatar abergeron

Merge pull request #2766 from nouiz/mixed

Mixed
...@@ -1111,8 +1111,18 @@ class ModuleCache(object): ...@@ -1111,8 +1111,18 @@ class ModuleCache(object):
# Verify that when we reload the KeyData from the pickled file, the # Verify that when we reload the KeyData from the pickled file, the
# same key can be found in it, and is not equal to more than one # same key can be found in it, and is not equal to more than one
# other key. # other key.
with open(key_pkl, 'rb') as f: for i in range(3):
key_data = cPickle.load(f) try:
with open(key_pkl, 'rb') as f:
key_data = cPickle.load(f)
break
except EOFError:
# This file is probably getting written/updated at the
# same time. This can happen as we read the cache
# without taking the lock.
if i == 2:
raise
time.sleep(2)
found = sum(key == other_key for other_key in key_data.keys) found = sum(key == other_key for other_key in key_data.keys)
msg = '' msg = ''
......
...@@ -206,8 +206,7 @@ def filter_compiledir(path): ...@@ -206,8 +206,7 @@ def filter_compiledir(path):
if os.path.exists(init_file): if os.path.exists(init_file):
pass # has already been created pass # has already been created
else: else:
if os.path.exists(path): e.args += ('%s exist? %s' % (path, os.path.exists(path)),)
e.args += ('%s does not exist..' % path,)
raise raise
return path return path
......
...@@ -59,7 +59,8 @@ try: ...@@ -59,7 +59,8 @@ try:
if os.path.exists(init_file): if os.path.exists(init_file):
pass # has already been created pass # has already been created
else: else:
e.args += ('%s exist?' % os.path.exists(location),) e.args += ('%s exist? %s' % (location,
os.path.exists(location)),)
raise raise
_need_reload = False _need_reload = False
......
...@@ -167,6 +167,23 @@ class TestComputeTestValue(unittest.TestCase): ...@@ -167,6 +167,23 @@ class TestComputeTestValue(unittest.TestCase):
finally: finally:
theano.config.compute_test_value = orig_compute_test_value theano.config.compute_test_value = orig_compute_test_value
def test_empty_elemwise(self):
    """Elemwise graphs built from a zero-sized shared variable must still
    produce a test_value when compute_test_value is set to 'raise'."""
    saved_flag = theano.config.compute_test_value
    try:
        theano.config.compute_test_value = 'raise'
        empty_var = theano.shared(
            numpy.random.rand(0, 6).astype(config.floatX), 'x')
        # Building the expression should not raise even though the
        # underlying value has zero rows.
        expr = (empty_var + 2) * 3
        assert hasattr(expr.tag, 'test_value')
        fn = theano.function([], expr)
        assert _allclose(fn(), expr.tag.test_value)
    finally:
        # Always restore the global flag so other tests are unaffected.
        theano.config.compute_test_value = saved_flag
def test_constant(self): def test_constant(self):
orig_compute_test_value = theano.config.compute_test_value orig_compute_test_value = theano.config.compute_test_value
try: try:
......
...@@ -74,6 +74,8 @@ def debugprint(obj, depth=-1, print_type=False, ...@@ -74,6 +74,8 @@ def debugprint(obj, depth=-1, print_type=False,
to the Apply's identifier, to indicate which output a line corresponds to. to the Apply's identifier, to indicate which output a line corresponds to.
""" """
if not isinstance(depth, int):
raise Exception("depth parameter must be an int")
if file == 'str': if file == 'str':
_file = StringIO() _file = StringIO()
elif file is None: elif file is None:
......
...@@ -599,12 +599,12 @@ class NaiveAlgo(object): ...@@ -599,12 +599,12 @@ class NaiveAlgo(object):
for d in xrange(nd): for d in xrange(nd):
print >> sio, 'std::cerr << " " << local_dims[%(d)s]; ' % locals() print >> sio, 'std::cerr << " " << local_dims[%(d)s]; ' % locals()
print >> sio, 'std::cerr << "\\n";' print >> sio, 'std::cerr << "\\n";'
if nd > 0:
for ipos in xrange(len(node.inputs)): for ipos in xrange(len(node.inputs)):
print >> sio, 'std::cerr << " local_str inputs %(ipos)s: " <<'%locals() + \ print >> sio, 'std::cerr << " local_str inputs %(ipos)s: " <<'%locals() + \
' << " " << '.join(["local_str[%s][%s]" % (ipos, x) for x in xrange(nd)])+'<<"\\n";' ' << " " << '.join(["local_str[%s][%s]" % (ipos, x) for x in xrange(nd)])+'<<"\\n";'
for ipos in xrange(len(node.outputs)): for ipos in xrange(len(node.outputs)):
print >> sio, 'std::cerr << " local_ostr inputs %(ipos)s: " <<'%locals() + \ print >> sio, 'std::cerr << " local_ostr inputs %(ipos)s: " <<'%locals() + \
' << " " << '.join(["local_ostr[%s][%s]" % (ipos, x) for x in xrange(nd)])+'<<"\\n";' ' << " " << '.join(["local_ostr[%s][%s]" % (ipos, x) for x in xrange(nd)])+'<<"\\n";'
print >> sio, """ print >> sio, """
...@@ -642,11 +642,11 @@ class NaiveAlgo(object): ...@@ -642,11 +642,11 @@ class NaiveAlgo(object):
for d in xrange(nd): for d in xrange(nd):
print >> sio, 'std::cerr << " " << local_dims[%(d)s]; '%locals() print >> sio, 'std::cerr << " " << local_dims[%(d)s]; '%locals()
print >> sio, 'std::cerr << "\\n";' print >> sio, 'std::cerr << "\\n";'
if nd > 0:
for ipos in xrange(len(node.inputs)): for ipos in xrange(len(node.inputs)):
print >> sio, 'std::cerr << " local_str %(ipos)s: " <<'%locals()+' << " " << '.join(["local_str[%s][%s]" % (ipos, x) for x in xrange(nd)])+'<<"\\n";' print >> sio, 'std::cerr << " local_str %(ipos)s: " <<'%locals()+' << " " << '.join(["local_str[%s][%s]" % (ipos, x) for x in xrange(nd)])+'<<"\\n";'
for ipos in xrange(len(node.outputs)): for ipos in xrange(len(node.outputs)):
print >> sio, 'std::cerr << " local_ostr %(ipos)s: " <<'%locals()+' << " " << '.join(["local_ostr[%s][%s]" % (ipos, x) for x in xrange(nd)])+'<<"\\n";' print >> sio, 'std::cerr << " local_ostr %(ipos)s: " <<'%locals()+' << " " << '.join(["local_ostr[%s][%s]" % (ipos, x) for x in xrange(nd)])+'<<"\\n";'
# collapse contiguous dimensions (ignoring scalars, generic version(collapse any dimensions, right, left, middle)) # collapse contiguous dimensions (ignoring scalars, generic version(collapse any dimensions, right, left, middle))
# this is a good idea because we make less index calculation in the gpu. # this is a good idea because we make less index calculation in the gpu.
...@@ -729,11 +729,11 @@ nd_collapse_[i]=0; ...@@ -729,11 +729,11 @@ nd_collapse_[i]=0;
for d in xrange(nd): for d in xrange(nd):
print >> sio, 'std::cerr << " " << local_dims[%(d)s]; '%locals() print >> sio, 'std::cerr << " " << local_dims[%(d)s]; '%locals()
print >> sio, 'std::cerr << "\\n";' print >> sio, 'std::cerr << "\\n";'
if nd > 0:
for ipos in xrange(len(node.inputs)): for ipos in xrange(len(node.inputs)):
print >> sio, 'std::cerr << " local_str %(ipos)s: " <<'%locals()+' << " " << '.join(["local_str[%s][%s]"%(ipos, x) for x in xrange(nd)])+'<<"\\n";' print >> sio, 'std::cerr << " local_str %(ipos)s: " <<'%locals()+' << " " << '.join(["local_str[%s][%s]"%(ipos, x) for x in xrange(nd)])+'<<"\\n";'
for ipos in xrange(len(node.outputs)): for ipos in xrange(len(node.outputs)):
print >> sio, 'std::cerr << " local_ostr %(ipos)s: " <<'%locals()+' << " " << '.join(["local_ostr[%s][%s]"%(ipos, x) for x in xrange(nd)])+'<<"\\n";' print >> sio, 'std::cerr << " local_ostr %(ipos)s: " <<'%locals()+' << " " << '.join(["local_ostr[%s][%s]"%(ipos, x) for x in xrange(nd)])+'<<"\\n";'
def launch_Ccontiguous(nodename, scalar_op, sync=True): def launch_Ccontiguous(nodename, scalar_op, sync=True):
kernel_call_args = ["numEls"] kernel_call_args = ["numEls"]
......
...@@ -29,6 +29,8 @@ else: ...@@ -29,6 +29,8 @@ else:
def test_dnn_conv_desc_merge(): def test_dnn_conv_desc_merge():
if not cuda.dnn.dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
img_shp = T.as_tensor_variable( img_shp = T.as_tensor_variable(
numpy.asarray([2, 1, 8, 8]).astype('int64')) numpy.asarray([2, 1, 8, 8]).astype('int64'))
kern_shp = T.as_tensor_variable( kern_shp = T.as_tensor_variable(
......
...@@ -185,6 +185,8 @@ def clone(output, ...@@ -185,6 +185,8 @@ def clone(output,
as the original graph. If False, clone them. Note that cloned as the original graph. If False, clone them. Note that cloned
shared variables still use the same underlying storage, so they shared variables still use the same underlying storage, so they
will always have the same value. will always have the same value.
:param copy_inputs: deprecated, use share_inputs.
""" """
if copy_inputs is not DEPRECATED_ARG: if copy_inputs is not DEPRECATED_ARG:
warnings.warn('In `clone()` function, the argument `copy_inputs` has been deprecated and renamed into `share_inputs`') warnings.warn('In `clone()` function, the argument `copy_inputs` has been deprecated and renamed into `share_inputs`')
......
...@@ -5433,7 +5433,11 @@ def local_elemwise_fusion_op(OP, max_input_fct=lambda node: 1024, ...@@ -5433,7 +5433,11 @@ def local_elemwise_fusion_op(OP, max_input_fct=lambda node: 1024,
else: else:
tmp = scalar.get_scalar_type(ii.dtype).make_variable() tmp = scalar.get_scalar_type(ii.dtype).make_variable()
try: try:
tmp.tag.test_value = gof.op.get_test_value(ii).flatten()[0] tv = gof.op.get_test_value(ii)
if tv.size > 0:
tmp.tag.test_value = tv.flatten()[0]
else:
tmp.tag.test_value = tv
except AttributeError: except AttributeError:
pass pass
tmp_s_input.append(tmp) tmp_s_input.append(tmp)
......
...@@ -134,7 +134,7 @@ class Append(Op): ...@@ -134,7 +134,7 @@ class Append(Op):
def make_node(self, x, toAppend): def make_node(self, x, toAppend):
assert isinstance(x.type, TypedListType) assert isinstance(x.type, TypedListType)
assert x.ttype == toAppend.type assert x.ttype == toAppend.type, (x.ttype, toAppend.type)
return Apply(self, [x, toAppend], [x.type()]) return Apply(self, [x, toAppend], [x.type()])
def perform(self, node, (x, toAppend), (out, )): def perform(self, node, (x, toAppend), (out, )):
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论