Commit 2c7949b6 authored by Pascal Lamblin

Merge pull request #1526 from nouiz/lamblin-fix_pickle_cache_leak2

fix pickle cache leak
......@@ -14,6 +14,30 @@ from theano.scan_module import scan
from theano.tensor.basic import _allclose
# Used in TestComputeTestValue.test_no_perform
class IncOneC(Op):
    """An Op implemented only in C (via ``c_code``); it has no Python
    ``perform`` method.  Computes ``output = input + 1`` on a scalar.
    """

    def __eq__(self, other):
        # All instances are interchangeable: equality is by concrete class.
        return type(self) == type(other)

    def __hash__(self):
        # Must stay consistent with __eq__ (class-based identity).
        return hash(type(self))

    def make_node(self, input):
        # Coerce to a theano scalar and build an output of the same type.
        scalar_input = scalar.as_scalar(input)
        scalar_output = scalar_input.type()
        return Apply(self, [scalar_input], [scalar_output])

    def c_code_cache_version(self):
        # Bump this tuple whenever the generated C code changes.
        return (1,)

    def c_code(self, node, name, inputs, outputs, sub):
        (x,) = inputs
        (z,) = outputs
        # Substitute the C variable names into the template.
        return "%(z)s = %(x)s + 1;" % {"x": x, "z": z}
class TestComputeTestValue(unittest.TestCase):
def test_variable_only(self):
......@@ -338,28 +362,6 @@ class TestComputeTestValue(unittest.TestCase):
def test_no_perform(self):
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
# NOTE(review): this is the pre-refactor copy of IncOneC, nested inside the
# test in the old version of the file; the commit moves it to module level
# so the Op can be pickled.  Code kept byte-identical (diff context).
class IncOneC(Op):
"""An Op with only a C (c_code) implementation"""
# Equality/hash by concrete class: all instances are interchangeable.
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
# Build an Apply node: coerce the input to a theano scalar and make an
# output variable of the same scalar type.
def make_node(self, input):
input = scalar.as_scalar(input)
output = input.type()
return Apply(self, [input], [output])
# Cache-version tuple: bump when the generated C code changes.
def c_code_cache_version(self):
return (1,)
# C implementation: output = input + 1 on a scalar.
def c_code(self, node, name, inputs, outputs, sub):
x, = inputs
z, = outputs
return "%(z)s = %(x)s + 1;" % locals()
orig_compute_test_value = theano.config.compute_test_value
try:
......@@ -368,6 +370,8 @@ class TestComputeTestValue(unittest.TestCase):
i = scalar.int32('i')
i.tag.test_value = 3
# Class IncOneC is defined outside of the TestComputeTestValue
# so it can be pickled and unpickled
o = IncOneC()(i)
# Check that the perform function is not implemented
......
......@@ -148,7 +148,8 @@ class Scalar(Type):
return py_type(data)
else:
raise TypeError('Value cannot accurately be converted to dtype'
' (%s) and allow_downcast is not True' % self.dtype)
' (%s) and allow_downcast is not True' %
self.dtype)
except Exception, e:
raise TypeError("Could not convert %s (value=%s) to %s" % (
type(data), data, self.dtype), e)
......@@ -788,17 +789,18 @@ class ScalarOp(Op):
if output_types_preference is not None:
if not callable(output_types_preference):
raise TypeError(
"Expected a callable for the 'output_types_preference' argument to %s. (got: %s)" % (self.__class__, output_types_preference))
"Expected a callable for the 'output_types_preference' argument to %s. (got: %s)" %
self.__class__, output_types_preference)
self.output_types_preference = output_types_preference
def make_node(self, *inputs):
if self.nin >= 0:
if len(inputs) != self.nin:
raise TypeError("Wrong number of inputs for %s.make_node (got %i(%s), expected %i)" \
% (self, len(inputs), str(inputs), self.nin))
raise TypeError("Wrong number of inputs for %s.make_node (got %i(%s), expected %i)" %
self, len(inputs), str(inputs), self.nin)
inputs = [as_scalar(input) for input in inputs]
outputs = [t() for t in self.output_types([input.
type for input in inputs])]
outputs = [t() for t in self.output_types([input.type
for input in inputs])]
if len(outputs) != self.nout:
raise TypeError("Not the right number of outputs produced for %s(%s). Expected %s, got %s."
% (self, ", ".join(str(input) for input in inputs), self.nout, len(outputs)))
......@@ -906,6 +908,7 @@ class UnaryScalarOp(ScalarOp):
%(fct)s(n, x, z);
""" % locals()
class BinaryScalarOp(ScalarOp):
# One may define in subclasses the following fields:
# - `identity`: for an associative operation, identity corresponds to
......@@ -940,7 +943,7 @@ class FixedLogicalComparison(UnaryScalarOp):
return [int8]
def grad(self, inputs, output_gradients):
    """Gradient of a fixed logical comparison: identically zero.

    The op's output is a discrete (int) type, so the derivative with
    respect to the input is zero; it is cast to ``floatX`` so callers
    receive a float gradient variable.
    """
    # Fix: the diff residue kept both the old ("x ,= inputs") and the
    # new ("x, = inputs") spelling of this unpack, so the statement
    # appeared twice; keep a single, PEP8-spaced occurrence.
    x, = inputs
    out = self(x)
    # Sanity check: comparison ops must yield an integer dtype.
    assert str(out.type.dtype).find('int') != -1
    return [x.zeros_like().astype(theano.config.floatX)]
......@@ -1169,8 +1172,9 @@ class BinaryBitOp(BinaryScalarOp):
return upcast_out(*input_types[0])
def grad(self, inputs, output_gradients):
    """Gradient of a binary bitwise op: zero for both inputs.

    Bitwise operations are piecewise-constant in their (integer)
    inputs, so both partial derivatives are zero, cast to ``floatX``.
    """
    # Fix: the diff residue contained both the old one-line body and
    # the new reformatted body, duplicating the unpack and the return;
    # keep a single copy of the reformatted version.
    a, b = inputs
    return [a.zeros_like().astype(theano.config.floatX),
            b.zeros_like().astype(theano.config.floatX)]
class OR(BinaryBitOp):
......@@ -1342,8 +1346,9 @@ class Mul(ScalarOp):
output_type = self.output_types([i.type for i in inputs])[0]
if output_type in complex_types:
if not gz.type in complex_types:
raise TypeError('Mul with output_type ' + str(output_type) +\
' expected gz type to be complex, got gz with type ' +\
raise TypeError(
'Mul with output_type ' + str(output_type) +
' expected gz type to be complex, got gz with type ' +
str(gz.type))
if output_type in discrete_types:
......@@ -1989,7 +1994,7 @@ class RoundHalfToEven(UnaryScalarOp):
def c_code___(self, node, name, (x, ), (z, ), sub):
typ = node.outputs[0].type.dtype
if not node.outputs[0].type.dtype in ['float32', 'float64']:
if not typ in ['float32', 'float64']:
Exception("The output should be float32 or float64")
return dedent("""
......@@ -2036,7 +2041,7 @@ class RoundHalfToEven(UnaryScalarOp):
#undef ROUNDING_EPSILON
""")
""" % locals())
round_half_to_even = RoundHalfToEven(same_out_float_only)
......@@ -2754,11 +2759,10 @@ class Composite(ScalarOp):
" be Constant instances.")
_c_code = "{\n"
i = 0
j = 0
self.nodenames = ["%(nodename)s_" + ('subnode%i' % j)
for j, n in enumerate(self.fgraph.toposort())]
i = 0
for j, node in enumerate(self.fgraph.toposort()):
for output in node.outputs:
if output not in subd:
......@@ -2835,6 +2839,10 @@ class Composite(ScalarOp):
self.fgraph = fgraph
def __init__(self, inputs, outputs):
# We need to clone the graph as sometimes its nodes already
# contain a reference to an fgraph. As we want the Composite
# to be pickable, we can't have reference to fgraph.
inputs, outputs = gof.graph.clone(inputs, outputs)
self.inputs = copy(inputs)
self.outputs = copy(outputs)
self.inputs_type = tuple([input.type for input in inputs])
......
......@@ -12,10 +12,14 @@ If you do want to rewrite these tests, bear in mind:
import unittest
import theano
from theano.gof import Variable, Op, FunctionGraph
from theano.gof import FunctionGraph
from theano import gof
from theano.scalar.basic import *
from theano.scalar.basic import (floats, float32, float64,
ints, int8, int32, complex64,
ComplexError, IntDiv, TrueDiv,
Composite, add, div_proxy,
and_, eq, neq, invert, mul)
def inputs():
......@@ -216,7 +220,7 @@ class test_div(unittest.TestCase):
d = float64()
f = float32()
print (a//b).owner.op
#print (a//b).owner.op
assert isinstance((a//b).owner.op, IntDiv)
assert isinstance((b//a).owner.op, IntDiv)
assert isinstance((b/d).owner.op, TrueDiv)
......
......@@ -880,15 +880,8 @@ class T_using_gpu(unittest.TestCase):
for x in f.maker.fgraph.toposort()])
class T_fibby(unittest.TestCase):
## All tests here belong to
## http://deeplearning.net/software/theano/extending/fibby.html
## Theano/doc/extending/fibby.txt
## Any change you do here also add it to the tutorial !
def test_fibby_1(self):
class Fibby(theano.Op):
# Used in T_fibby
class Fibby(theano.Op):
"""
An arbitrarily generalized Fibbonacci sequence
......@@ -936,6 +929,17 @@ class T_fibby(unittest.TestCase):
def c_code_cache_version(self):
return (1,)
class T_fibby(unittest.TestCase):
## All tests here belong to
## http://deeplearning.net/software/theano/extending/fibby.html
## Theano/doc/extending/fibby.txt
## Any change you do here also add it to the tutorial !
def test_fibby_1(self):
# The definition of class Fibby is done outside of the test,
# so the object can be pickled.
fibby = Fibby()
from theano.tensor.opt import (get_scalar_constant_value,
......
Markdown is supported
0%
Attach a file to this discussion. Proceed with caution.
Finish editing this comment first!
Register or sign in to post a comment