Commit b8374e28 authored by Pascal Lamblin

Merge pull request #3157 from harlouci/props_tests

Props tests
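This MR replaces the hand-written `__eq__`, `__hash__`, and `__str__` boilerplate in the documentation examples and tests with `__props__` declarations: listing the identity-defining attributes in `__props__` lets the Op base class derive those three methods automatically. As a rough sketch (plain Python, no Theano needed; the class name and properties are taken from the diff below), `__props__ = ("name", "fn")` behaves approximately like:

```python
class BinaryDoubleOp(object):
    # The attributes that define this Op's identity.
    __props__ = ("name", "fn")

    def __init__(self, name, fn):
        self.name = name
        self.fn = fn

    # Roughly what the Op base class generates from __props__:
    def __eq__(self, other):
        # Equal iff same class and all declared properties compare equal.
        return (type(self) == type(other) and
                all(getattr(self, p) == getattr(other, p)
                    for p in self.__props__))

    def __hash__(self):
        # Hash combines the class with the declared property values.
        return hash((type(self),) + tuple(getattr(self, p)
                                          for p in self.__props__))

    def __str__(self):
        # E.g. "BinaryDoubleOp{name=..., fn=...}".
        return "%s{%s}" % (self.__class__.__name__,
                           ", ".join("%s=%r" % (p, getattr(self, p))
                                     for p in self.__props__))
```

With `__props__ = ()`, as in the Fibby and PyCUDADoubleOp examples below, any two instances of the class compare equal.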
@@ -308,6 +308,8 @@ version that it produces in the code I gave above.
 class BinaryDoubleOp(gof.Op):
+    __props__ = ("name", "fn", "ccode")
     def __init__(self, name, fn, ccode):
         self.name = name
         self.fn = fn
...
@@ -35,12 +35,7 @@ you should check the strides and alignment.
     """
     An arbitrarily generalized Fibbonacci sequence
     """
-    def __eq__(self, other):
-        return type(self) == type(other)
-    def __hash__(self):
-        return hash(type(self))
+    __props__ = ()
     def make_node(self, x):
         x_ = tensor.as_tensor_variable(x)
...
@@ -682,16 +682,12 @@ arithmetic operators:
 class BinaryDoubleOp(gof.Op):
+    __props__ = ("name", "fn")
     def __init__(self, name, fn):
         self.name = name
         self.fn = fn
-    def __eq__(self, other):
-        return type(self) == type(other) and (self.name == other.name) and (self.fn == other.fn)
-    def __hash__(self):
-        return hash(type(self)) ^ hash(self.name) ^ hash(self.fn)
     def make_node(self, x, y):
         if isinstance(x, (int, float)):
             x = gof.Constant(double, x)
...
@@ -86,12 +86,7 @@ You can use a GPU function compiled with PyCUDA in a Theano op:
 import theano.sandbox.cuda as cuda
 class PyCUDADoubleOp(theano.Op):
-    def __eq__(self, other):
-        return type(self) == type(other)
-    def __hash__(self):
-        return hash(type(self))
-    def __str__(self):
-        return self.__class__.__name__
+    __props__ = ()
     def make_node(self, inp):
         inp = cuda.basic_ops.gpu_contiguous(
             cuda.basic_ops.as_cuda_ndarray_variable(inp))
...
@@ -688,14 +688,8 @@ Modify and execute to work for a matrix of shape (20, 10).
 import theano.sandbox.cuda as cuda
 class PyCUDADoubleOp(theano.Op):
-    def __eq__(self, other):
-        return type(self) == type(other)
-    def __hash__(self):
-        return hash(type(self))
-    def __str__(self):
-        return self.__class__.__name__
+    __props__ = ()
     def make_node(self, inp):
         inp = cuda.basic_ops.gpu_contiguous(
...
@@ -36,6 +36,7 @@ class testgrad_sources_inputs(unittest.TestCase):
     def test_retNone1(self):
         """Test that it is not ok to return None from op.grad()"""
         class retNone(gof.op.Op):
+            __props__ = ()
             def make_node(self):
                 inputs = [theano.tensor.vector()]
                 outputs = [theano.tensor.vector()]
@@ -52,6 +53,7 @@ class testgrad_sources_inputs(unittest.TestCase):
         """Test that it is not ok to return the wrong number of gradient terms
         """
         class retOne(gof.op.Op):
+            __props__ = ()
             def make_node(self, *inputs):
                 outputs = [theano.tensor.vector()]
                 return gof.Apply(self, inputs, outputs)
@@ -72,6 +74,7 @@ class testgrad_sources_inputs(unittest.TestCase):
         gval = theano.tensor.matrix()
         class O(gof.op.Op):
+            __props__ = ()
             def make_node(self):
                 inputs = [theano.tensor.matrix()]
                 outputs = [theano.tensor.matrix()]
@@ -88,6 +91,7 @@ class testgrad_sources_inputs(unittest.TestCase):
         gval = theano.tensor.matrix()
         class O(gof.op.Op):
+            __props__ = ()
             def make_node(self):
                 inputs = [theano.tensor.matrix()]
                 outputs = [theano.tensor.scalar(), theano.tensor.scalar()]
@@ -107,6 +111,7 @@ class testgrad_sources_inputs(unittest.TestCase):
         gval1 = theano.tensor.scalar()
         class O(gof.op.Op):
+            __props__ = ()
             def make_node(self):
                 inputs = [theano.tensor.scalar(), theano.tensor.scalar()]
                 outputs = [theano.tensor.matrix()]
@@ -127,6 +132,7 @@ class testgrad_sources_inputs(unittest.TestCase):
         gval1 = theano.tensor.matrix()
         class O(gof.op.Op):
+            __props__ = ()
             def make_node(self):
                 inputs = [theano.tensor.matrix(), theano.tensor.matrix()]
                 outputs = [theano.tensor.matrix(), theano.tensor.matrix()]
@@ -161,6 +167,7 @@ class test_grad(unittest.TestCase):
         # tests that unimplemented grads are caught in the grad method
         class DummyOp(gof.Op):
+            __props__ = ()
             def make_node(self, x):
                 return gof.Apply(self, [x], [x.type()])
@@ -350,6 +357,7 @@ class test_grad(unittest.TestCase):
         # Op1 has two outputs, f and g
         # x is connected to f but not to g
         class Op1(theano.gof.Op):
+            __props__ = ()
             def make_node(self, x):
                 return theano.Apply(self, inputs=[x],
                                     outputs=[x.type(), theano.tensor.scalar()])
@@ -363,6 +371,7 @@ class test_grad(unittest.TestCase):
         # Op2 has two inputs, f and g
         # Its gradient with respect to g is not defined
         class Op2(theano.gof.Op):
+            __props__ = ()
             def make_node(self, f, g):
                 return theano.Apply(self, inputs=[f, g],
                                     outputs=[theano.tensor.scalar()])
...
@@ -34,11 +34,7 @@ class BreakRop(Op):
     """
     @note: Non-differentiable.
     """
-    def __hash__(self):
-        return hash(type(self))
-    def __eq__(self, other):
-        return type(self) == type(other)
+    __props__ = ()
     def make_node(self, x):
         return Apply(self, [x], [x.type()])
...
@@ -139,16 +139,13 @@ class T_extending(unittest.TestCase):
         from theano import gof
         class BinaryDoubleOp(gof.Op):
+            __props__ = ("name", "fn")
             def __init__(self, name, fn):
                 self.name = name
                 self.fn = fn
-            def __eq__(self, other):
-                return type(self) == type(other) and (self.name == other.name) and (self.fn == other.fn)
-            def __hash__(self):
-                return hash(type(self)) ^ hash(self.name) ^ hash(self.fn)
             def make_node(self, x, y):
                 if isinstance(x, (int, float)):
                     x = gof.Constant(double, x)
@@ -207,16 +204,13 @@ class T_extending(unittest.TestCase):
         double = Double()
         class BinaryDoubleOp(gof.Op):
+            __props__ = ("name", "fn")
             def __init__(self, name, fn):
                 self.name = name
                 self.fn = fn
-            def __eq__(self, other):
-                return type(self) == type(other) and (self.name == other.name) and (self.fn == other.fn)
-            def __hash__(self):
-                return hash(type(self)) ^ hash(self.name) ^ hash(self.fn)
             def make_node(self, x, y):
                 if isinstance(x, (int, float)):
                     x = gof.Constant(double, x)
@@ -366,6 +360,8 @@ class T_extending(unittest.TestCase):
         from theano import gof
         class BinaryDoubleOp(gof.Op):
+            __props__ = ("name", "fn", "ccode")
             def __init__(self, name, fn, ccode):
                 self.name = name
                 self.fn = fn
@@ -1012,14 +1008,8 @@ class T_using_gpu(unittest.TestCase):
             raise SkipTest('Optional package cuda disabled')
         class PyCUDADoubleOp(theano.Op):
-            def __eq__(self, other):
-                return type(self) == type(other)
-            def __hash__(self):
-                return hash(type(self))
-            def __str__(self):
-                return self.__class__.__name__
+            __props__ = ()
             def make_node(self, inp):
                 inp = cuda.basic_ops.gpu_contiguous(
@@ -1061,12 +1051,7 @@ class Fibby(theano.Op):
     """
     An arbitrarily generalized Fibbonacci sequence
     """
-    def __eq__(self, other):
-        return type(self) == type(other)
-    def __hash__(self):
-        return hash(type(self))
+    __props__ = ()
     def make_node(self, x):
         x_ = theano.tensor.as_tensor_variable(x)
...
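For illustration, using the plain-Python sketch above (hypothetical usage, not part of this MR): two separately constructed ops that share the same values for every name in `__props__` now compare equal, which is what allows Theano's merge optimization to deduplicate the apply nodes they produce.

```python
def add(x, y):
    return x + y

# Same class, same values for every name listed in __props__.
op_a = BinaryDoubleOp("add", add)
op_b = BinaryDoubleOp("add", add)
assert op_a == op_b
assert hash(op_a) == hash(op_b)
print(op_a)  # BinaryDoubleOp{name='add', fn=<function add ...>}
```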