Commit 7c17434d authored by nouiz

Merge pull request #188 from jaberg/master

Fix failing tests in tensor module
@@ -272,7 +272,8 @@ class CLinkerOp(CLinkerObject):
         - `MethodNotDefined`: Subclass does not implement this method

         """
-        raise utils.MethodNotDefined("c_support_code_apply", type(self), self.__class__.__name__)
+        raise utils.MethodNotDefined("c_support_code_apply",
+                type(self), self.__class__.__name__)


 class PureOp(object):
...
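Note: the raise above follows the convention that optional C-interface hooks signal `MethodNotDefined` rather than return a default, and callers probe for the hook with try/except (as the `Composite` changes further down do). A minimal stand-alone sketch of that pattern; the class and helper names below are invented for illustration and are not Theano API:

```python
class MethodNotDefined(Exception):
    """Raised by optional C-interface hooks that a subclass did not implement."""

class OptionalCMethods(object):
    def c_support_code_apply(self, node, name):
        # Default: signal "not implemented" instead of returning empty code,
        # so callers can tell "nothing to add" apart from a missing override.
        raise MethodNotDefined("c_support_code_apply", type(self))

def collect_support_code(ops, node, name):
    blocks = []
    for op in ops:
        try:
            blocks.append(op.c_support_code_apply(node, name))
        except MethodNotDefined:
            pass  # this op contributes no per-apply support code; skip it
    return "\n".join(blocks)
```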
"""
Tests for GPU convolution
"""
import sys import sys
import time import time
import unittest import unittest
......
@@ -1206,13 +1206,16 @@ class Mod(BinaryScalarOp):
     def c_code(self, node, name, (x, y), (z, ), sub):
         """
-        We want the result to have the same sign as python, not the other implementation of mod.
+        We want the result to have the same sign as python, not the other
+        implementation of mod.
         """
-        #raise NotImplementedError("Unlike Python, C's modulo returns negative modulo on negative dividend (to implement)")
+        # raise NotImplementedError("Unlike Python, C's modulo returns negative
+        # modulo on negative dividend (to implement)")
         t = node.inputs[0].type.upcast(*[ i.type for i in node.inputs[1:]])
         if (str(t) in imap(str, discrete_types) or
-            t in ['uint8','int8','uint16','int16','uint32','int32','uint64','int64'] or
+            t in ['uint8','int8','uint16','int16'] or
+            t in ['uint32','int32','uint64','int64'] or
             t in discrete_types):
             # The above or's should not be needed anymore. However, for now we
             # keep them out of safety, and verify they are useless with an
             # assert.
@@ -2097,6 +2100,16 @@ class Composite(ScalarOp):
             return ()
         return tuple(rval)

+    def c_support_code(self):
+        rval = []
+        for subnode in self.env.toposort():
+            try:
+                rval.append(subnode.op.c_support_code())
+            except gof.utils.MethodNotDefined:
+                pass
+        # remove duplicate code blocks
+        return "\n".join(sorted(set(rval)))
+
     def c_support_code_apply(self, node, name):
         rval = []
         for subnode, subnodename in zip(self.env.toposort(), self.nodenames):
@@ -2107,6 +2120,10 @@ class Composite(ScalarOp):
                     subnodename % dict(nodename=name)))
             except gof.utils.MethodNotDefined:
                 pass
+        # there should be no need to remove duplicate code blocks because
+        # each block should have been specialized for the given nodename.
+        # Any block that isn't specialized should be returned via
+        # c_support_code instead of c_support_code_apply.
         return "\n".join(rval)

     def __eq__(self, other):
...
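The new comments draw a line between node-independent support code, which may legitimately repeat across sub-ops and is therefore collapsed with `sorted(set(...))`, and per-apply support code, which is specialized with the node name and expected to be unique. A toy stand-alone illustration of that distinction; the names below are invented for the example and are not Theano API:

```python
# Node-independent block: identical for every use, so duplicates can be dropped.
shared_block = "double sigmoid(double x);"

def per_apply_block(nodename):
    # Specialized per apply node, e.g. a buffer named after the node.
    return "static double buf_%s[16];" % nodename

sub_ops = ["a", "b", "c"]

# c_support_code-style: collapse duplicates and order deterministically.
support = "\n".join(sorted(set(shared_block for _ in sub_ops)))
assert support == shared_block                        # one copy survives

# c_support_code_apply-style: every block differs, keep them all.
apply_support = "\n".join(per_apply_block(n) for n in sub_ops)
assert apply_support.count("static double buf_") == 3
```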
@@ -929,6 +929,9 @@ class Elemwise(Op):
     def c_headers(self):
         return ['<vector>', '<algorithm>']

+    def c_support_code(self):
+        return self.scalar_op.c_support_code()
+
     def c_support_code_apply(self, node, nodename):
         support_code = self.scalar_op.c_support_code_apply(node,
                 nodename + '_scalar_')
...
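This forwards node-independent support code from the wrapped scalar op (for example a `Composite`) up to the tensor-level `Elemwise`; if the scalar op has no such code, the `MethodNotDefined` it raises simply propagates to the caller, which is expected to catch it as in the hunks above. A stand-alone sketch of that flow using stand-in classes, not the real `Elemwise`/`ScalarOp`:

```python
class MethodNotDefined(Exception):
    pass

class ScalarOpWithSupport(object):
    def c_support_code(self):
        return "double my_helper(double x);"

class ScalarOpWithoutSupport(object):
    def c_support_code(self):
        raise MethodNotDefined("c_support_code")

class ElemwiseLike(object):
    def __init__(self, scalar_op):
        self.scalar_op = scalar_op
    def c_support_code(self):
        return self.scalar_op.c_support_code()   # plain delegation, nothing added

assert ElemwiseLike(ScalarOpWithSupport()).c_support_code().startswith("double")
try:
    ElemwiseLike(ScalarOpWithoutSupport()).c_support_code()
except MethodNotDefined:
    pass   # the caller treats this as "nothing to add"
```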
@@ -670,12 +670,15 @@ class GradientError(Exception):
     def __str__(self):
+        # args may have been inserted by e.g. makeTester
+        args_msg = ", ".join(str(a) for a in self.args)
         return """GradientError: numeric gradient and analytic gradient exceed tolerance:
         At position %i of argument %i,
             abs. error = %f, abs. tolerance = %f
-            rel. error = %f, rel. tolerance = %f
+            rel. error = %f, rel. tolerance = %f\nException args: %s
         """ %(self.err_pos, self.arg,
               self.abs_err, self.abs_tol,
-              self.rel_err, self.rel_tol)
+              self.rel_err, self.rel_tol,
+              args_msg)

 verify_grad.E_grad = GradientError
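The intent here is that extra positional arguments attached to the exception (for example by test helpers such as makeTester) show up in the printed message instead of being silently dropped. A generic stand-alone example of the same pattern; `ToleranceError` is an invented class, not the real `GradientError`:

```python
class ToleranceError(Exception):
    def __init__(self, abs_err, abs_tol, *extra):
        # All positional arguments, including the extras, land in self.args.
        Exception.__init__(self, abs_err, abs_tol, *extra)
        self.abs_err = abs_err
        self.abs_tol = abs_tol

    def __str__(self):
        args_msg = ", ".join(str(a) for a in self.args)
        return ("abs. error = %f, abs. tolerance = %f\nException args: %s"
                % (self.abs_err, self.abs_tol, args_msg))

err = ToleranceError(0.5, 0.1, "hint from a test helper")
assert "hint from a test helper" in str(err)
```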
-import copy, sys
-import numpy, theano
+import copy
+import sys
+import numpy
+import theano
 from theano import tensor
 from theano.tensor.nnet import crossentropy_softmax_argmax_1hot_with_bias


 def test_bug_2009_06_02_trac_387():
     y = tensor.lvector('y')
-    #f = theano.function([y], tensor.stack(y[0] / 2))
-    #f = theano.function([y], tensor.join(0,tensor.shape_padleft(y[0] / 2,1)))
-    f = theano.function([y], tensor.int_div(tensor.DimShuffle(y[0].broadcastable, ['x'])(y[0]), 2))
+    f = theano.function([y],
+            tensor.int_div(
+                tensor.DimShuffle(y[0].broadcastable, ['x'])(y[0]), 2))

     sys.stdout.flush()
     print f(numpy.ones(1, dtype='int64') * 3)
-    #z = tensor.lscalar('z')
-    #f = theano.function([z], tensor.DimShuffle([], ['x'])(z) / 2)
+    # XXX: there is no assert, nor comment that DEBUGMODE is to do the
+    # checking. What was the bug, and how is it being tested?


 def test_bug_2009_07_17_borrowed_output():
     """Regression test for a bug where output was borrowed by mistake."""
@@ -21,10 +24,10 @@ def test_bug_2009_07_17_borrowed_output():
     # The output should *NOT* be borrowed.
     g = theano.function([a, b],
             theano.Out(theano.tensor.dot(a, b), borrow=False))
     x = numpy.zeros((1, 2))
     y = numpy.ones((2, 5))
     z = g(x, y)
     print z  # Should be zero.
     x.fill(1)
@@ -51,11 +54,11 @@ def test_bug_2009_07_17_borrowed_output():
     output = nll_softmax_argmax[1]
     g = theano.function([test_output_activation_no_bias, test_b2, test_target],
             theano.Out(output, borrow=False))
     a = numpy.zeros((1, 5))
     b = numpy.ones(5)
     c = numpy.zeros(1, dtype=numpy.int32)
     z = g(a, b, c)
     z_backup = copy.copy(z)
     id_z = id(z)
@@ -68,4 +71,3 @@ def test_bug_2009_07_17_borrowed_output():
     assert id_z != id_other
     # Just to be 100% sure, ensure that z was not altered.
     assert (z == z_backup).all()