fixed orphan order bug in Function, continuing to bring Ops back

Parent 1e2dd4e6
......@@ -273,39 +273,5 @@ def matrices(n):
return [matrix() for i in xrange(n)]
#TODO: move this to the _test_tensor_ops.py
class _testCase_matinv:# (unittest.TestCase):
    """Gradient-descent matrix "inversion" check.

    NOTE(review): the unittest.TestCase base is commented out, so this case is
    currently disabled; it is only run if invoked explicitly.
    """

    def setUp(self):
        # Fixed seed so the loss strings asserted below are reproducible.
        numpy.random.seed(1)

    def matinv(self, dim):
        # Symbolic program: loss = sum((a.b - I)**2), differentiated wrt b.
        a, b = matrices(2)
        ab = T.dot(a, b)
        diff = ab - tensor.tensor(numpy.identity(dim))
        ssdiff = T.sum((diff ** 2.0))
        g = grad(ssdiff, None, tensor.tensor(numpy.ones(1)))

        # Compile the symbolic program into a callable function.
        fn = compile.Function([a, b], [ssdiff, g(b)])

        # Plain gradient descent on wi, holding w fixed.
        w = numpy.random.rand(dim, dim)
        wi = numpy.random.rand(dim, dim)
        for step in xrange(300):
            ssd, gw = fn(w, wi)
            if step == 0:
                str0 = str(ssd)
            wi -= 0.4 * gw
        # Return the loss before and after descent, as strings.
        return str0, str(ssd)

    def test_matinv(self):
        """Matrix inversion by gradient descent (eval mode)"""
        self.assertEqual(('2.67327580893', '0.000438649434819'), self.matinv(3))
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
The diff has been collapsed.
......@@ -55,7 +55,10 @@ class BaseTensor(ResultBase):
if not isinstance(arr, numpy.ndarray):
arr = numpy.asarray(arr, dtype = self.dtype)
if len(self.broadcastable) != len(arr.shape):
raise ValueError(BaseTensor.filter.E_rank)
raise ValueError(BaseTensor.filter.E_rank,
self.broadcastable,
arr.shape,
self.owner)
for b, s in zip(self.broadcastable, arr.shape):
if b and (s != 1):
raise ValueError(BaseTensor.filter.E_shape)
......
......@@ -71,7 +71,7 @@ class Function:
#print 'orphans', orphans
#print 'ops', gof.graph.ops(inputs, outputs)
env = gof.env.Env(inputs, outputs, features, consistency_check = True)
env = gof.env.Env(inputs, outputs, features + [gof.EquivTool], consistency_check = True)
#print 'orphans in env', env.orphans()
......@@ -79,7 +79,7 @@ class Function:
#print 'orphans after clone', env.orphans()
for d, o in zip(orphan_data, env.orphans()):
for d, o in zip(orphan_data, [env.equiv(orphan) for orphan in orphans]):
#print 'assigning orphan value', d
o.data = d
......
......@@ -95,13 +95,13 @@ def grad_sources_inputs(sources, graph_inputs):
gmap[r] = g_r
return gmap
def grad(cost, param):
def grad(cost, param, g_cost=1.0):
"""Return symbolic expression of gradient of <cost> wrt <param>.
If <param> is a list, then return a list containing the gradient of cost wrt
each element of the list.
"""
inputs = gof.graph.inputs([cost])
gmap = grad_sources_inputs([(cost, 1.0)], inputs)
gmap = grad_sources_inputs([(cost, g_cost)], inputs)
if isinstance(param, list):
return [gmap.get(p, None) for p in param]
else:
......@@ -136,9 +136,9 @@ class numeric_grad:
f_eps = f(*pt)
gf[idx][i] = numpy.asarray((f_eps - f_pt)/eps)
pt[idx][i] = orig
elif len(args[idx].shape) == 2:
elif len(pt[idx].shape) == 2:
for i in xrange(pt[idx].shape[0]):
for j in xrange(args[idx].shape[1]):
for j in xrange(pt[idx].shape[1]):
orig = pt[idx][i,j]
pt[idx][i,j] = pt[idx][i,j] + eps
f_eps = f(*pt)
......
The diff has been collapsed.
......@@ -83,16 +83,7 @@ class InvElemwiseInplace(InvElemwise.inplace_version()):
return x
class Exp(Elemwise):
def impl(self, x): return numpy.exp(x)
def grad(self, x, gz): return gz * exp(x)
def c_foreach(self, (x_i, ), (z_i, )): return "z_i = exp(x_i);"
class Log(Elemwise):
def impl(self, x): return numpy.log(x)
def grad(self, x, gz): return gz / x
def c_foreach(self, (x_i, ), (z_i, )): return "z_i = log(x_i);"
class Log2(Elemwise):
def impl(self, x): return numpy.log2(x)
def grad(self, x, gz): return gz / (x * numpy.log(2))
......
Markdown format
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment