Commit b7a20acb authored by Olivier Breuleux

merge

@@ -96,21 +96,21 @@ class _test_compile(unittest.TestCase):
fn()
self.failUnless(go[0].data == 6.0)
def test_prog_noopt(self):
def test_noopt(self):
gi, go = graph1()
p = Prog(gi,go)
p = Function(gi,go)
self.failUnless(p() == 1.5)
def test_prog_opt(self):
def test_opt(self):
opt = gof.opt.PatternOptimizer((Div, '1', '2'), (Div, '2', '1'))
gi, go = graph1()
p = Prog(gi,go, optimizer=opt)
p = Function(gi,go, optimizer=opt)
self.failUnless(p() == 6.0)
def test_prog_multiout(self):
def test_multiout(self):
opt = gof.opt.PatternOptimizer((Div, '1', '2'), (Div, '2', '1'))
gi, go = graph2()
p = Prog(gi,go, optimizer=opt)
p = Function(gi,go, optimizer=opt)
a,b,c = p()
self.failUnless(a == 6.0)
self.failUnless(b == 6.0)
......
@@ -77,6 +77,662 @@ class _test_TensorOps(unittest.TestCase):
# # assert e.data == 1.5
from core import *
import unittest
import gradient
#useful mostly for unit tests
def _approx_eq(a,b,eps=1.0e-9):
a = numpy.asarray(a)
b = numpy.asarray(b)
if a.shape != b.shape:
return False
return numpy.max(numpy.abs(a-b)) < eps
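# Illustrative usage (added for this write-up, not part of the original commit):
# _approx_eq returns True only when the shapes match and every element differs
# by less than eps.
#   _approx_eq(numpy.ones(3), numpy.ones(3) + 1e-12)    # -> True
#   _approx_eq(numpy.ones(3), numpy.ones((3, 1)))        # -> False (shape mismatch)
#   _approx_eq(1.0, 1.0 + 1e-3)                          # -> False (difference >= eps)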
if 1: # run gradient tests
def _scalar(x):
rval = numpy.zeros(())
rval.itemset(x)
return rval
def _test_grad(self, op_cls, args, n_tests=1,eps=0.0000001, tol=0.0001):
"""unittest.TestCase.failUnless( analytic gradient matches finite-diff gradient )
The criterion is that every input gradient must match every
finite-difference gradient (using stepsize of eps) to relative precision
tol.
"""
def _finite_diff1(f, x, eps, f_of_x = None):
if f_of_x is None: f_of_x = f(x)
y_eps = f(x+eps)
return (y_eps - f_of_x) / eps
def _scalar_f(op_cls, args, R, arg_idx, coord=None):
m = args[arg_idx].data
if () == m.shape:
def rval(x):
old_x = float(m)
m.itemset(x)
y = float(sum(mul_elemwise(R, op_cls(*args))).data)
m.itemset(old_x)
return y
return rval
else:
def rval(x):
old_x = m.__getitem__(coord)
#print old_x.shape
#print x.shape
m.__setitem__(coord, x)
y = float(sum(mul_elemwise(R, op_cls(*args))).data)
m.__setitem__(coord, old_x)
return y
return rval
self.failUnless(hasattr(op_cls, 'update_gradient'), op_cls)
op_out = op_cls(*args)
if len(op_out.owner.outputs) > 1:
raise NotImplementedError('cannot autotest gradient of op with multiple outputs')
# we could make loop over outputs making random projections R for each,
# but this doesn't handle the case where not all the outputs are
# differentiable... so I leave this as TODO for now -jsb.
R = numpy.random.rand(*op_out.shape)
y = sum(mul_elemwise(R, op_out))
g = gradient.grad(y)
def abs_rel_err(a,b):
return abs( (a-b) / (a+b+eps))
for idx in range(len(args)):
#print 'aaaaaaa', op_cls, [i.shape for i in args]
g_i = g(args[idx])
if g_i is gradient.Undefined:
continue
if args[idx].shape == ():
fd_grad = _finite_diff1(_scalar_f(op_cls, args, R, idx),
args[idx].data, eps, y.data)
err = abs_rel_err(fd_grad,g_i.data)
self.failUnless( err < tol, (err, op_cls, idx))
elif len(args[idx].shape) == 1:
for i in xrange(args[idx].shape[0]):
fd_grad = _finite_diff1(_scalar_f(op_cls, args, R, idx, (i,)),
args[idx].data[i], eps, y.data)
err = abs_rel_err(fd_grad,g_i.data[i])
self.failUnless( abs(err) < tol, (err, op_cls, idx, i))
elif len(args[idx].shape) == 2:
for i in xrange(args[idx].shape[0]):
for j in xrange(args[idx].shape[1]):
fd_grad = _finite_diff1(_scalar_f(op_cls, args, R, idx, (i,j)),
args[idx].data[i,j], eps, y.data)
err = abs_rel_err(fd_grad,g_i.data[i,j])
self.failUnless( abs(err) < tol, (err, op_cls, idx, i, j))
else:
raise NotImplementedError()
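# A standalone sketch of the check that _test_grad automates (added for
# illustration; plain numpy/Python, no Op machinery assumed): perturb an input
# by eps, recompute the scalar cost, and compare the forward difference against
# the analytic derivative using the same relative-error criterion as above.
#   def _fd_demo():
#       x, eps = 3.0, 1e-7
#       cost = lambda v: v * v                    # cost(x) = x**2
#       analytic = 2.0 * x                        # d cost / dx = 2x
#       numeric = (cost(x + eps) - cost(x)) / eps
#       assert abs((numeric - analytic) / (numeric + analytic + eps)) < 1e-4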
def _testgrad_unary_elemwise_randnearzero(op_cls, n_tests=1,eps=0.000001, tol=0.0001):
class test_some_op_gradient(unittest.TestCase):
def setUp(self):
gof.lib.build_eval_mode()
numpy.random.seed([234,234,23333])
def tearDown(self):
gof.lib.pop_mode()
def test0(self):
"""Gradient Test with a small scalar"""
_test_grad(self, op_cls,
(Numpy2(data=(numpy.ones(()))*0.03),),
n_tests, eps, tol)
def test1(self):
"""Gradient Test with a medium scalar"""
_test_grad(self, op_cls,
(Numpy2(data=(numpy.ones(()))*1.03),),
n_tests, eps, tol)
def test2(self):
"""Gradient Test with a big scalar"""
_test_grad(self, op_cls,
(Numpy2(data=(numpy.ones(()))*90.03),),
n_tests, eps, tol)
def test3(self):
"""Gradient Test with a vector"""
_test_grad(self, op_cls,
(Numpy2(data=numpy.random.rand(3)+0.01),),
n_tests, eps, tol)
def test4(self):
"""Gradient Test with a matrix"""
_test_grad(self, op_cls,
(Numpy2(data=numpy.random.rand(2,3)*4),),
n_tests, eps, tol)
return test_some_op_gradient
neg_test = _testgrad_unary_elemwise_randnearzero(neg)
twice_test = _testgrad_unary_elemwise_randnearzero(twice)
exp_test = _testgrad_unary_elemwise_randnearzero(exp)
sqr_test = _testgrad_unary_elemwise_randnearzero(sqr)
sqrt_test = _testgrad_unary_elemwise_randnearzero(sqrt)
inv_test = _testgrad_unary_elemwise_randnearzero(inv_elemwise)
transpose_test = _testgrad_unary_elemwise_randnearzero(transpose)
def _testgrad_unary_elemwise_randpositive(op_cls, n_tests=1,eps=0.000001, tol=0.0001):
class test_some_op_gradient(unittest.TestCase):
def setUp(self):
gof.lib.build_eval_mode()
numpy.random.seed([234,234,23333])
def tearDown(self):
gof.lib.pop_mode()
def test0(self):
"""Gradient Test with a small scalar"""
_test_grad(self, op_cls,
(Numpy2(data=numpy.ones(())*0.03),),
n_tests, eps, tol)
def test1(self):
"""Gradient Test with a medium scalar"""
_test_grad(self, op_cls,
(Numpy2(data=numpy.ones(())*1.03),),
n_tests, eps, tol)
def test2(self):
"""Gradient Test with a big scalar"""
_test_grad(self, op_cls,
(Numpy2(data=numpy.ones(())*90.03),),
n_tests, eps, tol)
def test3(self):
"""Gradient Test with a vector"""
_test_grad(self, op_cls,
(Numpy2(data=numpy.random.rand(3)+0.01),),
n_tests, eps, tol)
def test4(self):
"""Gradient Test with a matrix"""
_test_grad(self, op_cls,
(Numpy2(data=numpy.random.rand(2,3)*4),),
n_tests, eps, tol)
return test_some_op_gradient
log_test = _testgrad_unary_elemwise_randpositive(log)
log2_test = _testgrad_unary_elemwise_randpositive(log2)
sqrt_test = _testgrad_unary_elemwise_randpositive(sqrt)
def _testgrad_binary_elemwise(op_cls, domain, n_tests=1,eps=0.000001, tol=0.0001):
class test_some_op_gradient(unittest.TestCase):
def setUp(self):
gof.lib.build_eval_mode()
numpy.random.seed([234,234,23333])
def tearDown(self):
gof.lib.pop_mode()
def mytest(self, *raw_args):
args = [Numpy2(data=d(a)) for a,d in zip(raw_args,domain)]
_test_grad(self, op_cls, args, n_tests, eps, tol)
def test0(self):
"""Gradient test low"""
self.mytest(numpy.zeros(()), numpy.zeros(()))
def test1(self):
"""Gradient test middle"""
self.mytest(numpy.ones(())*.5, numpy.ones(())*0.5)
def test2(self):
"""Gradient test high"""
self.mytest(numpy.ones(()), numpy.ones(()))
def test3(self):
"""Gradient test with a vector"""
self.mytest(numpy.random.rand(4),numpy.random.rand(4))
def test4(self):
"""Gradient test with a matrix"""
self.mytest(numpy.random.rand(3,2),numpy.random.rand(3,2))
return test_some_op_gradient
add_test = _testgrad_binary_elemwise(add_elemwise, [lambda x:(x-0.5)*50]*2)
sub_test = _testgrad_binary_elemwise(sub_elemwise, [lambda x:(x-0.5)*50]*2)
mul_test = _testgrad_binary_elemwise(mul_elemwise, [lambda x:(x-0.5)*50]*2)
div_test = _testgrad_binary_elemwise(div_elemwise, [lambda x:(x-0.4)*50]*2)
pow_test = _testgrad_binary_elemwise(pow_elemwise, [lambda x:x*10+0.01, lambda x:(x-0.5)*4])
def _testgrad_binary_scalar(op_cls, domain, n_tests=1,eps=0.000001, tol=0.0001):
class test_some_op_gradient(unittest.TestCase):
def setUp(self):
gof.lib.build_eval_mode()
numpy.random.seed([234,234,23333])
def tearDown(self):
gof.lib.pop_mode()
def mytest(self, *raw_args):
args = [Numpy2(data=domain[0](raw_args[0])),
Numpy2(data=_scalar(domain[1](raw_args[1])))]
#print repr(args[0].data), repr(args[1].data)
_test_grad(self, op_cls, args, n_tests, eps, tol)
def test0_low(self):
self.mytest(numpy.zeros(()), _scalar(0))
def test1_middle(self):
self.mytest(numpy.ones(())*.5, _scalar(0.5))
def test2_high(self):
self.mytest(numpy.ones(()), _scalar(1.0))
def test3_vector(self):
self.mytest(numpy.random.rand(4),_scalar(numpy.random.rand()))
def test4_matrix(self):
self.mytest(numpy.random.rand(3,2),_scalar(numpy.random.rand()))
test_some_op_gradient.__name__ = str(op_cls.__name__) + '_test'
return test_some_op_gradient
add_scalar_test = _testgrad_binary_scalar(add_scalar, [lambda x:(x-0.5)*50]*2)
mul_scalar_test = _testgrad_binary_scalar(mul_scalar, [lambda x:(x-0.5)*50]*2)
pow_scalar_l_test = _testgrad_binary_scalar(pow_scalar_l,
[lambda x:(x-0.5)*10, lambda x:(x+0.01)*10.0])
pow_scalar_r_test = _testgrad_binary_scalar(pow_scalar_r,
[lambda x:(x+0.01)*10, lambda x:(x-0.5)*10.0])
fill_test = _testgrad_binary_scalar(fill, [lambda x:(x-0.5)*50]*2)
class test_some_op_gradient(unittest.TestCase):
def setUp(self):
gof.lib.build_eval_mode()
numpy.random.seed([234,234,23333])
def tearDown(self):
gof.lib.pop_mode()
def mytest(self, *raw_args):
n_tests = 1
eps = 0.000001
tol=0.00001
args = [Numpy2(data=raw_args[0]),
Numpy2(data=raw_args[1])]
#print repr(args[0].data), repr(args[1].data)
_test_grad(self, dot, args, n_tests, eps, tol)
def test0(self):
"""Gradient test low"""
self.mytest(numpy.zeros(()), _scalar(0))
def test1(self):
"""Gradient test middle"""
self.mytest(_scalar(0.5), _scalar(0.5))
def test2(self):
"""Gradient test high"""
self.mytest(numpy.ones(()), _scalar(1.0))
def test3(self):
"""Gradient test dot with vectors"""
self.mytest(numpy.random.rand(4),numpy.random.rand(4))
def test4(self):
"""Gradient test dot with matrices"""
self.mytest(numpy.random.rand(3,2),numpy.random.rand(2,4))
def _notyet_test5(self):
"""Gradient test dot with 3d-tensor on left"""
self.mytest(numpy.random.rand(3,4,2),numpy.random.rand(2,5))
def _notyet_test6(self):
"""Gradient test dot with 3d-tensor on right"""
self.mytest(numpy.random.rand(4,2),numpy.random.rand(3,2,5))
class testCase_slicing(unittest.TestCase):
def setUp(self):
build_eval_mode()
def tearDown(self):
pop_mode()
def test_getitem0(self):
a = numpy.ones((4,4))
wa1 = wrap(a)[:,1]
try:
err = wa1 + a
except ValueError, e:
self.failUnless(str(e) == \
'The dimensions of the inputs do not match.',
'Wrong ValueError')
return
self.fail('add should not have succeeded')
def test_getitem1(self):
a = numpy.ones((4,4))
wa1 = wrap(a)[1]
self.failUnless(wa1.data.shape == (4,))
def test_getslice_0d_all(self):
"""Test getslice does not work on 0d array """
a = numpy.ones(())
try:
wa1 = wrap(a)[:]
except IndexError, e:
self.failUnless(str(e) == "0-d arrays can't be indexed.")
return
self.fail()
def test_getslice_1d_all(self):
"""Test getslice on 1d array"""
a = numpy.ones(4)
wa1 = wrap(a)[:]
self.failUnless(wa1.data.shape == (4,), 'wrong shape')
self.failUnless(numpy.all(wa1.data == a), 'unequal value')
a[1] = 3.4
self.failUnless(wa1.data[1] == 3.4, 'not a view')
try:
wa1[2] = 2.5
except TypeError, e:
self.failUnless("object does not support item assignment" in str(e))
return
self.fail()
def test_getslice_3d_all(self):
"""Test getslice on 3d array"""
a = numpy.ones((4,5,6))
wa1 = wrap(a)[:]
self.failUnless(wa1.data.shape == (4,5,6), 'wrong shape')
self.failUnless(numpy.all(wa1.data == a), 'unequal value')
a[1,1,1] = 3.4
self.failUnless(wa1.data[1,1,1] == 3.4, 'not a view')
def test_getslice_1d_some(self):
"""Test getslice on 1d array"""
a = numpy.ones(5)
wa1 = wrap(a)[1:3]
a[2] = 5.0
a[3] = 2.5
self.failUnless(wa1.data.shape == (2,))
self.failUnless(a[1] == wa1.data[0])
self.failUnless(a[2] == wa1.data[1])
def test_getslice_1d_step(self):
"""Test getslice on 1d array"""
a = numpy.ones(8)
wa1 = wrap(a)[0:8:2]
for i in xrange(8): a[i] = i
self.failUnless(wa1.shape == (4,))
for i in xrange(4):
self.failUnless(a[i*2] == wa1.data[i])
def test_getslice_3d_float(self):
"""Test getslice on 3d array"""
a = numpy.asarray(range(4*5*6))
a.resize((4,5,6))
wa1 = wrap(a)[1:3]
self.failUnless(wa1.shape == (2,5,6))
self.failUnless(numpy.all(a[1:3] == wa1.data))
a[1] *= -1.0
self.failUnless(numpy.all(a[1:3] == wa1.data))
def test_getslice_3d_one(self):
"""Test getslice on 3d array"""
a = numpy.asarray(range(4*5*6))
a.resize((4,5,6))
wa = wrap(a)
wa_123 = wa[1,2,3]
self.failUnless(wa_123.shape == (), wa_123.shape)
class test_Numpy2(unittest.TestCase):
def setUp(self):
build_eval_mode()
numpy.random.seed(44)
def tearDown(self):
pop_mode()
def test_0(self):
r = Numpy2()
def test_1(self):
o = numpy.ones((3,3))
r = Numpy2(data=o)
self.failUnless(r.data is o)
self.failUnless(r.shape == (3,3))
self.failUnless(str(r.dtype) == 'float64')
def test_2(self):
r = Numpy2(data=[(3,3),'int32'])
self.failUnless(r.data is None)
self.failUnless(r.shape == (3,3))
self.failUnless(str(r.dtype) == 'int32')
r.alloc()
self.failUnless(isinstance(r.data, numpy.ndarray))
self.failUnless(r.shape == (3,3))
self.failUnless(str(r.dtype) == 'int32')
def test_3(self):
a = Numpy2(data=numpy.ones((2,2)))
b = Numpy2(data=numpy.ones((2,2)))
c = add(a,b)
self.failUnless(_approx_eq(c, numpy.ones((2,2))*2))
def test_4(self):
ones = numpy.ones((2,2))
a = Numpy2(data=ones)
o = numpy.asarray(a)
self.failUnless((ones == o).all())
def test_5(self):
ones = numpy.ones((2,2))
self.failUnless(_approx_eq(Numpy2(data=ones), Numpy2(data=ones)))
class testCase_producer_build_mode(unittest.TestCase):
def test_0(self):
"""producer in build mode"""
build_mode()
a = ones(4)
self.failUnless(a.data is None, a.data)
self.failUnless(a.state is gof.result.Empty, a.state)
self.failUnless(a.shape == 4, a.shape)
self.failUnless(str(a.dtype) == 'float64', a.dtype)
pop_mode()
def test_1(self):
"""producer in build_eval mode"""
build_eval_mode()
a = ones(4)
self.failUnless((a.data == numpy.ones(4)).all(), a.data)
self.failUnless(a.state is gof.result.Computed, a.state)
self.failUnless(a.shape == (4,), a.shape)
self.failUnless(str(a.dtype) == 'float64', a.dtype)
pop_mode()
class testCase_add_build_mode(unittest.TestCase):
def setUp(self):
build_mode()
numpy.random.seed(44)
def tearDown(self):
pop_mode()
class testCase_dot(unittest.TestCase):
def setUp(self):
build_eval_mode()
numpy.random.seed(44)
def tearDown(self):
pop_mode()
@staticmethod
def rand(*args):
return numpy.random.rand(*args)
def cmp_dot(self,x,y):
def spec(x):
x = numpy.asarray(x)
return type(x), x.dtype, x.shape
zspec = dot.specs(spec(x), spec(y))
nz = numpy.dot(x,y)
self.failUnless(zspec == spec(nz))
self.failUnless(_approx_eq(dot(x,y), numpy.dot(x,y)))
def cmp_dot_comp(self, x,y):
x = numpy.asarray(x)
y = numpy.asarray(y)
z = dot(x,y)
p = compile.single(z)
if len(x.shape):
x[:] = numpy.random.rand(*x.shape)
else:
x.fill(numpy.random.rand(*x.shape))
if len(y.shape):
y[:] = numpy.random.rand(*y.shape)
else:
y.fill(numpy.random.rand(*y.shape))
p() # recalculate z
self.failUnless(_approx_eq(z, numpy.dot(x,y)))
def test_dot_0d_0d(self): self.cmp_dot(1.1, 2.2)
def test_dot_0d_1d(self): self.cmp_dot(1.1, self.rand(5))
def test_dot_0d_2d(self): self.cmp_dot(3.0, self.rand(6,7))
def test_dot_0d_3d(self): self.cmp_dot(3.0, self.rand(8,6,7))
def test_dot_1d_0d(self): self.cmp_dot(self.rand(5), 1.1 )
def test_dot_1d_1d(self): self.cmp_dot(self.rand(5), self.rand(5))
def test_dot_1d_2d(self): self.cmp_dot(self.rand(6), self.rand(6,7))
def test_dot_1d_3d(self): self.cmp_dot(self.rand(6), self.rand(8,6,7))
def test_dot_2d_0d(self): self.cmp_dot(self.rand(5,6), 1.0)
def test_dot_2d_1d(self): self.cmp_dot(self.rand(5,6), self.rand(6))
def test_dot_2d_2d(self): self.cmp_dot(self.rand(5,6), self.rand(6,7))
def test_dot_2d_3d(self): self.cmp_dot(self.rand(5,6), self.rand(8,6,7))
def test_dot_3d_0d(self): self.cmp_dot(self.rand(4,5,6), 1.0)
def test_dot_3d_1d(self): self.cmp_dot(self.rand(4,5,6), self.rand(6))
def test_dot_3d_2d(self): self.cmp_dot(self.rand(4,5,6), self.rand(6,7))
def test_dot_3d_3d(self): self.cmp_dot(self.rand(4,5,6), self.rand(8,6,7))
def test_dot_0d_0d_(self): self.cmp_dot_comp(1.1, 2.2)
def test_dot_0d_1d_(self): self.cmp_dot_comp(1.1, self.rand(5))
def test_dot_0d_2d_(self): self.cmp_dot_comp(3.0, self.rand(6,7))
def test_dot_0d_3d_(self): self.cmp_dot_comp(3.0, self.rand(8,6,7))
def test_dot_1d_0d_(self): self.cmp_dot_comp(self.rand(5), 1.1 )
def test_dot_1d_1d_(self): self.cmp_dot_comp(self.rand(5), self.rand(5))
def test_dot_1d_2d_(self): self.cmp_dot_comp(self.rand(6), self.rand(6,7))
def test_dot_1d_3d_(self): self.cmp_dot_comp(self.rand(6), self.rand(8,6,7))
def test_dot_2d_0d_(self): self.cmp_dot_comp(self.rand(5,6), 1.0)
def test_dot_2d_1d_(self): self.cmp_dot_comp(self.rand(5,6), self.rand(6))
def test_dot_2d_2d_(self): self.cmp_dot_comp(self.rand(5,6), self.rand(6,7))
def test_dot_2d_3d_(self): self.cmp_dot_comp(self.rand(5,6), self.rand(8,6,7))
def test_dot_3d_0d_(self): self.cmp_dot_comp(self.rand(4,5,6), 1.0)
def test_dot_3d_1d_(self): self.cmp_dot_comp(self.rand(4,5,6), self.rand(6))
def test_dot_3d_2d_(self): self.cmp_dot_comp(self.rand(4,5,6), self.rand(6,7))
def test_dot_3d_3d_(self): self.cmp_dot_comp(self.rand(4,5,6), self.rand(8,6,7))
def test_dot_fail_1_1(self):
x = numpy.random.rand(5)
y = numpy.random.rand(6)
try:
z = dot(x,y)
except ValueError, e:
self.failUnless(str(e) == 'objects are not aligned', e)
return
self.fail()
def test_dot_fail_1_2(self):
x = numpy.random.rand(5)
y = numpy.random.rand(6,4)
try:
z = dot(x,y)
except ValueError, e:
self.failUnless(str(e) == 'objects are not aligned', e)
return
self.fail()
def test_dot_fail_1_3(self):
x = numpy.random.rand(5)
y = numpy.random.rand(6,4,7)
try:
z = dot(x,y)
except ValueError, e:
self.failUnless(str(e) == 'objects are not aligned', e)
return
self.fail()
def test_dot_fail_2_1(self):
x = numpy.random.rand(5,4)
y = numpy.random.rand(6)
try:
z = dot(x,y)
except ValueError, e:
self.failUnless(str(e) == 'objects are not aligned', e)
return
self.fail()
def test_dot_fail_2_2(self):
x = numpy.random.rand(5,4)
y = numpy.random.rand(6,7)
try:
z = dot(x,y)
except ValueError, e:
self.failUnless(str(e) == 'objects are not aligned', e)
return
self.fail()
def test_dot_fail_2_3(self):
x = numpy.random.rand(5,4)
y = numpy.random.rand(6,7,8)
try:
z = dot(x,y)
except ValueError, e:
self.failUnless(str(e) == 'objects are not aligned', e)
return
self.fail()
def test_dot_fail_3_1(self):
x = numpy.random.rand(5,4,3)
y = numpy.random.rand(6)
try:
z = dot(x,y)
except ValueError, e:
self.failUnless(str(e) == 'objects are not aligned', e)
return
self.fail()
def test_dot_fail_3_2(self):
x = numpy.random.rand(5,4,3)
y = numpy.random.rand(6,7)
try:
z = dot(x,y)
except ValueError, e:
self.failUnless(str(e) == 'objects are not aligned', e)
return
self.fail()
def test_dot_fail_3_3(self):
x = numpy.random.rand(5,4,3)
y = numpy.random.rand(6,7,8)
try:
z = dot(x,y)
except ValueError, e:
self.failUnless(str(e) == 'objects are not aligned', e)
return
self.fail()
class testCase_transpose(unittest.TestCase):
def setUp(self):
build_eval_mode()
def tearDown(self):
pop_mode()
def test_1d_alias(self):
a = numpy.ones(10)
ta = transpose(a)
self.failUnless(ta.data.shape == a.shape)
self.failUnless(numpy.all(ta.data == a))
a[3] *= -1.0
self.failUnless(numpy.all(ta.data == a))
def test_1d_copy(self):
a = numpy.ones(10)
ta = transpose_copy(a)
self.failUnless(ta.data.shape == a.shape)
self.failUnless(numpy.all(ta.data == a))
a[3] *= -1.0
self.failIf(numpy.all(ta.data == a))
def test_2d_alias(self):
a = numpy.ones((10,3))
ta = transpose(a)
self.failUnless(ta.data.shape == (3,10))
def test_3d_alias(self):
a = numpy.ones((10,3,5))
ta = transpose(a)
self.failUnless(ta.data.shape == (5,3,10))
a[9,0,0] = 5.0
self.failUnless(ta.data[0,0,9] == 5.0)
def test_3d_copy(self):
a = numpy.ones((10,3,5))
ta = transpose_copy(a)
self.failUnless(ta.data.shape == (5,3,10))
a[9,0,0] = 5.0
self.failUnless(ta.data[0,0,9] == 1.0)
class testCase_power(unittest.TestCase):
def setUp(self):
build_eval_mode()
numpy.random.seed(44)
def tearDown(self):
pop_mode()
def test1(self):
r = numpy.random.rand(50)
exp_r = exp(r)
self.failUnless(exp_r.__array__().__class__ is numpy.ndarray)
def test_0(self):
r = numpy.random.rand(50)
exp_r = exp(r)
n_exp_r = numpy.exp(r)
self.failUnless( _approx_eq(exp_r, n_exp_r),
(exp_r, exp_r.data, n_exp_r,
numpy.max(numpy.abs(n_exp_r.__sub__(exp_r.__array__())))))
log_exp_r = log(exp_r)
self.failUnless( _approx_eq(log_exp_r, r), log_exp_r)
def test_1(self):
r = numpy.random.rand(50)
r2 = pow(r,2)
self.failUnless( _approx_eq(r2, r*r))
if __name__ == '__main__':
unittest.main()
if __name__ == '__main__':
......
@@ -6,15 +6,15 @@ import gof
_optimizations = None
def prog_py_opt(inputs, outputs, features=[]):
def exec_py_opt(inputs, outputs, features=[]):
"""Return an optimized graph running purely python implementations"""
return Prog(inputs, outputs, features, _optimizations, gof.link.PerformLinker, False)
return Function(inputs, outputs, features, _optimizations, gof.link.PerformLinker, False)
def prog_opt(inputs, outputs, features=[]):
def exec_opt(inputs, outputs, features=[]):
"""Return a fast implementation"""
return Prog(inputs, outputs, features, _optimizations, gof.link.PerformLinker, False)
return Function(inputs, outputs, features, _optimizations, gof.link.PerformLinker, False)
class Prog:
class Function:
"""An 'executable' compiled from a graph
This class is meant to be used as a function: the idea is to use
......
class Grad(object):
"""A dictionary-like class, into which derivative expressions may be added.
This class maps keys to their ids to deal with the ndarray, which is not
hashable.
Attributes: None
Methods:
add()
bprop()
__call__()
__getitem__()
"""
def __init__(self, dct={}):
self.map = {}
self.outputs = []
self._compute_history = set([])
self.did_bprop = False
for key,val in dct.items():
self.add_output(key,val)
def __contains__(self, item):
return item in self.map
def __getitem__(self, item):
"""Map item to its id and retrieve it."""
try:
return self.map[item]
except KeyError:
return Undefined
def __setitem__(self, item, val):
"""Map item to its id and store internally."""
self.map[item] = val
def add_output(self, r, dr):
self.add(r, dr)
self.outputs.append(r)
def add(self, r, dr):
"""Add dr to the sum of gradients associated with r.
This function should be fed as follows:
if dr is undefined:
r could be anything
else dr might be core.UNCOMPUTED:
r may be uncomputed or NumpyR
else dr will be isinstance(NumpyR):
r may be uncomputed or NumpyR
"""
if dr is Undefined:
# nothing to do
return
# if r.data is not None and dr.data is not None:
# if not hasattr(r, 'shape'):
# raise ValueError(('Grad::add r lacks shape: type=',
# type(r)))
# if not hasattr(dr, 'shape'):
# raise ValueError(('Grad::add dr lacks shape: type=',
# type(dr)))
# if r.shape != dr.shape:
# raise ValueError(('Grad::add r, dr shape mismatch',
# r.shape, dr.shape))
# prevent 'r' from being re-calculated by self.__call__ in 'build_eval' mode
if r.state is gof.result.Computed:
self._compute_history.add(r)
# add dr to self[r]
if r in self:
self[r] = self[r] + dr
else:
self[r] = dr
def bprop(self, maybe_redo=False):
"""Build a backpropagation graph.
The gradient associated with each value is stored in <self>, which
behaves like a dictionary. The idea is that when we call
op.update_gradient(self), the op's update_gradient function calls
back into <self>.add(), and says what gradient term goes with each of
its inputs. Most of the time, the gradients of the op's outputs are
necessary for the op to compute the gradient wrt its inputs, so
op.update_gradient will usually call <self>.__getitem__, (via the
[] notation).
It is essential that the gradient of an op's outputs be fully computed
before op.update_gradient is called, or else key errors may be raised
and incorrect gradients will be computed.
bprop sets the omega evaluation mode to be 'build', so no computations
or allocations are done by bprop.
"""
if not maybe_redo and self.did_bprop:
raise Exception('bprop has already been done. Consider calling with maybe_redo=True.')
try:
outputs = self.outputs
inputs = gof.graph.inputs(outputs)
for op in gof.graph.io_toposort(inputs, outputs).__reversed__():
op.update_gradient(self)
finally:
self.did_bprop = True
def __call__(self, item):
"""Return a derivative term.
If the current omega evaluation mode is 'build_eval' then the node is
computed if necessary.
"""
if not self.did_bprop:
raise Exception('Grad.__call__ only makes sense after a bprop')
rval = self[item]
if rval is not Undefined:
compute_from([rval], self._compute_history)
return rval
def grad(cost, param=None, cost_grad = 1.0):
"""Return symbolic expression of gradient of <cost> wrt <param>.
If <param> is None, then return a Grad instance, from which the gradients of
multiple objects can be retrieved using the __getitem__ or __call__ methods
(as in function currying in languages such as Scheme and OCaml).
If <param> is not None, then return the gradient expression for
d cost / d param.
"""
if core.current_mode() == 'eval':
raise NotImplementedError('Gradient-related functions are not available in eval mode')
rval = Grad({cost:core.wrap(cost_grad)})
rval.bprop()
if param is None:
return rval
else:
return rval(param)
class update_gradient_via_grad:
"""Inherit from this class to add a convenient self.update_gradient function"""
def update_gradient(self, grad_d):
"""Call self.grad() and add the result to grad_d
This function is called by grad.Grad.bprop() to construct a symbolic gradient graph.
self.grad is called like this:
self.grad(*(self.inputs + [grad_d[output] for output in self.outputs]))
In general, grad() should return a list of ResultValue instances whose
length matches that of self.inputs, and whose elements are the
gradients of self.inputs.
There is a small (but often used) convenience in place: the return value
of grad() is automatically wrapped in a list if it is a ResultValue
instance and the op is unary. This makes many grad implementations a
little tidier.
"""
inputgs = self.grad(*(self.inputs + [grad_d[output] for output in self.outputs]))
if len(self.inputs) == 1 and is_result(inputgs):
inputgs = [inputgs]
else:
assert len(inputgs) == len(self.inputs)
for input, inputg in zip(self.inputs, inputgs):
grad_d.add(input, inputg)
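# Illustrative sketch only (added; hypothetical op, not part of the original
# commit). Modeled on the posneg example in the commented-out tests below: a
# unary op that mixes in update_gradient_via_grad and relies on the automatic
# wrapping of a single ResultValue described above.
#   class sqr_demo(core.omega_op, update_gradient_via_grad):
#       def impl(x): return x * x
#       def grad(x, gz): return 2.0 * x * gz     # returned bare; wrapped into a list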
# import gof
# from gof.lib import compute_from, is_result
# import core
# class Undefined:
# """A special class representing a gradient of 0"""
# class Grad(object):
# """A dictionary-like class, into which derivative expressions may be added.
# This class maps keys to their ids to deal with the ndarray, which is not
# hashable.
# Attributes: None
# Methods:
# add()
# bprop()
# __call__()
# __getitem__()
# """
# def __init__(self, dct={}):
# self.map = {}
# self.outputs = []
# self._compute_history = set([])
# self.did_bprop = False
# for key,val in dct.items():
# self.add_output(key,val)
# def __contains__(self, item):
# return item in self.map
# def __getitem__(self, item):
# """Map item to its id and retrieve it."""
# key = core.wrap(item)
# try:
# return self.map[key]
# except KeyError:
# return Undefined
# def __setitem__(self, item, val):
# """Map item to its id and store internally."""
# self.map[item] = val
# def add_output(self, r, dr):
# self.add(r, dr)
# self.outputs.append(r)
# def add(self, r, dr):
# """Add dr to the sum of gradients associated with r.
# This function should be fed as follows:
# if dr is undefined:
# r could be anything
# else dr might be core.UNCOMPUTED:
# r may be uncomputed or NumpyR
# else dr will be isinstance(NumpyR):
# r may be uncomputed or NumpyR
# """
# if dr is Undefined:
# # nothing to do
# return
# if r.data is not None and dr.data is not None:
# if not hasattr(r, 'shape'):
# raise ValueError(('Grad::add r lacks shape: type=',
# type(r)))
# if not hasattr(dr, 'shape'):
# raise ValueError(('Grad::add dr lacks shape: type=',
# type(dr)))
# if r.shape != dr.shape:
# raise ValueError(('Grad::add r, dr shape mismatch',
# r.shape, dr.shape))
# # prevent 'r' from being re-calculated by self.__call__ in 'build_eval' mode
# if r.state is gof.result.Computed:
# self._compute_history.add(r)
# # add dr to self[r]
# if r in self:
# self[r] = self[r] + dr
# else:
# self[r] = dr
# def bprop(self, maybe_redo=False):
# """Build a backpropagation graph.
# The gradient associated with each value is stored in <self> which
# inherits from dictionary. The idea is that when we call
# op.update_gradient(self), that the op's update_gradient function calls
# back into <self>.add(), and says what gradient term goes with each of
# its inputs. Most of the time, the gradients of the op's outputs are
# necessary for the op to compute the gradient wrt its inputs, so
# op.update_gradient will usually call <self>.__getitem__, (via the
# [] notation).
# It is essential that the gradient of an op's outputs be fully computed
# before op.update_gradient is called, or else key errors may be raised
# and incorrect gradients will be computed.
# bprop sets the omega evaluation mode to be 'build', so no computations
# or allocations are done by bprop.
# """
# if not maybe_redo and self.did_bprop:
# raise Exception('bprop has already been done. Consider calling with maybe_redo=True.')
# core.build_mode()
# try:
# outputs = self.outputs
# inputs = gof.graph.inputs(outputs)
# for op in gof.graph.io_toposort(inputs, outputs).__reversed__():
# op.update_gradient(self)
# finally:
# core.pop_mode()
# self.did_bprop = True
# def __call__(self, item):
# """Return a derivative term.
# If the current omega evaluation mode is 'build_eval' then the node is
# computed if necessary.
# """
# if not self.did_bprop:
# raise Exception('Grad.__call__ only makes sense after a bprop')
# rval = self[item]
# if rval is not Undefined \
# and core.current_mode() == 'build_eval':
# compute_from([rval], self._compute_history)
# return rval
# def grad(cost, param=None, cost_grad = 1.0):
# """Return symbolic expression of gradient of <cost> wrt <param>.
# If <param> is None, then return a Grad instance, from which the gradients of
# multiple objects can be retrieved using the __getitem__ or __call__ methods
# (as in function currying in languages such as scheme and OCaML).
# If <param> is not None, then return the gradient expression for
# d cost / d param.
# """
# if core.current_mode() == 'eval':
# raise NotImplementedError('Gradient-related functions are not available in eval mode')
# rval = Grad({cost:core.wrap(cost_grad)})
# rval.bprop()
# if param is None:
# return rval
# else:
# return rval(param)
# class update_gradient_via_grad:
# """Inherit from this class to add a convenient self.update_gradient function"""
# def update_gradient(self, grad_d):
# """Call self.grad() and add the result to grad_d
# This function is called by grad.Grad.bprop() to construct a symbolic gradient graph.
# self.grad is called like this:
# self.grad(*(self.inputs + [grad_d[output] for output in self.outputs]))
# In general, grad() should return a list of ResultValue instances whose
# length matches that of self.inputs, and whose elements are the
# gradients of self.inputs.
# There is a (but often used) special feature in place to automatically
# wrap the return value of grad() in a list if it is a ResultValue instance
# and the op is unary. This makes many grad implementations a little
# cuter.
# """
# inputgs = self.grad(*(self.inputs + [grad_d[output] for output in self.outputs]))
# if len(self.inputs) == 1 and is_result(inputgs):
# inputgs = [inputgs]
# else:
# assert len(inputgs) == len(self.inputs)
# for input, inputg in zip(self.inputs, inputgs):
# grad_d.add(input, inputg)
# #
# # UNIT TEST
# #
# import unittest
# import numpy
# import compile
# class _testCase (unittest.TestCase):
# class posneg(core.omega_op):
# nout=2
# def impl(x): return x, -x
# def grad(x, gpos, gneg): return gpos - gneg
# class posnegzero(core.omega_op):
# nout=3
# def impl(x): return x, -x, 0.0
# def grad(x, gpos, gneg, gzero): return gpos - gneg
# def setUp(self):
# numpy.random.seed(1)
# core.build_eval_mode()
# def matinv(self,dim):
# w = core.wrap(numpy.random.rand(dim,dim))
# wi = core.wrap(numpy.random.rand(dim,dim))
# ident = core.wrap(numpy.identity(dim))
# for i in xrange(300):
# wwi = core.dot(w, wi)
# diff = wwi - ident
# ssdiff = core.sum((diff**2))
# if i == 0:
# str0 = str_ssdiff = str(ssdiff.data)
# #print ssdiff
# g = grad(ssdiff)
# gw = g(w)
# w.data[:] += -0.4 * gw.data
# return str0, str(ssdiff.data)
# def matinv_compiled(self, dim):
# w = core.wrap(numpy.random.rand(dim,dim))
# wi = core.wrap(numpy.random.rand(dim,dim))
# ident = core.wrap(numpy.identity(dim))
# wwi = core.dot(w, wi)
# diff = wwi - ident
# ssdiff = core.sum((diff**2))
# str0 = str_ssdiff = str(ssdiff.data)
# #print ssdiff
# g = grad(ssdiff)
# gw = g(w)
# prog = compile.single(g(w),ssdiff)
# for i in xrange(300):
# prog()
# w.data[:] += -0.4 * gw.data
# return str0, str(ssdiff.data)
# def test0(self):
# """Matrix inversion by gradient descent (eval mode)"""
# self.assertEqual(('2.67327580893', '0.000438649434819'), self.matinv(3))
# def test1(self):
# """Matrix inversion by gradient descent (compiled mode)"""
# self.assertEqual(('2.67327580893', '0.000438649434819'),
# self.matinv_compiled(3))
# def test_grad_wrt_ndarray_pointer(self):
# """Grad indexing by un-wrapped ndarray"""
# a = numpy.ones((4, 4))
# b = numpy.ones((4, 4))
# c = numpy.ones((4, 4))
# expr = core.sum(core.dot(core.add(a, b), c))
# g = grad(expr)
# g[a]
# def test_bprop_call_order(self):
# """Ensure call before bprop is illegal"""
# a = numpy.ones((3,3,3))
# b = core.exp(a)
# gb = Grad({b:core.wrap(a)})
# try:
# gb(a)
# self.assertEqual('should have raised',0)
# except Exception, e:
# self.assertEqual(str(e), 'Grad.__call__ only makes sense after a bprop')
# return
# self.assertEqual('should have caught, returned',0)
# def test_undefined_grad0(self):
# """Make sure posneg works with fully specified gradients"""
# a = numpy.ones((3,3,3))
# b,c = _testCase.posneg(a)
# g = Grad({b:core.wrap(a),c:core.wrap(a)})
# g.bprop()
# max = numpy.max(g(a))
# min = numpy.min(g(a))
# self.assertEqual(max, min)
# self.assertEqual(max, 0.0)
# def test_undefined_grad1(self):
# """Propagate undefined values through posneg's first gradient"""
# a = numpy.ones((3,3,3))
# b,c = _testCase.posneg(a)
# gb = Grad({b:core.wrap(a)})
# try:
# gb.bprop()
# self.assertEqual('should have raised',0)
# except AttributeError, e:
# self.assertEqual(str(e), "class Undefined has no attribute 'shape'")
# return
# self.assertEqual("Should have been error", 0)
# def test_undefined_grad2(self):
# """Propagate undefined values through posneg's second gradient"""
# a = numpy.ones((3,3,3))
# b,c = _testCase.posneg(a)
# gc = Grad({c:core.wrap(a)})
# try:
# gc.bprop()
# self.assertEqual('should have raised',0)
# except AttributeError, e:
# self.assertEqual(str(e), "class Undefined has no attribute 'shape'")
# return
# self.assertEqual("Should have been error", 0)
# def test_undefined_grad3(self):
# """Ignore undefined values properly"""
# a = numpy.ones((3,3,3))
# b,c,d = _testCase.posnegzero(a)
# #print b, c, d
# g = Grad({b:core.wrap(a), c:core.wrap(a)})
# g.bprop()
# max = numpy.max(g(a))
# min = numpy.min(g(a))
# self.assertEqual(max, min)
# self.assertEqual(max, 0.0)
# def test_repeat_bprop(self):
# """Refuse to repeat bprop"""
# a = numpy.ones((3,3,3))
# b,c,d = _testCase.posnegzero(a)
# #print b, c, d
# g = Grad({b:core.wrap(a), c:core.wrap(a)})
# g.bprop()
# try:
# g.bprop()
# self.assertEqual('should have raised')
# except Exception, e:
# self.assertEqual(str(e), 'bprop has already been done. Consider calling with maybe_redo=True.')
# return
# self.assertEqual('should have caught')
# def test_repeat_bprop1(self):
# """Force repeat bprop"""
# a = numpy.ones((3,3,3))
# z = numpy.zeros((3,3,3))
# b,c,d = _testCase.posnegzero(a)
# #print b, c, d
# g = Grad({b:core.wrap(a), c:core.wrap(z)})
# g.bprop()
# g.bprop(maybe_redo=True)
# max = numpy.max(g(a))
# min = numpy.min(g(a))
# self.assertEqual(max, min)
# self.assertEqual(max, 2.0)
# def tearDown(self):
# core.pop_mode()
# if __name__ == '__main__':
# unittest.main()
import gof
class OrderError(Exception):
"""Grad has been manipulated in the wrong order"""
class Grad(object):
"""A dictionary-like class, into which derivative expressions may be added.
Attributes:
map - dict: result -> grad(result)
outputs - list: results from which to backpropagate gradient
did_bprop - bool: has bprop been called?
items_got - set: results for which we have returned the gradient
Methods:
add() - accumulate a gradient expression
bprop() - recursively construct gradient expressions
__call__() - retrieve the gradient wrt a given Op or result
__getitem__() - retrieve the gradient wrt a given Op or result
This class operates on graphs of nodes which implement the UpdateGradient interface.
"""
def __init__(self, dct={}):
self.map = {}
self.outputs = []
self.did_bprop = False
self.items_got = set([])
for key,val in dct.items():
self.add_output(key,val)
def __contains__(self, item):
return item in self.map
def __getitem__(self, r):
"""Return the gradient wrt result r
r is also added to the set of things for which the gradient has been
given. Subsequent attempts to modify the gradient wrt r will fail
with exception FixedGradientError.
"""
self.items_got.add(r)
try:
return self.map[r]
except KeyError:
return None
def __call__(self, r):
"""Return the gradient wrt result r"""
return self.__getitem__(r)
def add_output(self, r, dr):
self.add(r, dr)
self.outputs.append(r)
def add(self, r, dr):
"""Add dr to the sum of gradients associated with r."""
if r in self.items_got:
raise OrderError('gradient has already been retrieved', r)
if r in self.map:
self.map[r] = self.map[r] + dr
else:
self.map[r] = dr
def bprop(self):
"""Build a backpropagation graph.
This function traverses the graph backward from self.outputs, calling
update_gradient on the ops as it goes. Ops without an update_gradient
function are considered not differentiable. The update_gradient
function is defined in the UpdateGradient class.
"""
if self.did_bprop:
raise OrderError('bprop has already been done')
try:
outputs = self.outputs
inputs = gof.graph.inputs(outputs)
for op in gof.graph.io_toposort(inputs, outputs).__reversed__():
op.update_gradient(self)
finally:
self.did_bprop = True
def grad(cost, param=None, cost_grad = 1.0):
"""Return symbolic expression of gradient of <cost> wrt <param>.
If <param> is None, then return a Grad instance, from which the gradients of
multiple objects can be retrieved using the __getitem__ or __call__ methods
(as in function currying in languages such as Scheme and OCaml).
If <param> is not None, then return the gradient expression for
d cost / d param.
"""
rval = Grad({cost:cost_grad})
rval.bprop()
if param is None:
return rval
else:
return rval(param)
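# Hedged usage sketch (added for illustration, assuming the tensor Ops defined
# later in this commit; w stands for some wrapped result): grad() can be used
# curried or asked for a single gradient directly.
#   y = sum(sqr(w))        # a scalar cost
#   g = grad(y)            # a Grad instance ...
#   gw = g(w)              # ... from which d y / d w is retrieved
#   gw2 = grad(y, w)       # or request one gradient in a single call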
class UpdateGradient:
"""This class defines the interface that Grad.bprop expects of each
differentiable Op"""
def update_gradient(self, grad_d):
"""Override this function to call grad_d.add(r,grad_r) for each
differentiable input result, r.
You can assume that the gradient with respect to all output results
has been accumulated in grad_d. These expressions are available by
calling grad_d[o] for o in self.outputs. If grad_d[o] returns None,
then this function should assume that grad_d[o] is an appropriate sort
of zero.
"""
raise AbstractFunctionError()
class SelfGrad (UpdateGradient):
"""This class implements update_gradient in terms of the popular self.grad
This class defines update_gradient (necessary for Grad.bprop) to call a
self.grad function like this:
if len(self.outputs) > 1:
self.grad(self.inputs, [grad_d[o] for o in self.outputs])
else
self.grad(self.inputs, grad_d[output[0]])
self.grad() is an Abstract function, see its documentation for the
expected behaviour.
"""
def update_gradient(self, grad_d):
#Call self.grad(inputs, output_gradients) and add the result to grad_d
if len(self.outputs) > 1:
inputgs = self.grad(self.inputs, [grad_d[o] for o in self.outputs])
else:
inputgs = self.grad(self.inputs, grad_d[self.outputs[0]])
if len(self.inputs) == 1 and is_result(inputgs):
inputgs = [inputgs]
else:
assert len(inputgs) == len(self.inputs)
for input, inputgrad in zip(self.inputs, inputgs):
grad_d.add(input, inputgrad)
def grad(self, *args):
"""Return gradient expressions wrt input arguments
If len(self.inputs)==1 : return the input gradient expression
If len(self.inputs)>=2 : return a list of input gradient expressions
"""
raise AbstractFunctionError()
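# Illustrative sketch (added; hypothetical op, not part of the original commit):
# a binary Op built on SelfGrad returns one gradient expression per input, in
# order, as a list. The signature mirrors how SelfGrad.update_gradient calls
# self.grad(self.inputs, grad_d[self.outputs[0]]) for single-output ops.
#   class mul_demo(_TensorOp):                   # _TensorOp (below) mixes in SelfGrad
#       nin = 2
#       def impl(self, x, y): return x * y
#       def grad(self, inputs, gz):
#           x, y = inputs
#           return [gz * y, gz * x]              # one gradient expression per input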
from tensor import *
from gof import Op, utils, Destroyer, Viewer
import gof.op
import gradient
from tensor import *
def upcast(dtype, *dtypes):
def _upcast(dtype, *dtypes):
z = numpy.zeros((), dtype = dtype)
for dtype in dtypes:
z = z + numpy.zeros((), dtype = dtype)
return str(z.dtype)
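# Hedged example (added): _upcast leans on numpy's promotion rules by adding
# zero-dimensional arrays of each dtype together.
#   _upcast('int32', 'float64')    # -> 'float64'
#   _upcast('int8', 'int8')        # -> 'int8'
#   _upcast('uint8', 'int8')       # -> 'int16'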
def wrap_as_tensor(x):
if isinstance(x, Tensor):
def _wrap_as_tensor(x):
if isinstance(x,Op):
return _wrap_as_tensor(x.out)
elif isinstance(x, Tensor):
return x
else:
return Tensor(data=x, constant=True)
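# Illustrative behaviour (added, not in the original commit; the names
# existing_tensor and some_op are placeholders):
#   _wrap_as_tensor(numpy.ones(3))     # -> a constant Tensor holding the data
#   _wrap_as_tensor(existing_tensor)   # -> returned unchanged
#   _wrap_as_tensor(some_op)           # -> recurses into some_op.out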
class TensorOp(Op):
# _TensorOp is a convenient base class that factors out code shared by the
# Ops in this file.
# It is not necessary to inherit from _TensorOp to make an Op that manipulates
# Tensors.
class _TensorOp(Op, gradient.SelfGrad):
nin = -1
nout = 1
cast_method = lambda self, *args: upcast(*args)
cast_method = lambda self, *args: _upcast(*args)
def __init__(self, *inputs):
inputs = map(wrap_as_tensor, inputs)
inputs = map(_wrap_as_tensor, inputs)
if self.nin >= 0:
if len(inputs) != self.nin:
@@ -69,10 +78,10 @@ class TensorOp(Op):
class UnaryTensorOp(TensorOp):
class UnaryTensorOp(_TensorOp):
nin = 1
class BinaryTensorOp(TensorOp):
class BinaryTensorOp(_TensorOp):
nin = 2
@@ -104,7 +113,7 @@ class BinaryTensorOp(TensorOp):
def scalar_switch(normal_f, scalar_f, scalar_f_reverse = None):
def f(x, y):
x, y = wrap_as_tensor(x), wrap_as_tensor(y)
x, y = _wrap_as_tensor(x), _wrap_as_tensor(y)
if 0 not in y.broadcastable:
return scalar_f(x, y)
if 0 not in x.broadcastable:
@@ -129,7 +138,7 @@ def assert_tensor_scalar(x, a):
class Elemwise(TensorOp):
class Elemwise(_TensorOp):
@staticmethod
def extract_name(name):
@@ -211,7 +220,7 @@ class TensorScalarOp(Elemwise):
## Dot ##
#########
class Dot(TensorOp):
class Dot(_TensorOp):
@staticmethod
def _output_shape(xshape, yshape):
# This describes the logic to calculate numpy.dot(x, y).shape
@@ -454,7 +463,7 @@ class Fill(Elemwise):
#### Unary Operations ####
##########################
class Transpose(TensorOp, Viewer):
class Transpose(_TensorOp, Viewer):
def view_map(self):
return {self.out: [self.inputs[0]]}
def impl(self, x):
@@ -754,6 +763,8 @@ Tensor.__mul__ = mul
Tensor.__iadd__ = add_inplace
Tensor.__isub__ = sub_inplace
Tensor.__imul__ = mul_inplace
Tensor.__pow__ = pow
Tensor.__ipow__ = pow_inplace
Tensor.T = property(transpose)