Commit 05d6f2fe authored by Shawn Tan

Refactored AllocDiag tests for both GPU and CPU.

Parent ee382960
...@@ -5,7 +5,7 @@ import unittest ...@@ -5,7 +5,7 @@ import unittest
import theano import theano
from theano import tensor from theano import tensor
from theano.compile import DeepCopyOp from theano.compile import DeepCopyOp
from theano.tensor.tests import test_subtensor from theano.tensor.tests import test_subtensor, test_basic
from theano.tests import unittest_tools as utt from theano.tests import unittest_tools as utt
from ..basic_ops import HostFromGpu, GpuFromHost, GpuContiguous from ..basic_ops import HostFromGpu, GpuFromHost, GpuContiguous
...@@ -318,6 +318,15 @@ class test_gpuextractdiag(unittest.TestCase): ...@@ -318,6 +318,15 @@ class test_gpuextractdiag(unittest.TestCase):
np_x.diagonal(offset, axis1, axis2)) np_x.diagonal(offset, axis1, axis2))
class test_gpu_alloc_diag(test_basic.test_alloc_diag):
    """GPU variant of the shared AllocDiag test suite.

    Reuses every test defined on ``test_basic.test_alloc_diag`` but
    substitutes the GPU op (``GpuAllocDiag``) and compiles the graphs
    under the GPU-enabled mode (``mode_with_gpu``).
    """

    def __init__(self, name):
        # Delegate straight to the shared CPU test class, swapping in
        # the GPU op and the GPU compilation mode.
        test_basic.test_alloc_diag.__init__(
            self,
            name,
            alloc_diag=GpuAllocDiag,
            mode=mode_with_gpu,
        )
class test_gpuallocdiag(unittest.TestCase): class test_gpuallocdiag(unittest.TestCase):
def test_allocdiag_opt(self): def test_allocdiag_opt(self):
x = tensor.vector() x = tensor.vector()
......
...@@ -7561,56 +7561,86 @@ class test_diag(unittest.TestCase): ...@@ -7561,56 +7561,86 @@ class test_diag(unittest.TestCase):
tensor.verify_grad(diag, [x], rng=rng) tensor.verify_grad(diag, [x], rng=rng)
class test_alloc_diag(unittest.TestCase):
    """Parameterized tests for the AllocDiag op.

    The op under test and the compilation mode are injectable so that a
    subclass (e.g. the GPU test class) can run the exact same checks
    against ``GpuAllocDiag`` under ``mode_with_gpu``.
    """

    def __init__(self, name, alloc_diag=AllocDiag, mode=None):
        # BUG FIX: the original assigned the class constant ``AllocDiag``
        # here, silently discarding the injected op — the GPU subclass
        # would therefore re-test the CPU op. Store the parameter instead.
        self.alloc_diag = alloc_diag

        if mode is None:
            mode = theano.compile.mode.get_default_mode()
        self.mode = mode

        return super(test_alloc_diag, self).__init__(name)

    def _generator(self):
        """Yield ``(x, test_val)`` pairs for ranks 1..4.

        ``x`` is a symbolic TensorType variable of rank ``d`` and
        ``test_val`` is a matching numeric slice of a random 4-D array,
        obtained by fixing the leading ``4 - d`` indices to 0.
        """
        dims = 4
        shape = (5,) * dims
        xv = np.random.randn(*shape).astype(config.floatX)
        for d in xrange(1, dims + 1):
            # Create a TensorType of the same number of dimensions
            # as the data we want to test.
            x = TensorType(dtype=config.floatX,
                           broadcastable=(False,) * d)('x')

            # Make a slice of the test data that has the dimensions
            # we need by doing xv[0, ..., 0]; e.g. for d == 1 this is
            # xv[0, 0, 0] leaving a shape-(5,) vector.
            test_val = xv[((0,) * (dims - d))]
            yield x, test_val

    def test_alloc_diag_values(self):
        """Check values, infer_shape, and gradients of the op for a
        range of (offset, axis1, axis2) configurations."""
        for x, test_val in self._generator():
            for offset, axis1, axis2 in [(0, 0, 1), (0, 1, 2), (1, 0, 1),
                                         (0, 1, 3), (0, 2, 3), (1, 2, 3),
                                         (-1, 0, 1), (-2, 0, 1),
                                         (-1, 1, 2)]:
                # Skip configurations whose axes don't exist for this
                # input rank.
                if np.maximum(axis1, axis2) > len(test_val.shape):
                    continue

                # Test AllocDiag values: build the diagonal array, then
                # extract the diagonal again and compare to the input.
                adiag_op = self.alloc_diag(offset=offset,
                                           axis1=axis1,
                                           axis2=axis2)
                f = theano.function([x], adiag_op(x))
                diag_arr = f(test_val)
                rediag = np.diagonal(
                    diag_arr,
                    offset=offset,
                    axis1=axis1,
                    axis2=axis2
                )
                assert np.all(rediag == test_val)

                # Test infer_shape: the shape graph must not contain the
                # op itself (the shape should be computed symbolically).
                f_shape = theano.function([x], adiag_op(x).shape,
                                          mode='FAST_RUN')

                theano.printing.debugprint(f_shape.maker.fgraph.outputs[0])
                output_shape = f_shape(test_val)
                assert not any(isinstance(node.op, self.alloc_diag)
                               for node in f_shape.maker.fgraph.toposort())
                rediag_shape = np.diagonal(
                    np.ones(output_shape),
                    offset=offset,
                    axis1=axis1,
                    axis2=axis2
                ).shape
                assert np.all(rediag_shape == test_val.shape)

                # Test gradients: d(sum(diag(x)))/dx should equal the
                # diagonal of d(sum(diag(x)))/d(diag(x)).
                diag_x = adiag_op(x)
                sum_diag_x = tensor.sum(diag_x)
                grad_x = tensor.grad(sum_diag_x, x)
                grad_diag_x = tensor.grad(sum_diag_x, diag_x)
                f_grad_x = theano.function([x], grad_x, mode=self.mode)
                f_grad_diag_x = theano.function([x], grad_diag_x,
                                                mode=self.mode)
                grad_input = f_grad_x(test_val)
                grad_diag_input = f_grad_diag_x(test_val)
                true_grad_input = np.diagonal(
                    grad_diag_input,
                    offset=offset,
                    axis1=axis1,
                    axis2=axis2
                )
                assert np.all(true_grad_input == grad_input)
class test_numpy_assumptions(unittest.TestCase): class test_numpy_assumptions(unittest.TestCase):
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register or sign in to comment