提交 4c70aa6c authored 作者: Frederic's avatar Frederic

Don't automatically unbroadcast in CudaNdarrayType.filter_variable

This reverts a change made in trunk. It also gives a better error message.
上级 111c6604
......@@ -5,12 +5,9 @@ from nose.plugins.skip import SkipTest
import theano
from theano import tensor
from theano import sparse
from theano.tensor import TensorType
from theano.tests import unittest_tools as utt
from theano.sandbox.cuda.var import float32_shared_constructor as f32sc
from theano.sandbox.cuda import CudaNdarrayType, cuda_available
import theano.sandbox.cuda as cuda
# Skip test if cuda_ndarray is not available.
if cuda_available == False:
raise SkipTest('Optional package cuda disabled')
......@@ -66,6 +63,10 @@ class T_updates(unittest.TestCase):
f = theano.function([], y, updates={x: x + 1})
f()
# Test that we can update with a CudaVariable
f = theano.function([], y, updates={x: cuda.gpu_from_host(x + 1)})
f()
def test_2(self):
# This test case uses code mentioned in #698
data = numpy.random.rand(10, 10).astype('float32')
......@@ -79,14 +80,42 @@ class T_updates(unittest.TestCase):
updates=output_updates, givens=output_givens)
output_func()
def test_3(self):
# Test that broadcastable dimensions don't screw up
# update expressions.
def test_err_ndim(self):
# Test that a TypeError with a clear message is raised when the
# update expression does not have the same number of dimensions
# as the shared variable being updated.
data = numpy.random.rand(10, 10).astype('float32')
output_var = f32sc(name="output", value=data)
# output_var is a matrix (2 dims) while the update expression is a
# 0-d scalar (sum over all elements), so building the function
# must fail with a TypeError.
self.assertRaises(TypeError, theano.function, inputs=[], outputs=[],
updates={output_var:
output_var.sum()})
def test_err_broadcast(self):
# Test that a TypeError with a clear message is raised when the
# update expression has broadcastable dimensions that the shared
# variable does not have (no automatic unbroadcasting is done).
data = numpy.random.rand(10, 10).astype('float32')
output_var = f32sc(name="output", value=data)
# output_var has broadcastable pattern (False, False), while the
# update is a (True, True)-broadcastable 1x1 result of
# dimshuffle('x', 'x'); this mismatch must be rejected rather
# than silently unbroadcast.
self.assertRaises(TypeError, theano.function, inputs=[], outputs=[],
updates={output_var:
output_var.sum().dimshuffle('x', 'x')})
def test_broadcast(self):
# Test that an update whose broadcastable pattern differs from the
# shared variable's is accepted once it is explicitly rebroadcast,
# either via tensor.unbroadcast or tensor.patternbroadcast.
data = numpy.random.rand(10, 10).astype('float32')
output_var = f32sc(name="output", value=data)
up = tensor.unbroadcast(output_var.sum().dimshuffle('x', 'x'), 0, 1)
output_func = theano.function(inputs=[], outputs=[],
updates={output_var: up})
output_func()
up = tensor.patternbroadcast(output_var.sum().dimshuffle('x', 'x'),
output_var.type.broadcastable)
output_func = theano.function(inputs=[], outputs=[],
# NOTE(review): the next two 'updates=' lines appear to be the
# removed and added sides of a diff; only the
# 'updates={output_var: up}' line should remain in the final
# source — verify against the original commit.
updates={output_var: output_var.sum().dimshuffle('x', 'x')})
updates={output_var: up})
output_func()
......@@ -130,20 +130,20 @@ class CudaNdarrayType(Type):
if other.type == self:
return other
if not isinstance(other.type, tensor.TensorType):
if not isinstance(other.type, (tensor.TensorType, CudaNdarrayType)):
raise TypeError('Incompatible type', (self, other.type))
if (other.type.dtype != self.dtype):
raise TypeError('Incompatible dtype', (self.dtype,
other.type.dtype))
if numpy.any([bi and not obi
for obi, bi in zip(
other.type.broadcastable,
self.broadcastable)]):
raise TypeError('Incompatible broadcastable', (self.broadcastable,
other.type.broadcastable))
if other.type.ndim != self.ndim:
raise TypeError('Incompatible number of dimensions.'
' Expected %d, got %d.' % (self.ndim, other.ndim))
if other.type.broadcastable != self.broadcastable:
rebroadcast = tensor.Rebroadcast(*enumerate(self.broadcastable))
other = rebroadcast(other)
raise TypeError('Incompatible broadcastable dimensions.'
' Expected %s, got %s.' %
(str(other.type.broadcastable),
str(self.broadcastable)))
return theano.sandbox.cuda.basic_ops.GpuFromHost()(other)
@staticmethod
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论