提交 b981e02b authored 作者: abergeron's avatar abergeron 提交者: GitHub

Merge pull request #5013 from nouiz/gpu_alloc_empty

Fix an error. GpuAllocEmpty in the new back-end was inheriting from Alloc instead of AllocEmpty.
......@@ -7,7 +7,8 @@ import numpy
from theano import Op, Apply, Type, Variable
from theano import tensor, config
from theano.gradient import grad_undefined
from theano.tensor.basic import Alloc, Join, Split
from theano.tensor.basic import (
Alloc, AllocEmpty, alloc_validate_shape, Join, Split)
from theano.gof import HideC, COp
from theano.gof.utils import MethodNotDefined
......@@ -805,7 +806,7 @@ class GpuAlloc(HideC, Alloc):
def make_node(self, value, *shape):
value = as_gpuarray_variable(value, context_name=self.context_name)
sh, bcast = self.validate_shape(shape)
sh, bcast = alloc_validate_shape(shape)
if value.ndim > len(sh):
TypeError("The GpuAlloc value to use has more dimensions "
"than the specified shape", value.ndim, len(sh))
......@@ -941,7 +942,7 @@ def gpu_alloc(ctx, memset_0=False):
gpu_alloc.cache = {}
class GpuAllocEmpty(HideC, Alloc):
class GpuAllocEmpty(HideC, AllocEmpty):
"""
Allocate uninitialized memory on the GPU.
......@@ -958,7 +959,7 @@ class GpuAllocEmpty(HideC, Alloc):
return get_context(self.context_name)
def make_node(self, *shape):
sh, bcast = self.validate_shape(shape)
sh, bcast = alloc_validate_shape(shape)
output = GpuArrayType(dtype=self.dtype, broadcastable=bcast,
context_name=self.context_name)()
output.tag.values_eq_approx = tensor.type.values_eq_approx_always_true
......
......@@ -3658,25 +3658,10 @@ class GpuAllocEmpty(GpuOp):
__props__ = ()
@staticmethod
def validate_shape(shape):
sh = [tensor.as_tensor_variable(s) for s in shape]
bcast = []
for s in sh:
if s.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('Shape arguments must be integers', s)
# if s is constant 1, then we're broadcastable in that dim
try:
const_shp = tensor.get_scalar_constant_value(s)
except tensor.NotScalarConstantError:
const_shp = None
bcast.append(1 == const_shp)
def make_node(self, *shape):
shape, bcast = tensor.basic.alloc_validate_shape(shape)
otype = CudaNdarrayType(dtype='float32', broadcastable=bcast)
output = otype()
return sh, output
def make_node(self, *shape):
shape, output = self.validate_shape(shape)
output.tag.values_eq_approx = tensor.type.values_eq_approx_always_true
# The output can contain nan/inf. output.type is a new
# instance, so we can do this only for that variable.
......@@ -3767,7 +3752,11 @@ class GpuAlloc(GpuAllocEmpty):
# if there is unneeded transfert generated by the next line
# the optimizer will remove them.
v = as_cuda_ndarray_variable(value)
shape, output = self.validate_shape(shape)
shape, bcast = tensor.basic.alloc_validate_shape(shape)
otype = CudaNdarrayType(dtype='float32', broadcastable=bcast)
output = otype()
return Apply(self, [v] + shape, [output])
# This is required because the superclass (GpuAllocEmpty) also has it.
......
......@@ -2726,6 +2726,36 @@ def identity_like(x):
return eye(x.shape[0], x.shape[1], k=0, dtype=x.dtype)
def alloc_validate_shape(shape):
    """Validate and convert the shape arguments of an Alloc-like Op.

    Parameters
    ----------
    shape : iterable
        One entry per output dimension; each must convert to a scalar
        tensor variable of integer dtype.

    Returns
    -------
    (list, list)
        `sh` is the list of entries converted with ``as_tensor_variable``;
        `bcast` is a list of bools, True for each dimension whose size is
        the constant 1 (i.e. broadcastable).

    Raises
    ------
    TypeError
        If an entry is not of integer dtype, or is not a scalar.
    """
    sh = [as_tensor_variable(s) for s in shape]
    bcast = []
    for i, s in enumerate(sh):
        def err_str():
            # The pretty-printed variable is expensive to build, so only
            # produce it when the user asked for verbose exceptions.
            if config.exception_verbosity == 'high':
                return '\n' + min_informative_str(s)
            else:
                return str(s)
        if s.type.dtype[:3] not in ('int', 'uin'):
            s_as_str = err_str()
            raise TypeError('Shape arguments to Alloc must be integers, '
                            'but argument %s is not for apply node: %s' %
                            (i, s_as_str))
        if s.ndim != 0:
            s_as_str = err_str()
            # Bug fix: the original had a trailing comma after the first
            # string literal, so TypeError got two separate arguments
            # instead of one concatenated message.  Adjacent string
            # literals are joined at compile time, before % formatting.
            raise TypeError(
                "Each shape dimension to Alloc must be a scalar, "
                'but dimension %s has %d dimensions for apply node: %s' %
                (i, s.ndim, s_as_str))
        # If s is the constant 1, that dimension is broadcastable.
        try:
            const_shp = get_scalar_constant_value(s)
        except NotScalarConstantError:
            const_shp = None
        bcast.append(1 == const_shp)
    return sh, bcast
class Alloc(gof.Op):
"""Create a Tensor from an initial value and a desired shape.
......@@ -2747,37 +2777,11 @@ class Alloc(gof.Op):
__props__ = ()
def validate_shape(self, shape):
sh = [as_tensor_variable(s) for s in shape]
bcast = []
for i, s in enumerate(sh):
def err_str():
if config.exception_verbosity == 'high':
return '\n' + min_informative_str(s)
else:
return str(s)
if s.type.dtype[:3] not in ('int', 'uin'):
s_as_str = err_str()
raise TypeError('Shape arguments to Alloc must be integers, '
'but argument %s is not for apply node: %s' %
(i, s_as_str))
if s.ndim != 0:
s_as_str = err_str()
raise TypeError(
"Each shape dimension to Alloc must be a scalar, ",
'but dimension %s have %d dimensions for apply node: %s' %
(i, s.ndim, s_as_str))
# if s is constant 1, then we're broadcastable in that dim
try:
const_shp = get_scalar_constant_value(s)
except NotScalarConstantError:
const_shp = None
bcast.append(1 == const_shp)
return sh, bcast
return alloc_validate_shape(shape)
def make_node(self, value, *shape):
v = as_tensor_variable(value)
sh, bcast = self.validate_shape(shape)
sh, bcast = alloc_validate_shape(shape)
if v.ndim > len(sh):
raise TypeError("The Alloc value to use has more dimensions"
" than the specified dimensions",
......@@ -6356,24 +6360,11 @@ class AllocEmpty(gof.Op):
assert isinstance(dtype, str), dtype
self.dtype = dtype.lower()
def validate_shape(self, shape):
sh = [as_tensor_variable(s) for s in shape]
bcast = []
for s in sh:
if s.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('Shape arguments must be integers', s)
# if s is constant 1, then we're broadcastable in that dim
try:
const_shp = get_scalar_constant_value(s)
except NotScalarConstantError:
const_shp = None
bcast.append(1 == const_shp)
def make_node(self, *shape):
shape, bcast = alloc_validate_shape(shape)
otype = TensorType(dtype=self.dtype, broadcastable=bcast)
output = otype()
return sh, output
def make_node(self, *shape):
shape, output = self.validate_shape(shape)
output.tag.values_eq_approx = values_eq_approx_always_true
# The output can contain nan/inf. output.type is a new
# instance, so we can do this only for that variable.
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论