提交 e8d45dc8 authored 作者: Frederic's avatar Frederic

Move numpy_scalar outside of the function that uses it.

Indent what is inside. I'll use that in the next commit. This will make the change more visible.
上级 4a77221b
...@@ -508,6 +508,24 @@ class EmptyConstantError(NotScalarConstantError): ...@@ -508,6 +508,24 @@ class EmptyConstantError(NotScalarConstantError):
""" """
def numpy_scalar(data):
    """Return the scalar stored in a numpy ndarray.

    :param data: a numpy ndarray.
    :return: `data` itself, when it holds exactly one numeric value.
    :raise EmptyConstantError: if `data` is an empty array, e.g.
        ``numpy.array([])`` (including empty shapes such as ``(3, 0)``).
    :raise NotScalarConstantError: if `data` is non-numeric, non-scalar,
        or has more than one value.
    """
    # Handle the case where data is an empty array such as numpy.array([]).
    # size == 0 subsumes the old max(data.shape) == 0 test and also covers
    # shapes like (3, 0) that have a nonzero maximum dimension.
    if data.ndim > 0 and data.size == 0:
        raise EmptyConstantError()
    try:
        # The builtin complex() accepts every numeric scalar; it replaces
        # numpy.complex, a deprecated alias removed in NumPy 1.24.
        complex(data)
        return data
    except Exception:
        raise NotScalarConstantError(
            'v.data is non-numeric, non-scalar, or has more than one'
            ' unique value', data)
get_scalar_constant_value_elemwises = ( get_scalar_constant_value_elemwises = (
scal.Cast, scal.Switch, scal.Cast, scal.Switch,
scal.NEQ, scal.EQ, scal.NEQ, scal.EQ,
...@@ -526,166 +544,148 @@ def get_scalar_constant_value(v): ...@@ -526,166 +544,148 @@ def get_scalar_constant_value(v):
:note: There may be another function similar to this one in the :note: There may be another function similar to this one in the
code, but I'm not sure where it is. code, but I'm not sure where it is.
""" """
if True:
if v is None:
# None is not a scalar (and many uses of this function seem to depend
# on passing it None)
raise NotScalarConstantError()
if v is None: if isinstance(v, (numpy.integer, int, float)):
# None is not a scalar (and many uses of this function seem to depend return numpy.asarray(v)
# on passing it None)
raise NotScalarConstantError()
if isinstance(v, (numpy.integer, int, float)):
return numpy.asarray(v)
def numpy_scalar(data): if isinstance(v, numpy.ndarray):
""" Return a scalar stored in a numpy ndarray, or raise return numpy_scalar(v)
NotScalarConstantError if the numpy ndarray is not a scalar
"""
# handle case where data is numpy.array([]) if isinstance(v, Constant):
if data.ndim > 0 and (len(data.shape) == 0 or if getattr(v.tag, 'unique_value', None) is not None:
__builtins__['max'](data.shape) == 0): data = v.tag.unique_value
assert numpy.all(numpy.array([]) == data) else:
raise EmptyConstantError() data = v.data
try: return numpy_scalar(data)
numpy.complex(data) # works for all numeric scalars
return data if getattr(v, 'owner', None):
except Exception: if isinstance(v.owner.op, (Alloc, DimShuffle, Rebroadcast,
raise NotScalarConstantError( compile.ops.OutputGuard,
'v.data is non-numeric, non-scalar, or has more than one' compile.DeepCopyOp)):
' unique value', data) return get_scalar_constant_value(v.owner.inputs[0])
elif isinstance(v.owner.op, theano.compile.ops.Shape_i):
if isinstance(v, numpy.ndarray): if isinstance(v.owner.inputs[0], Constant):
return numpy_scalar(v) return v.owner.inputs[0].data.shape[v.owner.op.i]
# Don't act as the constant_folding optimization here as this
if isinstance(v, Constant): # fct is used too early in the optimization phase. This would
if getattr(v.tag, 'unique_value', None) is not None: # mess with the stabilization optimization and be too slow.
data = v.tag.unique_value # We put all the scalar Ops used by get_canonical_form_slice()
else: # to allow it to determine the broadcast pattern correctly.
data = v.data elif isinstance(v.owner.op, scal.ScalarOp):
return numpy_scalar(data) if isinstance(v.owner.op, scal.Second):
# We don't need both input to be constant for second
if getattr(v, 'owner', None): shape, val = v.owner.inputs
if isinstance(v.owner.op, (Alloc, DimShuffle, Rebroadcast, return get_scalar_constant_value(val)
compile.ops.OutputGuard, if isinstance(v.owner.op, get_scalar_constant_value_elemwises):
compile.DeepCopyOp)): const = [get_scalar_constant_value(i)
return get_scalar_constant_value(v.owner.inputs[0]) for i in v.owner.inputs]
elif (isinstance(v.owner.op, theano.compile.ops.Shape_i) and ret = [[None]]
isinstance(v.owner.inputs[0], Constant)): v.owner.op.perform(v.owner, const, ret)
return v.owner.inputs[0].data.shape[v.owner.op.i] return ret[0][0]
# Don't act as the constant_folding optimization here as this elif isinstance(v.owner.op, Elemwise):
# fct is used too early in the optimization phase. This would if isinstance(v.owner.op.scalar_op, scal.Second):
# mess with the stabilization optimization and be too slow. # We don't need both input to be constant for second
# We put all the scalar Ops used by get_canonical_form_slice() shape, val = v.owner.inputs
# to allow it to determine the broadcast pattern correctly. return get_scalar_constant_value(val)
elif isinstance(v.owner.op, scal.ScalarOp): elif isinstance(v.owner.op.scalar_op,
if isinstance(v.owner.op, scal.Second): get_scalar_constant_value_elemwises):
# We don't need both input to be constant for second const = [get_scalar_constant_value(i) for i in v.owner.inputs]
shape, val = v.owner.inputs ret = [[None]]
return get_scalar_constant_value(val) v.owner.op.perform(v.owner, const, ret)
if isinstance(v.owner.op, get_scalar_constant_value_elemwises): return ret[0][0]
const = [get_scalar_constant_value(i) elif isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and v.ndim == 0:
for i in v.owner.inputs] if isinstance(v.owner.inputs[0], TensorConstant):
ret = [[None]] cdata = tuple(v.owner.op.get_constant_idx(v.owner.inputs))
v.owner.op.perform(v.owner, const, ret) try:
return ret[0][0] return v.owner.inputs[0].data.__getitem__(cdata)
elif isinstance(v.owner.op, Elemwise): except IndexError:
if isinstance(v.owner.op.scalar_op, scal.Second): raise IndexError(
# We don't need both input to be constant for second str(tuple(v.owner.op.idx_list)) +
shape, val = v.owner.inputs " is not a valid index into " +
return get_scalar_constant_value(val) str(v.owner.inputs[0].data))
elif isinstance(v.owner.op.scalar_op,
get_scalar_constant_value_elemwises): # The index list 'idx_list' should have length the same
const = [get_scalar_constant_value(i) for i in v.owner.inputs] # shape as the input.
ret = [[None]] # TODO: implement the case where we take a scalar in a matrix
v.owner.op.perform(v.owner, const, ret) assert len(v.owner.op.idx_list) == v.owner.inputs[0].ndim
return ret[0][0]
elif isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and v.ndim == 0: # Needed to make better graph in this test in theano/tensor/tests:
if isinstance(v.owner.inputs[0], TensorConstant): # test_sharedvar.py:test_shared_options.test_specify_shape_partial
cdata = tuple(v.owner.op.get_constant_idx(v.owner.inputs)) if (v.owner.inputs[0].owner and
try: isinstance(v.owner.inputs[0].owner.op, Join) and
return v.owner.inputs[0].data.__getitem__(cdata) # Ensure the Join is joining only scalar variables (so that
except IndexError: # the constant value can be found at the same index as the one
raise IndexError( # used in the sub-tensor).
str(tuple(v.owner.op.idx_list)) + python_all(var.ndim == 0 for var in
" is not a valid index into " + v.owner.inputs[0].owner.inputs) and
str(v.owner.inputs[0].data)) len(v.owner.op.idx_list) == 1):
# The index list 'idx_list' should have length the same idx = v.owner.op.idx_list[0]
# shape as the input. if isinstance(idx, gof.Type):
# TODO: implement the case where we take a scalar in a matrix idx = get_scalar_constant_value(v.owner.inputs[1])
assert len(v.owner.op.idx_list) == v.owner.inputs[0].ndim # Note the '+ 1' is because the first argument to Join is the
# axis.
# Needed to make better graph in this test in theano/tensor/tests: ret = v.owner.inputs[0].owner.inputs[idx + 1]
# test_sharedvar.py:test_shared_options.test_specify_shape_partial ret = get_scalar_constant_value(ret)
if (v.owner.inputs[0].owner and # join can cast implicitly its input in some case.
isinstance(v.owner.inputs[0].owner.op, Join) and return theano._asarray(ret, dtype=v.type.dtype)
# Ensure the Join is joining only scalar variables (so that
# the constant value can be found at the same index as the one elif (v.owner.inputs[0].owner and
# used in the sub-tensor). isinstance(v.owner.inputs[0].owner.op,
python_all(var.ndim == 0 for var in theano.tensor.opt.MakeVector) and
v.owner.inputs[0].owner.inputs) and # MakeVector normally accept only scalar as input.
len(v.owner.op.idx_list) == 1): # We put this check in case there is change in the future
python_all(var.ndim == 0 for var in
idx = v.owner.op.idx_list[0] v.owner.inputs[0].owner.inputs) and
if isinstance(idx, gof.Type): len(v.owner.op.idx_list) == 1):
idx = get_scalar_constant_value(v.owner.inputs[1]) idx = v.owner.op.idx_list[0]
# Note the '+ 1' is because the first argument to Join is the if isinstance(idx, gof.Type):
# axis. idx = get_scalar_constant_value(v.owner.inputs[1])
ret = v.owner.inputs[0].owner.inputs[idx + 1] # Python 2.4 does not support indexing with numpy.integer
ret = get_scalar_constant_value(ret) # So we cast it.
# join can cast implicitly its input in some case. idx = int(idx)
return theano._asarray(ret, dtype=v.type.dtype) ret = v.owner.inputs[0].owner.inputs[idx]
ret = get_scalar_constant_value(ret)
elif (v.owner.inputs[0].owner and # MakeVector can cast implicitly its input in some case.
isinstance(v.owner.inputs[0].owner.op, return theano._asarray(ret, dtype=v.type.dtype)
theano.tensor.opt.MakeVector) and
# MakeVector normally accept only scalar as input. # This is needed when we take the grad as the Shape op
# We put this check in case there is change in the future # are not already changed into MakeVector
python_all(var.ndim == 0 for var in owner = v.owner
v.owner.inputs[0].owner.inputs) and leftmost_parent = owner.inputs[0]
len(v.owner.op.idx_list) == 1): if (leftmost_parent.owner and
idx = v.owner.op.idx_list[0] isinstance(leftmost_parent.owner.op,
if isinstance(idx, gof.Type): theano.tensor.Shape)):
idx = get_scalar_constant_value(v.owner.inputs[1]) op = owner.op
# Python 2.4 does not support indexing with numpy.integer idx_list = op.idx_list
# So we cast it. idx = idx_list[0]
idx = int(idx) if isinstance(idx, gof.Type):
ret = v.owner.inputs[0].owner.inputs[idx] idx = get_scalar_constant_value(owner.inputs[1])
ret = get_scalar_constant_value(ret) grandparent = leftmost_parent.owner.inputs[0]
# MakeVector can cast implicitly its input in some case. gp_broadcastable = grandparent.type.broadcastable
return theano._asarray(ret, dtype=v.type.dtype) ndim = grandparent.type.ndim
# This is needed when we take the grad as the Shape op assert ndim == len(gp_broadcastable)
# are not already changed into MakeVector
owner = v.owner if not (idx < len(gp_broadcastable)):
leftmost_parent = owner.inputs[0] msg = ("get_scalar_constant_value detected " +
if (leftmost_parent.owner and "deterministic IndexError: x.shape[%d] " +
isinstance(leftmost_parent.owner.op, "when x.ndim=%d.") % (ndim, idx)
theano.tensor.Shape)): if config.exception_verbosity == 'high':
op = owner.op msg += 'x=%s' % min_informative_str(v)
idx_list = op.idx_list else:
idx = idx_list[0] msg += 'x=%s' % str(v)
if isinstance(idx, gof.Type): raise ValueError(msg)
idx = get_scalar_constant_value(owner.inputs[1])
grandparent = leftmost_parent.owner.inputs[0] if gp_broadcastable[idx]:
gp_broadcastable = grandparent.type.broadcastable return numpy.asarray(1)
ndim = grandparent.type.ndim
raise NotScalarConstantError(v)
assert ndim == len(gp_broadcastable)
if not (idx < len(gp_broadcastable)):
msg = ("get_scalar_constant_value detected " +
"deterministic IndexError: x.shape[%d] " +
"when x.ndim=%d.") % (ndim, idx)
if config.exception_verbosity == 'high':
msg += 'x=%s' % min_informative_str(v)
else:
msg += 'x=%s' % str(v)
raise ValueError(msg)
if gp_broadcastable[idx]:
return numpy.asarray(1)
raise NotScalarConstantError(v)
# Easy constructors # Easy constructors
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论