提交 f02a596e · 作者: abergeron

Merge pull request #2101 from nouiz/mixed

Mixed
......@@ -98,3 +98,6 @@ inside a function would provide float64 as output.
Since the GPU can't compute this kind of output, it would be
preferable not to use those dtypes together.
To help you find where float64 are created, see the
:attr:`warn_float64` Theano flag.
......@@ -584,7 +584,7 @@ class Function(object):
# done by raise_with_op is not implemented in C.
if hasattr(self.fn, 'thunks'):
# For the CVM
gof.vm.raise_with_op(
gof.link.raise_with_op(
self.fn.nodes[self.fn.position_of_error],
self.fn.thunks[self.fn.position_of_error])
else:
......
......@@ -314,15 +314,11 @@ class Shape_i(gof.Op):
check_input = False
__props__ = ("i",)
def __init__(self, i):
self.i = i
def __hash__(self):
return hash(type(self)) ^ self.i
def __eq__(self, other):
return type(self) == type(other) and self.i == other.i
def __str__(self):
return '%s{%i}' % (self.__class__.__name__, self.i)
......
......@@ -19,6 +19,7 @@ import pickle
def mul(a, b):
    """Return the product ``a * b``."""
    product = a * b
    return product
class OpDecoratorTests(utt.InferShapeTester):
def test_1arg(self):
x = dmatrix('x')
......@@ -77,3 +78,8 @@ class OpDecoratorTests(utt.InferShapeTester):
m2 = pickle.loads(s)
assert m2.owner.op == m.owner.op
def test_shape_i_hash():
    """``Shape_i.__hash__`` must yield a builtin ``int`` even when the
    Op was built with a NumPy integer index."""
    op_hash = theano.tensor.opt.Shape_i(np.int64(1)).__hash__()
    assert isinstance(op_hash, int)
......@@ -1836,6 +1836,20 @@ class CAReduceDtype(CAReduce):
assert op.acc_dtype is not None
return CAReduce.make_node(op, input)
def __str__(self):
name = self.__class__.__name__
if self.__class__.__name__ == "CAReduceDtype":
name = "ReduceDtype{%s}" % self.scalar_op,
axis = ""
if self.axis is not None:
axis = ", ".join(str(x) for x in self.axis)
axis = "axis=[%s], " % axis
return "%s{%sacc_dtype=%s}" % (
name,
axis,
str(self.acc_dtype)
)
class Sum(CAReduceDtype):
"""
......@@ -1908,12 +1922,6 @@ class Sum(CAReduceDtype):
return [None]
return self(*eval_points, **dict(return_list=True))
def __str__(self):
if self.axis is None:
return "Sum"
else:
return "Sum{%s}" % ", ".join(map(str, self.axis))
class Prod(CAReduceDtype):
"""
......@@ -2067,12 +2075,6 @@ class Prod(CAReduceDtype):
return [final_grad]
def __str__(self):
if self.axis is None:
return "Prod"
else:
return "Prod{%s}" % ", ".join(map(str, self.axis))
def c_code_cache_version(self):
    """Version tag used to invalidate previously compiled C code."""
    version = (1,)
    return version
......@@ -2112,10 +2114,4 @@ mul_without_zeros = MulWithoutZeros(scalar.upcast_out,
class ProdWithoutZeros(CAReduceDtype):
def __init__(self, axis=None, dtype=None, acc_dtype=None):
    """Reduce using ``mul_without_zeros`` as the scalar op.

    Delegates to ``CAReduceDtype.__init__`` with the same axis/dtype
    configuration; only the scalar op is fixed.
    """
    CAReduceDtype.__init__(self, mul_without_zeros,
                           axis=axis,
                           dtype=dtype,
                           acc_dtype=acc_dtype)
def __str__(self):
if self.axis is None:
return "ProdWithoutZeros"
else:
return "ProdWithoutZeros{%s}" % ", ".join(map(str, self.axis))
dtype=dtype, acc_dtype=acc_dtype)
......@@ -1427,6 +1427,8 @@ class Assert(T.Op):
self.msg = "Theano Assert failed!"
def make_node(self, value, *conds):
    """Build the Apply node for this Assert Op.

    ``value`` is passed through as the single output; each entry of
    ``conds`` is converted to a tensor variable and must be a 0-d
    (scalar) condition.
    """
    if not isinstance(value, Variable):
        value = T.as_tensor_variable(value)
    cond_vars = [T.as_tensor_variable(c) for c in conds]
    # every condition must be a scalar (0-d) tensor
    assert numpy.all([v.type.ndim == 0 for v in cond_vars])
    return gof.Apply(self, [value] + cond_vars, [value.type()])
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论