Commit 67ff204d, authored by Hengjean

Added `check_input` to the following ops:

- theano.tensor.opt.Assert (16)
- theano.compile.ops.Rebroadcast (19)
- theano.tensor.basic.Join (32)
- theano.compile.ops.OutputGuard (44)
- theano.tensor.basic.Reshape (46)
- theano.compile.ops.DeepCopyOp (56)
- theano.sandbox.cuda.basic_ops.GpuFromHost (58)
- theano.sandbox.cuda.basic_ops.HostFromGpu (70)
Parent commit: 9da6df1a
...@@ -109,6 +109,8 @@ class OutputGuard(ViewOp): ...@@ -109,6 +109,8 @@ class OutputGuard(ViewOp):
""" """
destroy_map = {0: [0]} destroy_map = {0: [0]}
check_input = False
_output_guard = OutputGuard() _output_guard = OutputGuard()
...@@ -131,6 +133,8 @@ class DeepCopyOp(gof.Op): ...@@ -131,6 +133,8 @@ class DeepCopyOp(gof.Op):
# the output variable is %(oname)s. # the output variable is %(oname)s.
c_code_and_version = {} c_code_and_version = {}
check_input = False
def __init__(self): def __init__(self):
pass pass
...@@ -169,6 +173,8 @@ class DeepCopyOp(gof.Op): ...@@ -169,6 +173,8 @@ class DeepCopyOp(gof.Op):
return () return ()
version.append((str(t), v)) version.append((str(t), v))
if version:
version.append(1)
return tuple(version) return tuple(version)
def c_code(self, node, name, inames, onames, sub): def c_code(self, node, name, inames, onames, sub):
...@@ -528,6 +534,8 @@ class Rebroadcast(gof.Op): ...@@ -528,6 +534,8 @@ class Rebroadcast(gof.Op):
# the output variable is %(oname)s. # the output variable is %(oname)s.
c_code_and_version = {} c_code_and_version = {}
check_input = False
def __init__(self, *axis): def __init__(self, *axis):
self.axis = dict(axis) self.axis = dict(axis)
for axis, broad in self.axis.iteritems(): for axis, broad in self.axis.iteritems():
...@@ -629,6 +637,8 @@ class Rebroadcast(gof.Op): ...@@ -629,6 +637,8 @@ class Rebroadcast(gof.Op):
return () return ()
version.append((str(t), v)) version.append((str(t), v))
if version:
version.append(1)
return tuple(version) return tuple(version)
......
...@@ -53,6 +53,8 @@ class HostFromGpu(GpuOp): ...@@ -53,6 +53,8 @@ class HostFromGpu(GpuOp):
""" """
Implement the transfer from gpu to the cpu. Implement the transfer from gpu to the cpu.
""" """
check_input = False
def __eq__(self, other): def __eq__(self, other):
return type(self) == type(other) return type(self) == type(other)
...@@ -102,7 +104,7 @@ class HostFromGpu(GpuOp): ...@@ -102,7 +104,7 @@ class HostFromGpu(GpuOp):
""" % locals() """ % locals()
def c_code_cache_version(self): def c_code_cache_version(self):
return (2,) return (3,)
host_from_gpu = HostFromGpu() host_from_gpu = HostFromGpu()
...@@ -110,6 +112,8 @@ class GpuFromHost(GpuOp): ...@@ -110,6 +112,8 @@ class GpuFromHost(GpuOp):
""" """
Implement the transfer from cpu to the gpu. Implement the transfer from cpu to the gpu.
""" """
check_input = False
def __eq__(self, other): def __eq__(self, other):
return type(self) == type(other) return type(self) == type(other)
...@@ -166,7 +170,7 @@ class GpuFromHost(GpuOp): ...@@ -166,7 +170,7 @@ class GpuFromHost(GpuOp):
""" % locals() """ % locals()
def c_code_cache_version(self): def c_code_cache_version(self):
return (1,) return (2,)
gpu_from_host = GpuFromHost() gpu_from_host = GpuFromHost()
......
...@@ -3261,6 +3261,8 @@ class Join(Op): ...@@ -3261,6 +3261,8 @@ class Join(Op):
join(2, x, y, z) # WRONG: the axis has to be an index into the shape join(2, x, y, z) # WRONG: the axis has to be an index into the shape
join(0, x, u) # WRONG: joined tensors must have the same rank join(0, x, u) # WRONG: joined tensors must have the same rank
""" """
check_input = False
def __eq__(self, other): def __eq__(self, other):
return type(self) == type(other) return type(self) == type(other)
...@@ -3372,14 +3374,14 @@ class Join(Op): ...@@ -3372,14 +3374,14 @@ class Join(Op):
dtype=node.outputs[0].type.dtype) dtype=node.outputs[0].type.dtype)
def c_code_cache_version(self): def c_code_cache_version(self):
return (1,) return (2,)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
axis, tensors = inputs[0], inputs[1:] axis, tensors = inputs[0], inputs[1:]
l = len(tensors) l = len(tensors)
out, = outputs out, = outputs
fail = sub['fail'] fail = sub['fail']
adtype = node.inputs[0].type.dtype_specs()[1]
code = """ code = """
PyObject* list = PyList_New(%(l)s); PyObject* list = PyList_New(%(l)s);
""" % locals() """ % locals()
...@@ -3392,7 +3394,7 @@ class Join(Op): ...@@ -3392,7 +3394,7 @@ class Join(Op):
//PyObject* PyArray_Concatenate(PyObject* obj, int axis) //PyObject* PyArray_Concatenate(PyObject* obj, int axis)
Py_XDECREF(%(out)s); Py_XDECREF(%(out)s);
%(out)s = (PyArrayObject *)PyArray_Concatenate(list, %(out)s = (PyArrayObject *)PyArray_Concatenate(list,
((dtype_%(axis)s *)PyArray_DATA(%(axis)s))[0]); ((%(adtype)s *)PyArray_DATA(%(axis)s))[0]);
Py_DECREF(list); Py_DECREF(list);
if(!%(out)s){ if(!%(out)s){
...@@ -3685,6 +3687,8 @@ class Reshape(Op): ...@@ -3685,6 +3687,8 @@ class Reshape(Op):
known at graph build time.""" known at graph build time."""
view_map = {0: [0]} # output 0 is potentially aliased to inputs [0] view_map = {0: [0]} # output 0 is potentially aliased to inputs [0]
check_input = False
def __init__(self, ndim, name=None): def __init__(self, ndim, name=None):
self.ndim = ndim self.ndim = ndim
self.name = name self.name = name
...@@ -3814,13 +3818,14 @@ class Reshape(Op): ...@@ -3814,13 +3818,14 @@ class Reshape(Op):
return [tuple(oshape)] return [tuple(oshape)]
def c_code_cache_version(self): def c_code_cache_version(self):
return (5,) return (6,)
def c_code(self, node, name, inputs, outputs, sub): def c_code(self, node, name, inputs, outputs, sub):
if isinstance(node.inputs[0], TensorVariable): if isinstance(node.inputs[0], TensorVariable):
x, shp = inputs x, shp = inputs
z, = outputs z, = outputs
new_ndim = self.ndim new_ndim = self.ndim
sdtype = node.inputs[1].type.dtype_specs()[1]
fail = sub['fail'] fail = sub['fail']
return """ return """
assert (PyArray_NDIM(%(shp)s) == 1); assert (PyArray_NDIM(%(shp)s) == 1);
...@@ -3834,7 +3839,7 @@ class Reshape(Op): ...@@ -3834,7 +3839,7 @@ class Reshape(Op):
// -- int* dtype. The compiler will explicitly upcast it, but // -- int* dtype. The compiler will explicitly upcast it, but
// -- will err if this will downcast. This could happen if the // -- will err if this will downcast. This could happen if the
// -- user pass an int64 dtype, but npy_intp endup being int32. // -- user pass an int64 dtype, but npy_intp endup being int32.
new_dims[ii] = ((dtype_%(shp)s*)( new_dims[ii] = ((%(sdtype)s*)(
PyArray_BYTES(%(shp)s) + PyArray_BYTES(%(shp)s) +
ii * PyArray_STRIDES(%(shp)s)[0]))[0]; ii * PyArray_STRIDES(%(shp)s)[0]))[0];
} }
......
...@@ -1363,6 +1363,8 @@ class Assert(T.Op): ...@@ -1363,6 +1363,8 @@ class Assert(T.Op):
""" """
view_map = {0: [0]} view_map = {0: [0]}
check_input = False
def __init__(self, msg="Theano Assert failed!"): def __init__(self, msg="Theano Assert failed!"):
self.msg = msg self.msg = msg
...@@ -1415,7 +1417,7 @@ class Assert(T.Op): ...@@ -1415,7 +1417,7 @@ class Assert(T.Op):
""" % locals() """ % locals()
def c_code_cache_version(self): def c_code_cache_version(self):
return (1, 1) return (3, 0)
def infer_shape(self, node, input_shapes): def infer_shape(self, node, input_shapes):
return [input_shapes[0]] return [input_shapes[0]]
......
Markdown formatting is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment