Commit f1ff66e0 authored by James Bergstra

merged

...@@ -4,10 +4,12 @@ import tensor ...@@ -4,10 +4,12 @@ import tensor
import sparse import sparse
import compile import compile
import gradient import gradient
import opt import tensor_opt
import scalar_opt
from tensor import * from tensor import *
from compile import * from compile import *
from opt import * from tensor_opt import *
from scalar_opt import *
from gradient import * from gradient import *
...@@ -5,11 +5,7 @@ import compile ...@@ -5,11 +5,7 @@ import compile
import gradient import gradient
from sparse import _is_dense, _is_sparse, _is_dense_result, _is_sparse_result from sparse import _is_dense, _is_sparse, _is_dense_result, _is_sparse_result
from sparse import _mtypes, _mtype_to_str
""" Types of sparse matrices to use for testing """
_mtypes = [sparse.csc_matrix, sparse.csr_matrix]
#_mtypes = [sparse.csc_matrix, sparse.csr_matrix, sparse.dok_matrix, sparse.lil_matrix, sparse.coo_matrix]
_mtype_to_str = {sparse.csc_matrix: "csc", sparse.csr_matrix: "csr"}
class T_transpose(unittest.TestCase): class T_transpose(unittest.TestCase):
def setUp(self): def setUp(self):
......
...@@ -321,8 +321,15 @@ class Broadcast(Op, Destroyer): ...@@ -321,8 +321,15 @@ class Broadcast(Op, Destroyer):
# the second calling form is used because in certain versions of numpy # the second calling form is used because in certain versions of numpy
# the first (faster) version leads to segfaults # the first (faster) version leads to segfaults
ufunc_args = [input.data for input in self.inputs]# + output_storage ufunc_args = [input.data for input in self.inputs]# + output_storage
#self.ufunc(*(ufunc_args+output_storage)) results = self.ufunc(*ufunc_args)
output_storage[0][:] = self.ufunc(*ufunc_args) if self.ufunc.nout == 1: results = [results]
for result, storage in zip(results, output_storage):
if storage.shape:
storage[:] = result
else:
storage.itemset(result)
# the following should be used instead of the previous loop, unfortunately it tends to segfault
# self.ufunc(*(ufunc_args+output_storage))
def _c_all(self, inames, onames, sub): def _c_all(self, inames, onames, sub):
_inames = inames _inames = inames
......
...@@ -89,6 +89,13 @@ class Op(object): ...@@ -89,6 +89,13 @@ class Op(object):
return self._hash_id return self._hash_id
def desc(self): def desc(self):
"""
Description (signature) of this L{Op}. L{Op}s with the same
signature may be collapsed by the L{MergeOptimizer}.
@attention: If your L{Op} has additional options or a different
constructor you probably want to override this.
"""
return self.__class__ return self.__class__
def strdesc(self): def strdesc(self):
...@@ -139,10 +146,10 @@ class Op(object): ...@@ -139,10 +146,10 @@ class Op(object):
""" """
Shallow copy of this L{Op}. The inputs are the exact same, but Shallow copy of this L{Op}. The inputs are the exact same, but
the outputs are recreated because of the one-owner-per-result the outputs are recreated because of the one-owner-per-result
policy. The default behavior is to call the constructor on policy. The default behavior is to call the constructor on this
this L{Op}'s inputs. L{Op}'s inputs.
To do a bottom-up copy of a graph, use clone_with_new_inputs. To do a bottom-up copy of a graph, use L{clone_with_new_inputs}.
@attention: If your L{Op} has additional options or a different @attention: If your L{Op} has additional options or a different
constructor you probably want to override this. constructor you probably want to override this.
......
...@@ -11,6 +11,11 @@ from gof import Result, GuardedOp, Env, utils ...@@ -11,6 +11,11 @@ from gof import Result, GuardedOp, Env, utils
def as_scalar(x, name = None): def as_scalar(x, name = None):
if isinstance(x, gof.Op):
if len(x.outputs) != 1:
raise ValueError("It is ambiguous which output of a multi-output Op has to be fetched.", x)
else:
x = x.outputs[0]
if isinstance(x, float): if isinstance(x, float):
s = Scalar('float64', name = name) s = Scalar('float64', name = name)
s.data = x s.data = x
...@@ -21,6 +26,7 @@ def as_scalar(x, name = None): ...@@ -21,6 +26,7 @@ def as_scalar(x, name = None):
return s return s
if isinstance(x, Scalar): if isinstance(x, Scalar):
return x return x
raise TypeError("Cannot convert %s to Scalar" % x)
def constant(x): def constant(x):
res = as_scalar(x) res = as_scalar(x)
...@@ -194,7 +200,7 @@ def _multi(*fns): ...@@ -194,7 +200,7 @@ def _multi(*fns):
else: else:
return [f(name) for name in names] return [f(name) for name in names]
if len(fns) == 1: if len(fns) == 1:
return partial(f2, fns) return partial(f2, fns[0])
else: else:
return [partial(f2, f) for f in fns] return [partial(f2, f) for f in fns]
......
...@@ -14,6 +14,11 @@ import gof.op, gof.result ...@@ -14,6 +14,11 @@ import gof.op, gof.result
import tensor import tensor
""" Types of sparse matrices to use for testing """
_mtypes = [sparse.csc_matrix, sparse.csr_matrix]
#_mtypes = [sparse.csc_matrix, sparse.csr_matrix, sparse.dok_matrix, sparse.lil_matrix, sparse.coo_matrix]
_mtype_to_str = {sparse.csc_matrix: "csc", sparse.csr_matrix: "csr"}
## Type checking ## Type checking
......
...@@ -304,6 +304,12 @@ s2t.Tensor = Tensor ...@@ -304,6 +304,12 @@ s2t.Tensor = Tensor
# alternate Tensor constructor # alternate Tensor constructor
def astensor(data, broadcastable=None, name=None): def astensor(data, broadcastable=None, name=None):
"""Return a L{Tensor} containing given data""" """Return a L{Tensor} containing given data"""
if isinstance(data, Op):
if len(data.outputs) != 1:
raise ValueError("It is ambiguous which output of a multi-output Op has to be fetched.", data)
else:
data = data.outputs[0]
if isinstance(data, Tensor): if isinstance(data, Tensor):
if broadcastable is not None and list(data.broadcastable) != list(broadcastable): if broadcastable is not None and list(data.broadcastable) != list(broadcastable):
raise TypeError("The data to wrap as a Tensor has the wrong broadcastable pattern. Expected %s, got %s." % (broadcastable, data.broadcastable)) raise TypeError("The data to wrap as a Tensor has the wrong broadcastable pattern. Expected %s, got %s." % (broadcastable, data.broadcastable))
...@@ -315,13 +321,17 @@ def astensor(data, broadcastable=None, name=None): ...@@ -315,13 +321,17 @@ def astensor(data, broadcastable=None, name=None):
if data is None and broadcastable is None: if data is None and broadcastable is None:
raise TypeError("Cannot make a Tensor out of None.") raise TypeError("Cannot make a Tensor out of None.")
_data = data
data = numpy.asarray(data) data = numpy.asarray(data)
if broadcastable is None: if broadcastable is None:
broadcastable = [s==1 for s in data.shape] broadcastable = [s==1 for s in data.shape]
elif broadcastable in [0, 1]: elif broadcastable in [0, 1]:
broadcastable = [broadcastable] * len(data.shape) broadcastable = [broadcastable] * len(data.shape)
rval = Tensor(data.dtype, broadcastable, name = name) try:
rval = Tensor(data.dtype, broadcastable, name = name)
except TypeError:
raise TypeError("Cannot convert %s to Tensor." % repr(_data))
rval.data = data # will raise if broadcastable was mis-specified rval.data = data # will raise if broadcastable was mis-specified
return rval return rval
s2t.astensor = astensor s2t.astensor = astensor
......
Markdown formatting is supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment