提交 a35f1fee authored 作者: Frederic's avatar Frederic 提交者: Tanjay94

Use the new place for linalg.

上级 be21164a
...@@ -42,7 +42,7 @@ from theano.sandbox.cuda.elemwise import erfinv_gpu ...@@ -42,7 +42,7 @@ from theano.sandbox.cuda.elemwise import erfinv_gpu
from theano.sandbox.cuda.var import CudaNdarrayConstant from theano.sandbox.cuda.var import CudaNdarrayConstant
from theano.scan_module import scan_utils, scan_op, scan_opt from theano.scan_module import scan_utils, scan_op, scan_opt
from theano.tensor.blas import _is_real_vector, _is_real_matrix from theano.tensor.blas import _is_real_vector, _is_real_matrix
linalg = None from theano.tensor import nlinalg
#optdb.print_summary() # shows what is currently registered #optdb.print_summary() # shows what is currently registered
...@@ -1643,31 +1643,26 @@ def tensor_to_cuda(x): ...@@ -1643,31 +1643,26 @@ def tensor_to_cuda(x):
@register_opt()
@local_optimizer([nlinalg.ExtractDiag])
def local_gpu_extract_diagonal(node):
    """Move ``ExtractDiag`` across GPU/host transfer ops.

    Rewrites applied:
        extract_diagonal(host_from_gpu(x)) -> host_from_gpu(extract_diagonal(x))
        gpu_from_host(extract_diagonal(x)) -> extract_diagonal(gpu_from_host(x))

    Parameters
    ----------
    node : Apply
        The node being considered by the optimizer.

    Returns
    -------
    list or bool
        A one-element list with the replacement variable when a rewrite
        applies, otherwise False (the convention for "no change" in
        Theano local optimizers).
    """
    # Case 1: the diagonal is extracted from a value that was just
    # transferred off the GPU -- do the extraction on the GPU instead
    # and transfer the (smaller) result.
    if (isinstance(node.op, nlinalg.ExtractDiag) and
        isinstance(node.inputs[0].type,
                   theano.tensor.TensorType)):
        inp = node.inputs[0]
        if inp.owner and isinstance(inp.owner.op, HostFromGpu):
            return [host_from_gpu(nlinalg.extract_diag(gpu_from_host(inp)))]
    # Case 2: a host-side ExtractDiag result is being pushed to the GPU
    # -- push the input instead and extract on the GPU.
    if isinstance(node.op, GpuFromHost):
        host_input = node.inputs[0]
        if (host_input.owner and
            isinstance(host_input.owner.op, nlinalg.ExtractDiag) and
            isinstance(host_input.owner.inputs[0].type,
                       theano.tensor.TensorType)):
            diag_node = host_input.owner
            return [nlinalg.extract_diag(
                gpu_from_host(diag_node.inputs[0]))]
    return False
......
...@@ -4945,7 +4945,7 @@ class Diagonal(Op): ...@@ -4945,7 +4945,7 @@ class Diagonal(Op):
def diagonal(a, offset=0, axis1=0, axis2=1):
    """Return the diagonal of tensor `a`, mirroring ``numpy.diagonal``.

    Parameters
    ----------
    a : tensor
        The input from which to take the diagonal.
    offset : int
        Offset of the diagonal from the main diagonal (0 = main).
    axis1, axis2 : int
        The pair of axes defining the 2-D plane the diagonal is taken from.

    Returns
    -------
    tensor
        The extracted diagonal.
    """
    # For the default main-diagonal case, delegate to the specialized
    # extract_diag op (now living in theano.tensor.nlinalg) so that GPU
    # optimizations can pick it up.  Import locally to avoid a circular
    # import at module load time.
    if (offset, axis1, axis2) == (0, 0, 1):
        from theano.tensor.nlinalg import extract_diag
        return extract_diag(a)
    # General case: fall back to the generic Diagonal op.
    return Diagonal(offset, axis1, axis2)(a)
......
...@@ -727,8 +727,7 @@ class FillDiagonal(gof.Op): ...@@ -727,8 +727,7 @@ class FillDiagonal(gof.Op):
self.__class__.__name__) self.__class__.__name__)
wr_a = fill_diagonal(grad, 0) # valid for any number of dimensions wr_a = fill_diagonal(grad, 0) # valid for any number of dimensions
# diag is only valid for matrices # diag is only valid for matrices
import theano.sandbox.linalg wr_val = theano.tensor.nlinalg.diag(grad).sum()
wr_val = theano.sandbox.linalg.ops.diag(grad).sum()
return [wr_a, wr_val] return [wr_a, wr_val]
fill_diagonal_ = FillDiagonal() fill_diagonal_ = FillDiagonal()
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论