提交 4feabe53 · 作者: Frederic · 提交者: Tanjay94

Fix import loop problem.

上级 47e43343
......@@ -69,6 +69,8 @@ FancyModule = Module
from theano.printing import pprint, pp
from theano import tensor
from theano.scan_module import scan, map, reduce, foldl, foldr, clone
from theano.updates import Updates, OrderedUpdates
......@@ -197,7 +199,6 @@ else:
# This cannot be done in tensor/__init__.py due to a circular dependency -- randomstreams
# depends on raw_random which depends on tensor. As a work-around, we import RandomStreams
# here and inject an instance in tensor.
from theano import tensor
from theano.tensor.randomstreams import RandomStreams
# Imitate the numpy.random symbol with a tensor.random one
tensor.random = RandomStreams(seed=0xBAD5EED, no_warn=True)
......
......@@ -37,6 +37,8 @@ from theano.tensor import sharedvar # adds shared-variable constructors
from theano.tensor.sharedvar import tensor_constructor as _shared
from theano.tensor.io import *
from theano.tensor import nlinalg
def shared(*args, **kw):
"""
......
......@@ -5,10 +5,9 @@ import numpy
from theano.gof import Op, Apply
import theano.tensor
from theano.tensor import as_tensor_variable, dot, DimShuffle, Dot
from theano.tensor.blas import Dot22
from theano import tensor
import theano.tensor
from theano.tensor.opt import (register_stabilize,
register_specialize, register_canonicalize)
from theano.gof import local_optimizer
......@@ -170,7 +169,7 @@ class AllocDiag(Op):
x = as_tensor_variable(_x)
if x.type.ndim != 1:
raise TypeError('AllocDiag only works on vectors', _x)
return Apply(self, [x], [tensor.matrix(dtype=x.type.dtype)])
return Apply(self, [x], [theano.tensor.matrix(dtype=x.type.dtype)])
def grad(self, inputs, g_outputs):
    """Gradient of AllocDiag: extract the diagonal of the output gradient."""
    g_y, = g_outputs
    return [extract_diag(g_y)]
......@@ -239,15 +238,15 @@ class ExtractDiag(Op):
return 'ExtractDiag{view=%s}' % self.view
def grad(self, inputs, g_outputs):
    """Gradient of ExtractDiag.

    Builds a zero tensor shaped like the input and writes the output
    gradient back onto its leading diagonal block via set_subtensor.

    NOTE(review): the diff view had left both the pre- and post-image of
    the changed lines in place (duplicate `x = ...` and duplicate
    `return [...]`); only the post-commit lines — the fully qualified
    `theano.tensor.*` form introduced to break the import cycle — are kept.
    """
    x = theano.tensor.zeros_like(inputs[0])
    xdiag = alloc_diag(g_outputs[0])
    # Place the diagonal gradient into the top-left block of the zero tensor.
    return [theano.tensor.set_subtensor(
        x[:xdiag.shape[0], :xdiag.shape[1]],
        xdiag)]
def infer_shape(self, node, shapes):
    """Shape of the extracted diagonal: the smaller of the input's two dims.

    NOTE(review): the diff view had left the stale pre-image line
    (`shp = tensor.min(...)`) alongside the post-image; only the
    post-commit `theano.tensor.min(...)` line is kept.
    """
    x_s, = shapes
    shp = theano.tensor.min(node.inputs[0].shape)
    return [(shp,)]
extract_diag = ExtractDiag()
......
......@@ -9,7 +9,6 @@ from theano.gof import Constant, Variable
from theano.gof.utils import hashtype
from theano.tensor.utils import hash_from_ndarray
from theano.tensor.type import TensorType
from theano.tensor import nlinalg
class AsTensorError(TypeError):
......@@ -536,7 +535,7 @@ class _tensor_py_operators:
return theano.tensor.basic.round(self, mode)
def trace(self):
    """Return the trace of this tensor, delegating to theano.tensor.nlinalg.

    NOTE(review): the diff view had left both the pre-image
    (`theano.sandbox.linalg.ops.trace`) and post-image lines in place;
    only the post-commit line is kept. Confirm that the `ops` attribute
    exists under `theano.tensor.nlinalg` — the commit's intent was to
    avoid the sandbox import cycle.
    """
    return theano.tensor.nlinalg.ops.trace(self)
# TO TRUMP NUMPY OPERATORS
__array_priority__ = 1000
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论