提交 ab17c207 authored 作者: Frederic Bastien's avatar Frederic Bastien

renamed the import so it doesn't collide with the new cuda.basic_op.scalar fct.

上级 17dfaa63
import sys import sys
import theano import theano
import numpy import numpy
from theano import tensor, scalar, compile from theano import scalar as scal
from theano import tensor, compile
from theano.gof import local_optimizer, EquilibriumDB, SequenceDB, Optimizer, toolbox, DestroyHandler from theano.gof import local_optimizer, EquilibriumDB, SequenceDB, Optimizer, toolbox, DestroyHandler
from theano.sandbox.cuda.basic_ops import * from theano.sandbox.cuda.basic_ops import *
...@@ -208,7 +209,7 @@ def local_gpu_gemm(node): ...@@ -208,7 +209,7 @@ def local_gpu_gemm(node):
@local_optimizer([]) @local_optimizer([])
def local_gpu_sum(node): def local_gpu_sum(node):
if isinstance(node.op, tensor.elemwise.CAReduce): if isinstance(node.op, tensor.elemwise.CAReduce):
if node.op.scalar_op == scalar.add: if node.op.scalar_op == scal.add:
x, = node.inputs x, = node.inputs
if x.owner and x.owner.op == host_from_gpu: if x.owner and x.owner.op == host_from_gpu:
if node.op.axis is None: if node.op.axis is None:
...@@ -332,8 +333,8 @@ def local_gpu_rebroadcast(node): ...@@ -332,8 +333,8 @@ def local_gpu_rebroadcast(node):
return [host_from_gpu(node.op(gpu_x))] return [host_from_gpu(node.op(gpu_x))]
def cast(x, dtype):
    """Return *x* cast element-wise to *dtype* via a scalar Identity Elemwise.

    Builds a Theano Elemwise op whose scalar op is Identity with its output
    type pinned to `scal.Scalar(dtype)`, then applies it to *x*.
    """
    target_scalar_type = scal.Scalar(dtype)
    identity_to_type = scal.Identity(scal.specific_out(target_scalar_type))
    return theano.tensor.Elemwise(identity_to_type)(x)
import theano.tensor.nnet import theano.tensor.nnet
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论