testgroup / pytensor · Commits · b4f4a23b

Commit b4f4a23b, authored May 24, 2016 by Frederic Bastien

    small opt to gpu speed up.

Parent: c7f2dd05
Showing 1 changed file with 24 additions and 24 deletions.

theano/sandbox/cuda/opt.py  (+24, -24)
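Every hunk below makes the same substitution: `@local_optimizer` lists that tracked the transfer Op instances `gpu_from_host` / `host_from_gpu` now track the Op classes `GpuFromHost` / `HostFromGpu`, and the one direct `==` comparison becomes an `isinstance` check. Matching by class catches every instance of the Op, not just the one module-level instance. A minimal standalone sketch of the distinction (stand-in class, not the real theano.sandbox.cuda Op):

    # Stand-in class; not the real theano.sandbox.cuda.basic_ops.GpuFromHost.
    class GpuFromHost(object):
        pass

    gpu_from_host = GpuFromHost()   # the canonical module-level instance
    other = GpuFromHost()           # a second, distinct instance

    # Comparing against the instance misses `other` under the default __eq__;
    # the class test matches any instance of the Op.
    print(other == gpu_from_host)           # False
    print(isinstance(other, GpuFromHost))   # True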
@@ -192,7 +192,7 @@ class InputToGpuOptimizer(Optimizer):
             # This happen frequently as we do 2 pass of the gpu optimizations
             if (len(input.clients) == 1 and
                     (input.clients[0][0] == 'output' or
-                     input.clients[0][0].op == gpu_from_host)):
+                     isinstance(input.clients[0][0].op, GpuFromHost))):
                 continue
 
             try:
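The guard above skips a graph input whose single client is either the graph output or an already-inserted transfer. A toy sketch of the `clients` structure it inspects; `Apply` and `GpuFromHost` below are stand-ins, not the theano.gof classes:

    class GpuFromHost(object):            # stand-in Op class
        pass

    class Apply(object):                  # stand-in apply node
        def __init__(self, op):
            self.op = op

    # clients: list of (consumer, input_index) pairs; the consumer can also
    # be the string 'output' for a graph output.
    clients = [(Apply(GpuFromHost()), 0)]
    c = clients[0][0]
    if len(clients) == 1 and (c == 'output' or
                              isinstance(getattr(c, 'op', None), GpuFromHost)):
        print("skip: sole client is an output or an existing transfer")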
@@ -215,7 +215,7 @@ gpu_seqopt.register('InputToGpuOptimizer', InputToGpuOptimizer(),
                     'merge')
 
 # TODO: how to make it mandatory for gpu_seqopt?
-@local_optimizer([gpu_from_host, host_from_gpu])
+@local_optimizer([GpuFromHost, HostFromGpu])
 def local_cut_gpu_host_gpu(node):
     if tensor.opt.opt.check_chain(node, gpu_from_host, host_from_gpu):
         return [node.inputs[0].owner.inputs[0]]
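`local_cut_gpu_host_gpu` removes a transfer immediately followed by its inverse, so `gpu_from_host(host_from_gpu(x))` collapses to `x`. A hedged sketch of that rewrite on toy nodes (not the real `check_chain` or theano.gof API):

    class Var(object):
        def __init__(self, owner=None):
            self.owner = owner            # apply node that produced this var

    class Apply(object):
        def __init__(self, op, inputs):
            self.op, self.inputs = op, inputs

    def cut_round_trip(node, outer_op, inner_op):
        """If node computes outer_op(inner_op(x)), return [x]."""
        inner = node.inputs[0].owner
        if node.op is outer_op and inner is not None and inner.op is inner_op:
            return [inner.inputs[0]]

    gpu_from_host, host_from_gpu = object(), object()   # stand-in op markers
    x = Var()
    node = Apply(gpu_from_host, [Var(Apply(host_from_gpu, [x]))])
    print(cut_round_trip(node, gpu_from_host, host_from_gpu) == [x])   # True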
@@ -336,7 +336,7 @@ def local_gpu_elemwise_0(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host])
+@local_optimizer([GpuFromHost])
 def local_gpu_elemwise_1(node):
     """
     gpu_from_host(Elemwise)) -> GpuElemwise(gpu_from_host(...))
@@ -392,7 +392,7 @@ def local_gpu_split(node):
 
 @register_opt()
-@local_optimizer([tensor.DimShuffle, gpu_from_host])
+@local_optimizer([tensor.DimShuffle, GpuFromHost])
 def local_gpu_dimshuffle_0(node):
     """
     dimshuffle(host_from_gpu()) -> host_from_gpu(gpu_dimshuffle)
@@ -421,7 +421,7 @@ def local_gpu_dimshuffle_0(node):
 
 @register_opt()
-@local_optimizer([tensor.SpecifyShape, gpu_from_host])
+@local_optimizer([tensor.SpecifyShape, GpuFromHost])
 def local_gpu_specifyShape_0(node):
     """
     specify_shape(host_from_gpu()) -> host_from_gpu(specify_shape)
@@ -445,7 +445,7 @@ def local_gpu_specifyShape_0(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.basic.Dot])
+@local_optimizer([GpuFromHost, tensor.basic.Dot])
 def local_gpu_dot_to_dot22(node):
     """
     gpu_from_host(dot) -> gpudot(gpu_from_host)
@@ -537,7 +537,7 @@ optdb.register('gpu_assert_no_cpu_op', assert_no_cpu_op, 49.2,
 
 @register_opt()
-@local_optimizer([theano.ifelse.IfElse, gpu_from_host])
+@local_optimizer([theano.ifelse.IfElse, GpuFromHost])
 def local_gpu_lazy_ifelse(node):
     """
     gpu_from_host(ifelse) -> gpu_ifelse(gpu_from_host)
@@ -606,7 +606,7 @@ def local_gpu_lazy_ifelse(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.blas.Dot22])
+@local_optimizer([GpuFromHost, tensor.blas.Dot22])
 def local_gpu_dot22(node):
     """
     gpu_from_host(dot22) -> gpudot(gpu_from_host)
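The docstrings name the shared shape of most of these rewrites: push the host-to-GPU transfer through the Op, e.g. `gpu_from_host(dot22) -> gpudot(gpu_from_host)`, so the arithmetic itself runs on the GPU. A schematic sketch of that lift on tuple expressions (illustrative only, not Theano's graph API):

    # expr is ('op_name', [args]); rewrite transfer(dot22(x, y)) into
    # gpu_dot22(transfer(x), transfer(y)). All names are illustrative.
    def lift_transfer(expr):
        op, args = expr
        if op == 'gpu_from_host' and args[0][0] == 'dot22':
            return ('gpu_dot22',
                    [('gpu_from_host', [a]) for a in args[0][1]])
        return expr

    print(lift_transfer(('gpu_from_host', [('dot22', ['x', 'y'])])))
    # ('gpu_dot22', [('gpu_from_host', ['x']), ('gpu_from_host', ['y'])])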
@@ -631,7 +631,7 @@ def local_gpu_dot22(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.blas.BatchedDot])
+@local_optimizer([GpuFromHost, tensor.blas.BatchedDot])
 def local_gpu_batched_dot(node):
     """
     gpu_from_host(batched_dot) -> gpu_batched_dot(gpu_from_host)
@@ -670,7 +670,7 @@ def local_gpu_batched_dot(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.blas.Dot22Scalar])
+@local_optimizer([GpuFromHost, tensor.blas.Dot22Scalar])
 def local_gpu_dot22scalar(node):
     """
     gpu_from_host(dot22scalar) -> gpudot(gpu_from_host)
@@ -699,7 +699,7 @@ def local_gpu_dot22scalar(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.blas_c.CGemv, tensor.blas.Gemv])
+@local_optimizer([GpuFromHost, tensor.blas_c.CGemv, tensor.blas.Gemv])
 def local_gpu_gemv(node):
     """
     gpu_from_host(gemv) -> gpu_gemv(gpu_from_host)
@@ -737,7 +737,7 @@ def local_gpu_gemv(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.blas_c.CGer, tensor.blas.Ger,
+@local_optimizer([GpuFromHost, tensor.blas_c.CGer, tensor.blas.Ger,
                   tensor.blas_scipy.ScipyGer])
 def local_gpu_ger(node):
     """
@@ -777,7 +777,7 @@ def local_gpu_ger(node):
 
 @register_opt()
-@local_optimizer([tensor.blas.Gemm, gpu_from_host])
+@local_optimizer([tensor.blas.Gemm, GpuFromHost])
 def local_gpu_gemm(node):
     """
     gpu_from_host(gemm) -> gpu_gemm(gpu_from_host)
@@ -966,7 +966,7 @@ def local_gpu_elemwise_careduce(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.Reshape])
+@local_optimizer([GpuFromHost, tensor.Reshape])
 def local_gpu_reshape(node):
     if isinstance(node.op, GpuFromHost):
         host_input = node.inputs[0]
@@ -999,7 +999,7 @@ def local_gpu_reshape(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.Flatten])
+@local_optimizer([GpuFromHost, tensor.Flatten])
 def local_gpu_flatten(node):
     if isinstance(node.op, GpuFromHost):
         host_input = node.inputs[0]
@@ -1019,7 +1019,7 @@ def local_gpu_flatten(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.Subtensor])
+@local_optimizer([GpuFromHost, tensor.Subtensor])
 def local_gpu_subtensor(node):
     if isinstance(node.op, GpuFromHost):
         host_input = node.inputs[0]
@@ -1062,7 +1062,7 @@ def local_gpu_subtensor(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.AdvancedSubtensor1])
+@local_optimizer([GpuFromHost, tensor.AdvancedSubtensor1])
 def local_gpu_advanced_subtensor1(node):
     if isinstance(node.op, GpuFromHost):
         host_input = node.inputs[0]
@@ -1083,7 +1083,7 @@ def local_gpu_advanced_subtensor1(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.AdvancedIncSubtensor1])
+@local_optimizer([GpuFromHost, tensor.AdvancedIncSubtensor1])
 def local_gpu_advanced_incsubtensor1(node):
     if isinstance(node.op, GpuFromHost):
         host_input = node.inputs[0]
@@ -1153,7 +1153,7 @@ def local_gpu_advanced_incsubtensor1(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.IncSubtensor])
+@local_optimizer([GpuFromHost, tensor.IncSubtensor])
 def local_gpu_incsubtensor(node):
     if isinstance(node.op, GpuFromHost):
         host_output = node.inputs[0]
@@ -1463,7 +1463,7 @@ def values_eq_approx_high_tol(a, b):
     return CudaNdarrayType.values_eq_approx(a, b, atol=atol)
 
-@local_optimizer([gpu_from_host, conv.ConvOp])
+@local_optimizer([GpuFromHost, conv.ConvOp])
 def local_gpu_conv(node):
     """
     gpu_from_host(conv) -> gpu_conv(gpu_from_host)
@@ -2309,7 +2309,7 @@ def local_gpu_contiguous(node):
 
 @register_opt()
-@local_optimizer([gpu_from_host, tensor.Eye])
+@local_optimizer([GpuFromHost, tensor.Eye])
 def local_gpu_eye(node):
     """
     gpu_from_host(eye) -> gpueye(gpu_from_host)
@@ -2438,7 +2438,7 @@ def typeConstructor(broadcastable, dtype):
 
 @register_opt('scan')
-@local_optimizer([gpu_from_host, scan_op.Scan])
+@local_optimizer([GpuFromHost, scan_op.Scan])
 def gpuScanOptimization(node):
     """
     scan(host_from_gpu) -> host_from_gpu(GPUscan)
@@ -2560,7 +2560,7 @@ def gpuScanOptimization(node):
 
 @register_opt()
-@local_optimizer([tensor.AllocEmpty, gpu_from_host])
+@local_optimizer([tensor.AllocEmpty, GpuFromHost])
 def local_gpu_allocempty(node):
     if (isinstance(node.op, tensor.AllocEmpty) and
             node.op.dtype == "float32"):
@@ -2727,7 +2727,7 @@ optdb.register('local_inplace_gpu_sparse_block_outer',
 
 # Move to Gpu optimization
-@local_optimizer([gpu_from_host,
+@local_optimizer([GpuFromHost,
                   AbstractConv2d,
                   AbstractConv2d_gradWeights,
                   AbstractConv2d_gradInputs,