testgroup / pytensor · Commits

Commit 8c4c9d67
Authored Aug 11, 2015 by Iban Harlouchet

numpydoc for theano/sandbox/gpuarray/opt.py

Parent: 2f22839d
Showing 1 changed file with 18 additions and 5 deletions:

theano/sandbox/gpuarray/opt.py (+18, -5)
...
@@ -89,7 +89,9 @@ def safe_to_cpu(x):
 def op_lifter(OP, cuda_only=False):
     """
     OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))
     gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)
     """
     def f(maker):
         def local_opt(node):
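The hunk above reformats the docstring of op_lifter, which states the general lifting rule. As a rough illustration of that rule only (toy tuple graphs and hypothetical names, not Theano's actual Apply/Variable machinery), a rewrite of this shape can be sketched as:

# Toy illustration of the op_lifter rule, with graphs as ("Op", args...)
# tuples. Purely hypothetical stand-ins for Theano's real graph types.
def lift(node):
    # OP(..., host_from_gpu(x), ...) -> host_from_gpu(GpuOP(..., x, ...))
    op, *inputs = node
    if not any(isinstance(i, tuple) and i[0] == "HostFromGpu" for i in inputs):
        return None  # nothing to lift
    on_gpu = [i[1] if isinstance(i, tuple) and i[0] == "HostFromGpu"
              else ("GpuFromHost", i)  # move the remaining inputs over too
              for i in inputs]
    return ("HostFromGpu", ("Gpu" + op, *on_gpu))

# Add(host_from_gpu(x), y) -> host_from_gpu(GpuAdd(x, gpu_from_host(y)))
print(lift(("Add", ("HostFromGpu", "x"), "y")))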
...
@@ -122,7 +124,10 @@ def op_lifter(OP, cuda_only=False):
 class InputToGpuOptimizer(Optimizer):
-    "Transfer the input to the gpu to start the rolling wave."
+    """
+    Transfer the input to the gpu to start the rolling wave.
+    """
     def add_requirements(self, fgraph):
         fgraph.attach_feature(toolbox.ReplaceValidate())
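The one-line summary being reformatted here refers to seeding every graph input with a transfer pair, so that the lifter passes can then pull ops across the transfers one at a time. A toy version of the seeding step, in the same hypothetical tuple notation as above:

def seed_transfers(inputs):
    # Wrap each input as host_from_gpu(gpu_from_host(x)); ops matching a
    # lifter rule can then cross the transfer one by one, which is the
    # "rolling wave" the docstring mentions.
    return [("HostFromGpu", ("GpuFromHost", x)) for x in inputs]

print(seed_transfers(["x", "y"]))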
...
@@ -173,6 +178,7 @@ def local_gpuaalloc2(node):
     Join(axis, {Alloc or HostFromGPU}, ...) -> Join(axis, GpuAlloc, Alloc, ...)
     Moves an alloc that is an input to join to the gpu.
     """
     if (isinstance(node.op, tensor.Alloc) and
         all(c != 'output' and
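As a loose illustration of the Join/Alloc rule in this docstring (again toy tuple graphs, not the real Op classes):

def lift_alloc_into_join(node):
    # Join(axis, {Alloc or HostFromGpu}, ...) -> Join(axis, GpuAlloc, ...)
    op, axis, *tensors = node
    if op != "Join":
        return None
    if not any(isinstance(t, tuple) and t[0] == "HostFromGpu" for t in tensors):
        return None  # no GPU input to justify moving the alloc
    return ("Join", axis,
            *[("GpuAlloc", *t[1:]) if isinstance(t, tuple) and t[0] == "Alloc"
              else t
              for t in tensors])

print(lift_alloc_into_join(
    ("Join", 0, ("Alloc", 0.0, 2, 3), ("HostFromGpu", "g"))))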
...
@@ -654,6 +660,7 @@ def local_gpu_conv(node):
     gpu_from_host(conv) -> gpu_conv(gpu_from_host)
     conv(host_from_gpu) -> host_from_gpu(gpu_conv)
     """
     def GpuConvOp_from_ConvOp(op):
         logical_img_hw = None
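The two conv rules are the op_lifter pattern specialized to convolution. In the same toy notation, the gpu_from_host(conv) direction might look like:

def lift_conv(node):
    # gpu_from_host(conv(img, kern)) -> gpu_conv(gpu_from_host(img), ...)
    if (node[0] == "GpuFromHost" and isinstance(node[1], tuple)
            and node[1][0] == "Conv"):
        _, (_, img, kern) = node
        return ("GpuConv", ("GpuFromHost", img), ("GpuFromHost", kern))
    return None

print(lift_conv(("GpuFromHost", ("Conv", "img", "kern"))))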
...
@@ -698,7 +705,8 @@ def local_gpu_conv(node):
         return ret

     def values_eq_approx(a, b):
-        """This fct is needed to don't have DebugMode raise useless
+        """
+        This fct is needed to don't have DebugMode raise useless
         error due to ronding error.
         This happen as We reduce on the two last dimensions, so this
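The docstring's point is that CPU and GPU accumulate the reduction over the two last dimensions in different orders, so the results differ in their last bits and DebugMode's exact comparison would raise spurious errors. A quick NumPy demonstration of why an approximate check is the right tool here (illustrative shapes and tolerances only):

import numpy as np

a = np.random.rand(4, 512, 512).astype("float32")
forward = a.reshape(4, -1).sum(axis=1)            # one summation order
backward = a.reshape(4, -1)[:, ::-1].sum(axis=1)  # same values, reversed

print(np.array_equal(forward, backward))           # usually False: rounding
print(np.allclose(forward, backward, rtol=1e-5))   # True within tolerance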
...
@@ -736,7 +744,10 @@ register_opt()(conv_groupopt)
 @register_opt("low_memory")
 @local_optimizer([GpuCAReduceCuda])
 def local_gpu_elemwise_careduce(node):
-    """ Merge some GpuCAReduceCuda and GPUElemwise"""
+    """
+    Merge some GpuCAReduceCuda and GPUElemwise.
+    """
     if (isinstance(node.op, GpuCAReduceCuda) and
             node.op.pre_scalar_op is None and
             node.inputs[0].owner and
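The fusion this optimizer performs is folding an elementwise op into the reduction that consumes it, via the reduce op's pre_scalar_op slot visible in the condition above. The effect, simulated with NumPy rather than the real GPU kernels:

import numpy as np

x = np.random.rand(3, 4).astype("float32")

# Unfused: one elemwise kernel (square), then one reduction kernel.
two_pass = np.square(x).sum(axis=1)

# Fused: a single pass that squares each element as it is accumulated
# (einsum here only simulates the one-kernel behaviour).
one_pass = np.einsum("ij,ij->i", x, x)

print(np.allclose(two_pass, one_pass))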
...
@@ -767,10 +778,11 @@ def tensor_to_gpu(x):
 def gpu_safe_new(x, tag=''):
     """
     Internal function that constructs a new variable from x with the same
     type, but with a different name (old name + tag). This function is used
     by gradient, or the R-op to construct new variables for the inputs of
     the inner graph such that there is no interference between the original
     graph and the newly constructed graph.
     """
     if hasattr(x, 'name') and x.name is not None:
         nw_name = x.name + tag
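A minimal sketch of the renaming behaviour this docstring describes, using a plain object instead of a Theano variable (all names hypothetical):

import copy

class Var:
    def __init__(self, name=None):
        self.name = name

def safe_new_sketch(x, tag=""):
    # Clone x; if it carries a name, derive the new name as old name + tag
    # so inner-graph copies never collide with the originals.
    nw = copy.copy(x)
    if getattr(x, "name", None) is not None:
        nw.name = x.name + tag
    return nw

print(safe_new_sketch(Var("W"), "_grad").name)  # -> W_grad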
...
@@ -788,8 +800,9 @@ def gpu_reconstruct_graph(inputs, outputs, tag=None):
     """
     Different interface to clone, that allows you to pass inputs.
     Compared to clone, this method always replaces the inputs with
     new variables of the same type, and returns those (in the same
     order as the original inputs).
     """
     if tag is None:
         tag = ''
...
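Finally, a toy version of the clone-with-fresh-inputs contract described in the last hunk, again on tuple graphs rather than Theano's real clone:

def reconstruct_sketch(inputs, output, tag="_copy"):
    # Replace every input with a fresh name, rewrite the graph to use the
    # replacements, and return the new inputs in the original order.
    fresh = {i: i + tag for i in inputs}

    def rewrite(node):
        if isinstance(node, tuple):
            op, *args = node
            return (op, *map(rewrite, args))
        return fresh.get(node, node)

    return [fresh[i] for i in inputs], rewrite(output)

ins, out = reconstruct_sketch(["x", "y"], ("Add", "x", ("Mul", "y", "x")))
print(ins)  # ['x_copy', 'y_copy']
print(out)  # ('Add', 'x_copy', ('Mul', 'y_copy', 'x_copy'))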