testgroup / pytensor · Commits

Commit 0f4d2013, authored April 03, 2012 by Frederic
Parent: 65ab2e1b

Commented out the pycuda example that uses the pycuda elemwise generator, as it works only with old versions of pycuda.
Showing 2 changed files with 94 additions and 89 deletions.

theano/misc/pycuda_example.py: +90 −87
theano/misc/tests/test_pycuda_example.py: +4 −2
theano/misc/pycuda_example.py
@@ -6,10 +6,6 @@ You can use them as a guide to use your pycuda code into a Theano op.
 The PycudaElemwiseSourceModuleOp is a Theano op use pycuda code
 generated with pycuda.compiler.SourceModule
-The PycudaElemwiseKernelOp op use pycuda code generated with
-pycuda.elementwise.ElementwiseKernel. It must be wrapper by
-TheanoElementwiseKernel.
 Their is a test in test_pycuda.py.
 This don't work with broadcast and non-contiguous memory as pycuda
@@ -17,6 +13,11 @@ don't support that, but we make sure we don't introduce problem.
 If the memory is non-contiguous, we create a new copy that is contiguous.
 If their is broadcasted dimensions, we raise an error.
+#The following is commented as it work only with old pycuda version
+The PycudaElemwiseKernelOp op use pycuda code generated with
+pycuda.elementwise.ElementwiseKernel. It must be wrapper by
+TheanoElementwiseKernel.
 """
 import numpy
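For context on the API this docstring names: PycudaElemwiseSourceModuleOp builds its kernel with pycuda.compiler.SourceModule. The standalone sketch below shows that pattern in isolation; it is illustrative only and not part of this commit (the kernel body, sizes, and launch configuration are assumptions):

import numpy
import pycuda.autoinit  # creates a CUDA context on import
import pycuda.gpuarray as gpuarray
from pycuda.compiler import SourceModule

# Compile a raw elementwise addition kernel from CUDA C source.
mod = SourceModule("""
__global__ void add(float *z, float *x, float *y, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        z[i] = x[i] + y[i];
}
""")
add = mod.get_function("add")

n = 256
x = gpuarray.to_gpu(numpy.random.rand(n).astype(numpy.float32))
y = gpuarray.to_gpu(numpy.random.rand(n).astype(numpy.float32))
z = gpuarray.empty_like(x)
# GPUArrays are passed directly; plain scalars must be numpy-typed.
add(z, x, y, numpy.int32(n), block=(256, 1, 1), grid=(1, 1))
assert numpy.allclose(z.get(), x.get() + y.get())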
@@ -50,7 +51,7 @@ def theano_parse_c_arg(c_arg):
     c_arg = c_arg.replace('npy_uint8', 'unsigned char')
     return pycuda.tools.parse_c_arg(c_arg)

+"""
 class TheanoElementwiseKernel(pycuda.elementwise.ElementwiseKernel):
     def __init__(self, arguments, operation,
                  name="kernel", keep=False, options=[], **kwargs):
@@ -83,6 +84,90 @@ class TheanoElementwiseKernel(pycuda.elementwise.ElementwiseKernel):
         self.func.prepared_call(_grid, *invocation_args)

+
+class PycudaElemwiseKernelOp(GpuOp):
+    nin = property(lambda self: self.scalar_op.nin)
+    nout = property(lambda self: self.scalar_op.nout)
+
+    def __init__(self, scalar_op, inplace_pattern={}, name=None):
+        self.name = name
+        self.scalar_op = scalar_op
+        self.inplace_pattern = None
+
+    def __str__(self):
+        if self.name is None:
+            if self.inplace_pattern:
+                items = self.inplace_pattern.items()
+                items.sort()
+                return self.__class__.__name__ + "{%s}%s" % (self.scalar_op,
+                                                             str(items))
+            else:
+                return self.__class__.__name__ + "{%s}" % (self.scalar_op)
+        else:
+            return self.name
+
+    def __eq__(self, other):
+        return (type(self) == type(other) and
+                self.scalar_op == other.scalar_op and
+                self.inplace_pattern == other.inplace_pattern)
+
+    def __hash__(self):
+        return (hash(type(self)) ^ hash(self.scalar_op) ^
+                hash(self.inplace_pattern))
+
+    def make_node(self, *inputs):
+        _inputs = [gpu_contiguous(as_cuda_ndarray_variable(i)) for i in inputs]
+        if self.nin > 0 and len(_inputs) != self.nin:
+            raise TypeError('Wrong argument count', (self.nin, len(_inputs)))
+        for i in _inputs[1:]:
+            if i.type.ndim != inputs[0].type.ndim:
+                raise TypeError('different ranks among inputs')
+
+        if any([any(i.type.broadcastable) for i in inputs]):
+            raise Exception("pycuda don't support broadcasted dimensions")
+        assert len(inputs) == 2  # TODO remove
+
+        # output is broadcastable only along dimensions where all inputs are
+        # broadcastable
+        broadcastable = []
+        for d in xrange(_inputs[0].type.ndim):
+            bcast_d = True
+            for i in _inputs:
+                if not i.type.broadcastable[d]:
+                    bcast_d = False
+                    break
+            broadcastable.append(bcast_d)
+        assert len(broadcastable) == _inputs[0].type.ndim
+
+        otype = CudaNdarrayType(broadcastable=broadcastable)
+        assert self.nout == 1
+        out_node = Apply(self, _inputs, [otype() for o in xrange(self.nout)])
+
+        in_name = ["i" + str(id) for id in range(len(inputs))]
+        out_name = ["o" + str(id) for id in range(self.nout)]
+        c_code = self.scalar_op.c_code(out_node, "some_name",
+                                       tuple([n + "[i]" for n in in_name]),
+                                       tuple(n + "[i]" for n in out_name), {})
+        self.pycuda_fct = TheanoElementwiseKernel(
+            ", ".join([var.type.dtype_specs()[1] + " *" + name
+                       for var, name in (zip(inputs, in_name) +
+                                         zip(out_node.outputs, out_name))]),
+            c_code,
+            "pycuda_elemwise_kernel_%s" % str(self.scalar_op),
+            preamble=("#include<Python.h>\n"
+                      "#include <numpy/arrayobject.h>"))
+        return out_node
+
+    def perform(self, node, inputs, out):
+        #TODO assert all input have the same shape
+        z, = out
+        if z[0] is None or z[0].shape != inputs[0].shape:
+            z[0] = theano.sandbox.cuda.CudaNdarray.zeros(inputs[0].shape)
+        i = inputs + z
+        self.pycuda_fct(*i)
+"""

 class PycudaElemwiseSourceModuleOp(GpuOp):
     nin = property(lambda self: self.scalar_op.nin)
     nout = property(lambda self: self.scalar_op.nout)
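The block commented out above wraps pycuda.elementwise.ElementwiseKernel. As a point of reference, here is a minimal standalone use of that generator, mirroring the x * x + y expression exercised in test_pycuda_example.py; it is illustrative only and independent of the Theano wrapper (the variable and kernel names are assumptions):

import numpy
import pycuda.autoinit  # creates a CUDA context on import
import pycuda.gpuarray as gpuarray
from pycuda.elementwise import ElementwiseKernel

# ElementwiseKernel generates the indexing loop; only the argument
# list and the per-element expression are supplied.
mul_add = ElementwiseKernel(
    "float *z, float *x, float *y",
    "z[i] = x[i] * x[i] + y[i]",
    "mul_add")

x = gpuarray.to_gpu(numpy.random.rand(256).astype(numpy.float32))
y = gpuarray.to_gpu(numpy.random.rand(256).astype(numpy.float32))
z = gpuarray.empty_like(x)
mul_add(z, x, y)
assert numpy.allclose(z.get(), x.get() * x.get() + y.get())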
@@ -268,88 +353,6 @@ class PycudaElemwiseSourceModuleMakeThunkOp(Op):
         return thunk
-class PycudaElemwiseKernelOp(GpuOp):
-    nin = property(lambda self: self.scalar_op.nin)
-    nout = property(lambda self: self.scalar_op.nout)
-
-    def __init__(self, scalar_op, inplace_pattern={}, name=None):
-        self.name = name
-        self.scalar_op = scalar_op
-        self.inplace_pattern = None
-
-    def __str__(self):
-        if self.name is None:
-            if self.inplace_pattern:
-                items = self.inplace_pattern.items()
-                items.sort()
-                return self.__class__.__name__ + "{%s}%s" % (self.scalar_op,
-                                                             str(items))
-            else:
-                return self.__class__.__name__ + "{%s}" % (self.scalar_op)
-        else:
-            return self.name
-
-    def __eq__(self, other):
-        return (type(self) == type(other) and
-                self.scalar_op == other.scalar_op and
-                self.inplace_pattern == other.inplace_pattern)
-
-    def __hash__(self):
-        return (hash(type(self)) ^ hash(self.scalar_op) ^
-                hash(self.inplace_pattern))
-
-    def make_node(self, *inputs):
-        _inputs = [gpu_contiguous(as_cuda_ndarray_variable(i)) for i in inputs]
-        if self.nin > 0 and len(_inputs) != self.nin:
-            raise TypeError('Wrong argument count', (self.nin, len(_inputs)))
-        for i in _inputs[1:]:
-            if i.type.ndim != inputs[0].type.ndim:
-                raise TypeError('different ranks among inputs')
-
-        if any([any(i.type.broadcastable) for i in inputs]):
-            raise Exception("pycuda don't support broadcasted dimensions")
-        assert len(inputs) == 2  # TODO remove
-
-        # output is broadcastable only along dimensions where all inputs are
-        # broadcastable
-        broadcastable = []
-        for d in xrange(_inputs[0].type.ndim):
-            bcast_d = True
-            for i in _inputs:
-                if not i.type.broadcastable[d]:
-                    bcast_d = False
-                    break
-            broadcastable.append(bcast_d)
-        assert len(broadcastable) == _inputs[0].type.ndim
-
-        otype = CudaNdarrayType(broadcastable=broadcastable)
-        assert self.nout == 1
-        out_node = Apply(self, _inputs, [otype() for o in xrange(self.nout)])
-
-        in_name = ["i" + str(id) for id in range(len(inputs))]
-        out_name = ["o" + str(id) for id in range(self.nout)]
-        c_code = self.scalar_op.c_code(out_node, "some_name",
-                                       tuple([n + "[i]" for n in in_name]),
-                                       tuple(n + "[i]" for n in out_name), {})
-        self.pycuda_fct = TheanoElementwiseKernel(
-            ", ".join([var.type.dtype_specs()[1] + " *" + name
-                       for var, name in (zip(inputs, in_name) +
-                                         zip(out_node.outputs, out_name))]),
-            c_code,
-            "pycuda_elemwise_kernel_%s" % str(self.scalar_op),
-            preamble="""#include<Python.h>
-#include <numpy/arrayobject.h>""")
-        return out_node
-
-    def perform(self, node, inputs, out):
-        #TODO assert all input have the same shape
-        z, = out
-        if z[0] is None or z[0].shape != inputs[0].shape:
-            z[0] = theano.sandbox.cuda.CudaNdarray.zeros(inputs[0].shape)
-        i = inputs + z
-        self.pycuda_fct(*i)

 pycuda_optimizer = EquilibriumDB()
 gpu_seqopt.register("pycuda_optimizer", pycuda_optimizer, 1.5,
                     "fast_run")
theano/misc/tests/test_pycuda_example.py
@@ -16,7 +16,7 @@ if cuda_ndarray.cuda_available == False:
 import theano
 import theano.tensor as T
 from theano.misc.pycuda_example import (PycudaElemwiseSourceModuleOp,
-                                        PycudaElemwiseKernelOp,
+                                        #PycudaElemwiseKernelOp,
                                         PycudaElemwiseSourceModuleMakeThunkOp)
 if theano.config.mode == 'FAST_COMPILE':
@@ -67,7 +67,8 @@ def test_pycuda_elemwise_source_module():
     #print f(val1,val2)
     #print f2(val1,val2)

+"""
+#commented as it work only with old pycuda version.
 def test_pycuda_elemwise_kernel():
     x = T.fmatrix('x')
     y = T.fmatrix('y')
@@ -103,3 +104,4 @@ def test_pycuda_elemwise_kernel():
     print val1
     print f4(val1, val1, val1)
     assert numpy.allclose(f4(val1, val1, val1), val1 * val1 + val1)
+"""