testgroup / pytensor / Commits / a536464a

Commit a536464a, authored Apr 19, 2016 by Frédéric Bastien
Merge pull request #4323 from abergeron/gpua_newelem
Use the new GpuElemwise from libgpuarray
Parents: 57ffd6a0, 0dbb97c6
Showing 5 changed files with 77 additions and 119 deletions.
- theano/sandbox/gpuarray/__init__.py (+1, -1)
- theano/sandbox/gpuarray/elemwise.py (+0, -0)
- theano/sandbox/gpuarray/subtensor.py (+36, -72)
- theano/sandbox/gpuarray/tests/test_elemwise.py (+1, -23)
- theano/tensor/tests/test_elemwise.py (+39, -23)
theano/sandbox/gpuarray/__init__.py
@@ -42,7 +42,7 @@ register_transfer(transfer)
 def init_dev(dev, name=None):
     v = pygpu.gpuarray.api_version()
-    if v[0] != -10000:
+    if v[0] != -9999:
         raise RuntimeError("Wrong major API version for gpuarray:", v[0],
                            "Make sure Theano and libgpuarray/pygpu "
                            "are in sync.")
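For orientation, the guard in init_dev compares the major ABI tag reported by pygpu against the value this Theano build expects; the commit bumps that constant from -10000 to -9999 to match the pygpu that ships the new GpuElemwise. A minimal standalone sketch of the same check (EXPECTED_MAJOR is illustrative; the real value is whatever your paired libgpuarray reports):

```python
# Sketch of the version gate in init_dev; EXPECTED_MAJOR is illustrative.
EXPECTED_MAJOR = -9999

try:
    import pygpu
    v = pygpu.gpuarray.api_version()  # first element is the major ABI tag
    if v[0] != EXPECTED_MAJOR:
        raise RuntimeError("Wrong major API version for gpuarray:", v[0],
                           "Make sure Theano and libgpuarray/pygpu are in sync.")
except ImportError:
    print("pygpu not available; skipping the GPU backend")
```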
theano/sandbox/gpuarray/elemwise.py
(Diff collapsed; not shown in this view.)
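The rewrite in this file moves GpuElemwise onto libgpuarray's own elementwise machinery. Since the diff is collapsed, here is a rough numpy reference for the contract such a kernel must honor, broadcasting inputs to a common shape and applying the scalar op pointwise (elemwise_ref is a hypothetical helper, not Theano code):

```python
import numpy as np

def elemwise_ref(scalar_op, *inputs):
    # Broadcast all inputs to a common shape, then apply the scalar op
    # at every position; a GPU elemwise kernel implements the same contract.
    bcast = np.broadcast_arrays(*inputs)
    out = np.empty(bcast[0].shape, dtype=np.result_type(*inputs))
    for idx in np.ndindex(*out.shape):
        out[idx] = scalar_op(*(a[idx] for a in bcast))
    return out

x = np.arange(6.0).reshape(2, 3)
y = np.array([10.0, 20.0, 30.0])          # broadcasts along the first axis
assert (elemwise_ref(lambda a, b: a + b, x, y) == x + y).all()
```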
theano/sandbox/gpuarray/subtensor.py
 from __future__ import absolute_import, print_function, division
 import os
-import copy
 import numpy
 from six import integer_types
 from six.moves import StringIO
-import theano
 from theano import tensor, gof
 from theano.tensor.subtensor import IncSubtensor, Subtensor, get_idx_list
-import theano.tensor.inplace
 try:
     import pygpu
@@ -18,10 +15,9 @@ try:
 except ImportError:
     pass

-from .type import GpuArrayType
+from .type import GpuArrayType, gpu_context_type
 from .basic_ops import (as_gpuarray_variable, HideC, GpuKernelBase, Kernel,
                         infer_context_name)
-from .elemwise import GpuElemwise
@@ -168,7 +164,7 @@ class GpuSubtensor(HideC, Subtensor):
         return (6,)


-class GpuIncSubtensor(GpuKernelBase, IncSubtensor):
+class GpuIncSubtensor(IncSubtensor):
     """
     Implement IncSubtensor on the gpu.
@@ -181,45 +177,20 @@ class GpuIncSubtensor(GpuKernelBase, IncSubtensor):
     :meth:`copy_of_x`, etc. specialize the c_code for this Op.

     """
-    @property
-    def _f16_ok(self):
-        return self.iadd_node.op._f16_ok
-
-    def c_headers(self):
-        return self.iadd_node.op.c_headers()
-
-    def c_init_code(self):
-        return self.iadd_node.op.c_init_code()
-
-    def gpu_kernels(self, node, nodename):
-        subname = nodename + "_add_to_zview"
-        return self.iadd_node.op.gpu_kernels(self.iadd_node, subname)
+    _f16_ok = True
+    params_type = gpu_context_type

     def make_node(self, x, y, *inputs):
         ctx_name = infer_context_name(x, y)
         x = as_gpuarray_variable(x, ctx_name)
         y = as_gpuarray_variable(y, ctx_name)
         rval = tensor.IncSubtensor.make_node(self, x, y, *inputs)
-        op = copy.copy(self)
-        ret = gof.Apply(op, [x, y] + rval.inputs[2:], [x.type()])
-        op.create_iadd_node(ret)
+        ret = gof.Apply(self, [x, y] + rval.inputs[2:], [x.type()])
         return ret

     def get_params(self, node):
         return node.outputs[0].type.context

-    def create_iadd_node(self, node):
-        # We store a iadd_node in the op that contain the info needed
-        # for the inplace add.
-        cop = theano.tensor.inplace.add_inplace
-        gop = GpuElemwise(cop.scalar_op, copy.copy(cop.inplace_pattern),
-                          "Gpu" + cop.name, cop.nfunc_spec)
-        y = node.inputs[1]
-        xview = y.type()
-        iadd_node = gop(xview, y).owner
-        self.iadd_node = iadd_node
-
     def perform(self, node, inputs, out_, ctx):
         out, = out_
         x, y = inputs[:2]
@@ -261,18 +232,6 @@ class GpuIncSubtensor(GpuKernelBase, IncSubtensor):
         x.__setitem__(cdata, y)
         out[0] = x

-    def __setstate__(self, d):
-        self.__dict__.update(d)
-        owner = getattr(self, "owner", None)
-        if owner:
-            self.create_iadd_node(owner)
-
-    def __getstate__(self):
-        d = copy.copy(self.__dict__)
-        if "iadd_node" in d:
-            d.pop('iadd_node')
-        return d
-
     def do_type_checking(self, node):
         """
         Should raise NotImplementedError if c_code does not support
@@ -365,47 +324,52 @@ class GpuIncSubtensor(GpuKernelBase, IncSubtensor):
         """
         return """GpuArray_setarray(&%(view)s->ga, &%(source)s->ga)""" % locals()

+    def c_headers(self):
+        return ['<numpy_compat.h>', '<gpuarray/error.h>', '<gpuarray/array.h>',
+                '<gpuarray/elemwise.h>']
+
     def c_support_code_struct(self, node, nodename):
-        gop = self.iadd_node.op
-        sub_name = nodename + "_add_to_zview"
-        ret = gop.c_support_code_struct(self.iadd_node, sub_name)
-        ret += """
-        PyGpuArrayObject* inc_sub_iadd_%(nodename)s(PyGpuArrayObject* dst,
-                                                    PyGpuArrayObject* src){
-            PyGpuArrayObject* ret = NULL;
-        """ % locals()
-        inputs = ["dst", "src"]
-        outputs = ["ret"]
-        sub = {"fail": "return NULL;", "params": "dst->context"}
-        ret += gop.c_code(self.iadd_node, sub_name, inputs, outputs, sub)
-        ret += """
-            return ret;
-        }
-        """
-        return ret
+        return "\nGpuElemwise *iadd;\n"
+
+    def c_init_code_struct(self, node, name, sub):
+        return """
+        gpuelemwise_arg args[2] = {{0}};
+        args[0].name = "a";
+        args[0].typecode = %(type1)s;
+        args[0].flags = GE_READ|GE_WRITE;
+        args[1].name = "b";
+        args[1].typecode = %(type2)s;
+        args[1].flags = GE_READ;
+        iadd = GpuElemwise_new(%(ctx)s->ops, %(ctx)s->ctx, "", "a += b",
+                               2, args, %(nd)s, 0);
+        if (iadd == NULL) {
+            PyErr_SetString(PyExc_RuntimeError, "Could not intialize inplace add support");
+            %(fail)s
+        }
+        """ % dict(ctx=sub['params'], fail=sub['fail'],
+                   type1=node.inputs[0].type.typecode,
+                   type2=node.inputs[1].type.typecode,
+                   nd=node.inputs[1].ndim)

     def add_to_zview(self, nodename, x, fail):
         return """
-        PyGpuArrayObject * add_result = inc_sub_iadd_%(nodename)s(zview, %(x)s);
-
-        if (! add_result )
         {
-            Py_DECREF(zview);
-            %(fail)s;
+            void *args[2];
+            args[0] = &zview->ga;
+            args[1] = &%(x)s->ga;
+            if (GpuElemwise_call(iadd, args, GE_BROADCAST) != GA_NO_ERROR) {
+                PyErr_SetString(PyExc_RuntimeError, "Error doing inplace add");
+                Py_DECREF(zview);
+                %(fail)s
+            }
         }
-        else
-        {
-            Py_DECREF(add_result);
-        }
         """ % locals()

     def c_code_cache_version(self):
         parent_version = super(GpuIncSubtensor, self).c_code_cache_version()
-        elemwise_version = self.iadd_node.c_code_cache_version()
-        if not parent_version or not elemwise_version:
+        if not parent_version:
             return
-        return parent_version + elemwise_version + (3,)
+        return parent_version + (5,)


 class GpuAdvancedSubtensor1(HideC, tensor.AdvancedSubtensor1):
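Net effect for GpuIncSubtensor: instead of pickling a helper iadd_node and generating a bespoke kernel, the Op now builds one libgpuarray GpuElemwise ("a += b") per compiled node and calls it on the destination view. A CPU-side sketch of the semantics the Op implements (inc_subtensor_ref and its arguments are illustrative, not Theano code):

```python
import numpy as np

def inc_subtensor_ref(x, idx, y, set_instead_of_inc=False):
    # Mirror of the C flow: take a view ("zview") of the output at idx,
    # then either overwrite it or add y into it in place.
    out = x.copy()                 # the Op copies unless made inplace
    if set_instead_of_inc:
        out[idx] = y
    else:
        out[idx] += y              # this "+=" is what iadd performs on the GPU
    return out

a = np.zeros((3, 3))
print(inc_subtensor_ref(a, (slice(0, 2), 1), 5.0))
```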
theano/sandbox/gpuarray/tests/test_elemwise.py
@@ -18,40 +18,18 @@ from pygpu import ndgpuarray as gpuarray
 # This is acutally a test for GpuElemwise
 class test_gpu_Broadcast(test_elemwise.test_Broadcast):
-    op = GpuElemwise
-    type = GpuArrayType
     cop = GpuElemwise
     ctype = GpuArrayType
     # The order is important
     linkers = [gof.PerformLinker, gof.CLinker]

-    def setUp(self):
-        if get_context(test_ctx_name).kind != 'cuda':
-            self.linkers = [gof.PerformLinker]
-
-    def rand_val(self, shp):
-        return rand_gpuarray(*shp, **dict(cls=gpuarray))
-
     def rand_cval(self, shp):
         return rand_gpuarray(*shp, **dict(cls=gpuarray))

-    def test_c(self):
-        if get_context(test_ctx_name).kind != 'cuda':
-            raise SkipTest("Cuda specific tests")
-        super(test_gpu_Broadcast, self).test_c()
-
-    def test_c_inplace(self):
-        if get_context(test_ctx_name).kind != 'cuda':
-            raise SkipTest("Cuda specific tests")
-        super(test_gpu_Broadcast, self).test_c_inplace()
-

 def test_elemwise_pow():
     # Test that GpuElemwise(pow) can compile with any combination of integer
     # or float input dtype.
-    if get_context(test_ctx_name).kind != 'cuda':
-        raise SkipTest("Cuda specific tests")
-
     dtypes = ["uint8", "uint16", "uint32", "uint64",
               "int8", "int16", "int32", "int64",
               "float16", "float32", "float64"]
@@ -65,10 +43,10 @@ def test_elemwise_pow():
         output = base ** exp
         f = theano.function([base, exp], output)

-        # Call the function to make sure the output is valid
         base_val = numpy.random.randint(0, 5, size=10).astype(dtype_base)
         exp_val = numpy.random.randint(0, 3, size=10).astype(dtype_exp)
+
+        # Call the function to make sure the output is valid
         out = f(base_val, exp_val)
         expected_out = base_val ** exp_val
         assert_allclose(out, expected_out)
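With the CUDA-only guards removed, the GPU test class now reuses the CPU suite wholesale: the base class's Python-linker path (plain Elemwise on numpy data) serves as the reference, and the subclass overrides only the C-linker triple (cop, ctype, rand_cval) to target the GPU. A schematic of that override pattern (classes and values illustrative, not Theano's actual test API):

```python
# Schematic of the test reuse pattern; names are illustrative.
class BroadcastSuite(object):
    op, cop = "cpu-elemwise", "cpu-elemwise"     # perform-linker / c-linker ops

    def run(self):
        for linker, op in zip(["perform", "c"], [self.op, self.cop]):
            print("%s linker -> %s" % (linker, op))

class GpuBroadcastSuite(BroadcastSuite):
    cop = "gpu-elemwise"                         # only the C path hits the GPU

GpuBroadcastSuite().run()   # perform linker -> cpu-elemwise, c linker -> gpu-elemwise
```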
theano/tensor/tests/test_elemwise.py
@@ -166,10 +166,12 @@ class test_Broadcast(unittest.TestCase):
     linkers = [gof.PerformLinker, gof.CLinker]

     def rand_val(self, shp):
-        return numpy.asarray(numpy.random.rand(*shp))
+        return numpy.asarray(numpy.random.rand(*shp),
+                             dtype=theano.config.floatX)

     def rand_cval(self, shp):
-        return numpy.asarray(numpy.random.rand(*shp))
+        return numpy.asarray(numpy.random.rand(*shp),
+                             dtype=theano.config.floatX)

     def setUp(self):
         unittest_tools.seed_rng()
@@ -189,8 +191,10 @@ class test_Broadcast(unittest.TestCase):
                          ((2, 3, 4, 5), (1, 3, 1, 5)),
                          ((2, 3, 4, 5), (1, 1, 1, 1)),
                          ((), ())]:
-            x = type('float64', [(entry == 1) for entry in xsh])('x')
-            y = type('float64', [(entry == 1) for entry in ysh])('y')
+            x = type(theano.config.floatX,
+                     [(entry == 1) for entry in xsh])('x')
+            y = type(theano.config.floatX,
+                     [(entry == 1) for entry in ysh])('y')
             e = op(scalar.add)(x, y)
             f = copy(linker).accept(FunctionGraph([x, y], [e])).make_function()
             xv = rand_val(xsh)
@@ -202,8 +206,10 @@ class test_Broadcast(unittest.TestCase):
             # test Elemwise.infer_shape
             # the Shape op don't implement c_code!
             if isinstance(linker, gof.PerformLinker):
-                x = type('float64', [(entry == 1) for entry in xsh])('x')
-                y = type('float64', [(entry == 1) for entry in ysh])('y')
+                x = type(theano.config.floatX,
+                         [(entry == 1) for entry in xsh])('x')
+                y = type(theano.config.floatX,
+                         [(entry == 1) for entry in ysh])('y')
                 e = op(scalar.add)(x, y)
                 f = copy(linker).accept(FunctionGraph(
                     [x, y], [e.shape])).make_function()
@@ -218,8 +224,10 @@ class test_Broadcast(unittest.TestCase):
                          ((2, 3, 4, 5), (1, 3, 1, 5)),
                          ((2, 3, 4, 5), (1, 1, 1, 1)),
                          ((), ())]:
-            x = type('float64', [(entry == 1) for entry in xsh])('x')
-            y = type('float64', [(entry == 1) for entry in ysh])('y')
+            x = type(theano.config.floatX,
+                     [(entry == 1) for entry in xsh])('x')
+            y = type(theano.config.floatX,
+                     [(entry == 1) for entry in ysh])('y')
             e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
             f = copy(linker).accept(FunctionGraph([x, y], [e])).make_function()
             xv = rand_val(xsh)
@@ -232,8 +240,10 @@ class test_Broadcast(unittest.TestCase):
             # test Elemwise.infer_shape
             # the Shape op don't implement c_code!
             if isinstance(linker, gof.PerformLinker):
-                x = type('float64', [(entry == 1) for entry in xsh])('x')
-                y = type('float64', [(entry == 1) for entry in ysh])('y')
+                x = type(theano.config.floatX,
+                         [(entry == 1) for entry in xsh])('x')
+                y = type(theano.config.floatX,
+                         [(entry == 1) for entry in ysh])('y')
                 e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
                 f = copy(linker).accept(FunctionGraph(
                     [x, y], [e.shape])).make_function()
@@ -267,13 +277,15 @@ class test_Broadcast(unittest.TestCase):
     def test_fill(self):
         if not theano.config.cxx:
             raise SkipTest("G++ not available, so we need to skip this test.")
-        x = self.ctype('float64', [0, 0])('x')
-        y = self.ctype('float64', [1, 1])('y')
-        for linker, op in zip(self.linkers, [self.op, self.cop]):
+        for linker, op, t, rval in zip(self.linkers, [self.op, self.cop],
+                                       [self.type, self.ctype],
+                                       [self.rand_val, self.rand_cval]):
+            x = t(theano.config.floatX, [0, 0])('x')
+            y = t(theano.config.floatX, [1, 1])('y')
             e = op(scalar.Second(scalar.transfer_type(0)), {0: 0})(x, y)
             f = linker().accept(FunctionGraph([x, y], [e])).make_function()
-            xv = self.rand_cval((5, 5))
-            yv = self.rand_cval((1, 1))
+            xv = rval((5, 5))
+            yv = rval((1, 1))
             f(xv, yv)
             assert (xv == yv).all()
@@ -292,24 +304,28 @@ class test_Broadcast(unittest.TestCase):
     def test_weird_strides(self):
         if not theano.config.cxx:
             raise SkipTest("G++ not available, so we need to skip this test.")
-        x = self.ctype('float64', [0, 0, 0, 0, 0])('x')
-        y = self.ctype('float64', [0, 0, 0, 0, 0])('y')
-        for linker, op in zip(self.linkers, [self.op, self.cop]):
+        for linker, op, t, rval in zip(self.linkers, [self.op, self.cop],
+                                       [self.type, self.ctype],
+                                       [self.rand_val, self.rand_cval]):
+            x = t(theano.config.floatX, [0, 0, 0, 0, 0])('x')
+            y = t(theano.config.floatX, [0, 0, 0, 0, 0])('y')
             e = op(scalar.add)(x, y)
             f = linker().accept(FunctionGraph([x, y], [e])).make_function()
-            xv = self.rand_cval((2, 2, 2, 2, 2))
-            yv = self.rand_cval((2, 2, 2, 2, 2)).transpose(4, 0, 3, 1, 2)
+            xv = rval((2, 2, 2, 2, 2))
+            yv = rval((2, 2, 2, 2, 2)).transpose(4, 0, 3, 1, 2)
             zv = xv + yv
             assert (f(xv, yv) == zv).all()

     def test_same_inputs(self):
         if not theano.config.cxx:
             raise SkipTest("G++ not available, so we need to skip this test.")
-        x = self.ctype('float64', [0, 0])('x')
-        for linker, op in zip(self.linkers, [self.op, self.cop]):
+        for linker, op, t, rval in zip(self.linkers, [self.op, self.cop],
+                                       [self.type, self.ctype],
+                                       [self.rand_val, self.rand_cval]):
+            x = t(theano.config.floatX, [0, 0])('x')
             e = op(scalar.add)(x, x)
             f = linker().accept(FunctionGraph([x], [e])).make_function()
-            xv = self.rand_cval((2, 2))
+            xv = rval((2, 2))
             zv = xv + xv
             assert (f(xv) == zv).all()
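These test edits generalize the CPU suite in two ways: random values and symbolic variables now use theano.config.floatX instead of hard-coded 'float64' (so the suite also runs under float32/float16 configurations), and each test iterates over paired (linker, op, type, rand) tuples so subclasses can swap in GPU counterparts. A tiny sketch of the dtype half, with a stand-in for the config value:

```python
import numpy as np

floatX = "float32"  # stand-in for theano.config.floatX

def rand_val(shp):
    # numpy.random.rand always returns float64; the patched helpers cast
    # to the configured dtype, exactly as rand_val/rand_cval now do.
    return np.asarray(np.random.rand(*shp), dtype=floatX)

assert rand_val((2, 3)).dtype == np.dtype(floatX)
```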