testgroup / pytensor / Commits

Commit 447ab32d
authored Nov 20, 2014 by abergeron
Merge pull request #2265 from nouiz/warning
Remove warning caused by AdvancedSubtensor.infer_shape.
Parents: 881f00e9 11e3b03a
Showing 11 changed files with 63 additions and 19 deletions.
theano/compile/debugmode.py                            +1  -1
theano/sandbox/cuda/dnn.py                             +4  -3
theano/sandbox/cuda/opt.py                             +4  -2
theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py    +7  -1
theano/sandbox/cuda/tests/test_dnn.py                  +1  -1
theano/sandbox/cuda/tests/test_nnet.py                 +7  -1
theano/tensor/subtensor.py                             +3  -1
theano/tensor/tests/test_basic.py                      +7  -5
theano/tensor/tests/test_gc.py                         +1  -1
theano/tensor/tests/test_subtensor.py                  +21 -0
theano/tests/unittest_tools.py                         +7  -3
theano/compile/debugmode.py

@@ -2098,7 +2098,7 @@ class _Linker(gof.link.LocalLinker):
                 return deco
             f = run_with_tensortype_filter_check(f)
+            f.storage_map = storage_map
             f.allow_gc = True
             assert len(fgraph.inputs) == len(input_storage)
             assert len(fgraph.outputs) == len(output_storage)
...
theano/sandbox/cuda/dnn.py

@@ -1136,8 +1136,8 @@ if cuda_available:
             subsample = node.op.subsample
             return [dnn_conv(gpu_contiguous(img), gpu_contiguous(kern),
                              border_mode=border_mode, subsample=subsample)]

-    @register_opt('cudnn')
+    # DISABLED as there is problems in the handling of borders
+    # @register_opt('cudnn')
     @local_optimizer([GpuDownsampleFactorMax])
     def local_pool_dnn(node):
         if not dnn_available():
...

@@ -1149,7 +1149,8 @@ if cuda_available:
             ds = node.op.ds
             return [dnn_pool(gpu_contiguous(img), ds, ds)]

-    @register_opt('cudnn')
+    # DISABLED as there is problems in the handling of borders
+    # @register_opt('cudnn')
     @local_optimizer([GpuDownsampleFactorMaxGrad])
     def local_pool_dnn_grad(node):
         if not dnn_available():
...
theano/sandbox/cuda/opt.py

@@ -1337,11 +1337,13 @@ conv_groupopt.register('conv_fft_full', local_conv_fft_full, 10,
 # It can be disabled by excluding 'conv_dnn' or 'cudnn'.
 from . import dnn
 if dnn.dnn_available():
-    conv_groupopt.register('conv_dnn', dnn.local_conv_dnn, 20,
+    conv_groupopt.register('local_conv_dnn', dnn.local_conv_dnn, 20,
+                           'conv_dnn',
                            'fast_compile', 'fast_run', 'cudnn')
 # The GEMM-based convolution comes last to catch all remaining cases.
 # It can be disabled by excluding 'conv_gemm'.
-conv_groupopt.register('conv_gemm', local_conv_gemm, 30,
+conv_groupopt.register('local_conv_gemm', local_conv_gemm, 30,
+                       'conv_gemm',
                        'fast_compile', 'fast_run')
...
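The two registrations above are renamed from 'conv_dnn'/'conv_gemm' to 'local_conv_dnn'/'local_conv_gemm' while the old names are kept as tags, presumably so that either name still disables the optimization (the updated test below excludes 'local_conv_dnn' in one place and 'conv_dnn' in another). A self-contained toy sketch of a name-plus-tag exclusion lookup, illustrative only and not Theano's real optimizer database:

    # Toy registry (hypothetical class): an entry is skipped when either its
    # registration name or one of its tags appears in the exclusion list.
    class GroupOpt(object):
        def __init__(self):
            self.entries = []  # (name, optimizer, position, tags)

        def register(self, name, opt, position, *tags):
            self.entries.append((name, opt, position, set(tags)))

        def applicable(self, excluded=()):
            excluded = set(excluded)
            return [name for name, _, _, tags in
                    sorted(self.entries, key=lambda e: e[2])
                    if name not in excluded and not (tags & excluded)]

    conv_groupopt = GroupOpt()
    conv_groupopt.register('local_conv_dnn', object(), 20,
                           'conv_dnn', 'fast_compile', 'fast_run', 'cudnn')
    conv_groupopt.register('local_conv_gemm', object(), 30,
                           'conv_gemm', 'fast_compile', 'fast_run')

    print(conv_groupopt.applicable(excluded=['conv_dnn']))        # ['local_conv_gemm']
    print(conv_groupopt.applicable(excluded=['local_conv_dnn']))  # ['local_conv_gemm']
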
theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py

@@ -616,7 +616,13 @@ def test_default_conv():
     assert any([isinstance(a.op, cuda.blas.GpuCorrMM)
                 for a in f.maker.fgraph.apply_nodes])

-    mode = theano_mode.excluding('local_gpu_conv', 'local_conv_gemm')
+    mode = theano_mode.excluding('local_conv_dnn', 'local_conv_gemm')
+    f = theano.function([img, fil], c, mode=mode)
+    assert any([isinstance(a.op, cuda.blas.GpuConv)
+                for a in f.maker.fgraph.apply_nodes])
+
+    mode = theano_mode.excluding('conv_dnn', 'conv_gemm')
     f = theano.function([img, fil], c, mode=mode)
     assert any([isinstance(a.op, cuda.blas.GpuConv)
...
theano/sandbox/cuda/tests/test_dnn.py

@@ -121,7 +121,7 @@ def test_dnn_tag():
             [x],
             max_pool_2d(x, ds=(2, 2)),
             mode=mode_with_gpu.including("cudnn"))
-    except RuntimeError, e:
+    except (AssertionError, RuntimeError), e:
         assert not cuda.dnn.dnn_available()
         raised = True
     finally:
...
theano/sandbox/cuda/tests/test_nnet.py

@@ -301,6 +301,12 @@ class test_SoftMax(unittest.TestCase):
         self._cmp(2 << 15, 5, f, f_gpu)
         self._cmp(0, 10, f, f_gpu)

+    def test_softmax_cudnn(self):
+        if not cuda.dnn.dnn_available():
+            raise SkipTest(cuda.dnn.dnn_available.msg)
+        x = T.fmatrix('x')
+        z = T.nnet.softmax
+
         def check_types_with_cudnn(graph, graph_gpu):
             self._check_types(
                 graph,
...

@@ -320,7 +326,7 @@ class test_SoftMax(unittest.TestCase):
             check_types_with_cudnn
         )

-    def test_cudnn_softmax(self):
+    def test_cudnn_softmax_grad(self):
         if not cuda.dnn.dnn_available():
             raise SkipTest(cuda.dnn.dnn_available.msg)
...
theano/tensor/subtensor.py

@@ -1893,7 +1893,9 @@ class AdvancedSubtensor(Op):
         # Really special case
         if len(ishapes) == 3:
             xshp, ind1shp, ind2shp = ishapes
-            if len(xshp) == 2 and len(ind1shp) == 1 and len(ind2shp) == 1:
+            if (len(xshp) == 2 and
+                    ind1shp is not None and len(ind1shp) == 1 and
+                    ind2shp is not None and len(ind2shp) == 1):
                 # if the graph is correct, we can assume ind1shp[0] and
                 # ind2shp[0] will have the same value.
                 # Try to return the one closest to the graph input.
...
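The guard added above avoids calling len() on an index shape that is None. A minimal plain-Python sketch of the idea (hypothetical helper, not the real Op; it assumes a None index shape means the index is not a plain vector, which is the situation the new test below exercises with admat[aivec, 1:3]):

    def adv_subtensor_out_shape(xshp, ind1shp, ind2shp):
        # Mirror of the guarded condition: only infer a shape when the input
        # is a matrix and both index shapes are known 1-d vectors.
        if (len(xshp) == 2 and
                ind1shp is not None and len(ind1shp) == 1 and
                ind2shp is not None and len(ind2shp) == 1):
            # ind1shp and ind2shp have the same length, so the result is a
            # vector of that length.
            return [ind1shp]
        return None  # not handled here; the caller falls back to a default

    print(adv_subtensor_out_shape((5, 4), (3,), (3,)))   # [(3,)]
    print(adv_subtensor_out_shape((5, 4), (3,), None))   # None instead of a TypeError
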
theano/tensor/tests/test_basic.py

@@ -3481,7 +3481,7 @@ class T_Join_and_Split(unittest.TestCase):
     def test_join_matrixV(self):
         """variable join axis"""
-        v = numpy.array([[1., 2., 3.], [4., 5., 6.]], dtype=self.floatX)
+        v = numpy.array([[.1, .2, .3], [.4, .5, .6]], dtype=self.floatX)
         a = self.shared(v.copy())
         b = as_tensor_variable(v.copy())
         ax = lscalar()
...

@@ -3491,13 +3491,15 @@ class T_Join_and_Split(unittest.TestCase):
         topo = f.maker.fgraph.toposort()
         assert [True for node in topo if isinstance(node.op, self.join_op)]

-        want = numpy.array([[1, 2, 3], [4, 5, 6], [1, 2, 3], [4, 5, 6]])
+        want = numpy.array([[.1, .2, .3], [.4, .5, .6],
+                            [.1, .2, .3], [.4, .5, .6]])
         got = f(0)
-        self.assertTrue((got == want).all(), (got, want))
+        assert numpy.allclose(got, want)

-        want = numpy.array([[1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6]])
+        want = numpy.array([[.1, .2, .3, .1, .2, .3],
+                            [.4, .5, .6, .4, .5, .6]])
         got = f(1)
-        self.assertTrue((got == want).all(), (got, want))
+        assert numpy.allclose(got, want)

         utt.verify_grad(lambda a, b: join(0, a, b), [v, 2 * v], mode=self.mode)
         utt.verify_grad(lambda a, b: join(1, a, b), [v, 2 * v], mode=self.mode)
...
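For context on the switch from an exact '==' check to numpy.allclose: values such as .1 are not exactly representable in binary floating point, and float32 (the dtype typically used on the GPU) rounds them differently than float64, so exact comparison of the joined result against a float64 literal array can fail even when the join itself is correct. A quick standalone illustration:

    import numpy

    a = numpy.array([.1, .2, .3], dtype='float32')  # what a float32 graph produces
    b = numpy.array([.1, .2, .3])                   # float64 reference literal
    print((a == b).all())          # False: the float32 and float64 roundings differ
    print(numpy.allclose(a, b))    # True: equal within the default tolerance
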
theano/tensor/tests/test_gc.py

@@ -94,7 +94,7 @@ def test_gc_never_pickles_temporaries():
     # that weren't collected shouldn't be pickled anyway
     # Allow for a couple of bytes of difference, since timing info,
     # for instance, can be represented as text of varying size.
-    assert abs(len_post_f - len_post_g) < 4, (
+    assert abs(len_post_f - len_post_g) < 16, (
         f_linker, len_post_f, len_post_g)
...
theano/tensor/tests/test_subtensor.py

@@ -1455,3 +1455,24 @@ class TestInferShape(utt.InferShapeTester):
         self._compile_and_check([admat, advec],
                                 [set_subtensor(admat[aivec_val, bivec_val], advec)],
                                 [admat_val, advec_val], AdvancedIncSubtensor)
+
+    def test_adv_sub(self):
+        admat = dmatrix()
+        aivec = lvector()
+        bivec = lvector()
+        admat_val = rand(5, 4)
+        aivec_val = [1, 3, 2]
+        bivec_val = [0, 3, 3]
+        self._compile_and_check([admat, aivec, bivec],
+                                [admat[aivec, bivec]],
+                                [admat_val, aivec_val, bivec_val], AdvancedSubtensor)
+
+        # Test case that aren't implemented, but make sure they do not crash.
+        self._compile_and_check([admat, aivec],
+                                [admat[aivec, 1:3]],
+                                [admat_val, aivec_val], AdvancedSubtensor,
+                                check_topo=False)
+        self._compile_and_check([admat, aivec],
+                                [admat[1:3, aivec]],
+                                [admat_val, aivec_val], AdvancedSubtensor,
+                                check_topo=False)
theano/tests/unittest_tools.py

@@ -191,7 +191,7 @@ class InferShapeTester(unittest.TestCase):
         self.mode = mode.including("canonicalize")

     def _compile_and_check(self, inputs, outputs, numeric_inputs, cls,
-                           excluding=None, warn=True):
+                           excluding=None, warn=True, check_topo=True):
         """This tests the infer_shape method only

         When testing with input values with shapes that take the same
...

@@ -204,6 +204,9 @@ class InferShapeTester(unittest.TestCase):
         matrices will not detect the problem. If warn=True, we emit a
         warning when testing with such values.

+        :param check_topo: If True, we check that the Op where removed
+            from the graph. False is useful to test not implemented case.
+
         """
         mode = self.mode
         if excluding:
...

@@ -236,8 +239,9 @@ class InferShapeTester(unittest.TestCase):
                                           mode=mode)
         #theano.printing.debugprint(shapes_function)
         # Check that the Op is removed from the compiled function.
-        topo_shape = shapes_function.maker.fgraph.toposort()
-        assert not any(isinstance(t.op, cls) for t in topo_shape)
+        if check_topo:
+            topo_shape = shapes_function.maker.fgraph.toposort()
+            assert not any(isinstance(t.op, cls) for t in topo_shape)
         topo_out = outputs_function.maker.fgraph.toposort()
         assert any(isinstance(t.op, cls) for t in topo_out)
         # Check that the shape produced agrees with the actual shape.
...
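A self-contained toy sketch (not the real InferShapeTester) of what the new check_topo flag toggles: with check_topo=False the shape-only graph is still compiled and run, but the assertion that the tested Op was optimized away by infer_shape is skipped, which is what lets the new test_subtensor.py cases cover not-yet-implemented patterns without crashing:

    def compile_and_check_sketch(shape_graph_ops, tested_op, check_topo=True):
        # Stand-in for InferShapeTester._compile_and_check (toy names only).
        if check_topo:
            # With infer_shape implemented, the Op should no longer appear in
            # the graph that computes only the output shapes.
            assert tested_op not in shape_graph_ops, tested_op
        return "shape graph ran"

    # infer_shape implemented: the Op is gone from the shape graph.
    print(compile_and_check_sketch({'Shape_i', 'MakeVector'}, 'AdvancedSubtensor'))
    # Not implemented yet: the Op is still there, but we only check for no crash.
    print(compile_and_check_sketch({'AdvancedSubtensor'}, 'AdvancedSubtensor',
                                   check_topo=False))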