testgroup / pytensor / Commits

Commit fc75dbf9
Authored May 10, 2012 by lamblin

Merge pull request #642 from nouiz/scipy

Fix tests error when scipy is not there

Parents: 421b712f, 45a97cb2

Showing 5 changed files with 204 additions and 143 deletions (+204, -143)
theano/sandbox/test_neighbours.py   +2    -2
theano/sandbox/test_rng_mrg.py      +191  -134
theano/sparse/tests/test_opt.py     +4    -4
theano/sparse/tests/test_sp2.py     +3    -3
theano/sparse/tests/test_utils.py   +4    -0
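The common thread across these files is the handling of optional dependencies: test modules that need scipy (via theano.sparse) should skip cleanly instead of erroring at import time. A minimal sketch of the guard pattern this commit applies, using the nose-style SkipTest these test files already import (the module layout here is illustrative, not a specific theano file):

    from nose.plugins.skip import SkipTest

    import theano.sparse

    # Raise before importing anything that needs scipy, so nose reports the
    # whole module as skipped rather than as an import error.
    if not theano.sparse.enable_sparse:
        raise SkipTest('Optional package sparse disabled')

    # Only now is it safe to import symbols that pull in scipy.
    from theano.sparse import CSM, CSMProperties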
theano/sandbox/test_neighbours.py
@@ -102,7 +102,7 @@ def test_neibs_manual():
         #print images.get_value(borrow=True)
         neibs = f()
-        print neibs
+        # print neibs
         assert numpy.allclose(neibs, [[0, 1, 4, 5],
                                       [2, 3, 6, 7],
                                       [8, 9, 12, 13],
@@ -410,7 +410,7 @@ def tes_neibs2images_crash_on_grad():
     to_images = T.sum(neibs2images(neibs, (2, 2), (2, 3, 20, 20)))
     g = T.grad(to_images, neibs)
     fn = theano.function([neibs], to_images, mode=mode_without_gpu)
-    print "Compiled"
+    # print "Compiled"
     fn(neibs_val)

 if __name__ == '__main__':
theano/sandbox/test_rng_mrg.py
-import os, sys, time
+import os
+import sys
+import time

 import numpy

 import theano
@@ -22,7 +24,8 @@ from nose.plugins.skip import SkipTest
 # Partly done in test_consistency_randomstreams
 #TODO: test optimizer mrg_random_make_inplace
-#TODO: make tests work when no flags gived. Now need: THEANO_FLAGS=device=gpu0,floatX=float32
+#TODO: make tests work when no flags gived. Now need:
+# THEANO_FLAGS=device=gpu0,floatX=float32
 # Partly done, in test_consistency_GPU_{serial,parallel}
@@ -36,7 +39,8 @@ utt.seed_rng()
 # 7 substreams for each stream
 # 5 samples drawn from each substream
-java_samples = numpy.loadtxt(os.path.join(os.path.split(theano.__file__)[0], 'sandbox', 'samples_MRG31k3p_12_7_5.txt'))
+java_samples = numpy.loadtxt(os.path.join(os.path.split(theano.__file__)[0],
+                             'sandbox', 'samples_MRG31k3p_12_7_5.txt'))

 def test_deterministic():
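The consistency tests below all share one shape: draw 12 streams x 7 substreams x 5 samples, flatten, and compare against reference values produced by L'Ecuyer et al.'s Java implementation. A hedged sketch of that skeleton in plain numpy (the draw itself is a placeholder; the real tests use MRG31k3p):

    import numpy

    n_streams, n_substreams, n_samples = 12, 7, 5

    samples = []
    for i in range(n_streams):
        for j in range(n_substreams):
            # placeholder for five draws from one MRG31k3p substream
            samples.append(numpy.zeros(n_samples))
    samples = numpy.array(samples).flatten()

    # The reference file holds 12 * 7 * 5 = 420 values; the real tests end
    # with assert(numpy.allclose(samples, java_samples)).
    assert samples.size == n_streams * n_substreams * n_samples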
@@ -48,7 +52,7 @@ def test_deterministic():
         test_use_cuda.append(True)

     for use_cuda in test_use_cuda:
-        print 'use_cuda =', use_cuda
+        # print 'use_cuda =', use_cuda
         R = MRG_RandomStreams(seed=seed, use_cuda=use_cuda)
         u = R.uniform(size=sample_size)
         f = theano.function([], u)
@@ -81,9 +85,9 @@ def test_consistency_randomstreams():
         test_use_cuda.append(True)

     for use_cuda in test_use_cuda:
-        print 'use_cuda =', use_cuda
+        # print 'use_cuda =', use_cuda
         samples = []
-        rng = MRG_RandomStreams(seed=seed, use_cuda=False)
+        rng = MRG_RandomStreams(seed=seed, use_cuda=False)

         for i in range(n_streams):
             stream_samples = []
             u = rng.uniform(size=(n_substreams,), nstreams=n_substreams)
@@ -98,6 +102,7 @@ def test_consistency_randomstreams():
     samples = numpy.array(samples).flatten()
     assert(numpy.allclose(samples, java_samples))

+
 def test_consistency_cpu_serial():
     '''Verify that the random numbers generated by mrg_uniform, serially,
     are the same as the reference (Java) implementation by L'Ecuyer et al.
@@ -113,9 +118,13 @@ def test_consistency_cpu_serial():
     for i in range(n_streams):
         stream_rstate = curr_rstate.copy()
         for j in range(n_substreams):
-            rstate = theano.shared(numpy.array([stream_rstate.copy()], dtype='int32'))
-            new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None, dtype=config.floatX, size=(1,))
-            # Not really necessary, just mimicking rng_mrg.MRG_RandomStreams' behavior
+            rstate = theano.shared(numpy.array([stream_rstate.copy()],
+                                               dtype='int32'))
+            new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None,
+                                                         dtype=config.floatX,
+                                                         size=(1,))
+            # Not really necessary, just mimicking
+            # rng_mrg.MRG_RandomStreams' behavior
             sample.rstate = rstate
             sample.update = (rstate, new_rstate)
@@ -134,6 +143,7 @@ def test_consistency_cpu_serial():
     samples = numpy.array(samples).flatten()
     assert(numpy.allclose(samples, java_samples))

+
 def test_consistency_cpu_parallel():
     '''Verify that the random numbers generated by mrg_uniform, in parallel,
     are the same as the reference (Java) implementation by L'Ecuyer et al.
@@ -141,10 +151,10 @@ def test_consistency_cpu_parallel():
     seed = 12345
     n_samples = 5
     n_streams = 12
-    n_substreams = 7 # 7 samples will be drawn in parallel
+    n_substreams = 7  # 7 samples will be drawn in parallel
     samples = []
-    curr_rstate = numpy.array([seed]*6, dtype='int32')
+    curr_rstate = numpy.array([seed] * 6, dtype='int32')

     for i in range(n_streams):
         stream_samples = []
@@ -156,7 +166,8 @@ def test_consistency_cpu_parallel():
             new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None, dtype=config.floatX, size=(n_substreams,))
-            # Not really necessary, just mimicking rng_mrg.MRG_RandomStreams' behavior
+            # Not really necessary, just mimicking
+            # rng_mrg.MRG_RandomStreams' behavior
             sample.rstate = rstate
             sample.update = (rstate, new_rstate)
@@ -175,6 +186,7 @@ def test_consistency_cpu_parallel():
     samples = numpy.array(samples).flatten()
     assert(numpy.allclose(samples, java_samples))

+
 def test_consistency_GPU_serial():
     '''Verify that the random numbers generated by GPU_mrg_uniform, serially,
     are the same as the reference (Java) implementation by L'Ecuyer et al.
@@ -200,14 +212,17 @@ def test_consistency_GPU_serial():
             substream_rstate = numpy.array(stream_rstate.copy(), dtype='int32')
             # HACK - we transfer these int32 to the GPU memory as float32
             # (reinterpret_cast)
-            tmp_float_buf = numpy.frombuffer(substream_rstate.data, dtype='float32')
-            rstate = float32_shared_constructor(tmp_float_buf) # Transfer to device
+            tmp_float_buf = numpy.frombuffer(substream_rstate.data,
+                                             dtype='float32')
+            # Transfer to device
+            rstate = float32_shared_constructor(tmp_float_buf)
             new_rstate, sample = rng_mrg.GPU_mrg_uniform.new(rstate, ndim=None, dtype='float32', size=(1,))
             rstate.default_update = new_rstate
-            # Not really necessary, just mimicking rng_mrg.MRG_RandomStreams' behavior
+            # Not really necessary, just mimicking
+            # rng_mrg.MRG_RandomStreams' behavior
             sample.rstate = rstate
             sample.update = (rstate, new_rstate)
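The HACK noted above ships the int32 RNG state to the GPU by reinterpreting its bytes as float32, since the float32 shared constructor was the convenient path onto the device. A standalone numpy sketch of that reinterpretation, using .view() as the modern equivalent of the frombuffer(...data...) call in the diff:

    import numpy

    state = numpy.array([12345] * 6, dtype='int32')

    # Reinterpret the same 24 bytes as float32: no copy, no conversion.
    as_float = state.view('float32')
    assert as_float.nbytes == state.nbytes

    # Round-tripping recovers the original int32 bit pattern exactly.
    assert (as_float.view('int32') == state).all()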
@@ -227,9 +242,12 @@ def test_consistency_GPU_serial():
     samples = numpy.array(samples).flatten()
     assert(numpy.allclose(samples, java_samples))

+
 def test_consistency_GPU_parallel():
-    '''Verify that the random numbers generated by GPU_mrg_uniform, in parallel,
-    are the same as the reference (Java) implementation by L'Ecuyer et al.
+    '''Verify that the random numbers generated by GPU_mrg_uniform, in
+    parallel, are the same as the reference (Java) implementation by
+    L'Ecuyer et al.
     '''
     if not cuda_available:
         raise SkipTest('Optional package cuda not available')
@@ -241,10 +259,10 @@ def test_consistency_GPU_parallel():
     seed = 12345
     n_samples = 5
     n_streams = 12
-    n_substreams = 7 # 7 samples will be drawn in parallel
+    n_substreams = 7  # 7 samples will be drawn in parallel
     samples = []
-    curr_rstate = numpy.array([seed]*6, dtype='int32')
+    curr_rstate = numpy.array([seed] * 6, dtype='int32')

     for i in range(n_streams):
         stream_samples = []
@@ -255,13 +273,15 @@ def test_consistency_GPU_parallel():
             # HACK - transfer these int32 to the GPU memory as float32
             # (reinterpret_cast)
             tmp_float_buf = numpy.frombuffer(rstate.data, dtype='float32')
-            rstate = float32_shared_constructor(tmp_float_buf) # Transfer to device
+            # Transfer to device
+            rstate = float32_shared_constructor(tmp_float_buf)
             new_rstate, sample = rng_mrg.GPU_mrg_uniform.new(rstate, ndim=None, dtype='float32', size=(n_substreams,))
             rstate.default_update = new_rstate
-            # Not really necessary, just mimicking rng_mrg.MRG_RandomStreams' behavior
+            # Not really necessary, just mimicking
+            # rng_mrg.MRG_RandomStreams' behavior
             sample.rstate = rstate
             sample.update = (rstate, new_rstate)
@@ -281,6 +301,7 @@ def test_consistency_GPU_parallel():
     samples = numpy.array(samples).flatten()
     assert(numpy.allclose(samples, java_samples))

+
 def basictest(f, steps, sample_size, prefix="", allow_01=False, inputs=None,
               target_avg=0.5, target_std=None, mean_rtol=0.01):
     if inputs is None:
@@ -291,41 +312,46 @@ def basictest(f, steps, sample_size, prefix="", allow_01=False, inputs=None,
     for i in xrange(steps):
         t0 = time.time()
         ival = f(*inputs)
-        assert ival.shape == sample_size
+        assert ival.shape == sample_size
         dt += time.time() - t0
         ival = numpy.asarray(ival)
         if i == 0:
             mean = numpy.array(ival, copy=True)
             #avg_std = numpy.std(ival)
-            avg_std = numpy.sqrt(numpy.mean((ival - target_avg)**2))
+            avg_std = numpy.sqrt(numpy.mean((ival - target_avg) ** 2))
             min_ = ival.min()
             max_ = ival.max()
         else:
-            alpha = 1.0 / (1+i)
-            mean = alpha * ival + (1-alpha)*mean
+            alpha = 1.0 / (1 + i)
+            mean = alpha * ival + (1 - alpha) * mean
             #avg_std = alpha * numpy.std(ival) + (1-alpha)*avg_std
-            avg_std = alpha * numpy.sqrt(numpy.mean((ival - target_avg)**2)) + (1-alpha)*avg_std
-            min_ = min(min_, ival.min())
-            max_ = max(max_, ival.max())
+            avg_std = (alpha * numpy.sqrt(numpy.mean((ival - target_avg) ** 2))
+                       + (1 - alpha) * avg_std)
+            min_ = min(min_, ival.min())
+            max_ = max(max_, ival.max())

     if not allow_01:
         assert min_ > 0
         assert max_ < 1

-    if hasattr(target_avg, 'shape'): # looks if target_avg is an array
+    if hasattr(target_avg, 'shape'):  # looks if target_avg is an array
         diff = numpy.mean(abs(mean - target_avg))
-        print prefix, 'mean diff with mean', diff
+        # print prefix, 'mean diff with mean', diff
         assert diff < mean_rtol, 'bad mean? %f %f' % (mean, target_avg)
-    else: # if target_avg is a scalar, then we can do the mean of `mean` to get something more precise
+    else:
+        # if target_avg is a scalar, then we can do the mean of
+        # `mean` to get something more precise
         mean = numpy.mean(mean)
-        print prefix, 'mean', mean
-        assert abs(mean - target_avg) < mean_rtol, 'bad mean? %f %f' % (numpy.mean(mean), target_avg)
-    print prefix, 'std', avg_std
+        #print prefix, 'mean', mean
+        assert abs(mean - target_avg) < mean_rtol, 'bad mean? %f %f' % (
+            numpy.mean(mean), target_avg)
+    #print prefix, 'std', avg_std
     if target_std is not None:
-        assert abs(avg_std - target_std) < .01, 'bad std? %f %f' % (avg_std, target_std)
-    print prefix, 'time', dt
-    print prefix, 'elements', steps * sample_size[0] * sample_size[1]
-    print prefix, 'samples/sec', steps * sample_size[0] * sample_size[1] / dt
-    print prefix, 'min', min_, 'max', max_
+        assert abs(avg_std - target_std) < .01, 'bad std? %f %f' % (
+            avg_std, target_std)
+    #print prefix, 'time', dt
+    #print prefix, 'elements', steps * sample_size[0] * sample_size[1]
+    #print prefix, 'samples/sec', steps * sample_size[0] * sample_size[1] / dt
+    #print prefix, 'min', min_, 'max', max_
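basictest accumulates statistics incrementally: with alpha = 1/(1 + i), each update gives the new batch weight 1/(i + 1) and the history weight i/(i + 1), so after all steps the running value equals the plain arithmetic mean over batches. A short standalone check of that identity:

    import numpy

    batches = [numpy.random.rand(4) for _ in range(10)]

    mean = None
    for i, ival in enumerate(batches):
        if i == 0:
            mean = numpy.array(ival, copy=True)
        else:
            alpha = 1.0 / (1 + i)
            # new batch gets weight 1/(i+1); history keeps i/(i+1)
            mean = alpha * ival + (1 - alpha) * mean

    # The incremental update reproduces the ordinary mean over all batches.
    assert numpy.allclose(mean, numpy.mean(batches, axis=0))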

 def test_uniform():
     #TODO: test param low, high
@@ -333,11 +359,11 @@ def test_uniform():
     #TODO: test ndim!=size.ndim
     #TODO: test bad seed
     #TODO: test size=Var, with shape that change from call to call
-    if mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE']:
-        sample_size = (10, 100)
+    if mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE']:
+        sample_size = (10, 100)
         steps = 50
     else:
-        sample_size = (500, 50)
+        sample_size = (500, 50)
         steps = int(1e3)

     x = tensor.matrix()
@@ -348,8 +374,8 @@ def test_uniform():
         #### TEST CPU IMPLEMENTATION ####
         # The python and C implementation are tested with DebugMode
-        print ''
-        print 'ON CPU with size=(%s):' % str(size)
+        # print ''
+        #print 'ON CPU with size=(%s):' % str(size)
         x = tensor.matrix()
         R = MRG_RandomStreams(234, use_cuda=False)
         # Note: we specify `nstreams` to avoid a warning.
@@ -359,46 +385,51 @@ def test_uniform():
         u = R.uniform(size=size,
                       nstreams=rng_mrg.guess_n_streams(size, warn=False))
         f = theano.function(var_input, u, mode=mode)
-        assert any([isinstance(node.op, theano.sandbox.rng_mrg.mrg_uniform)
+        assert any([isinstance(node.op, theano.sandbox.rng_mrg.mrg_uniform)
                     for node in f.maker.env.toposort()])
-        theano.printing.debugprint(f)
+        # theano.printing.debugprint(f)
         cpu_out = f(*input)
-        print 'CPU: random?[:10], random?[-10:]'
-        print cpu_out[0, 0:10]
-        print cpu_out[-1, -10:]
+        # print 'CPU: random?[:10], random?[-10:]'
+        #print cpu_out[0, 0:10]
+        #print cpu_out[-1, -10:]
         basictest(f, steps, sample_size, prefix='mrg cpu', inputs=input)

-        if mode != 'FAST_COMPILE' and cuda_available:
-            print ''
-            print 'ON GPU with size=(%s):' % str(size)
+        if mode != 'FAST_COMPILE' and cuda_available:
+            # print ''
+            #print 'ON GPU with size=(%s):' % str(size)
             R = MRG_RandomStreams(234, use_cuda=True)
             u = R.uniform(size=size, dtype='float32',
                           nstreams=rng_mrg.guess_n_streams(size, warn=False))
-            assert u.dtype == 'float32' #well, it's really that this test w GPU doesn't make sense otw
+            # well, it's really that this test w GPU doesn't make sense otw
+            assert u.dtype == 'float32'
             f = theano.function(var_input, theano.Out(
                 theano.sandbox.cuda.basic_ops.gpu_from_host(u),
                 borrow=True), mode=mode_with_gpu)
-            assert any([isinstance(node.op, theano.sandbox.rng_mrg.GPU_mrg_uniform)
+            assert any([isinstance(node.op,
+                                   theano.sandbox.rng_mrg.GPU_mrg_uniform)
                         for node in f.maker.env.toposort()])
-            theano.printing.debugprint(f)
+            # theano.printing.debugprint(f)
             gpu_out = numpy.asarray(f(*input))
-            print 'GPU: random?[:10], random?[-10:]'
-            print gpu_out[0, 0:10]
-            print gpu_out[-1, -10:]
+            # print 'GPU: random?[:10], random?[-10:]'
+            #print gpu_out[0, 0:10]
+            #print gpu_out[-1, -10:]
             basictest(f, steps, sample_size, prefix='mrg gpu', inputs=input)
-            numpy.testing.assert_array_almost_equal(cpu_out, gpu_out, decimal=6)
+            numpy.testing.assert_array_almost_equal(cpu_out, gpu_out,
+                                                    decimal=6)

-        print ''
-        print 'ON CPU w Numpy with size=(%s):' % str(size)
+        # print ''
+        #print 'ON CPU w Numpy with size=(%s):' % str(size)
         RR = theano.tensor.shared_randomstreams.RandomStreams(234)
         uu = RR.uniform(size=size)
         ff = theano.function(var_input, uu, mode=mode)
         # It's not our problem if numpy generates 0 or 1
-        basictest(ff, steps, sample_size, prefix='numpy', allow_01=True, inputs=input)
+        basictest(ff, steps, sample_size, prefix='numpy',
+                  allow_01=True, inputs=input)


 def test_binomial():
     #TODO: test size=None, ndim=X
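test_uniform hands the compiled sampler to basictest with the default target_avg=0.5, the exact mean of U(0, 1); the distribution's standard deviation is 1/sqrt(12), about 0.2887. A hedged numpy-only sketch of the statistical check being performed (the tolerances here are illustrative):

    import numpy

    rng = numpy.random.RandomState(234)
    sample = rng.uniform(size=(500, 50))

    # U(0, 1) has mean 1/2 and std 1/sqrt(12); basictest compares the
    # empirical statistics against such targets within mean_rtol.
    assert abs(sample.mean() - 0.5) < 0.01
    assert abs(sample.std() - 1.0 / numpy.sqrt(12)) < 0.01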
@@ -409,14 +440,14 @@ def test_binomial():
     #we test size in a tuple of int and a tensor.shape.
     #we test the param p with int.
-    if mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE']:
-        sample_size = (10, 50)
+    if mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE']:
+        sample_size = (10, 50)
         steps = 50
-        rtol = 0.02
+        rtol = 0.02
     else:
-        sample_size = (500, 50)
+        sample_size = (500, 50)
         steps = int(1e3)
-        rtol = 0.01
+        rtol = 0.01

     x = tensor.matrix()
     v = tensor.vector()
@@ -426,114 +457,137 @@ def test_binomial():
                         (x.shape, [x],
                          [numpy.zeros(sample_size, dtype=config.floatX)])
                         ]:
-            print ''
-            print 'ON CPU with size=(%s) and mean(%d):' % (str(size), mean)
+            # print ''
+            #print 'ON CPU with size=(%s) and mean(%d):' % (str(size),
+            #                                               mean)
             R = MRG_RandomStreams(234, use_cuda=False)
             # Note: we specify `nstreams` to avoid a warning.
             u = R.binomial(size=size, p=mean,
                            nstreams=rng_mrg.guess_n_streams(size, warn=False))
             f = theano.function(var_input, u, mode=mode)
-            theano.printing.debugprint(f)
+            # theano.printing.debugprint(f)
             out = f(*input)
-            print 'random?[:10]\n', out[0, 0:10]
-            print 'random?[-1,-10:]\n', out[-1, -10:]
+            #print 'random?[:10]\n', out[0, 0:10]
+            #print 'random?[-1,-10:]\n', out[-1, -10:]
             basictest(f, steps, sample_size, prefix='mrg cpu', inputs=input,
                       allow_01=True, target_avg=mean, mean_rtol=rtol)

-            if mode != 'FAST_COMPILE' and cuda_available:
-                print ''
-                print 'ON GPU with size=(%s) and mean(%d):' % (str(size), mean)
+            if mode != 'FAST_COMPILE' and cuda_available:
+                #print ''
+                #print 'ON GPU with size=(%s) and mean(%d):' % (str(size), mean)
                 R = MRG_RandomStreams(234, use_cuda=True)
                 u = R.binomial(size=size, p=mean, dtype='float32',
                                nstreams=rng_mrg.guess_n_streams(size,
                                                                 warn=False))
-                assert u.dtype == 'float32' #well, it's really that this test w GPU doesn't make sense otw
+                #well, it's really that this test w GPU doesn't make sense otw
+                assert u.dtype == 'float32'
                 f = theano.function(var_input, theano.Out(
                     theano.sandbox.cuda.basic_ops.gpu_from_host(u),
                     borrow=True), mode=mode_with_gpu)
-                theano.printing.debugprint(f)
+                # theano.printing.debugprint(f)
                 gpu_out = numpy.asarray(f(*input))
-                print 'random?[:10]\n', gpu_out[0, 0:10]
-                print 'random?[-1,-10:]\n', gpu_out[-1, -10:]
+                #print 'random?[:10]\n', gpu_out[0, 0:10]
+                #print 'random?[-1,-10:]\n', gpu_out[-1, -10:]
                 basictest(f, steps, sample_size, prefix='mrg gpu',
                           inputs=input, allow_01=True, target_avg=mean,
                           mean_rtol=rtol)
-                numpy.testing.assert_array_almost_equal(out, gpu_out, decimal=6)
+                numpy.testing.assert_array_almost_equal(out, gpu_out,
+                                                        decimal=6)

-            print ''
-            print 'ON CPU w NUMPY with size=(%s) and mean(%d):' % (str(size), mean)
+            #print ''
+            #print 'ON CPU w NUMPY with size=(%s) and mean(%d):' % (str(size),
+            #                                                       mean)
             RR = theano.tensor.shared_randomstreams.RandomStreams(234)
             uu = RR.binomial(size=size, p=mean)
             ff = theano.function(var_input, uu, mode=mode)
             # It's not our problem if numpy generates 0 or 1
-            basictest(ff, steps, sample_size, prefix='numpy', allow_01=True, inputs=input, target_avg=mean, mean_rtol=rtol)
+            basictest(ff, steps, sample_size, prefix='numpy', allow_01=True,
+                      inputs=input, target_avg=mean, mean_rtol=rtol)


 def test_normal0():

     steps = 50
     std = 2.
-    if mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE']:
-        sample_size = (25, 30)
-        default_rtol = .02
+    if mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE']:
+        sample_size = (25, 30)
+        default_rtol = .02
     else:
-        sample_size = (999, 50)
-        default_rtol = .01
-    sample_size_odd = (sample_size[0], sample_size[1] - 1)
+        sample_size = (999, 50)
+        default_rtol = .01
+    sample_size_odd = (sample_size[0], sample_size[1] - 1)
     x = tensor.matrix()

     for size, const_size, var_input, input, avg, rtol in [
-        (sample_size, sample_size, [], [], -5., default_rtol),
-        (x.shape, sample_size, [x],
-         [numpy.zeros(sample_size, dtype=config.floatX)], -5., default_rtol),
-        (sample_size_odd, sample_size_odd, [], [], -5., default_rtol),  #test odd value
-        (x.shape, sample_size_odd, [x],
-         [numpy.zeros(sample_size_odd, dtype=config.floatX)], -5., default_rtol),  #test odd value
-        (sample_size, sample_size, [], [],
-         numpy.arange(numpy.prod(sample_size), dtype='float32').reshape(sample_size),
-         10. * std / numpy.sqrt(steps)),
+        (sample_size, sample_size, [], [], -5., default_rtol),
+        (x.shape, sample_size, [x],
+         [numpy.zeros(sample_size, dtype=config.floatX)], -5., default_rtol),
+        #test odd value
+        (sample_size_odd, sample_size_odd, [], [], -5., default_rtol),
+        #test odd value
+        (x.shape, sample_size_odd, [x],
+         [numpy.zeros(sample_size_odd, dtype=config.floatX)], -5.,
+         default_rtol),
+        (sample_size, sample_size, [], [],
+         numpy.arange(numpy.prod(sample_size),
+                      dtype='float32').reshape(sample_size),
+         10. * std / numpy.sqrt(steps)),
             ]:
-        print ''
-        print 'ON CPU:'
+        # print ''
+        # print 'ON CPU:'
         R = MRG_RandomStreams(234, use_cuda=False)
         # Note: we specify `nstreams` to avoid a warning.
         n = R.normal(size=size, avg=avg, std=std,
                      nstreams=rng_mrg.guess_n_streams(size, warn=False))
         f = theano.function(var_input, n, mode=mode)
-        theano.printing.debugprint(f)
-        out = f(*input)
-        print 'random?[:10]\n', out[0, 0:10]
-        basictest(f, steps, const_size, target_avg=avg, target_std=std,
-                  prefix='mrg ', allow_01=True, inputs=input, mean_rtol=rtol)
+        #theano.printing.debugprint(f)
+        out = f(*input)
+        #print 'random?[:10]\n', out[0, 0:10]
+        basictest(f, steps, const_size, target_avg=avg, target_std=std,
+                  prefix='mrg ', allow_01=True, inputs=input, mean_rtol=rtol)

         sys.stdout.flush()

-        if mode != 'FAST_COMPILE' and cuda_available:
-            print ''
-            print 'ON GPU:'
+        if mode != 'FAST_COMPILE' and cuda_available:
+            # print ''
+            # print 'ON GPU:'
             R = MRG_RandomStreams(234, use_cuda=True)
             n = R.normal(size=size, avg=avg, std=std, dtype='float32',
                          nstreams=rng_mrg.guess_n_streams(size, warn=False))
-            assert n.dtype == 'float32' #well, it's really that this test w GPU doesn't make sense otw
+            #well, it's really that this test w GPU doesn't make sense otw
+            assert n.dtype == 'float32'
             f = theano.function(var_input, theano.Out(
                 theano.sandbox.cuda.basic_ops.gpu_from_host(n),
                 borrow=True), mode=mode_with_gpu)
-            theano.printing.debugprint(f)
+            # theano.printing.debugprint(f)
             sys.stdout.flush()
             gpu_out = numpy.asarray(f(*input))
-            print 'random?[:10]\n', gpu_out[0, 0:10]
-            print '----'
+            #print 'random?[:10]\n', gpu_out[0, 0:10]
+            # print '----'
             sys.stdout.flush()
-            basictest(f, steps, const_size, target_avg=avg, target_std=std,
-                      prefix='gpu mrg ', allow_01=True, inputs=input, mean_rtol=rtol)
+            basictest(f, steps, const_size, target_avg=avg, target_std=std,
+                      prefix='gpu mrg ', allow_01=True, inputs=input,
+                      mean_rtol=rtol)
             # Need to allow some rounding error as their is float
             # computation that are done on the gpu vs cpu
             assert numpy.allclose(out, gpu_out, rtol=5e-6, atol=5e-6)

-        print ''
-        print 'ON CPU w NUMPY:'
+        #print ''
+        #print 'ON CPU w NUMPY:'
         RR = theano.tensor.shared_randomstreams.RandomStreams(234)
         nn = RR.normal(size=size, avg=avg, std=std)
         ff = theano.function(var_input, nn)

-        basictest(ff, steps, const_size, target_avg=avg, target_std=std,
-                  prefix='numpy ', allow_01=True, inputs=input, mean_rtol=rtol)
+        basictest(ff, steps, const_size, target_avg=avg, target_std=std,
+                  prefix='numpy ', allow_01=True, inputs=input,
+                  mean_rtol=rtol)


-def basic_multinomialtest(f, steps, sample_size, target_pvals, prefix="", mean_rtol=0.04):
+def basic_multinomialtest(f, steps, sample_size, target_pvals, prefix="",
+                          mean_rtol=0.04):

     dt = 0.0
     avg_pvals = numpy.zeros(target_pvals.shape, dtype=config.floatX)
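test_normal0 applies the same machinery to R.normal, checking the empirical mean against avg (-5. in most cases) and the empirical standard deviation against std=2.. A hedged plain-numpy sketch, using a looser tolerance than the test itself (which averages over many function calls):

    import numpy

    rng = numpy.random.RandomState(234)
    sample = rng.normal(loc=-5., scale=2., size=(999, 50))

    # mean should be near avg and std near 2.; the diff's tests use
    # mean_rtol around .01 after averaging across steps.
    assert abs(sample.mean() - (-5.)) < 0.05
    assert abs(sample.std() - 2.) < 0.05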
@@ -541,18 +595,20 @@ def basic_multinomialtest(f, steps, sample_size, target_pvals, prefix="", mean_r
     for i in xrange(steps):
         t0 = time.time()
         ival = f()
-        assert ival.shape == sample_size
+        assert ival.shape == sample_size
         dt += time.time() - t0
         #ival = numpy.asarray(ival)
         avg_pvals += ival
-    avg_pvals /= steps
+    avg_pvals /= steps

     print 'random?[:10]\n', numpy.asarray(f()[:10])
     print prefix, 'mean', avg_pvals
-    print numpy.mean(abs(avg_pvals - target_pvals))
-    # < mean_rtol, 'bad mean? %s %s' % (str(avg_pvals), str(target_pvals))
+    # < mean_rtol, 'bad mean? %s %s' % (str(avg_pvals), str(target_pvals))
+    print numpy.mean(abs(avg_pvals - target_pvals))
     print prefix, 'time', dt
-    print prefix, 'elements', steps * numpy.prod(target_pvals.shape)
-    print prefix, 'samples/sec', steps * numpy.prod(target_pvals.shape) / dt
+    print prefix, 'elements', steps * numpy.prod(target_pvals.shape)
+    print prefix, 'samples/sec', steps * numpy.prod(target_pvals.shape) / dt

 def test_multinomial():
@@ -561,40 +617,41 @@ def test_multinomial():
     if mode == 'FAST_COMPILE':
         mode_ = 'FAST_RUN'
-    if mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE']:
-        sample_size = (49, 5)
+    if mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE']:
+        sample_size = (49, 5)
     else:
-        sample_size = (450, 6)
+        sample_size = (450, 6)
     mode_ = theano.compile.mode.get_mode(mode_)

-    print ''
-    print 'ON CPU:'
+    # print ''
+    # print 'ON CPU:'
     pvals = numpy.asarray(numpy.random.uniform(size=sample_size))
-    pvals = numpy.apply_along_axis(lambda row: row / numpy.sum(row), 1, pvals)
+    pvals = numpy.apply_along_axis(lambda row: row / numpy.sum(row), 1, pvals)
     R = MRG_RandomStreams(234, use_cuda=False)
     # Note: we specify `nstreams` to avoid a warning.
     m = R.multinomial(pvals=pvals, dtype=config.floatX, nstreams=30 * 256)
     f = theano.function([], m, mode=mode_)
-    theano.printing.debugprint(f)
+    # theano.printing.debugprint(f)
     out = f()
     basic_multinomialtest(f, steps, sample_size, pvals, prefix='mrg ')

     sys.stdout.flush()

     if mode != 'FAST_COMPILE' and cuda_available:
-        print ''
-        print 'ON GPU:'
+        # print ''
+        # print 'ON GPU:'
         R = MRG_RandomStreams(234, use_cuda=True)
         pvals = numpy.asarray(pvals, dtype='float32')
         # We give the number of streams to avoid a warning.
         n = R.multinomial(pvals=pvals, dtype='float32', nstreams=30 * 256)
-        assert n.dtype == 'float32' #well, it's really that this test w GPU doesn't make sense otw
+        #well, it's really that this test w GPU doesn't make sense otw
+        assert n.dtype == 'float32'
         f = theano.function(
             [],
             theano.sandbox.cuda.basic_ops.gpu_from_host(n),
             mode=mode_.including('gpu'))

-        theano.printing.debugprint(f)
+        # theano.printing.debugprint(f)
         gpu_out = f()
         sys.stdout.flush()
         basic_multinomialtest(f, steps, sample_size, pvals, prefix='gpu mrg ')
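test_multinomial builds a random probability table and normalizes each row to sum to one before handing it to R.multinomial, so each row is a valid categorical distribution. The normalization step in isolation:

    import numpy

    pvals = numpy.asarray(numpy.random.uniform(size=(450, 6)))
    pvals = numpy.apply_along_axis(lambda row: row / numpy.sum(row), 1, pvals)

    # every row now sums to 1, as a multinomial's pvals must
    assert numpy.allclose(pvals.sum(axis=1), 1.0)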
theano/sparse/tests/test_opt.py
@@ -7,14 +7,14 @@ except ImportError:
 import theano
 from theano import config, tensor
-from theano.sparse import (enable_sparse, CSM, CSMProperties,
-                           csm_properties, CSC, CSR)
-from theano.sparse.tests.test_basic import random_lil
+from theano.sparse import enable_sparse
 from theano.gof.python25 import any

 if not enable_sparse:
     raise SkipTest('Optional package sparse disabled')

+from theano.sparse import CSM, CSMProperties, csm_properties, CSC, CSR
+from theano.sparse.tests.test_basic import random_lil

 def test_local_csm_properties_csm():
     data = tensor.vector()
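The reordering above is the substance of the fix: the scipy-dependent imports now sit below the enable_sparse guard, so a machine without scipy gets a skip instead of an ImportError. A hedged illustration of the failure mode being avoided:

    # Without the guard first, this import chain raises ImportError on a
    # machine lacking scipy, and nose counts the module as an error:
    #     from theano.sparse import CSM   # pulls in scipy.sparse
    #
    # With the guard first, nose counts the module as skipped:
    from nose.plugins.skip import SkipTest
    from theano.sparse import enable_sparse

    if not enable_sparse:
        raise SkipTest('Optional package sparse disabled')

    from theano.sparse import CSM  # safe: scipy is known to be present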
theano/sparse/tests/test_sp2.py
@@ -13,13 +13,13 @@ import theano
 from theano import tensor as T
 from theano import sparse as S

+if not S.enable_sparse:
+    raise SkipTest('Optional package sparse disabled')
+
 from theano.sparse.sandbox import sp2 as S2

 from theano.tests import unittest_tools as utt

-if S.enable_sparse == False:
-    raise SkipTest('Optional package sparse disabled')

 def as_sparse_format(data, format):
     if format == 'csc':
         return scipy.sparse.csc_matrix(data)
theano/sparse/tests/test_utils.py
 import numpy

 import theano.sparse

+if not theano.sparse.enable_sparse:
+    raise SkipTest('Optional package sparse disabled')
+
 from theano.sparse.utils import hash_from_sparse
 from theano.sparse.tests.test_basic import as_sparse_format