testgroup/pytensor · Commit a2fd617c

Authored Mar 09, 2010 by James Bergstra

merging new GEMM optimization code

Parents: abd9bef4, 5d229740

Showing 12 changed files with 99 additions and 67 deletions (+99 -67)
theano/configdefaults.py                 +3   -1
theano/gof/cmodule.py                    +9   -2
theano/printing.py                       +3   -7
theano/sandbox/cuda/__init__.py          +9   -3
theano/sandbox/cuda/blas.py              +6   -3
theano/sandbox/cuda/conv.cu              +3   -2
theano/sandbox/cuda/nvcc_compiler.py     +10  -0
theano/sandbox/cuda/tests/test_nnet.py   +41  -34
theano/tensor/basic.py                   +0   -0
theano/tensor/blas.py                    +0   -6
theano/tensor/opt.py                     +13  -7
theano/tensor/signal/downsample.py       +2   -2
theano/configdefaults.py

```diff
@@ -8,9 +8,11 @@ AddConfigVar('floatX',
         EnumStr('float64', 'float32'),
         )
+#gpu mean let the driver select the gpu. Needed in case of gpu in exclusive mode.
+#gpuX mean use the gpu number X.
 AddConfigVar('device',
         "Default device for computations",
-        EnumStr('cpu', *['gpu%i' % i for i in range(4)])
+        EnumStr('cpu', 'gpu', *['gpu%i' % i for i in range(4)])
         )
 # keep the default mode.optimizer==config.optimizer and mode.linker==config.linker!
```
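In short, `device` previously accepted only `cpu` and the four enumerated `gpuX` values; the bare `gpu` value added here defers board selection to the CUDA driver (needed when boards run in exclusive mode). A quick sketch of the accepted strings, evaluating the same comprehension the config entry uses:

```python
# Sketch: the strings the 'device' flag accepts before and after this change.
old_values = ['cpu'] + ['gpu%i' % i for i in range(4)]
new_values = ['cpu', 'gpu'] + ['gpu%i' % i for i in range(4)]

print(old_values)  # ['cpu', 'gpu0', 'gpu1', 'gpu2', 'gpu3']
print(new_values)  # ['cpu', 'gpu', 'gpu0', 'gpu1', 'gpu2', 'gpu3']
```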
theano/gof/cmodule.py

```diff
@@ -629,7 +629,6 @@ def gcc_module_compile_str(module_name, src_code, location=None, include_dirs=[]
     python_inc = distutils.sysconfig.get_python_inc()
     libname = os.path.basename(python_inc)
     #DSE Patch 1 for supporting OSX frameworks; add -framework Python
     if sys.platform == 'darwin':
         preargs.extend(['-undefined', 'dynamic_lookup'])
@@ -639,8 +638,16 @@ def gcc_module_compile_str(module_name, src_code, location=None, include_dirs=[]
     if python_inc.count('Python.framework') > 0 and config.cmodule.mac_framework_link:
         preargs.extend(['-framework', 'Python'])

+    # sometimes, the linker cannot find -lpython so we need to tell it
+    # explicitly where it is located
+    # this returns somepath/lib/python2.x
+    python_lib = distutils.sysconfig.get_python_lib(plat_specific=1, \
+            standard_lib=1)
+    python_lib = os.path.dirname(python_lib)
+    if python_lib not in lib_dirs:
+        lib_dirs.append(python_lib)
+
     workdir = location

     cppfilename = os.path.join(location, 'mod.cpp')
     cppfile = file(cppfilename, 'w')
```
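The added block teaches the linker where `libpython` lives when `-lpython2.x` is not on the default search path: `get_python_lib(plat_specific=1, standard_lib=1)` returns something like `/usr/lib/python2.6`, and its parent directory is what `-L` needs. A minimal sketch of the lookup (the same block is added to `nvcc_compiler.py` below); it assumes an interpreter that still ships `distutils`:

```python
import os
import distutils.sysconfig

# Platform-specific standard-library directory, e.g. '/usr/lib/python2.6'.
python_lib = distutils.sysconfig.get_python_lib(plat_specific=1,
                                                standard_lib=1)
# Its parent, e.g. '/usr/lib', is where libpython typically lives;
# this is the directory the commit appends to lib_dirs for -L.
print(os.path.dirname(python_lib))
```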
theano/printing.py

```diff
@@ -88,7 +88,7 @@ class Print(Op):
                 if callable(temp):
                     pmsg = temp()
                 else:
-                    psmg = temp
+                    pmsg = temp
                 print self.message, attr, '=', pmsg
                 #backport
                 #print self.message, attr,'=', temp() if callable(temp) else temp
@@ -441,12 +441,8 @@ def pydotprint(fct, outfile=os.path.join(config.compiledir,'theano.pydotprint.pn
             g.add_node(pd.Node(varstr, color='grey'))
         elif var.name or not compact:
             g.add_edge(pd.Edge(astr, varstr))
-        else:
-            #no name, so we don't make a var ellipse
-            for client in var.clients:
-                edge = pd.Edge(astr, apply_name(client[0]))
-                g.add_edge(edge)
-    g.set_simplify(True)
+#        else:
+            #don't add egde here as it is already added from the inputs.
     g.write_png(outfile, prog='dot')
     print 'The output file is available at', outfile
```
theano/sandbox/cuda/__init__.py

```diff
@@ -112,7 +112,9 @@ if cuda_available:
 def use(device):
     global cuda_enabled, enabled_cuda
-    if device.startswith('gpu'):
+    if device == 'gpu':
+        pass
+    elif device.startswith('gpu'):
         device = int(device[3:])
     elif device == 'cpu':
         device = -1
@@ -120,13 +122,17 @@ def use(device):
         raise ValueError("Invalid device identifier", device)
     if use.device_number is None:
         # No successful call to use() has been made yet
-        if device < 0:
+        if device != 'gpu' and device < 0:
             return
         if device in [None, ""]:
             device = 0
-        device = int(device)
         try:
-            gpu_init(device)
+            if device != 'gpu':
+                gpu_init(device)
+            else:
+                #warning To let people see that the gpu will be used.
+                _logger.warn("We let the driver select the gpu device to use")
             handle_shared_float32(True)
             use.device_number = device
             cuda_enabled = True
```
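The parsing in `use()` now treats bare `'gpu'` as "let the driver choose" (so `gpu_init` is skipped and only a warning is logged), while `'gpuN'` still maps to board N and `'cpu'` to -1; this also explains the removed `device = int(device)`, which would have crashed on the string `'gpu'`. A standalone sketch of the dispatch, with a hypothetical `parse_device` helper standing in for the relevant piece of `use()`:

```python
# Hypothetical helper mirroring the updated dispatch in use():
def parse_device(device):
    if device == 'gpu':
        return device           # driver selects the board; gpu_init is skipped
    elif device.startswith('gpu'):
        return int(device[3:])  # 'gpu2' -> 2
    elif device == 'cpu':
        return -1
    raise ValueError("Invalid device identifier", device)

assert parse_device('gpu') == 'gpu'
assert parse_device('gpu2') == 2
assert parse_device('cpu') == -1
```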
theano/sandbox/cuda/blas.py

```diff
@@ -162,16 +162,19 @@ class GpuConv(Op):
             and self.logical_img_hw == other.logical_img_hw \
             and self.logical_kern_hw == other.logical_kern_hw \
             and self.logical_kern_align_top == other.logical_kern_align_top \
-            and self.version == other.version
+            and self.version == other.version \
+            and self.verbose == other.verbose

     def __hash__(self):
+        # don't use hash(self.version) as hash(-1)==-2 and hash(-2)==-2 in python!
         return hash(type(self)) \
             ^ hash(self.border_mode) \
             ^ hash(self.subsample) \
             ^ hash(self.logical_img_hw) \
             ^ hash(self.logical_kern_hw) \
             ^ hash(self.logical_kern_align_top) \
-            ^ self.version # don't use hash as hash(-1)==-2 and hash(-2)==-2 in python!
+            ^ self.version \
+            ^ self.verbose

     def __str__(self):
         return '%s{%s,%s,%s,%s,%s}' % (self.__class__.__name__,
@@ -200,7 +203,7 @@ class GpuConv(Op):
         return ['cuda_ndarray.cuh', '<stdio.h>']

     def c_code_cache_version(self):
-        return (0, 4)
+        return (0, 5)

     def c_support_code_apply(self, node, nodename):
         return open(os.path.join(os.path.split(__file__)[0], 'conv_kernel.cu')).read() + \
```
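The relocated comment in `__hash__` is worth spelling out: CPython reserves -1 as the error sentinel for hash functions, so `hash(-1)` is silently remapped to -2 and collides with `hash(-2)`. XOR-ing the raw `self.version` integer instead of its hash keeps versions -1 and -2 distinct. A quick check:

```python
# CPython remaps a hash of -1 to -2 (-1 is the C-level error code),
# so hashing the version would make versions -1 and -2 collide:
assert hash(-1) == -2
assert hash(-2) == -2

# XOR-ing the raw integer, as __hash__ does above, keeps them apart:
assert (0 ^ -1) != (0 ^ -2)
```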
theano/sandbox/cuda/conv.cu

```diff
@@ -307,7 +307,7 @@ CudaNdarray_conv_valid(const CudaNdarray *img, const CudaNdarray * kern,
 #define CONV_ROWS_STACK_SPECIAL(kern_wid) \
             if(!img_contiguous_2d || !kern_contiguous_2d) f = conv_rows_stack<kern_wid, false>;\
-            else f = conv_rows_stack<kern_wid, true>;\
+            else f = conv_rows_stack<kern_wid, true>;
             CONV_ROWS_STACK_SPECIAL(THEANO_KERN_WID);
             f<<< grid, threads, shared_size >>>
@@ -379,7 +379,8 @@ CudaNdarray_conv_valid(const CudaNdarray *img, const CudaNdarray * kern,
             if((!img_contiguous_2d || !kern_contiguous_2d)&&version==9) f = conv_rows_stack2<kern_wid, false,true>;\
             else if(version==9) f = conv_rows_stack2<kern_wid, true,true>;\
             else if(!img_contiguous_2d || !kern_contiguous_2d) f = conv_rows_stack2<kern_wid, false, false>;\
-            else f = conv_rows_stack2<kern_wid, true, false>;\
+            else f = conv_rows_stack2<kern_wid, true, false>;
             CONV_ROWS_STACK2_SPECIAL(THEANO_KERN_WID);
             f<<< grid, threads, shared_size >>>
```
theano/sandbox/cuda/nvcc_compiler.py

```diff
@@ -2,6 +2,7 @@ import sys, os, subprocess, logging
 from theano.gof.cmodule import (std_libs, std_lib_dirs, std_include_dirs, dlimport,
         get_lib_extension)
 from theano import config
+import distutils

 _logger = logging.getLogger("theano.sandbox.cuda.nvcc_compiler")
 _logger.setLevel(logging.WARN)
@@ -68,6 +69,15 @@ def nvcc_module_compile_str(module_name, src_code, location=None, include_dirs=[
     if cuda_root:
         lib_dirs.append(os.path.join(cuda_root, 'lib'))

+    # sometimes, the linker cannot find -lpython so we need to tell it
+    # explicitly where it is located
+    # this returns somepath/lib/python2.x
+    python_lib = distutils.sysconfig.get_python_lib(plat_specific=1, \
+            standard_lib=1)
+    python_lib = os.path.dirname(python_lib)
+    if python_lib not in lib_dirs:
+        lib_dirs.append(python_lib)
+
     cppfilename = os.path.join(location, 'mod.cu')
     cppfile = file(cppfilename, 'w')
```
theano/sandbox/cuda/tests/test_nnet.py

```diff
@@ -14,7 +14,7 @@ import numpy
 # Skip test if cuda_ndarray is not available.
 from nose.plugins.skip import SkipTest
 import theano.sandbox.cuda as cuda_ndarray
-if cuda_ndarray.cuda_enabled == False:
+if cuda_ndarray.cuda_available == False:
     raise SkipTest('Optional package cuda disabled')

 import theano.sandbox.cuda as tcn
@@ -23,6 +23,13 @@ import logging
 logging.getLogger('theano.sandbox.cuda.tests.test_nnet').setLevel(logging.INFO)

+def my_rand(*shape):
+    return theano._asarray(numpy.random.rand(*shape), dtype='float32')
+def my_randn(*shape):
+    return theano._asarray(numpy.random.randn(*shape), dtype='float32')
+def my_zeros(*shape):
+    return theano._asarray(numpy.zeros(*shape), dtype='float32')
+
 def get_mode(use_gpu):
     ret = theano.compile.get_default_mode()
     if isinstance(ret, theano.compile.ProfileMode):
@@ -44,15 +51,15 @@ def print_diff_mode(a,b):
 def run_nnet(use_gpu, n_batch=60, n_in=1024, n_hid=2048, n_out=10, n_iter=100):
     if use_gpu:
-        w = tcn.shared_constructor(0.01*(numpy.random.rand(n_in,n_hid)-0.5), 'w')
-        b = tcn.shared_constructor(numpy.zeros(n_hid), 'b')
-        v = tcn.shared_constructor(numpy.zeros((n_hid, n_out)), 'c')
-        c = tcn.shared_constructor(numpy.zeros(n_out), 'c')
+        w = tcn.shared_constructor(0.01*(my_rand(n_in,n_hid)-0.5), 'w')
+        b = tcn.shared_constructor(my_zeros(n_hid), 'b')
+        v = tcn.shared_constructor(my_zeros((n_hid, n_out)), 'c')
+        c = tcn.shared_constructor(my_zeros(n_out), 'c')
     else:
-        w = shared(theano._asarray(0.01*(numpy.random.rand(n_in,n_hid)-0.5), dtype='float32'), 'w')
-        b = shared(theano._asarray(numpy.zeros(n_hid), dtype='float32'), 'b')
-        v = shared(theano._asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c')
-        c = shared(theano._asarray(numpy.zeros(n_out), dtype='float32'), 'c')
+        w = shared(0.01*(my_rand(n_in,n_hid)-0.5), 'w')
+        b = shared(my_zeros(n_hid), 'b')
+        v = shared(my_zeros((n_hid, n_out)), 'c')
+        c = shared(my_zeros(n_out), 'c')

     x = tensor.fmatrix('x')
     y = tensor.fmatrix('y')
@@ -75,8 +82,8 @@ def run_nnet(use_gpu, n_batch=60, n_in=1024, n_hid=2048, n_out=10, n_iter=100):
         for i, n in enumerate(train.maker.env.toposort()):
             print i, n

-    xval = theano._asarray(numpy.random.rand(n_batch, n_in), dtype='float32')
-    yval = theano._asarray(numpy.random.rand(n_batch, n_out), dtype='float32')
+    xval = my_rand(n_batch, n_in)
+    yval = my_rand(n_batch, n_out)
     lr = theano._asarray(0.01, dtype='float32')

     t0 = time.time()
@@ -123,10 +130,10 @@ def run_conv_nnet1(use_gpu):
     n_hid = n_kern * logical_hid_shape[0] * logical_hid_shape[1]
     n_out = 10

-    w = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w')
-    b = shared_fn(theano._asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b')
-    v = shared_fn(theano._asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c')
-    c = shared_fn(theano._asarray(numpy.zeros(n_out), dtype='float32'), 'c')
+    w = shared_fn(0.01*(my_rand(*shape_kern)-0.5), 'w')
+    b = shared_fn(my_zeros((n_kern,)), 'b')
+    v = shared_fn(my_zeros((n_hid, n_out)), 'c')
+    c = shared_fn(my_zeros(n_out), 'c')

     x = tensor.Tensor(dtype='float32', broadcastable=(0,1,0,0))('x')
     y = tensor.fmatrix('y')
@@ -152,8 +159,8 @@ def run_conv_nnet1(use_gpu):
 #    for i, n in enumerate(train.maker.env.toposort()):
 #        print i, n

-    xval = theano._asarray(numpy.random.rand(*shape_img), dtype='float32')
-    yval = theano._asarray(numpy.random.rand(n_batch, n_out), dtype='float32')
+    xval = my_rand(*shape_img)
+    yval = my_rand(n_batch, n_out)
     lr = theano._asarray(0.01, dtype='float32')

     for i in xrange(10):
@@ -204,12 +211,12 @@ def run_conv_nnet2(use_gpu): # pretend we are training LeNet for MNIST
     n_hid = n_kern1 * logical_hid_shape1[0] * logical_hid_shape1[1]
     n_out = 10

-    w0 = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w0')
-    b0 = shared_fn(theano._asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b0')
-    w1 = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern1)-0.5), dtype='float32'), 'w1')
-    b1 = shared_fn(theano._asarray(numpy.zeros((n_kern1,)), dtype='float32'), 'b1')
-    v = shared_fn(theano._asarray(numpy.zeros((n_hid, n_out)), dtype='float32'), 'c')
-    c = shared_fn(theano._asarray(numpy.zeros(n_out), dtype='float32'), 'c')
+    w0 = shared_fn(0.01*(my_rand(*shape_kern)-0.5), 'w0')
+    b0 = shared_fn(my_zeros((n_kern,)), 'b0')
+    w1 = shared_fn(0.01*(my_rand(*shape_kern1)-0.5), 'w1')
+    b1 = shared_fn(my_zeros((n_kern1,)), 'b1')
+    v = shared_fn(my_zeros((n_hid, n_out)), 'c')
+    c = shared_fn(my_zeros(n_out), 'c')

     x = tensor.Tensor(dtype='float32', broadcastable=(0,1,0,0))('x')
     y = tensor.fmatrix('y')
@@ -238,8 +245,8 @@ def run_conv_nnet2(use_gpu): # pretend we are training LeNet for MNIST
 #    for i, n in enumerate(train.maker.env.toposort()):
 #        print i, n

-    xval = theano._asarray(numpy.random.rand(*shape_img), dtype='float32')
-    yval = theano._asarray(numpy.random.rand(n_batch, n_out), dtype='float32') #int32 make all 0...
+    xval = my_rand(*shape_img)
+    yval = my_rand(n_batch, n_out) #int32 make all 0...
     lr = theano._asarray(0.01, dtype='float32')

     for i in xrange(n_train):
         rval = train(xval, yval, lr)
@@ -284,12 +291,12 @@ def run_conv_nnet2_classif(use_gpu, isize, ksize, n_batch, n_iter,
     n_out = 10

-    w0 = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern)-0.5), dtype='float32'), 'w0')
-    b0 = shared_fn(theano._asarray(numpy.zeros((n_kern,)), dtype='float32'), 'b0')
-    w1 = shared_fn(theano._asarray(0.01*(numpy.random.rand(*shape_kern1)-0.5), dtype='float32'), 'w1')
-    b1 = shared_fn(theano._asarray(numpy.zeros((n_kern1,)), dtype='float32'), 'b1')
-    v = shared_fn(theano._asarray(0.01*numpy.random.randn(n_hid, n_out), dtype='float32'), 'v')
-    c = shared_fn(theano._asarray(numpy.zeros(n_out), dtype='float32'), 'c')
+    w0 = shared_fn(0.01*(my_rand(*shape_kern)-0.5), 'w0')
+    b0 = shared_fn(my_zeros((n_kern,)), 'b0')
+    w1 = shared_fn(0.01*(my_rand(*shape_kern1)-0.5), 'w1')
+    b1 = shared_fn(my_zeros((n_kern1,)), 'b1')
+    v = shared_fn(0.01*my_randn(n_hid, n_out), 'v')
+    c = shared_fn(my_zeros(n_out), 'c')

     print 'ALLOCATING ARCH: w0 shape', w0.value.shape
     print 'ALLOCATING ARCH: w1 shape', w1.value.shape
@@ -330,11 +337,11 @@ def run_conv_nnet2_classif(use_gpu, isize, ksize, n_batch, n_iter,
         for i, n in enumerate(train.maker.env.toposort()):
             print i, n

-    xval = theano._asarray(numpy.random.rand(*shape_img), dtype='float32')
-    yval = theano._asarray(numpy.random.rand(n_batch, n_out), dtype='float32')
+    xval = my_rand(*shape_img)
+    yval = my_rand(n_batch, n_out)
     lr = theano._asarray(0.01, dtype='float32')

-    rvals = numpy.zeros(n_iter)
+    rvals = my_zeros(n_iter)
     t0 = time.time()
     for i in xrange(n_iter):
         rvals[i] = train(xval, yval, lr)[0]
```
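Most of this file's churn is mechanical: the new `my_rand`/`my_randn`/`my_zeros` helpers fold the repeated `theano._asarray(..., dtype='float32')` wrapping into one place. A NumPy-only sketch of what they are shorthand for (the real helpers go through `theano._asarray`, which likewise yields float32 ndarrays):

```python
import numpy

def my_rand(*shape):
    # stands in for: theano._asarray(numpy.random.rand(*shape), dtype='float32')
    return numpy.random.rand(*shape).astype('float32')

def my_zeros(*shape):
    # stands in for: theano._asarray(numpy.zeros(*shape), dtype='float32')
    return numpy.zeros(*shape).astype('float32')

assert my_rand(3, 4).shape == (3, 4) and my_rand(3, 4).dtype == numpy.float32
# the shape may also be passed as a single tuple, as in my_zeros((n_hid, n_out)):
assert my_zeros((5, 2)).shape == (5, 2)
```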
theano/tensor/basic.py   (+0 -0; no diff shown)
theano/tensor/blas.py

```diff
@@ -945,8 +945,6 @@ def local_dot22_to_dot22scalar(node):
     #we take the first _dot22 found. TODO check others!
     dot22_idx = i_dot22.index(True)
     d = node.inputs[dot22_idx]
     i_scalar = [_as_scalar(x) for x in node.inputs]
     if not any(i_scalar) and not any([x.owner and x.owner.op == T.mul for x in node.inputs]):
         #no scalar in input and no multiplication
@@ -983,15 +981,11 @@ def local_dot22_to_dot22scalar(node):
     if scalar_idx < 0:
         info('Not optimizing dot22 with inputs', node.inputs, [x.type for x in node.inputs],
                 'as the type of the scalar can\'t be upcasted to the matrix type')
         return False
     assert scalar_idx < len(node.inputs)
     s = node.inputs[scalar_idx]
     o = copy.copy(node.inputs)
     o.remove(d)
     o.remove(s)
     if len(o) == 0:
         return [_dot22scalar(d.owner.inputs[0], d.owner.inputs[1], s)]
     else:
```
theano/tensor/opt.py

```diff
@@ -24,6 +24,8 @@ from theano import compile #to register the optimizer built by this file
 from theano.gof.python25 import any, all
 from theano.gof.opt import Optimizer
 from theano.gof import toolbox, DestroyHandler

 # Utilities

 def out2in(*local_opts):
@@ -395,6 +397,13 @@ class ShapeFeature(object):
         else:
             self.shape_of[r] = tuple([self.unpack(s_i) for s_i in s])

+    def init_r(self, r):
+        if r not in self.shape_of:
+            try:
+                self.set_shape(r, self.shape_tuple(r))
+            except AttributeError:
+                self.set_shape(r, None)
+
     def make_vector_shape(self, r):
         return make_vector(*self.shape_of[r])

     #
@@ -421,11 +430,7 @@ class ShapeFeature(object):
         for i, r in enumerate(node.inputs):
             # make sure we have shapes for the inputs
-            try:
-                self.set_shape(r, self.shape_tuple(r))
-            except AttributeError:
-                self.set_shape(r, None)
-                # not a TensorType variable
+            if r not in self.shape_of:
+                self.init_r(r)
         try:
             shape_infer = node.op.infer_shape
@@ -453,7 +458,7 @@ class ShapeFeature(object):
         # TODO:
         # This tells us that r and new_r must have the same shape
         # if we didn't know that the shapes are related, now we do.
+        self.init_r(new_r)
         # change_input happens in two cases:
         # 1) we are trying to get rid of r, or
         # 2) we are putting things back after a failed transaction.
@@ -1160,7 +1165,8 @@ register_canonicalize(local_mul_canonizer, name = 'local_mul_canonizer')
 @gof.local_optimizer([T.neg])
 def local_neg_to_mul(node):
     if node.op == T.neg:
-        return [T.mul(-1, node.inputs[0])]
+        return [T.mul(numpy.array(-1, dtype=node.inputs[0].dtype),
+                node.inputs[0])]
 register_canonicalize(local_neg_to_mul)

 @register_specialize
```
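Two of the `opt.py` changes reward a closer look. `ShapeFeature.init_r` centralizes the try/except that seeds shape information for a variable, and it is now also called for `new_r`, so a replacement variable always has an entry in `shape_of`. And `local_neg_to_mul` now builds its -1 constant with the input's own dtype: a bare Python `-1` becomes an integer constant, and under C-style upcast rules an integer mixed into a float32 graph would drag the result up to float64. A NumPy sketch of that promotion issue (NumPy's `result_type` follows the same C-style rule for dtypes):

```python
import numpy

# An int64 constant mixed with float32 promotes to float64 ...
print(numpy.result_type(numpy.int64, numpy.float32))    # float64

# ... while a constant built with the input's own dtype does not:
neg_one = numpy.array(-1, dtype='float32')
print(numpy.result_type(neg_one.dtype, numpy.float32))  # float32
```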
theano/tensor/signal/downsample.py

```diff
@@ -212,7 +212,7 @@ class DownsampleFactorMax(Op):
         """ % locals()

     def c_code_cache_version(self):
-        return ()
+        return (0, 1)

 class DownsampleFactorMaxGrad(Op):
@@ -349,4 +349,4 @@ class DownsampleFactorMaxGrad(Op):
         """ % locals()

     def c_code_cache_version(self):
-        return ()
+        return (0, 1)
```
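On the move from `()` to `(0, 1)`: in Theano's C-code cache, an empty tuple marks an op's generated C code as unversioned, so compiled modules are not reused across processes, while a concrete version tuple enables caching and is bumped whenever the generated code changes (as `GpuConv` does above, going from `(0, 4)` to `(0, 5)`). A sketch of the convention, using a hypothetical op class:

```python
# Hypothetical op illustrating the cache-version convention:
class SomeOp(object):
    def c_code_cache_version(self):
        # was: return ()   -- unversioned: recompiled rather than cached
        return (0, 1)      # versioned: cached until the tuple is bumped
```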