Commit 98213004
authored Jul 05, 2011 by James Bergstra

    merge w conflict in nvcc_compiler due to rpath thing

Parents: 8d431c50, 7fa1cab9

Showing 9 changed files with 187 additions and 94 deletions (+187 -94):
  theano/compile/profiling.py              +0   -0
  theano/compile/tests/test_modes.py       +39  -0
  theano/gof/__init__.py                   +1   -1
  theano/sandbox/cuda/nvcc_compiler.py     +2   -4
  theano/sparse/basic.py                   +76  -59
  theano/tensor/blas.py                    +3   -0
  theano/tensor/opt.py                     +22  -19
  theano/tensor/tests/test_raw_random.py   +27  -8
  theano/tests/diverse_tests.py            +17  -3
theano/compile/profiling.py (new file, 0 → 100644; +0 -0, diff collapsed)
theano/compile/tests/test_modes.py (new file, 0 → 100644)

"""
Test compilation modes
"""
from nose.plugins.skip import SkipTest
import unittest

import theano
import numpy
import random
import numpy.random

from theano.tests import unittest_tools as utt


class T_bunch_of_modes(unittest.TestCase):

    def test1(self):
        # this is a quick test after the LazyLinker branch merge
        # to check that all the current modes can still be used.
        linker_classes_involved = []
        for modename in theano.config.__class__.__dict__['mode'].all:
            x = T.matrix()
            y = T.vector()
            f = theano.function([x, y], x + y, mode=modename)
            # test that it runs something
            f([[1, 2], [3, 4]], [5, 6])
            linker_classes_involved.append(f.maker.mode.linker.__class__)
            print 'MODE:', modename, f.maker.mode.linker, 'stop'

        # regression check:
        # there should be
        # - VM_Linker
        # - OpWiseCLinker (FAST_RUN)
        # - WrapLinker (PROFILE_MODE)
        # - PerformLinker (FAST_COMPILE)
        # - DebugMode's Linker (DEBUG_MODE)
        assert 5 == len(set(linker_classes_involved))


if __name__ == '__main__':
    unittest.main()
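Beyond the named modes the test loops over, a Mode can also be built explicitly from a linker and an optimizer and passed to theano.function, and the resulting linker class inspected the same way the regression check above counts them. A minimal sketch, assuming a standard Theano install; it also imports theano.tensor as T explicitly, which the test above relies on under that name (the Mode arguments 'c|py' and 'fast_run' are standard Theano spellings, not part of this commit):

    # sketch only: build a Mode by hand instead of using a mode name
    import theano
    import theano.tensor as T

    x, y = T.matrix(), T.vector()
    mode = theano.compile.Mode(linker='c|py', optimizer='fast_run')
    f = theano.function([x, y], x + y, mode=mode)
    print f.maker.mode.linker.__class__.__name__   # which linker the mode resolved to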
theano/gof/__init__.py

@@ -146,7 +146,7 @@ from link import \
     Container, Linker, LocalLinker, PerformLinker, WrapLinker, WrapLinkerMany

 from op import \
-    Op
+    Op, PureOp

 from opt import (Optimizer, optimizer, SeqOptimizer,
     MergeOptimizer, MergeOptMerge,
theano/sandbox/cuda/nvcc_compiler.py

@@ -13,7 +13,7 @@ AddConfigVar('nvcc.compiler_bindir',
         "If defined, nvcc compiler driver will seek g++ and gcc in this directory",
         StrParam(""))

-AddConfigVar('cuda.nvccflags',
+AddConfigVar('nvcc.flags',
         "Extra compiler flags for nvcc",
         StrParam(""))

@@ -183,11 +183,9 @@ def nvcc_module_compile_str(
     if sys.platform != 'darwin':
         # the 64bit CUDA libs are in the same files as are named by the function above
         rpaths.append(os.path.join(config.cuda.root, 'lib64'))
     for rpath in rpaths:
         cmd.extend(['-Xlinker', ','.join(['-rpath', rpath])])
-    nvccflags = [flag for flag in config.cuda.nvccflags.split(' ') if flag]
-    cmd.extend(nvccflags)
+    cmd.extend([flag for flag in config.nvcc.flags.split(' ') if flag])
     cmd.extend('-I%s' % idir for idir in include_dirs)
     cmd.extend(['-o', lib_filename])
     cmd.append(os.path.split(cppfilename)[-1])
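For reference, the renamed option is the one users set to pass extra compiler flags to nvcc. A minimal sketch of reading it, assuming a Theano install with the CUDA sandbox enabled; the example flag value in the comment is illustrative only, not taken from this commit:

    # sketch only: the option moved from config.cuda.nvccflags to config.nvcc.flags.
    # It is typically set through the environment, e.g.
    #   THEANO_FLAGS="nvcc.flags=-arch=sm_13"
    import theano
    print theano.config.nvcc.flags    # extra flags appended to every nvcc invocation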
theano/sparse/basic.py

@@ -133,6 +133,79 @@ def sp_ones_like(x):
     data, indices, indptr, shape = csm_properties(x)
     #TODO: don't restrict to CSM formats
     return CSM(format=x.format)(tensor.ones_like(data), indices, indptr, shape)

+class _sparse_py_operators:
+    T = property(lambda self: transpose(self), doc="Return aliased transpose of self (read-only)")
+
+    def __neg__(self): return neg(self)
+    def __add__(left, right): return add(left, right)
+    def __radd__(right, left): return add(left, right)
+    def __sub__(left, right): return sub(left, right)
+    def __rsub__(right, left): return sub(left, right)
+    def __mul__(left, right): return mul(left, right)
+    def __rmul__(left, right): return mul(left, right)
+
+    #extra pseudo-operator symbols
+    def __dot__(left, right): return structured_dot(left, right)
+    def __rdot__(right, left): return structured_dot(left, right)
+
+    #N.B. THIS IS COMMENTED OUT ON PURPOSE!!!
+    # Discussion with Fred & James (at least, and maybe others before)
+    # we decided that casting from a sparse to dense should be explicit
+    # because it's usually something you want to be pretty careful about,
+    # and not to do by accident.
+    #def _as_TensorVariable(self):
+    #    return dense_from_sparse(self)
+
+    shape = property(lambda self: tensor.shape(dense_from_sparse(self)))
+    # don't worry!
+    # ... the plan is that the ShapeFeature in tensor.opt will do shape propagation
+    # ... and remove the dense_from_sparse from the graph.  This will *NOT* actually expand
+    # ... your sparse matrix just to get the shape.
+    ndim = property(lambda self: self.type.ndim)
+    dtype = property(lambda self: self.type.dtype)
+
+class SparseVariable(gof.Variable, _sparse_py_operators):
+    dtype = property(lambda self: self.type.dtype)
+    format = property(lambda self: self.type.format)
+
+    def __str__(self):
+        return '%s{%s,%s}' % (
+                self.__class__.__name__,
+                self.format,
+                self.dtype)
+
+    def __repr__(self):
+        return str(self)
+
+class SparseConstantSignature(tuple):
+    def __eq__(self, other):
+        (a, b), (x, y) = self, other
+        return a == x \
+                and (b.dtype == y.dtype)\
+                and (type(b) == type(y))\
+                and (b.shape == y.shape)\
+                and (abs(b - y).sum() < 1e-6 * b.nnz)
+
+    def __hash__(self):
+        (a, b) = self
+        return hash(type(self)) ^ hash(a) ^ hash(type(b))
+
+class SparseConstant(gof.Constant, _sparse_py_operators):
+    dtype = property(lambda self: self.type.dtype)
+    format = property(lambda self: self.type.format)
+
+    def signature(self):
+        assert self.data is not None
+        return SparseConstantSignature((self.type, self.data))
+
+    def __str__(self):
+        return '%s{%s,%s,shape=%s,nnz=%s}' % (
+                self.__class__.__name__,
+                self.format,
+                self.dtype,
+                self.data.shape,
+                self.data.nnz)
+
+    def __repr__(self):
+        return str(self)
+
+class SparseValue(gof.Value, _sparse_py_operators):
+    dtype = property(lambda self: self.type.dtype)
+    format = property(lambda self: self.type.format)
+
 class SparseType(gof.Type):
     """

@@ -149,6 +222,9 @@ class SparseType(gof.Type):
     dtype_set = set(['int', 'int8', 'int16', 'int32', 'int64', 'float32',
         'float64', 'complex64', 'complex128'])
     ndim = 2

+    Variable = SparseVariable
+    Constant = SparseConstant
+
     def __init__(self, format, dtype):
         """
         Fundamental way to create a sparse node.

@@ -248,65 +324,6 @@ csr_dmatrix = SparseType(format='csr', dtype='float64')
 csc_fmatrix = SparseType(format='csc', dtype='float32')
 csr_fmatrix = SparseType(format='csr', dtype='float32')

-class _sparse_py_operators:
-    T = property(lambda self: transpose(self), doc="Return aliased transpose of self (read-only)")
-
-    def __neg__(self): return neg(self)
-    def __add__(left, right): return add(left, right)
-    def __radd__(right, left): return add(left, right)
-    def __sub__(left, right): return sub(left, right)
-    def __rsub__(right, left): return sub(left, right)
-    def __mul__(left, right): return mul(left, right)
-    def __rmul__(left, right): return mul(left, right)
-
-    #extra pseudo-operator symbols
-    def __dot__(left, right): return structured_dot(left, right)
-    def __rdot__(right, left): return structured_dot(left, right)
-
-    #N.B. THIS IS COMMENTED OUT ON PURPOSE!!!
-    # Discussion with Fred & James (at least, and maybe others before)
-    # we decided that casting from a sparse to dense should be explicit
-    # because it's usually something you want to be pretty careful about,
-    # and not to do by accident.
-    #def _as_TensorVariable(self):
-    #    return dense_from_sparse(self)
-
-    shape = property(lambda self: tensor.shape(dense_from_sparse(self)))
-    # don't worry!
-    # ... the plan is that the ShapeFeature in tensor.opt will do shape propagation
-    # ... and remove the dense_from_sparse from the graph.  This will *NOT* actually expand
-    # ... your sparse matrix just to get the shape.
-    ndim = property(lambda self: self.type.ndim)
-    dtype = property(lambda self: self.type.dtype)
-
-class SparseVariable(gof.Variable, _sparse_py_operators):
-    dtype = property(lambda self: self.type.dtype)
-    format = property(lambda self: self.type.format)
-
-class SparseConstantSignature(tuple):
-    def __eq__(self, other):
-        (a, b), (x, y) = self, other
-        return a == x \
-                and (b.dtype == y.dtype)\
-                and (type(b) == type(y))\
-                and (b.shape == y.shape)\
-                and (abs(b - y).sum() < 1e-6 * b.nnz)
-
-    def __hash__(self):
-        (a, b) = self
-        return hash(type(self)) ^ hash(a) ^ hash(type(b))
-
-class SparseConstant(gof.Constant, _sparse_py_operators):
-    dtype = property(lambda self: self.type.dtype)
-    format = property(lambda self: self.type.format)
-
-    def signature(self):
-        assert self.data is not None
-        return SparseConstantSignature((self.type, self.data))
-
-class SparseValue(gof.Value, _sparse_py_operators):
-    dtype = property(lambda self: self.type.dtype)
-    format = property(lambda self: self.type.format)
-
 # CONSTRUCTION
 class CSMProperties(gof.Op):
     """Extract all of .data .indices and .indptr"""
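The SparseConstantSignature equality above is worth spelling out: signatures hash only by type, and two constants compare equal when their Theano types match, their data have the same class, dtype, and shape, and the data agree to within 1e-6 * nnz in summed absolute difference. A standalone sketch of that criterion using plain scipy.sparse matrices (a hypothetical illustration, not part of the commit):

    # sketch only: the same comparison rule applied directly to scipy sparse data
    import scipy.sparse

    a = scipy.sparse.csr_matrix([[1.0, 0.0], [0.0, 2.0]])
    b = scipy.sparse.csr_matrix([[1.0, 0.0], [0.0, 2.0 + 1e-9]])

    same_kind = type(a) == type(b) and a.dtype == b.dtype and a.shape == b.shape
    close_data = abs(a - b).sum() < 1e-6 * a.nnz
    print same_kind and close_data    # True: treated as the same constant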
theano/tensor/blas.py

@@ -937,6 +937,9 @@ def _gemm_from_node2(node):
     lst = _factor_canonicalized(lst)
     rval = _gemm_from_factored_list(lst)
     #print "RVAL", rval
+    # THIS GOT COMMENTED OUT AT SOME POINT - ASK P.Lamblin maybe why?
+    #if rval:
+    #    assert rval[0].type == node.outputs[0].type, (rval[0].type, node.outputs[0].type)
     if rval and (rval[0].type == node.outputs[0].type):
         return rval
theano/tensor/opt.py

@@ -3057,30 +3057,33 @@ def constant_folding(node):
     for input in node.inputs:
         if not isinstance(input, Constant):
            return False
-    try:
-        storage = [[None] for output in node.outputs]
-        node.op.perform(node, [x.data for x in node.inputs], storage)
-    except MethodNotDefined:
-        tmp_inputs = [x.type() for x in node.inputs]
-        f = compile.function(inputs=tmp_inputs,
-                outputs=node.op.make_node(*tmp_inputs).outputs,
-                mode=compile.Mode(linker='c|py', optimizer=None))
-        xvals = f(*[x.data for x in node.inputs])
-        storage = [[xv] for xv in xvals]
-    msg = []
-    assert len(storage) == len(node.outputs)
-    for s, output in zip(storage, node.outputs):
+    #condition: all inputs are constant
+    storage_map = dict([(i, [i.data]) for i in node.inputs])
+    compute_map = dict([(i, [True]) for i in node.inputs])
+    for o in node.outputs:
+        storage_map[o] = [None]
+        compute_map[o] = [False]
+    thunk = node.op.make_thunk(node, storage_map, compute_map, no_recycling=[])
+    required = thunk()
+    assert not required  # a node whose inputs are all provided should always
+                         # return successfully
+    rval = []
+    for output in node.outputs:
+        assert compute_map[output][0], (output, storage_map[output][0])
         try:
             constant = output.type.Constant
-        except:
+        except AttributeError:
             constant = Constant
-        msg += [constant(output.type, s[0])]
-    return msg
+        rval.append(constant(output.type, storage_map[output][0]))
+    return rval

 register_canonicalize(constant_folding, 'fast_compile')
 register_stabilize(constant_folding)
 # because
 register_stabilize(constant_folding)
 register_specialize(constant_folding)

 def _is_1(expr):
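The rewritten constant_folding no longer calls op.perform directly: it builds one-element storage_map cells holding each input's data, marks inputs as computed in compute_map, asks the op for a thunk via make_thunk, runs it, and reads the folded values back out of storage_map. A toy, Theano-independent sketch of that protocol (all names and the make_thunk helper here are illustrative, not Theano's implementation):

    # sketch only: a self-contained imitation of the storage_map / compute_map protocol
    def make_thunk(storage_map, compute_map, inputs, outputs, fn):
        def thunk():
            args = [storage_map[i][0] for i in inputs]          # read input cells
            for o, value in zip(outputs, fn(*args)):
                storage_map[o][0] = value                       # fill output cells
                compute_map[o][0] = True                        # mark as computed
            return []                                           # nothing left to compute
        return thunk

    inputs, outputs = ['x', 'y'], ['z']
    storage_map = dict([(i, [d]) for i, d in zip(inputs, [2, 3])])
    compute_map = dict([(i, [True]) for i in inputs])
    for o in outputs:
        storage_map[o] = [None]
        compute_map[o] = [False]

    thunk = make_thunk(storage_map, compute_map, inputs, outputs, lambda x, y: [x + y])
    assert not thunk()
    assert compute_map['z'][0] and storage_map['z'][0] == 5     # folded constant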
theano/tensor/tests/test_raw_random.py

@@ -49,11 +49,14 @@ class T_random_function(unittest.TestCase):
         rng_R = random_state_type()

         # use make_node to override some of the self.args
-        post_r2, out2 = rf2(rng_R, (4,), -2, 2)
-        post_r2_4, out2_4 = rf2(rng_R, (4,), -4.0, 2)
-        post_r2_4_4, out2_4_4 = rf2(rng_R, (4,), -4.0, 4.0)
-        post_r4, out4 = rf4(rng_R, (4,), -4, 4)
+        post_r2, out2 = rf2(rng_R, (4,), -2, 2)              # NOT INPLACE
+        post_r4, out4 = rf4(rng_R, (4,), -4, 4)              # INPLACE
+        post_r2_4, out2_4 = rf2(rng_R, (4,), -4.0, 2)        # NOT INPLACE
+        post_r2_4_4, out2_4_4 = rf2(rng_R, (4,), -4.0, 4.0)  # NOT INPLACE
+
+        # configure out4 to be computed inplace
+        # The update expression means that the random state rng_R will
+        # be maintained by post_r4
         f = compile.function(
                 [compile.In(rng_R,
                     value=numpy.random.RandomState(utt.fetch_seed()),

@@ -65,9 +68,25 @@ class T_random_function(unittest.TestCase):
         f2, f4, f2_4, f2_4_4 = f()
         f2b, f4b, f2_4b, f2_4_4b = f()
-        assert numpy.allclose(f2 * 2, f4)
-        assert numpy.allclose(f2_4_4, f4)
-        assert not numpy.allclose(f4, f4b)
+        print f2
+        print f4
+        print f2_4
+        print f2_4_4
+
+        #print f2b
+        #print f4b
+        #print f2_4b
+        #print f2_4_4b
+
+        # setting bounds is same as multiplying by 2
+        assert numpy.allclose(f2 * 2, f4), (f2, f4)
+
+        # retrieving from non-inplace generator
+        # is same as inplace one for first call
+        assert numpy.allclose(f2_4_4, f4), (f2_4_4, f4)
+
+        # f4 changes from call to call, that the update has worked
+        assert not numpy.allclose(f4, f4b), (f4, f4b)

     def test_inplace_optimization(self):
         """Test that FAST_RUN includes the random_make_inplace optimization"""
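The comment about the update expression is the key point of the inplace case: on every call, the compiled function writes the post-call random state back into rng_R, which is why f4 differs between the two calls. A minimal sketch of the same update mechanism using a shared counter, assuming a standard Theano install (this uses theano.shared and the updates argument rather than the RandomFunction machinery from the test):

    # sketch only: an update expression threads state through repeated calls
    import theano
    import theano.tensor as T

    state = theano.shared(0)                 # plays the role of the random state
    step = T.iscalar('step')
    f = theano.function([step], state, updates=[(state, state + step)])
    print f(1), f(1), f(1)                   # 0 1 2: each call sees the previous update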
theano/tests/diverse_tests.py

@@ -13,19 +13,32 @@ from theano.tests import unittest_tools as utt
 should ensure that it will remain operational
 '''

-class T_diverse(unittest.TestCase):
+class T_scipy(unittest.TestCase):
     def setUp(self):
         utt.seed_rng()
+        self.orig_floatX = theano.config.floatX
+
+    def tearDown(self):
+        theano.config.floatX = self.orig_floatX

-    def scipy_paper_example1(self):
+    def test_scipy_paper_example1(self):
         a = theano.tensor.vector('a')     # declare variable
         b = a + a ** 10                   # build expression
         f = theano.function([a], b)       # compile function
         assert numpy.all(f([0, 1, 2]) == numpy.array([0, 2, 1026]))

-    def scipy_papaer_example2(self):
+    def test_scipy_paper_example2(self):
         ''' This just sees if things compile well and if they run '''
         # PREAMPBLE
         T = theano.tensor
         shared = theano.shared
         function = theano.function
         rng = numpy.random
+        theano.config.floatX = 'float64'
+
+        #
         # ACTUAL SCRIPT FROM PAPER
         x = T.matrix()
         y = T.vector()
         w = shared(rng.randn(100))

@@ -52,6 +65,7 @@ class T_diverse(unittest.TestCase):
         for i in range(training_steps):
             pred, err = train(D[0], D[1])

 if __name__ == '__main__':
     unittest.main()