Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
8e620d7b
提交
8e620d7b
authored
1月 07, 2009
作者:
james@X40
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
adding more sparse ops: sp_ones_like, MulSS, fixing grad on DenseFromSparse
上级
84c40292
隐藏空白字符变更
内嵌
并排
正在显示
2 个修改的文件
包含
211 行增加
和
13 行删除
+211
-13
basic.py
theano/sparse/basic.py
+184
-4
basic.py
theano/tensor/basic.py
+27
-9
没有找到文件。
theano/sparse/basic.py
浏览文件 @
8e620d7b
...
@@ -11,6 +11,11 @@ from scipy import sparse
...
@@ -11,6 +11,11 @@ from scipy import sparse
from
..
import
gof
from
..
import
gof
from
..
import
tensor
from
..
import
tensor
from
..
import
compile
#TODO: move this decorator to the compile submodule
def register_specialize(lopt, *tags, **kwargs):
    """Register `lopt` in the 'specialize' optimization database.

    The optimization is registered under ``kwargs['name']`` when given,
    otherwise under ``lopt.__name__``, tagged 'fast_run' plus any extra
    `tags`.
    """
    # The original expression `(kwargs and kwargs.pop('name'))` raised a
    # KeyError whenever kwargs was non-empty but had no 'name' entry;
    # pop with a default instead.
    name = kwargs.pop('name', None) or lopt.__name__
    compile.optdb['specialize'].register(name, lopt, 'fast_run', *tags)
""" Types of sparse matrices to use for testing """
""" Types of sparse matrices to use for testing """
...
@@ -98,6 +103,9 @@ def value(x):
...
@@ -98,6 +103,9 @@ def value(x):
except
TypeError
:
except
TypeError
:
raise
TypeError
(
"Could not convert
%
s to Sparse"
%
x
,
type
(
x
))
raise
TypeError
(
"Could not convert
%
s to Sparse"
%
x
,
type
(
x
))
def sp_ones_like(x):
    """Return a sparse matrix with the same sparsity pattern as `x`,
    with every stored value replaced by one.
    """
    # NOTE(review): assumes `x` is in a CSM-style (csc/csr) format — confirm
    data, indices, indptr, shape = csm_properties(x)  #TODO: don't restrict to CSM formats
    return CSM(format=x.format)(tensor.ones_like(data), indices, indptr, shape)
class
Sparse
(
gof
.
Type
):
class
Sparse
(
gof
.
Type
):
...
@@ -187,12 +195,159 @@ class SparseValue(gof.Value, _sparse_py_operators):
...
@@ -187,12 +195,159 @@ class SparseValue(gof.Value, _sparse_py_operators):
format
=
property
(
lambda
self
:
self
.
type
.
format
)
format
=
property
(
lambda
self
:
self
.
type
.
format
)
# CONSTRUCTION
class CSMProperties(gof.Op):
    """Extract all of .data .indices and .indptr from a CSM-format sparse result.

    Produces four outputs: (data, indices, indptr, shape).  The optional
    `map` index array, when given, selects/re-orders entries of .data.
    """
    # all four outputs are views of the single input
    view_map = {0: [0], 1: [0], 2: [0], 3: [0]}

    def __init__(self, map=None):
        # `map` is an optional index array applied to .data in perform()
        self.map = map

    def make_node(self, csm):
        csm = as_sparse(csm)
        data = tensor.Tensor(dtype=csm.type.dtype,
                broadcastable=(False,)).make_result()
        return gof.Apply(self, [csm],
                [data, tensor.ivector(), tensor.ivector(), tensor.ivector()])

    def perform(self, node, inputs, out):
        # debug prints from the original removed
        (csm,) = inputs
        out[0][0] = csm.data if self.map is None else csm.data[self.map]
        out[1][0] = numpy.asarray(csm.indices, dtype='int32')
        out[2][0] = numpy.asarray(csm.indptr, dtype='int32')
        out[3][0] = numpy.asarray(csm.shape, dtype='int32')

    def grad(self, inputs, g):
        """Gradient is defined only wrt the data output; the structural
        outputs (indices, indptr, shape) must have no incoming gradient."""
        (csm,) = inputs
        # original `assert [gg is None for gg in g[1:]]` asserted a list,
        # which is truthy whenever non-empty regardless of contents
        assert all(gg is None for gg in g[1:])
        data, indices, indptr, shape = csm_properties(csm, self.map)
        # original referenced undefined `g_data` and mistakenly called the
        # CSR *instance* as `CSR('csm', self.map)`; use g[0] and CSM('csr', ...)
        if csm.format == 'csc':
            return [CSM('csc', self.map)(g[0], indices, indptr, shape)]
        else:
            return [CSM('csr', self.map)(g[0], indices, indptr, shape)]
def csm_properties(csm, map=None):
    """Return the (data, indices, indptr, shape) results of `csm`."""
    return CSMProperties(map)(csm)

def csm_data(csm, map=None):
    """Return the data vector of `csm`."""
    return csm_properties(csm, map)[0]

def csm_indices(csm):
    """Return the indices vector of `csm`."""
    return csm_properties(csm)[1]

def csm_indptr(csm):
    """Return the indptr vector of `csm`."""
    return csm_properties(csm)[2]

def csm_shape(csm):
    """Return the shape vector of `csm`."""
    return csm_properties(csm)[3]
class CSM(gof.Op):
    """Construct a CSC or CSR matrix from the internal representation """
    view_map = {0: [0]}
    #should view the other inputs too, but viewing multiple inputs is not
    #currently supported by the destroyhandler

    def __init__(self, format, map=None):
        if format not in ('csr', 'csc'):
            raise ValueError("format must be one of: 'csr', 'csc'", format)
        self.format = format
        # for efficiency, if remap does nothing, then do not apply it
        if map is not None and all(map == N.arange(N.size(map))):
            map = None
        self.map = map

    def __eq__(self, other):
        # NOTE(review): when self.map is an ndarray, `other.map == self.map`
        # is an elementwise comparison and bool() of it may raise — confirm
        return type(other) is CSM \
                and other.format == self.format and other.map == self.map

    def __hash__(self):
        return hash(CSM) ^ hash(self.format) ^ hash(numpy.str(self.map))

    def make_node(self, data, indices, indptr, shape):
        """Build a SparseResult from the internal parametrization

        :param data:
        :param indices:
        :param indptr:
        :type data: 1-d tensor
        :type indices: 1-d tensor of ints
        :type indptr: 1-d tensor of ints
        """
        data = tensor.as_tensor(data)
        indices = tensor.as_tensor(indices)
        indptr = tensor.as_tensor(indptr)
        shape = tensor.as_tensor(shape)
        if data.type.ndim != 1:
            raise TypeError('data argument must be a vector', data.type)
        if indices.type not in tensor.int_vector_types:
            raise TypeError('indices must be vector of integers')
        # message fixed: original said 'indices' here too (copy-paste)
        if indptr.type not in tensor.int_vector_types:
            raise TypeError('indptr must be vector of integers')
        # message fixed: original said 'n_rows must be integer type'
        if shape.type not in tensor.int_vector_types:
            raise TypeError('shape must be vector of integers')
        return gof.Apply(self,
                [data, indices, indptr, shape],
                [Sparse(dtype=data.type.dtype, format=self.format).make_result()])

    def perform(self, node, inputs, outputs):
        """Build a csc_matrix"""
        # debug prints and `if 0:` dead code from the original removed
        (data, indices, indptr, shape) = inputs
        (out,) = outputs
        # original used `self.map != None`; with an ndarray map that is an
        # elementwise comparison — identity test is the correct check
        if self.map is not None:
            data = data[self.map]
        if len(shape) != 2:
            raise ValueError('Shape should be an array of length 2')
        if data.shape != indices.shape:
            raise ValueError('data indices shape mismatch', (data.shape, indices.shape))
        if self.format == 'csc':
            out[0] = sparse.csc_matrix((data, indices.copy(), indptr.copy()),
                    numpy.asarray(shape),
                    copy=False)
        else:
            assert self.format == 'csr'
            out[0] = sparse.csr_matrix((data, indices.copy(), indptr.copy()),
                    shape.copy(),
                    copy=False)

    def grad(self, input, grads):
        """Return a gradient on the data vector"""
        (g_out,) = grads
        #unpack the data vector and wrap it as a 1d Tensor
        return [csm_data(g_out, self.map), None, None, None]

CSC = CSM('csc')
CSR = CSM('csr')
@gof.local_optimizer([csm_properties])
def skip_pack_csc01(node):
    """if we find csm_properties(CSM(*args)), then we can replace that with the *args
    directly"""
    # NOTE(review): `node.op` is a CSMProperties *instance* while
    # `csm_properties` is the module-level function above — this equality
    # looks like it can never hold; confirm against gof's dispatch.
    if node.op == csm_properties:
        csm, = node.inputs
        if csm.owner and (csm.owner.op == CSC or csm.owner.op == CSR):
            # forward the CSM inputs straight through, skipping pack/unpack
            return csm.owner.inputs
    return False
register_specialize(skip_pack_csc01)
#
#
# Conversion
# Conversion
#
#
# convert a sparse matrix to an ndarray
# convert a sparse matrix to an ndarray
class
DenseFromSparse
(
gof
.
op
.
Op
):
class
DenseFromSparse
(
gof
.
op
.
Op
):
sparse_grad
=
True
def
make_node
(
self
,
x
):
def
make_node
(
self
,
x
):
x
=
as_sparse
(
x
)
x
=
as_sparse
(
x
)
return
gof
.
Apply
(
self
,
return
gof
.
Apply
(
self
,
...
@@ -202,7 +357,10 @@ class DenseFromSparse(gof.op.Op):
...
@@ -202,7 +357,10 @@ class DenseFromSparse(gof.op.Op):
def
perform
(
self
,
node
,
(
x
,
),
(
out
,
)):
def
perform
(
self
,
node
,
(
x
,
),
(
out
,
)):
out
[
0
]
=
x
.
toarray
()
out
[
0
]
=
x
.
toarray
()
    def grad(self, (x, ), (gz, )):
        # When sparse_grad is set, mask the dense gradient to the sparsity
        # pattern of x via sp_ones_like; otherwise convert the dense
        # gradient back to x's sparse format.
        if self.sparse_grad:
            return [sp_ones_like(x) * gz]
        else:
            return [SparseFromDense(x.type.format)(gz)]
dense_from_sparse
=
DenseFromSparse
()
dense_from_sparse
=
DenseFromSparse
()
class
SparseFromDense
(
gof
.
op
.
Op
):
class
SparseFromDense
(
gof
.
op
.
Op
):
...
@@ -253,6 +411,7 @@ class AddSS(gof.op.Op):
...
@@ -253,6 +411,7 @@ class AddSS(gof.op.Op):
if
x
.
type
.
dtype
!=
y
.
type
.
dtype
:
if
x
.
type
.
dtype
!=
y
.
type
.
dtype
:
raise
NotImplementedError
()
raise
NotImplementedError
()
if
x
.
type
.
format
!=
y
.
type
.
format
:
if
x
.
type
.
format
!=
y
.
type
.
format
:
print
x
.
type
.
format
,
y
.
type
.
format
raise
NotImplementedError
()
raise
NotImplementedError
()
return
gof
.
Apply
(
self
,
return
gof
.
Apply
(
self
,
[
x
,
y
],
[
x
,
y
],
...
@@ -303,6 +462,25 @@ def add(x,y):
...
@@ -303,6 +462,25 @@ def add(x,y):
elif
y_is_sparse_result
and
not
x_is_sparse_result
:
return
add_s_d
(
y
,
x
)
elif
y_is_sparse_result
and
not
x_is_sparse_result
:
return
add_s_d
(
y
,
x
)
else
:
raise
NotImplementedError
()
else
:
raise
NotImplementedError
()
class MulSS(gof.op.Op):
    '''Elementwise multiply two sparse matrices.

    NOTE(review): the original docstring said "a sparse and a ndarray",
    apparently copied from MulSD; both inputs here are made sparse by
    make_node and asserted sparse in perform.
    '''
    def make_node(self, x, y):
        x, y = as_sparse(x), as_sparse(y)
        if x.type != y.type:
            raise NotImplementedError()
        return gof.Apply(self, [x, y], [x.type()])
    def perform(self, node, (x, y), (out, )):
        assert _is_sparse(x) and _is_sparse(y)
        assert len(x.shape) == 2
        assert y.shape == x.shape
        # only implemented when both operands share the exact same
        # sparsity structure (identical indptr and indices)
        if (numpy.all(y.indptr == x.indptr) and numpy.all(y.indices == x.indices)):
            out[0] = y.copy()
            out[0].data *= x.data
        else:
            raise NotImplementedError() #RowScale / ColScale
    def grad(self, (x, y), (gz,)):
        # elementwise product rule: d(x*y)/dx = y, d(x*y)/dy = x
        return y * gz, x * gz
mul_s_s = MulSS()
class
MulSD
(
gof
.
op
.
Op
):
class
MulSD
(
gof
.
op
.
Op
):
''' Elementwise multiply a sparse and a ndarray '''
''' Elementwise multiply a sparse and a ndarray '''
def
make_node
(
self
,
x
,
y
):
def
make_node
(
self
,
x
,
y
):
...
@@ -324,11 +502,11 @@ class MulSD(gof.op.Op):
...
@@ -324,11 +502,11 @@ class MulSD(gof.op.Op):
elif
len
(
y
.
shape
)
==
2
:
elif
len
(
y
.
shape
)
==
2
:
#if we have enough memory to fit y, maybe we can fit x.asarray() too?
#if we have enough memory to fit y, maybe we can fit x.asarray() too?
#TODO: change runtime from O(M*N) to O(nonzeros)
#TODO: change runtime from O(M*N) to O(nonzeros)
return
DenseFromSparse
(
x
)
*
y
out
[
0
]
=
type
(
x
)(
x
.
toarray
()
*
y
)
    def grad(self, (x, y), (gz,)):
        assert _is_sparse_result(x) and _is_dense_result(y)
        assert _is_sparse_result(gz)
        # elementwise product rule; gz is sparse, so both returned
        # gradients stay sparse
        return y * gz, x * gz
mul_s_d
=
MulSD
()
mul_s_d
=
MulSD
()
def
mul
(
x
,
y
):
def
mul
(
x
,
y
):
"""
"""
...
@@ -385,6 +563,8 @@ class Dot(gof.op.Op):
...
@@ -385,6 +563,8 @@ class Dot(gof.op.Op):
@todo: Verify that output is sufficiently sparse, and raise a warning if it is not
@todo: Verify that output is sufficiently sparse, and raise a warning if it is not
@todo: Also determine that we are storing the output in the best storage format?
@todo: Also determine that we are storing the output in the best storage format?
"""
"""
print
'x type is'
,
type
(
x
)
print
'y type is'
,
type
(
y
)
out
[
0
]
=
x
.
dot
(
y
)
out
[
0
]
=
x
.
dot
(
y
)
def
grad
(
self
,
(
x
,
y
),
(
gz
,)):
def
grad
(
self
,
(
x
,
y
),
(
gz
,)):
assert
_is_sparse_result
(
gz
)
assert
_is_sparse_result
(
gz
)
...
...
theano/tensor/basic.py
浏览文件 @
8e620d7b
...
@@ -490,20 +490,38 @@ class _tensor_py_operators:
...
@@ -490,20 +490,38 @@ class _tensor_py_operators:
# def __ixor__(self, other): return _xor_inplace(self, other)
# def __ixor__(self, other): return _xor_inplace(self, other)
#ARITHMETIC - NORMAL
#ARITHMETIC - NORMAL
def
__add__
(
self
,
other
):
return
add
(
self
,
other
)
def
__add__
(
self
,
other
):
def
__sub__
(
self
,
other
):
return
sub
(
self
,
other
)
try
:
return
add
(
self
,
other
)
except
Exception
,
e
:
raise
NotImplemented
def
__sub__
(
self
,
other
):
try
:
return
sub
(
self
,
other
)
except
Exception
,
e
:
raise
NotImplemented
def
__mul__
(
self
,
other
):
def
__mul__
(
self
,
other
):
try
:
try
:
return
mul
(
self
,
other
)
return
mul
(
self
,
other
)
except
Exception
,
e
:
except
Exception
,
e
:
try
:
raise
NotImplemented
return
other
*
self
def
__div__
(
self
,
other
):
except
:
try
:
raise
e
return
div
(
self
,
other
)
def
__div__
(
self
,
other
):
return
div
(
self
,
other
)
except
Exception
,
e
:
def
__pow__
(
self
,
other
):
return
pow
(
self
,
other
)
raise
NotImplemented
def
__mod__
(
self
,
other
):
return
mod
(
self
,
other
)
def
__pow__
(
self
,
other
):
try
:
return
pow
(
self
,
other
)
except
Exception
,
e
:
raise
NotImplemented
def
__mod__
(
self
,
other
):
try
:
return
mod
(
self
,
other
)
except
Exception
,
e
:
raise
NotImplemented
# ##### DON"T USE THESE BECAUSE INPLACE OPS SHOULD BE INSERTED BY OPTIMIZATION ONLY
# #ARITHMETIC - INPLACE
# #ARITHMETIC - INPLACE
# def __iadd__(self,other): return _add_inplace(self,other)
# def __iadd__(self,other): return _add_inplace(self,other)
# def __isub__(self,other): return _sub_inplace(self,other)
# def __isub__(self,other): return _sub_inplace(self,other)
...
...
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论