Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
b128f2b8
提交
b128f2b8
authored
9月 25, 2008
作者:
Olivier Delalleau
浏览文件
操作
浏览文件
下载
差异文件
Merged with all other changes
上级
304a40a9
e672e509
显示空白字符变更
内嵌
并排
正在显示
15 个修改的文件
包含
783 行增加
和
326 行删除
+783
-326
.hgignore
.hgignore
+1
-0
__init__.py
__init__.py
+2
-0
_test_compile.py
_test_compile.py
+2
-2
_test_tensor.py
_test_tensor.py
+48
-37
_test_tensor_opt.py
_test_tensor_opt.py
+12
-12
apirst2html.py
doc/apirst2html.py
+41
-0
doc.idx
doc/doc.idx
+1
-0
epydoc
epydoc
+63
-152
gen_oplist.py
gen_oplist.py
+102
-0
op.py
gof/op.py
+13
-13
local.build_html.sh
local.build_html.sh
+21
-8
local.epydoc
local.epydoc
+6
-11
scalar.py
scalar.py
+1
-1
tensor.py
tensor.py
+469
-89
tensor_opt.py
tensor_opt.py
+1
-1
没有找到文件。
.hgignore
浏览文件 @
b128f2b8
...
@@ -2,6 +2,7 @@ syntax: glob
...
@@ -2,6 +2,7 @@ syntax: glob
*.pyo
*.pyo
*~
*~
\#*\#
\#*\#
doc/oplist.txt
compiled/*.cpp
compiled/*.cpp
cutils_ext.cpp
cutils_ext.cpp
html
html
...
...
__init__.py
浏览文件 @
b128f2b8
...
@@ -19,6 +19,8 @@ To learn more, check out:
...
@@ -19,6 +19,8 @@ To learn more, check out:
- Index of Howto documents (:wiki:`IndexHowto`)
- Index of Howto documents (:wiki:`IndexHowto`)
- Op List (:doc:`oplist`)
"""
"""
__docformat__
=
"restructuredtext en"
__docformat__
=
"restructuredtext en"
...
...
_test_compile.py
浏览文件 @
b128f2b8
...
@@ -77,7 +77,7 @@ class T_Function(unittest.TestCase):
...
@@ -77,7 +77,7 @@ class T_Function(unittest.TestCase):
def
test_closure
(
self
):
def
test_closure
(
self
):
x
,
y
,
z
=
tensor
.
scalars
(
'xyz'
)
x
,
y
,
z
=
tensor
.
scalars
(
'xyz'
)
v
=
tensor
.
value
(
numpy
.
zeros
(()))
v
=
tensor
.
value
(
numpy
.
zeros
(()))
e
=
x
+
tensor
.
add_inplace
(
v
,
1
)
e
=
x
+
tensor
.
_
add_inplace
(
v
,
1
)
f
=
function
([
x
],
[
e
])
f
=
function
([
x
],
[
e
])
assert
f
(
1.
)
==
2.
assert
f
(
1.
)
==
2.
assert
f
(
1.
)
==
3.
assert
f
(
1.
)
==
3.
...
@@ -109,7 +109,7 @@ class T_Function(unittest.TestCase):
...
@@ -109,7 +109,7 @@ class T_Function(unittest.TestCase):
def
test_borrow_false_through_inplace
(
self
):
def
test_borrow_false_through_inplace
(
self
):
x
,
y
,
z
=
tensor
.
scalars
(
'xyz'
)
x
,
y
,
z
=
tensor
.
scalars
(
'xyz'
)
# if borrow_outputs is False, we must not reuse the temporary created for x+y
# if borrow_outputs is False, we must not reuse the temporary created for x+y
e
=
tensor
.
add_inplace
(
x
+
y
,
z
)
e
=
tensor
.
_
add_inplace
(
x
+
y
,
z
)
for
linker
in
'py c c|py c&py'
.
split
():
for
linker
in
'py c c|py c&py'
.
split
():
f
=
function
([
x
,
y
,
z
],
[
e
],
borrow_outputs
=
False
,
linker
=
linker
)
f
=
function
([
x
,
y
,
z
],
[
e
],
borrow_outputs
=
False
,
linker
=
linker
)
res1
=
f
(
1.0
,
2.0
,
3.0
)
res1
=
f
(
1.0
,
2.0
,
3.0
)
...
...
_test_tensor.py
浏览文件 @
b128f2b8
...
@@ -21,9 +21,16 @@ def _numpy_checker(x, y):
...
@@ -21,9 +21,16 @@ def _numpy_checker(x, y):
Used in DualLinker to compare C version with Python version.
Used in DualLinker to compare C version with Python version.
"""
"""
x
,
y
=
x
[
0
],
y
[
0
]
x
,
y
=
x
[
0
],
y
[
0
]
if
x
.
dtype
!=
y
.
dtype
or
x
.
shape
!=
y
.
shape
or
numpy
.
any
(
abs
(
x
-
y
)
>
1e-10
):
if
x
.
dtype
!=
y
.
dtype
or
x
.
shape
!=
y
.
shape
or
numpy
.
any
(
numpy
.
abs
(
x
-
y
)
>
1e-10
):
raise
Exception
(
"Output mismatch."
,
{
'performlinker'
:
x
,
'clinker'
:
y
})
raise
Exception
(
"Output mismatch."
,
{
'performlinker'
:
x
,
'clinker'
:
y
})
def
safe_make_node
(
op
,
*
inputs
):
"""Emulate the behaviour of make_node when op is a function instead of an Op instance."""
node
=
op
(
*
inputs
)
if
isinstance
(
node
,
list
):
return
node
[
0
]
.
owner
else
:
return
node
.
owner
def
make_tester
(
name
,
op
,
expected
,
checks
=
{},
good
=
{},
bad_build
=
{},
bad_runtime
=
{},
grad
=
{}):
def
make_tester
(
name
,
op
,
expected
,
checks
=
{},
good
=
{},
bad_build
=
{},
bad_runtime
=
{},
grad
=
{}):
if
grad
is
True
:
if
grad
is
True
:
...
@@ -46,7 +53,8 @@ def make_tester(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_
...
@@ -46,7 +53,8 @@ def make_tester(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_
inputs
=
[
copy
(
input
)
for
input
in
inputs
]
inputs
=
[
copy
(
input
)
for
input
in
inputs
]
inputrs
=
[
value
(
input
)
for
input
in
inputs
]
inputrs
=
[
value
(
input
)
for
input
in
inputs
]
try
:
try
:
node
=
self
.
op
.
make_node
(
*
inputrs
)
#node = self.op.make_node(*inputrs)
node
=
safe_make_node
(
self
.
op
,
*
inputrs
)
except
:
except
:
type
,
exc_value
,
traceback
=
sys
.
exc_info
()
type
,
exc_value
,
traceback
=
sys
.
exc_info
()
err_msg
=
"Test
%
s::
%
s: Error occurred while making a node with inputs
%
s"
\
err_msg
=
"Test
%
s::
%
s: Error occurred while making a node with inputs
%
s"
\
...
@@ -80,7 +88,8 @@ def make_tester(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_
...
@@ -80,7 +88,8 @@ def make_tester(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_
if
not
isinstance
(
expecteds
,
(
list
,
tuple
)):
if
not
isinstance
(
expecteds
,
(
list
,
tuple
)):
expecteds
=
(
expecteds
,
)
expecteds
=
(
expecteds
,
)
for
i
,
(
result
,
expected
)
in
enumerate
(
zip
(
results
,
expecteds
)):
for
i
,
(
result
,
expected
)
in
enumerate
(
zip
(
results
,
expecteds
)):
if
result
.
dtype
!=
expected
.
dtype
or
result
.
shape
!=
expected
.
shape
or
numpy
.
any
(
abs
(
result
-
expected
)
>
1e-10
):
if
result
.
dtype
!=
expected
.
dtype
or
result
.
shape
!=
expected
.
shape
or
\
numpy
.
any
(
numpy
.
abs
(
result
-
expected
)
>
1e-10
):
self
.
fail
(
"Test
%
s::
%
s: Output
%
s gave the wrong value. With inputs
%
s, expected
%
s, got
%
s."
self
.
fail
(
"Test
%
s::
%
s: Output
%
s gave the wrong value. With inputs
%
s, expected
%
s, got
%
s."
%
(
self
.
op
,
testname
,
i
,
inputs
,
expected
,
result
))
%
(
self
.
op
,
testname
,
i
,
inputs
,
expected
,
result
))
...
@@ -94,7 +103,7 @@ def make_tester(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_
...
@@ -94,7 +103,7 @@ def make_tester(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_
inputs
=
[
copy
(
input
)
for
input
in
inputs
]
inputs
=
[
copy
(
input
)
for
input
in
inputs
]
inputrs
=
[
value
(
input
)
for
input
in
inputs
]
inputrs
=
[
value
(
input
)
for
input
in
inputs
]
try
:
try
:
node
=
s
elf
.
op
.
make_node
(
*
inputrs
)
node
=
s
afe_make_node
(
self
.
op
,
*
inputrs
)
except
:
except
:
return
return
self
.
fail
(
"Test
%
s::
%
s:
%
s was successfully instantiated on the following bad inputs:
%
s"
self
.
fail
(
"Test
%
s::
%
s:
%
s was successfully instantiated on the following bad inputs:
%
s"
...
@@ -105,7 +114,7 @@ def make_tester(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_
...
@@ -105,7 +114,7 @@ def make_tester(name, op, expected, checks = {}, good = {}, bad_build = {}, bad_
inputs
=
[
copy
(
input
)
for
input
in
inputs
]
inputs
=
[
copy
(
input
)
for
input
in
inputs
]
inputrs
=
[
value
(
input
)
for
input
in
inputs
]
inputrs
=
[
value
(
input
)
for
input
in
inputs
]
try
:
try
:
node
=
s
elf
.
op
.
make_node
(
*
inputrs
)
node
=
s
afe_make_node
(
self
.
op
,
*
inputrs
)
except
:
except
:
type
,
exc_value
,
traceback
=
sys
.
exc_info
()
type
,
exc_value
,
traceback
=
sys
.
exc_info
()
err_msg
=
"Test
%
s::
%
s: Error occurred while trying to make a node with inputs
%
s"
\
err_msg
=
"Test
%
s::
%
s: Error occurred while trying to make a node with inputs
%
s"
\
...
@@ -204,7 +213,7 @@ AddTester = make_broadcast_tester(op = add,
...
@@ -204,7 +213,7 @@ AddTester = make_broadcast_tester(op = add,
**
_good_broadcast_binary_normal
),
**
_good_broadcast_binary_normal
),
bad_build
=
_bad_build_broadcast_binary_normal
,
bad_build
=
_bad_build_broadcast_binary_normal
,
bad_runtime
=
_bad_runtime_broadcast_binary_normal
)
bad_runtime
=
_bad_runtime_broadcast_binary_normal
)
AddInplaceTester
=
make_broadcast_tester
(
op
=
add_inplace
,
AddInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
add_inplace
,
expected
=
lambda
x
,
y
:
x
+
y
,
expected
=
lambda
x
,
y
:
x
+
y
,
good
=
_good_broadcast_binary_normal
,
good
=
_good_broadcast_binary_normal
,
bad_build
=
_bad_build_broadcast_binary_normal
,
bad_build
=
_bad_build_broadcast_binary_normal
,
...
@@ -218,7 +227,7 @@ SubTester = make_broadcast_tester(op = sub,
...
@@ -218,7 +227,7 @@ SubTester = make_broadcast_tester(op = sub,
bad_runtime
=
_bad_runtime_broadcast_binary_normal
,
bad_runtime
=
_bad_runtime_broadcast_binary_normal
,
grad
=
_grad_broadcast_binary_normal
)
grad
=
_grad_broadcast_binary_normal
)
SubInplaceTester
=
make_broadcast_tester
(
op
=
sub_inplace
,
SubInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
sub_inplace
,
expected
=
lambda
x
,
y
:
x
-
y
,
expected
=
lambda
x
,
y
:
x
-
y
,
good
=
_good_broadcast_binary_normal
,
good
=
_good_broadcast_binary_normal
,
bad_build
=
_bad_build_broadcast_binary_normal
,
bad_build
=
_bad_build_broadcast_binary_normal
,
...
@@ -236,7 +245,7 @@ MulTester = make_broadcast_tester(op = mul,
...
@@ -236,7 +245,7 @@ MulTester = make_broadcast_tester(op = mul,
grad
=
dict
(
three_inputs_same_shapes
=
(
rand
(
2
,
3
),
rand
(
2
,
3
),
rand
(
2
,
3
)),
grad
=
dict
(
three_inputs_same_shapes
=
(
rand
(
2
,
3
),
rand
(
2
,
3
),
rand
(
2
,
3
)),
four_inputs_broadcast
=
(
rand
(
2
,
3
),
rand
(
1
,
3
),
rand
(
2
,
1
),
rand
(
1
,
1
)),
four_inputs_broadcast
=
(
rand
(
2
,
3
),
rand
(
1
,
3
),
rand
(
2
,
1
),
rand
(
1
,
1
)),
**
_grad_broadcast_binary_normal
))
**
_grad_broadcast_binary_normal
))
MulInplaceTester
=
make_broadcast_tester
(
op
=
mul_inplace
,
MulInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
mul_inplace
,
expected
=
lambda
x
,
y
:
x
*
y
,
expected
=
lambda
x
,
y
:
x
*
y
,
good
=
_good_broadcast_binary_normal
,
good
=
_good_broadcast_binary_normal
,
bad_build
=
_bad_build_broadcast_binary_normal
,
bad_build
=
_bad_build_broadcast_binary_normal
,
...
@@ -262,7 +271,7 @@ DivTester = make_broadcast_tester(op = div,
...
@@ -262,7 +271,7 @@ DivTester = make_broadcast_tester(op = div,
scalar
=
(
rand
(
2
,
3
),
rand
(
1
,
1
)),
scalar
=
(
rand
(
2
,
3
),
rand
(
1
,
1
)),
row
=
(
rand
(
2
,
3
),
rand
(
1
,
3
)),
row
=
(
rand
(
2
,
3
),
rand
(
1
,
3
)),
column
=
(
rand
(
2
,
3
),
rand
(
2
,
1
))))
column
=
(
rand
(
2
,
3
),
rand
(
2
,
1
))))
DivInplaceTester
=
make_broadcast_tester
(
op
=
div_inplace
,
DivInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
div_inplace
,
expected
=
lambda
x
,
y
:
x
/
y
,
expected
=
lambda
x
,
y
:
x
/
y
,
good
=
dict
(
same_shapes
=
(
rand
(
2
,
3
),
rand
(
2
,
3
)),
good
=
dict
(
same_shapes
=
(
rand
(
2
,
3
),
rand
(
2
,
3
)),
scalar
=
(
rand
(
2
,
3
),
rand
(
1
,
1
)),
scalar
=
(
rand
(
2
,
3
),
rand
(
1
,
1
)),
...
@@ -292,7 +301,7 @@ ModTester = make_broadcast_tester(op = mod,
...
@@ -292,7 +301,7 @@ ModTester = make_broadcast_tester(op = mod,
# dtype_mixup_1 = (rand(2, 3), randint_nonzero(2, 3)),
# dtype_mixup_1 = (rand(2, 3), randint_nonzero(2, 3)),
# dtype_mixup_2 = (randint_nonzero(2, 3), rand(2, 3))),
# dtype_mixup_2 = (randint_nonzero(2, 3), rand(2, 3))),
)
)
ModInplaceTester
=
make_broadcast_tester
(
op
=
mod_inplace
,
ModInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
mod_inplace
,
expected
=
lambda
x
,
y
:
x
%
y
,
expected
=
lambda
x
,
y
:
x
%
y
,
good
=
dict
(
same_shapes
=
(
rand
(
2
,
3
),
rand
(
2
,
3
)),
good
=
dict
(
same_shapes
=
(
rand
(
2
,
3
),
rand
(
2
,
3
)),
scalar
=
(
rand
(
2
,
3
),
rand
(
1
,
1
)),
scalar
=
(
rand
(
2
,
3
),
rand
(
1
,
1
)),
...
@@ -315,7 +324,7 @@ PowTester = make_broadcast_tester(op = pow,
...
@@ -315,7 +324,7 @@ PowTester = make_broadcast_tester(op = pow,
row
=
(
rand_ranged
(
1
,
5
,
(
2
,
3
)),
rand_ranged
(
-
3
,
3
,
(
1
,
3
))),
row
=
(
rand_ranged
(
1
,
5
,
(
2
,
3
)),
rand_ranged
(
-
3
,
3
,
(
1
,
3
))),
column
=
(
rand_ranged
(
1
,
5
,
(
2
,
3
)),
rand_ranged
(
-
3
,
3
,
(
2
,
1
))))
column
=
(
rand_ranged
(
1
,
5
,
(
2
,
3
)),
rand_ranged
(
-
3
,
3
,
(
2
,
1
))))
)
)
PowInplaceTester
=
make_broadcast_tester
(
op
=
pow_inplace
,
PowInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
pow_inplace
,
expected
=
lambda
x
,
y
:
x
**
y
,
expected
=
lambda
x
,
y
:
x
**
y
,
good
=
dict
(
same_shapes
=
(
rand_ranged
(
1
,
5
,
(
2
,
3
)),
rand_ranged
(
-
3
,
3
,
(
2
,
3
))),
good
=
dict
(
same_shapes
=
(
rand_ranged
(
1
,
5
,
(
2
,
3
)),
rand_ranged
(
-
3
,
3
,
(
2
,
3
))),
scalar
=
(
rand_ranged
(
1
,
5
,
(
2
,
3
)),
rand_ranged
(
-
3
,
3
,
(
1
,
1
))),
scalar
=
(
rand_ranged
(
1
,
5
,
(
2
,
3
)),
rand_ranged
(
-
3
,
3
,
(
1
,
1
))),
...
@@ -340,8 +349,8 @@ AbsTester = make_broadcast_tester(op = tensor._abs,
...
@@ -340,8 +349,8 @@ AbsTester = make_broadcast_tester(op = tensor._abs,
expected
=
lambda
x
:
abs
(
x
),
expected
=
lambda
x
:
abs
(
x
),
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
)
grad
=
_grad_broadcast_unary_normal
)
AbsInplaceTester
=
make_broadcast_tester
(
op
=
abs_inplace
,
AbsInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
__
abs_inplace
,
expected
=
lambda
x
:
abs
(
x
),
expected
=
lambda
x
:
numpy
.
abs
(
x
),
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
inplace
=
True
)
inplace
=
True
)
...
@@ -350,7 +359,7 @@ NegTester = make_broadcast_tester(op = neg,
...
@@ -350,7 +359,7 @@ NegTester = make_broadcast_tester(op = neg,
expected
=
lambda
x
:
-
x
,
expected
=
lambda
x
:
-
x
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
)
grad
=
_grad_broadcast_unary_normal
)
NegInplaceTester
=
make_broadcast_tester
(
op
=
neg_inplace
,
NegInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
neg_inplace
,
expected
=
lambda
x
:
-
x
,
expected
=
lambda
x
:
-
x
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
...
@@ -359,7 +368,7 @@ NegInplaceTester = make_broadcast_tester(op = neg_inplace,
...
@@ -359,7 +368,7 @@ NegInplaceTester = make_broadcast_tester(op = neg_inplace,
SgnTester
=
make_broadcast_tester
(
op
=
sgn
,
SgnTester
=
make_broadcast_tester
(
op
=
sgn
,
expected
=
numpy
.
sign
,
expected
=
numpy
.
sign
,
good
=
_good_broadcast_unary_normal
)
good
=
_good_broadcast_unary_normal
)
SgnInplaceTester
=
make_broadcast_tester
(
op
=
sgn_inplace
,
SgnInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
sgn_inplace
,
expected
=
numpy
.
sign
,
expected
=
numpy
.
sign
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
inplace
=
True
)
inplace
=
True
)
...
@@ -368,7 +377,7 @@ SqrTester = make_broadcast_tester(op = sqr,
...
@@ -368,7 +377,7 @@ SqrTester = make_broadcast_tester(op = sqr,
expected
=
numpy
.
square
,
expected
=
numpy
.
square
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
)
grad
=
_grad_broadcast_unary_normal
)
SqrInplaceTester
=
make_broadcast_tester
(
op
=
sqr_inplace
,
SqrInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
sqr_inplace
,
expected
=
numpy
.
square
,
expected
=
numpy
.
square
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
...
@@ -378,7 +387,7 @@ ExpTester = make_broadcast_tester(op = exp,
...
@@ -378,7 +387,7 @@ ExpTester = make_broadcast_tester(op = exp,
expected
=
numpy
.
exp
,
expected
=
numpy
.
exp
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
)
grad
=
_grad_broadcast_unary_normal
)
ExpInplaceTester
=
make_broadcast_tester
(
op
=
exp_inplace
,
ExpInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
exp_inplace
,
expected
=
numpy
.
exp
,
expected
=
numpy
.
exp
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
...
@@ -394,7 +403,7 @@ LogTester = make_broadcast_tester(op = log,
...
@@ -394,7 +403,7 @@ LogTester = make_broadcast_tester(op = log,
expected
=
numpy
.
log
,
expected
=
numpy
.
log
,
good
=
_good_broadcast_unary_positive
,
good
=
_good_broadcast_unary_positive
,
grad
=
_grad_broadcast_unary_positive
)
grad
=
_grad_broadcast_unary_positive
)
LogInplaceTester
=
make_broadcast_tester
(
op
=
log_inplace
,
LogInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
log_inplace
,
expected
=
numpy
.
log
,
expected
=
numpy
.
log
,
good
=
_good_broadcast_unary_positive
,
good
=
_good_broadcast_unary_positive
,
grad
=
_grad_broadcast_unary_positive
,
grad
=
_grad_broadcast_unary_positive
,
...
@@ -404,7 +413,7 @@ Log2Tester = make_broadcast_tester(op = log2,
...
@@ -404,7 +413,7 @@ Log2Tester = make_broadcast_tester(op = log2,
expected
=
numpy
.
log2
,
expected
=
numpy
.
log2
,
good
=
_good_broadcast_unary_positive
,
good
=
_good_broadcast_unary_positive
,
grad
=
_grad_broadcast_unary_positive
)
grad
=
_grad_broadcast_unary_positive
)
Log2InplaceTester
=
make_broadcast_tester
(
op
=
log2_inplace
,
Log2InplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
log2_inplace
,
expected
=
numpy
.
log2
,
expected
=
numpy
.
log2
,
good
=
_good_broadcast_unary_positive
,
good
=
_good_broadcast_unary_positive
,
grad
=
_grad_broadcast_unary_positive
,
grad
=
_grad_broadcast_unary_positive
,
...
@@ -414,7 +423,7 @@ SqrtTester = make_broadcast_tester(op = sqrt,
...
@@ -414,7 +423,7 @@ SqrtTester = make_broadcast_tester(op = sqrt,
expected
=
numpy
.
sqrt
,
expected
=
numpy
.
sqrt
,
good
=
_good_broadcast_unary_positive
,
good
=
_good_broadcast_unary_positive
,
grad
=
_grad_broadcast_unary_positive
)
grad
=
_grad_broadcast_unary_positive
)
SqrtInplaceTester
=
make_broadcast_tester
(
op
=
sqrt_inplace
,
SqrtInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
sqrt_inplace
,
expected
=
numpy
.
sqrt
,
expected
=
numpy
.
sqrt
,
good
=
_good_broadcast_unary_positive
,
good
=
_good_broadcast_unary_positive
,
grad
=
_grad_broadcast_unary_positive
,
grad
=
_grad_broadcast_unary_positive
,
...
@@ -432,7 +441,7 @@ SinTester = make_broadcast_tester(op = sin,
...
@@ -432,7 +441,7 @@ SinTester = make_broadcast_tester(op = sin,
expected
=
numpy
.
sin
,
expected
=
numpy
.
sin
,
good
=
_good_broadcast_unary_wide
,
good
=
_good_broadcast_unary_wide
,
grad
=
_grad_broadcast_unary_wide
)
grad
=
_grad_broadcast_unary_wide
)
SinInplaceTester
=
make_broadcast_tester
(
op
=
sin_inplace
,
SinInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
sin_inplace
,
expected
=
numpy
.
sin
,
expected
=
numpy
.
sin
,
good
=
_good_broadcast_unary_wide
,
good
=
_good_broadcast_unary_wide
,
grad
=
_grad_broadcast_unary_wide
,
grad
=
_grad_broadcast_unary_wide
,
...
@@ -442,7 +451,7 @@ CosTester = make_broadcast_tester(op = cos,
...
@@ -442,7 +451,7 @@ CosTester = make_broadcast_tester(op = cos,
expected
=
numpy
.
cos
,
expected
=
numpy
.
cos
,
good
=
_good_broadcast_unary_wide
,
good
=
_good_broadcast_unary_wide
,
grad
=
_grad_broadcast_unary_wide
)
grad
=
_grad_broadcast_unary_wide
)
CosInplaceTester
=
make_broadcast_tester
(
op
=
cos_inplace
,
CosInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
cos_inplace
,
expected
=
numpy
.
cos
,
expected
=
numpy
.
cos
,
good
=
_good_broadcast_unary_wide
,
good
=
_good_broadcast_unary_wide
,
grad
=
_grad_broadcast_unary_wide
,
grad
=
_grad_broadcast_unary_wide
,
...
@@ -454,7 +463,7 @@ TanTester = make_broadcast_tester(op = tan,
...
@@ -454,7 +463,7 @@ TanTester = make_broadcast_tester(op = tan,
shifted
=
(
rand_ranged
(
3.15
,
6.28
,
(
2
,
3
)),)),
shifted
=
(
rand_ranged
(
3.15
,
6.28
,
(
2
,
3
)),)),
grad
=
dict
(
normal
=
(
rand_ranged
(
-
3.14
,
3.14
,
(
2
,
3
)),),
grad
=
dict
(
normal
=
(
rand_ranged
(
-
3.14
,
3.14
,
(
2
,
3
)),),
shifted
=
(
rand_ranged
(
3.15
,
6.28
,
(
2
,
3
)),)))
shifted
=
(
rand_ranged
(
3.15
,
6.28
,
(
2
,
3
)),)))
TanInplaceTester
=
make_broadcast_tester
(
op
=
tan_inplace
,
TanInplaceTester
=
make_broadcast_tester
(
op
=
t
ensor
.
_t
an_inplace
,
expected
=
numpy
.
tan
,
expected
=
numpy
.
tan
,
good
=
dict
(
normal
=
(
rand_ranged
(
-
3.14
,
3.14
,
(
2
,
3
)),),
good
=
dict
(
normal
=
(
rand_ranged
(
-
3.14
,
3.14
,
(
2
,
3
)),),
shifted
=
(
rand_ranged
(
3.15
,
6.28
,
(
2
,
3
)),)),
shifted
=
(
rand_ranged
(
3.15
,
6.28
,
(
2
,
3
)),)),
...
@@ -467,7 +476,7 @@ CoshTester = make_broadcast_tester(op = cosh,
...
@@ -467,7 +476,7 @@ CoshTester = make_broadcast_tester(op = cosh,
expected
=
numpy
.
cosh
,
expected
=
numpy
.
cosh
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
)
grad
=
_grad_broadcast_unary_normal
)
CoshInplaceTester
=
make_broadcast_tester
(
op
=
cosh_inplace
,
CoshInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
cosh_inplace
,
expected
=
numpy
.
cosh
,
expected
=
numpy
.
cosh
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
...
@@ -477,7 +486,7 @@ SinhTester = make_broadcast_tester(op = sinh,
...
@@ -477,7 +486,7 @@ SinhTester = make_broadcast_tester(op = sinh,
expected
=
numpy
.
sinh
,
expected
=
numpy
.
sinh
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
)
grad
=
_grad_broadcast_unary_normal
)
SinhInplaceTester
=
make_broadcast_tester
(
op
=
sinh_inplace
,
SinhInplaceTester
=
make_broadcast_tester
(
op
=
tensor
.
_
sinh_inplace
,
expected
=
numpy
.
sinh
,
expected
=
numpy
.
sinh
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
...
@@ -487,7 +496,7 @@ TanhTester = make_broadcast_tester(op = tanh,
...
@@ -487,7 +496,7 @@ TanhTester = make_broadcast_tester(op = tanh,
expected
=
numpy
.
tanh
,
expected
=
numpy
.
tanh
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
)
grad
=
_grad_broadcast_unary_normal
)
TanhInplaceTester
=
make_broadcast_tester
(
op
=
tanh_inplace
,
TanhInplaceTester
=
make_broadcast_tester
(
op
=
t
ensor
.
_t
anh_inplace
,
expected
=
numpy
.
tanh
,
expected
=
numpy
.
tanh
,
good
=
_good_broadcast_unary_normal
,
good
=
_good_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
grad
=
_grad_broadcast_unary_normal
,
...
@@ -519,7 +528,9 @@ def verify_grad(testcase, op, pt, n_tests=1, rng=numpy.random, eps=0.0000001, to
...
@@ -519,7 +528,9 @@ def verify_grad(testcase, op, pt, n_tests=1, rng=numpy.random, eps=0.0000001, to
for
test_num
in
xrange
(
n_tests
):
for
test_num
in
xrange
(
n_tests
):
# tensor_pt = [as_tensor(p,name='input %i'%i) for i,p in enumerate(pt)]
# tensor_pt = [as_tensor(p,name='input %i'%i) for i,p in enumerate(pt)]
tensor_pt
=
[
constant
(
p
)
.
type
(
'input
%
i'
%
i
)
for
i
,
p
in
enumerate
(
pt
)]
tensor_pt
=
[
constant
(
p
)
.
type
(
'input
%
i'
%
i
)
for
i
,
p
in
enumerate
(
pt
)]
o
=
op
.
make_node
(
*
[
tpt
.
copy
()
for
tpt
in
tensor_pt
])
#o = op.make_node(*[tpt.copy() for tpt in tensor_pt])
o
=
safe_make_node
(
op
,
*
[
tpt
.
copy
()
for
tpt
in
tensor_pt
])
if
hasattr
(
o
,
'outputs'
):
if
hasattr
(
o
,
'outputs'
):
o_outputs
=
o
.
outputs
o_outputs
=
o
.
outputs
else
:
else
:
...
@@ -689,7 +700,7 @@ class T_transpose(unittest.TestCase):
...
@@ -689,7 +700,7 @@ class T_transpose(unittest.TestCase):
def
test0
(
self
):
def
test0
(
self
):
n
=
as_tensor
(
numpy
.
ones
(()))
n
=
as_tensor
(
numpy
.
ones
(()))
t
=
transpose
(
n
)
t
=
transpose
(
n
)
self
.
failUnless
(
t
.
owner
.
op
==
transpose_inplace
)
self
.
failUnless
(
t
.
owner
.
op
==
t
ensor
.
_t
ranspose_inplace
)
f
=
function
([
n
],
[
t
])
f
=
function
([
n
],
[
t
])
tval
=
f
(
n
.
data
)
tval
=
f
(
n
.
data
)
self
.
failUnless
(
tval
.
shape
==
n
.
data
.
shape
)
self
.
failUnless
(
tval
.
shape
==
n
.
data
.
shape
)
...
@@ -701,7 +712,7 @@ class T_transpose(unittest.TestCase):
...
@@ -701,7 +712,7 @@ class T_transpose(unittest.TestCase):
def
test1
(
self
):
def
test1
(
self
):
n
=
as_tensor
(
numpy
.
ones
(
5
))
n
=
as_tensor
(
numpy
.
ones
(
5
))
t
=
transpose
(
n
)
t
=
transpose
(
n
)
self
.
failUnless
(
t
.
owner
.
op
==
transpose_inplace
)
self
.
failUnless
(
t
.
owner
.
op
==
t
ensor
.
_t
ranspose_inplace
)
f
=
function
([
n
],
[
t
])
f
=
function
([
n
],
[
t
])
tval
=
f
(
n
.
data
)
tval
=
f
(
n
.
data
)
self
.
failUnless
(
tval
.
shape
==
n
.
data
.
shape
)
self
.
failUnless
(
tval
.
shape
==
n
.
data
.
shape
)
...
@@ -712,7 +723,7 @@ class T_transpose(unittest.TestCase):
...
@@ -712,7 +723,7 @@ class T_transpose(unittest.TestCase):
def
test2
(
self
):
def
test2
(
self
):
n
=
as_tensor
(
numpy
.
ones
((
5
,
3
)))
n
=
as_tensor
(
numpy
.
ones
((
5
,
3
)))
t
=
transpose
(
n
)
t
=
transpose
(
n
)
self
.
failUnless
(
t
.
owner
.
op
==
transpose_inplace
)
self
.
failUnless
(
t
.
owner
.
op
==
t
ensor
.
_t
ranspose_inplace
)
f
=
function
([
n
],
[
t
])
f
=
function
([
n
],
[
t
])
tval
=
f
(
n
.
data
)
tval
=
f
(
n
.
data
)
self
.
failUnless
(
tval
.
shape
==
(
3
,
5
))
self
.
failUnless
(
tval
.
shape
==
(
3
,
5
))
...
@@ -723,8 +734,8 @@ class T_transpose(unittest.TestCase):
...
@@ -723,8 +734,8 @@ class T_transpose(unittest.TestCase):
def
test3
(
self
):
def
test3
(
self
):
"""Test transpose of tensor, inplace version"""
"""Test transpose of tensor, inplace version"""
n
=
as_tensor
(
numpy
.
ones
((
5
,
3
,
2
)))
n
=
as_tensor
(
numpy
.
ones
((
5
,
3
,
2
)))
t
=
transpose_inplace
(
n
)
t
=
t
ensor
.
_t
ranspose_inplace
(
n
)
self
.
failUnless
(
t
.
owner
.
op
==
transpose_inplace
)
self
.
failUnless
(
t
.
owner
.
op
==
t
ensor
.
_t
ranspose_inplace
)
f
=
function
([
n
],
[
t
])
f
=
function
([
n
],
[
t
])
tval
=
f
(
n
.
data
)
tval
=
f
(
n
.
data
)
self
.
failUnless
(
tval
.
shape
==
(
2
,
3
,
5
))
self
.
failUnless
(
tval
.
shape
==
(
2
,
3
,
5
))
...
@@ -732,8 +743,8 @@ class T_transpose(unittest.TestCase):
...
@@ -732,8 +743,8 @@ class T_transpose(unittest.TestCase):
tval
+=
55.0
tval
+=
55.0
self
.
failUnless
(
n
.
data
[
0
,
0
,
0
]
==
56.0
)
self
.
failUnless
(
n
.
data
[
0
,
0
,
0
]
==
56.0
)
def
test_grad
(
self
):
def
test_grad
(
self
):
verify_grad
(
self
,
transpose_inplace
,
[
numpy
.
random
.
rand
(
2
,
3
)])
verify_grad
(
self
,
t
ensor
.
_t
ranspose_inplace
,
[
numpy
.
random
.
rand
(
2
,
3
)])
verify_grad
(
self
,
transpose_inplace
,
[
numpy
.
ones
(
3
)])
verify_grad
(
self
,
t
ensor
.
_t
ranspose_inplace
,
[
numpy
.
ones
(
3
)])
class
T_subtensor
(
unittest
.
TestCase
):
class
T_subtensor
(
unittest
.
TestCase
):
def
setUp
(
self
):
def
setUp
(
self
):
...
@@ -1066,7 +1077,7 @@ class T_exp(unittest.TestCase):
...
@@ -1066,7 +1077,7 @@ class T_exp(unittest.TestCase):
numpy
.
asarray
([[
1.5089518
,
1.48439076
,
-
4.7820262
],
numpy
.
asarray
([[
1.5089518
,
1.48439076
,
-
4.7820262
],
[
2.04832468
,
0.50791564
,
-
1.58892269
]])])
[
2.04832468
,
0.50791564
,
-
1.58892269
]])])
def
test_grad_1
(
self
):
def
test_grad_1
(
self
):
verify_grad
(
self
,
exp_inplace
,
[
verify_grad
(
self
,
tensor
.
_
exp_inplace
,
[
numpy
.
asarray
([[
1.5089518
,
1.48439076
,
-
4.7820262
],
numpy
.
asarray
([[
1.5089518
,
1.48439076
,
-
4.7820262
],
[
2.04832468
,
0.50791564
,
-
1.58892269
]])])
[
2.04832468
,
0.50791564
,
-
1.58892269
]])])
...
@@ -1434,7 +1445,7 @@ class t_gemm(unittest.TestCase):
...
@@ -1434,7 +1445,7 @@ class t_gemm(unittest.TestCase):
Z
=
as_tensor
(
self
.
rand
(
2
,
2
))
Z
=
as_tensor
(
self
.
rand
(
2
,
2
))
A
=
as_tensor
(
self
.
rand
(
2
,
2
))
A
=
as_tensor
(
self
.
rand
(
2
,
2
))
try
:
try
:
gemm
(
Z
,
1.0
,
A
,
transpose_inplace
(
Z
),
1.0
)
gemm
(
Z
,
1.0
,
A
,
t
ensor
.
_t
ranspose_inplace
(
Z
),
1.0
)
except
ValueError
,
e
:
except
ValueError
,
e
:
if
e
[
0
]
==
Gemm
.
E_z_uniq
:
if
e
[
0
]
==
Gemm
.
E_z_uniq
:
return
return
...
@@ -1444,7 +1455,7 @@ class t_gemm(unittest.TestCase):
...
@@ -1444,7 +1455,7 @@ class t_gemm(unittest.TestCase):
Z
=
as_tensor
(
self
.
rand
(
2
,
2
))
Z
=
as_tensor
(
self
.
rand
(
2
,
2
))
A
=
as_tensor
(
self
.
rand
(
2
,
2
))
A
=
as_tensor
(
self
.
rand
(
2
,
2
))
try
:
try
:
gemm
(
Z
,
1.0
,
transpose_inplace
(
Z
),
A
,
1.0
)
gemm
(
Z
,
1.0
,
t
ensor
.
_t
ranspose_inplace
(
Z
),
A
,
1.0
)
except
ValueError
,
e
:
except
ValueError
,
e
:
if
e
[
0
]
==
Gemm
.
E_z_uniq
:
if
e
[
0
]
==
Gemm
.
E_z_uniq
:
return
return
...
...
_test_tensor_opt.py
浏览文件 @
b128f2b8
...
@@ -3,12 +3,12 @@
...
@@ -3,12 +3,12 @@
import
unittest
import
unittest
from
theano
import
gof
import
gof
from
t
heano.t
ensor_opt
import
*
from
tensor_opt
import
*
from
theano
import
tensor
import
tensor
from
t
heano.t
ensor
import
Tensor
from
tensor
import
Tensor
from
theano.
gof
import
Env
from
gof
import
Env
from
theano.
elemwise
import
DimShuffle
from
elemwise
import
DimShuffle
import
numpy
import
numpy
#import scalar_opt
#import scalar_opt
...
@@ -43,7 +43,7 @@ def inputs(xbc = (0, 0), ybc = (0, 0), zbc = (0, 0)):
...
@@ -43,7 +43,7 @@ def inputs(xbc = (0, 0), ybc = (0, 0), zbc = (0, 0)):
# def test_user_inplace(self):
# def test_user_inplace(self):
# x, y, z = inputs()
# x, y, z = inputs()
# e0 = x + y
# e0 = x + y
# e1 = tensor.mul_inplace(x, y)
# e1 = tensor.
_
mul_inplace(x, y)
# g = Env([x, y], [e0, e1])
# g = Env([x, y], [e0, e1])
# self.failUnless(str(g) == "[Broadcast{Add}(x, y), Broadcast{Mul}{0: 0}(x, y)]")
# self.failUnless(str(g) == "[Broadcast{Add}(x, y), Broadcast{Mul}{0: 0}(x, y)]")
# inplace_optimizer.optimize(g)
# inplace_optimizer.optimize(g)
...
@@ -52,7 +52,7 @@ def inputs(xbc = (0, 0), ybc = (0, 0), zbc = (0, 0)):
...
@@ -52,7 +52,7 @@ def inputs(xbc = (0, 0), ybc = (0, 0), zbc = (0, 0)):
# def test_inplace_on_second_argument(self):
# def test_inplace_on_second_argument(self):
# x, y, z = inputs()
# x, y, z = inputs()
# e0 = x + y
# e0 = x + y
# e1 = tensor.mul_inplace(x, z)
# e1 = tensor.
_
mul_inplace(x, z)
# g = Env([x, y], [e0, e1])
# g = Env([x, y], [e0, e1])
# self.failUnless(str(g) == "[Broadcast{Add}(x, y), Broadcast{Mul}{0: 0}(x, z)]")
# self.failUnless(str(g) == "[Broadcast{Add}(x, y), Broadcast{Mul}{0: 0}(x, z)]")
# inplace_optimizer.optimize(g)
# inplace_optimizer.optimize(g)
...
@@ -98,9 +98,9 @@ class _test_dimshuffle_lift(unittest.TestCase):
...
@@ -98,9 +98,9 @@ class _test_dimshuffle_lift(unittest.TestCase):
from
t
heano.t
ensor
import
*
from
tensor
import
*
from
theano.
sandbox
import
pprint
from
sandbox
import
pprint
class
_test_greedy_distribute
(
unittest
.
TestCase
):
class
_test_greedy_distribute
(
unittest
.
TestCase
):
def
test_main
(
self
):
def
test_main
(
self
):
...
@@ -279,8 +279,8 @@ class _test_canonize(unittest.TestCase):
...
@@ -279,8 +279,8 @@ class _test_canonize(unittest.TestCase):
# # def test_inplace(self):
# # def test_inplace(self):
# # x, y, z = inputs()
# # x, y, z = inputs()
# # #e = tensor.add_inplace(x, y + z)
# # #e = tensor.
_
add_inplace(x, y + z)
# # e = x + tensor.add_inplace(y, z)
# # e = x + tensor.
_
add_inplace(y, z)
# # g = Env([x, y, z], [e])
# # g = Env([x, y, z], [e])
# # opt = CliqueOptimizer(through_broadcast = False,
# # opt = CliqueOptimizer(through_broadcast = False,
# # scalar_optimizer = None,
# # scalar_optimizer = None,
...
...
doc/apirst2html.py
0 → 100755
浏览文件 @
b128f2b8
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
"""An HTML writer supporting link to external documentation.
This module is a frontend for the Docutils_ HTML writer. It allows a document
to reference objects documented in the API documentation generated by
extraction tools such as Doxygen_ or Epydoc_.
.. _Docutils: http://docutils.sourceforge.net/
.. _Doxygen: http://www.doxygen.org/
.. _Epydoc: http://epydoc.sourceforge.net/
"""
# $Id: apirst2html.py 1531 2007-02-18 23:07:25Z dvarrazzo $
__version__
=
"$Revision: 1531 $"
[
11
:
-
2
]
__author__
=
"Daniele Varrazzo"
__copyright__
=
"Copyright (C) 2007 by Daniele Varrazzo"
__docformat__
=
'reStructuredText en'
try
:
import
locale
locale
.
setlocale
(
locale
.
LC_ALL
,
''
)
except
:
pass
# We have to do some path magic to prevent Python from getting
# confused about the difference between the ``epydoc.py`` script, and the
# real ``epydoc`` package. So remove ``sys.path[0]``, which contains the
# directory of the script.
import
sys
,
os
.
path
script_path
=
os
.
path
.
abspath
(
sys
.
path
[
0
])
sys
.
path
=
[
p
for
p
in
sys
.
path
if
os
.
path
.
abspath
(
p
)
!=
script_path
]
import
epydoc.docwriter.xlink
as
xlink
from
docutils.core
import
publish_cmdline
,
default_description
description
=
(
'Generates (X)HTML documents with API documentation links. '
+
default_description
)
publish_cmdline
(
reader
=
xlink
.
ApiLinkReader
(),
writer_name
=
'html'
,
description
=
description
)
doc/doc.idx
浏览文件 @
b128f2b8
...
@@ -2,3 +2,4 @@ graph graph.html
...
@@ -2,3 +2,4 @@ graph graph.html
tensor graph.html
tensor graph.html
result graph.html
result graph.html
howto graph.html
howto graph.html
oplist oplist.html
epydoc
100644 → 100755
浏览文件 @
b128f2b8
# TODO:
#!/usr/bin/python
# Get all graphs to work!
#
# Call the command line interface for Epydoc.
#
# Make sure that we don't get confused between an epydoc.py script and
# the real epydoc package.
import
sys
,
os
.
path
,
inspect
if
os
.
path
.
exists
(
os
.
path
.
join
(
sys
.
path
[
0
],
'epydoc.py'
)):
del
sys
.
path
[
0
]
from
epydoc
import
docintrospecter
from
epydoc.apidoc
import
RoutineDoc
def
Op_to_RoutineDoc
(
op
,
routine_doc
,
module_name
=
None
):
routine_doc
.
specialize_to
(
RoutineDoc
)
#NB: this code is lifted from
# /u/bergstrj/pub/prefix/x86_64-unknown-linux-gnu-Fedora_release_7__Moonshine_/lib/python2.5/site-packages/epydoc
# /u/bergstrj/pub/prefix/x86_64-unknown-linux-gnu-Fedora_release_7__Moonshine_/lib/python2.5/site-packages/epydoc/docintrospecter.py
# op should be an op instance
assert
hasattr
(
op
,
'perform'
)
# Record the function's docstring.
routine_doc
.
docstring
=
getattr
(
op
,
'__doc__'
,
''
)
# Record the function's signature.
func
=
op
.
__epydoc_asRoutine
if
isinstance
(
func
,
type
(
Op_to_RoutineDoc
)):
(
args
,
vararg
,
kwarg
,
defaults
)
=
inspect
.
getargspec
(
func
)
# Add the arguments.
routine_doc
.
posargs
=
args
routine_doc
.
vararg
=
vararg
routine_doc
.
kwarg
=
kwarg
# Set default values for positional arguments.
routine_doc
.
posarg_defaults
=
[
None
]
*
len
(
args
)
# Set the routine's line number.
if
hasattr
(
func
,
'func_code'
):
routine_doc
.
lineno
=
func
.
func_code
.
co_firstlineno
else
:
# [XX] I should probably use UNKNOWN here??
# dvarrazzo: if '...' is to be changed, also check that
# `docstringparser.process_arg_field()` works correctly.
# See SF bug #1556024.
routine_doc
.
posargs
=
[
'...'
]
routine_doc
.
posarg_defaults
=
[
None
]
routine_doc
.
kwarg
=
None
routine_doc
.
vararg
=
None
return
routine_doc
docintrospecter
.
register_introspecter
(
lambda
value
:
getattr
(
value
,
'__epydoc_asRoutine'
,
False
),
Op_to_RoutineDoc
,
priority
=-
1
)
from
epydoc.cli
import
cli
cli
()
[epydoc] # Epydoc section marker (required by ConfigParser)
# The list of objects to document. Objects can be named using
# dotted names, module filenames, or package directory names.
# Alases for this option include "objects" and "values".
modules: *.py, gof/*.py, theano/*.py, theano/gof/*.py, joseph/*.py, pylearn/*.py, scipy, numpy
# The type of output that should be generated. Should be one
# of: html, text, latex, dvi, ps, pdf.
output: html
# The path to the output directory. May be relative or absolute.
target: html/
# An integer indicating how verbose epydoc should be. The default
# value is 0; negative values will supress warnings and errors;
# positive values will give more verbose output.
verbosity: 1
# A boolean value indicating that Epydoc should show a tracaback
# in case of unexpected error. By default don't show tracebacks
debug: 0
# If True, don't try to use colors or cursor control when doing
# textual output. The default False assumes a rich text prompt
simple-term: 0
### Generation options
# The default markup language for docstrings, for modules that do
# not define __docformat__. Defaults to epytext.
docformat: epytext
# Whether or not parsing should be used to examine objects.
parse: yes
# Whether or not introspection should be used to examine objects.
introspect: yes
# Don't examine in any way the modules whose dotted name match this
# regular expression pattern.
#exclude
# Don't perform introspection on the modules whose dotted name match this
# regular expression pattern.
#exclude-introspect
# Don't perform parsing on the modules whose dotted name match this
# regular expression pattern.
#exclude-parse
# The format for showing inheritance objects.
# It should be one of: 'grouped', 'listed', 'included'.
inheritance: grouped
# Whether or not to inclue private variables. (Even if included,
# private variables will be hidden by default.)
private: yes
# Whether or not to list each module's imports.
imports: yes
# Whether or not to include syntax highlighted source code in
# the output (HTML only).
sourcecode: yes
# Whether or not to includea a page with Epydoc log, containing
# effective option at the time of generation and the reported logs.
include-log: yes
### Output options
# The documented project's name.
name: Theano
# The CSS stylesheet for HTML output. Can be the name of a builtin
# stylesheet, or the name of a file.
css: white
# The documented project's URL.
url: http://lgcm.iro.umontreal.ca/theano/
# HTML code for the project link in the navigation bar. If left
# unspecified, the project link will be generated based on the
# project's name and URL.
#link: <a href="somewhere">My Cool Project</a>
# The "top" page for the documentation. Can be a URL, the name
# of a module or class, or one of the special names "trees.html",
# "indices.html", or "help.html"
#top: os.path
# An alternative help file. The named file should contain the
# body of an HTML file; navigation bars will be added to it.
#help: my_helpfile.html
# Whether or not to include a frames-based table of contents.
#frames: yes
frames: no
# Whether each class should be listed in its own section when
# generating LaTeX or PDF output.
separate-classes: no
### API linking options
# Define a new API document. A new interpreted text role
# will be created
#external-api: epydoc
# Use the records in this file to resolve objects in the API named NAME.
#external-api-file: epydoc:api-objects.txt
# Use this URL prefix to configure the string returned for external API.
#external-api-root: epydoc:http://epydoc.sourceforge.net/api
### Graph options
# The list of graph types that should be automatically included
# in the output. Graphs are generated using the Graphviz "dot"
# executable. Graph types include: "classtree", "callgraph",
# "umlclass". Use "all" to include all graph types
graph: all
# The path to the Graphviz "dot" executable, used to generate
# graphs.
dotpath: /usr/bin/dot
# The name of one or more pstat files (generated by the profile
# or hotshot module). These are used to generate call graphs.
pstat: autotest.pstat
# Specify the font used to generate Graphviz graphs.
# (e.g., helvetica or times).
graph-font: Helvetica
# Specify the font size used to generate Graphviz graphs.
graph-font-size: 10
### Return value options
# The condition upon which Epydoc should exit with a non-zero
# exit status. Possible values are error, warning, docstring_warning
#fail-on: error
gen_oplist.py
0 → 100644
浏览文件 @
b128f2b8
"""script to generate doc/oplist.txt, which compiles to :doc:`oplist`. """
__docformat__
=
"restructuredtext en"
import
sys
import
gof
def
isOpClass
(
thing
):
return
hasattr
(
thing
,
'perform'
)
and
not
isinstance
(
thing
,
gof
.
Op
)
def
isOpConstructor
(
thing
,
module
):
return
hasattr
(
thing
,
'perform'
)
and
isinstance
(
thing
,
gof
.
Op
)
\
or
thing
in
getattr
(
module
,
'_constructor_list'
,
[])
def
print_title
(
title_string
,
under_char
):
print
title_string
print
under_char
*
len
(
title_string
)
print
""
def
chomp
(
s
):
"""interpret and left-align a docstring"""
if
'subtensor'
in
s
:
debug
=
0
else
:
debug
=
0
r
=
[]
leadspace
=
True
for
c
in
s
:
if
leadspace
and
c
in
'
\n\t
'
:
continue
else
:
leadspace
=
False
if
c
==
'
\n
'
:
if
debug
:
print
>>
sys
.
stderr
,
'breaking'
break
if
c
==
'
\t
'
:
c
=
' '
;
r
.
append
(
c
)
if
debug
:
print
>>
sys
.
stderr
,
r
return
""
.
join
(
r
)
def
generate
():
"""Generate the op list"""
import
scalar
,
sparse
,
tensor
print_title
(
"Theano Op List"
,
"~"
)
print
""
print
".. contents:: "
print
""
for
module
in
[
scalar
,
sparse
,
tensor
]:
print_title
(
'module: `
%
s`'
%
module
.
__name__
,
'='
)
print_title
(
'Op Classes'
,
'-'
)
symbol_name_list
=
[
s
for
s
in
dir
(
module
)
if
not
s
[
0
]
==
'_'
]
for
symbol_name
in
symbol_name_list
:
symbol
=
getattr
(
module
,
symbol_name
)
if
isOpClass
(
symbol
):
print
""
print
"- :api:`
%
s.
%
s`"
%
(
symbol
.
__module__
,
symbol_name
)
docstring
=
getattr
(
symbol
,
'__doc__'
,
""
)
if
not
docstring
:
print
" "
,
'(no doc)'
elif
len
(
docstring
)
<
50
:
print
" "
,
chomp
(
docstring
)
else
:
print
" "
,
chomp
(
docstring
[:
40
]),
"..."
# a little trailing whitespace
print
""
print_title
(
'Op Constructors'
,
'-'
)
for
symbol_name
in
symbol_name_list
:
symbol
=
getattr
(
module
,
symbol_name
)
if
isOpConstructor
(
symbol
,
module
):
print
""
print
"- :api:`
%
s.
%
s`"
%
(
symbol
.
__module__
,
symbol_name
)
docstring
=
getattr
(
symbol
,
'__doc__'
,
""
)
if
not
docstring
:
print
" "
,
'No documentation'
elif
len
(
docstring
)
<
50
:
print
" "
,
chomp
(
docstring
)
else
:
print
" "
,
chomp
(
docstring
[:
40
]),
"..."
# a little trailing whitespace
print
""
if
__name__
==
"__main__"
:
generate
()
gof/op.py
浏览文件 @
b128f2b8
...
@@ -29,18 +29,18 @@ class CLinkerOp(object):
...
@@ -29,18 +29,18 @@ class CLinkerOp(object):
given names for the inputs and outputs.
given names for the inputs and outputs.
:Parameters:
:Parameters:
`node`: Apply instance
`node`
: Apply instance
WRITEME
WRITEME
`name`: WRITEME
`name`
: WRITEME
WRITEME
WRITEME
`inputs`: list of strings
`inputs`
: list of strings
There is a string for each input of the function, and the string is the name of a C
There is a string for each input of the function, and the string is the name of a C
`PyObject` variable pointing to that input.
`PyObject` variable pointing to that input.
`outputs`: list of strings
`outputs`
: list of strings
Each string is the name of a `PyObject` pointer where the Op should store its
Each string is the name of a `PyObject` pointer where the Op should store its
results. The `CLinker` guarantees that on entry to this code block, each pointer
results. The `CLinker` guarantees that on entry to this code block, each pointer
is either NULL or is unchanged from the end of the previous execution.
is either NULL or is unchanged from the end of the previous execution.
`sub`: dict of strings
`sub`
: dict of strings
extra symbols defined in `CLinker` sub symbols (such as 'fail').
extra symbols defined in `CLinker` sub symbols (such as 'fail').
WRITEME
WRITEME
...
@@ -59,18 +59,18 @@ class CLinkerOp(object):
...
@@ -59,18 +59,18 @@ class CLinkerOp(object):
This is a convenient place to clean up things allocated by c_code().
This is a convenient place to clean up things allocated by c_code().
:Parameters:
:Parameters:
`node`: Apply instance
`node`
: Apply instance
WRITEME
WRITEME
`name`: WRITEME
`name`
: WRITEME
WRITEME
WRITEME
`inputs`: list of strings
`inputs`
: list of strings
There is a string for each input of the function, and the string is the name of a C
There is a string for each input of the function, and the string is the name of a C
`PyObject` variable pointing to that input.
`PyObject` variable pointing to that input.
`outputs`: list of strings
`outputs`
: list of strings
Each string is the name of a `PyObject` pointer where the Op should store its
Each string is the name of a `PyObject` pointer where the Op should store its
results. The `CLinker` guarantees that on entry to this code block, each pointer
results. The `CLinker` guarantees that on entry to this code block, each pointer
is either NULL or is unchanged from the end of the previous execution.
is either NULL or is unchanged from the end of the previous execution.
`sub`: dict of strings
`sub`
: dict of strings
extra symbols defined in `CLinker` sub symbols (such as 'fail').
extra symbols defined in `CLinker` sub symbols (such as 'fail').
WRITEME
WRITEME
...
@@ -225,11 +225,11 @@ class PureOp(object):
...
@@ -225,11 +225,11 @@ class PureOp(object):
output storage. Return None.
output storage. Return None.
:Parameters:
:Parameters:
`node`: Apply instance
`node`
: Apply instance
contains the symbolic inputs and outputs
contains the symbolic inputs and outputs
`inputs`: list
`inputs`
: list
sequence of inputs (immutable)
sequence of inputs (immutable)
`output_storage`: list
`output_storage`
: list
list of mutable 1-element lists (do not change the length of these lists)
list of mutable 1-element lists (do not change the length of these lists)
The `output_storage` list might contain data. If an element of
The `output_storage` list might contain data. If an element of
...
...
local.build_html.sh
100644 → 100755
浏览文件 @
b128f2b8
#!/bin/bash
#!/bin/bash
mkdir
-p
html/api
APIRST2HTML
=
doc/apirst2html.py
epydoc
--config
local.epydoc
EPYDOC_ARGS
=
'--external-api=api --external-api-file=api:html/api/api-objects.txt --external-api-root=api:../api/'
cd
doc
sh build_html.sh
mkdir
-p
html/api
&&
mkdir
-p
html/doc
cd
../
rm
-Rf
html/doc
# this builds some stuff or something... basically makes the rest work properly
mv
doc/html html/doc
# for a reason I don't understand. -JB 20080924
python __init__.py
if
[
"
$1
"
!=
" rst"
]
;
then
./epydoc
--config
local.epydoc
fi
if
[
"
$1
"
!=
" epydoc"
]
;
then
python gen_oplist.py
>
doc/oplist.txt
for
RST
in
graph oplist
;
do
$APIRST2HTML
$EPYDOC_ARGS
doc/
$RST
.txt html/doc/
$RST
.html
done
fi
local.epydoc
浏览文件 @
b128f2b8
...
@@ -7,17 +7,12 @@
...
@@ -7,17 +7,12 @@
# The list of objects to document. Objects can be named using
# The list of objects to document. Objects can be named using
# dotted names, module filenames, or package directory names.
# dotted names, module filenames, or package directory names.
# Alases for this option include "objects" and "values".
# Alases for this option include "objects" and "values".
modules: *.py, gof/__init__.py,
modules: __init__.py,
gof/a*,
[a-z]*.py,
gof/c*,
[A-Z]*.py,
gof/d*,
gof/__init__.py,
gof/e*,
gof/[a-z]*.py,
gof/g*,
gof/[A-Z]*.py
gof/l*,
gof/o*,
gof/p*,
gof/t*,
gof/u*
# The type of output that should be generated. Should be one
# The type of output that should be generated. Should be one
# of: html, text, latex, dvi, ps, pdf.
# of: html, text, latex, dvi, ps, pdf.
...
...
scalar.py
浏览文件 @
b128f2b8
...
@@ -586,7 +586,7 @@ class Abs(UnaryScalarOp):
...
@@ -586,7 +586,7 @@ class Abs(UnaryScalarOp):
return
"
%(z)
s = fabs(
%(x)
s);"
%
locals
()
return
"
%(z)
s = fabs(
%(x)
s);"
%
locals
()
#complex, other?
#complex, other?
raise
NotImplementedError
(
'type not supported'
,
type
)
raise
NotImplementedError
(
'type not supported'
,
type
)
abs
=
Abs
(
same_out
)
_
abs
=
Abs
(
same_out
)
class
Sgn
(
UnaryScalarOp
):
class
Sgn
(
UnaryScalarOp
):
def
impl
(
self
,
x
):
def
impl
(
self
,
x
):
...
...
tensor.py
浏览文件 @
b128f2b8
"""A L{Result} to store L{numpy.ndarray} with basic accompanying L{Op}s"""
"""A `Type` and `Op` classes to work with numpy.ndarrays symbolically."""
__docformat__
=
"restructuredtext en"
import
sys
# for sys.maxint
import
sys
# for sys.maxint
import
inspect
import
inspect
import
functools
import
functools
...
@@ -23,8 +26,36 @@ from elemwise import Elemwise, DimShuffle, CAReduce, Sum
...
@@ -23,8 +26,36 @@ from elemwise import Elemwise, DimShuffle, CAReduce, Sum
import
tensor_random
as
random
import
tensor_random
as
random
_constructor_list
=
[]
"""List of functions to be listed as op constructors in the oplist (`gen_oplist`, doc/oplist.txt)."""
def
constructor
(
f
):
"""Make `f` appear as a constructor in the oplist (`gen_oplist`, doc/oplist.txt)."""
_constructor_list
.
append
(
f
)
return
f
def
as_tensor
(
x
,
name
=
None
):
def
as_tensor
(
x
,
name
=
None
):
"""Return `x`, transformed into a `Tensor`
This function is often used by `make_node` methods of `Op` subclasses to
turn ndarrays, numbers, `Scalar` instances, `Apply` instances and `Tensor`
instances into valid input list elemnts.
:Parameters:
- `x`: Apply instance, Result instance, numpy.ndarray, or number
This thing will be transformed into a `Result` in a sensible way. An
ndarray argument will not be copied, but a list of numbers will be copied
to make an ndarray.
- `name`: str or None
If a new `Result` instance is created, it will be named with this string.
:Exceptions:
- `ValueError`: raised if an `Apply` with no default output is fetched
- `TypeError`: raised if `x` cannot be converted to a Tensor Result
"""
if
isinstance
(
x
,
gof
.
Apply
):
if
isinstance
(
x
,
gof
.
Apply
):
#TODO: use Apply's default output mechanism
if
len
(
x
.
outputs
)
!=
1
:
if
len
(
x
.
outputs
)
!=
1
:
raise
ValueError
(
"It is ambiguous which output of a multi-output Op has to be fetched."
,
x
)
raise
ValueError
(
"It is ambiguous which output of a multi-output Op has to be fetched."
,
x
)
else
:
else
:
...
@@ -39,56 +70,74 @@ def as_tensor(x, name = None):
...
@@ -39,56 +70,74 @@ def as_tensor(x, name = None):
return
constant
(
x
)
return
constant
(
x
)
except
TypeError
:
except
TypeError
:
raise
TypeError
(
"Cannot convert
%
s to Tensor"
%
x
,
type
(
x
))
raise
TypeError
(
"Cannot convert
%
s to Tensor"
%
x
,
type
(
x
))
# this has a different name, because _as_tensor is the function which ops use
# this has a different name, because _as_tensor is the function which ops use
# to upcast their arguments... this internal-use function is a good place to put debugging stuff, better than the global astensor.
# to upcast their arguments... this internal-use function is a good place to put debugging stuff, better than the global astensor.
_as_tensor
=
as_tensor
_as_tensor
=
as_tensor
def
constant
(
_x
):
def
constant
(
x
):
if
not
isinstance
(
_x
,
numpy
.
ndarray
):
"""Return a symbolic `Constant` with value `x`
x
=
numpy
.
asarray
(
_x
)
:Exceptions:
- `TypeError`: `x` could not be converted to a numpy.ndarray
"""
if
isinstance
(
x
,
numpy
.
ndarray
):
x_
=
x
else
:
else
:
x
=
_x
x
_
=
numpy
.
asarray
(
x
)
try
:
try
:
return
TensorConstant
(
Tensor
(
dtype
=
x
.
dtype
,
return
TensorConstant
(
Tensor
(
dtype
=
x
_
.
dtype
,
broadcastable
=
[
d
==
1
for
d
in
x
.
shape
]),
x
)
broadcastable
=
[
d
==
1
for
d
in
x
_
.
shape
]),
x_
)
except
:
except
:
raise
TypeError
(
"Could not convert
%
s to Tensor"
%
_x
,
type
(
_
x
))
raise
TypeError
(
"Could not convert
%
s to Tensor"
%
x
,
type
(
x
))
def
value
(
x
):
def
value
(
x
):
if
not
isinstance
(
x
,
numpy
.
ndarray
):
"""Return a symbolic `Value` with default value `x`
x
=
numpy
.
asarray
(
x
)
:Exceptions:
- `TypeError`: `x` could not be converted to a numpy.ndarray
"""
if
isinstance
(
x
,
numpy
.
ndarray
):
x_
=
x
else
:
x_
=
numpy
.
asarray
(
x
)
try
:
try
:
return
TensorValue
(
Tensor
(
dtype
=
x
.
dtype
,
return
TensorValue
(
Tensor
(
dtype
=
x
_
.
dtype
,
broadcastable
=
[
d
==
1
for
d
in
x
.
shape
]),
x
)
broadcastable
=
[
d
==
1
for
d
in
x
_
.
shape
]),
x_
)
except
:
except
:
raise
TypeError
(
"Could not convert
%
s to Tensor"
%
_x
,
type
(
_
x
))
raise
TypeError
(
"Could not convert
%
s to Tensor"
%
x
,
type
(
x
))
class
Tensor
(
Type
):
class
Tensor
(
Type
):
"""
"""Symbolic `Type` representing a numpy.ndarray value."""
L{Type} representing L{numpy.ndarray} in Theano.
@todo: At some point we should document a glossary, such as terms like
broadcasting and shape.
@type dtype: numpy dtype string such as 'int64' or 'float64' (among others)
@type broadcastable: tuple or list or array of boolean values, whose length
is the number of dimensions of the L{ndarray} represented by this Type.
@ivar broadcastable: Each element of the broadcastable vector tells us
something about the corresponding dimension:
- False means the dimension can be anything.
- True means the dimension must be 1. Also, this dimension will be considered
for L{broadcasting}, as described and implemented in Numpy.
"""
def
__init__
(
self
,
dtype
,
broadcastable
):
def
__init__
(
self
,
dtype
,
broadcastable
):
"""Initialize self.dtype and self.broadcastable.
:Parameters:
- `dtype`: str corresponding to numpy dtype (e.g., 'int64')
The value (ndarray) associated to a `Result` of this `Type` will have
this dtype.
- `broadcastable`: tuple, list, or array of boolean values
This argument serves two purposes. First, the True elements of this
list indicate the dimensions where the shape of an associated value
must be 1. Secondly, the length of this list is the number of
dimensions that an associated value must have. See
:doc:`broadcasting` for an explanation of how this list is used.
"""
self
.
dtype
=
str
(
dtype
)
self
.
dtype
=
str
(
dtype
)
self
.
broadcastable
=
tuple
(
broadcastable
)
self
.
broadcastable
=
tuple
(
broadcastable
)
self
.
dtype_specs
()
# error checking is done there
self
.
dtype_specs
()
# error checking is done there
def
filter
(
self
,
data
,
strict
=
False
):
def
filter
(
self
,
data
,
strict
=
False
):
"""Convert `data` to something which can be associated to a `TensorResult`.
This function is not meant to be called in user code. It is for
`Linker` instances to use when running a compiled graph.
"""
_data
=
data
_data
=
data
if
strict
:
if
strict
:
if
not
isinstance
(
data
,
numpy
.
ndarray
):
if
not
isinstance
(
data
,
numpy
.
ndarray
):
...
@@ -107,10 +156,10 @@ class Tensor(Type):
...
@@ -107,10 +156,10 @@ class Tensor(Type):
return
data
return
data
def
dtype_specs
(
self
):
def
dtype_specs
(
self
):
"""Return python - C type correspondance tuple for self.data
"""Return a tuple (python type, c type, numpy typenum) that corresponds to
self.dtype.
Return a tuple (python type, c type, numpy typenum) that corresponds to
This function is used internally as part of C code generation.
L{self.dtype}. It is for use in C code generation.
"""
"""
#TODO: add more type correspondances for e.g. int32, int64, float32,
#TODO: add more type correspondances for e.g. int32, int64, float32,
#complex64, etc.
#complex64, etc.
...
@@ -131,14 +180,29 @@ class Tensor(Type):
...
@@ -131,14 +180,29 @@ class Tensor(Type):
raise
TypeError
(
"Unsupported dtype for
%
s:
%
s"
%
(
self
.
__class__
.
__name__
,
self
.
dtype
))
raise
TypeError
(
"Unsupported dtype for
%
s:
%
s"
%
(
self
.
__class__
.
__name__
,
self
.
dtype
))
def
__eq__
(
self
,
other
):
def
__eq__
(
self
,
other
):
"""Compare True iff other is the same kind of Tensor"""
return
type
(
self
)
==
type
(
other
)
and
other
.
dtype
==
self
.
dtype
and
other
.
broadcastable
==
self
.
broadcastable
return
type
(
self
)
==
type
(
other
)
and
other
.
dtype
==
self
.
dtype
and
other
.
broadcastable
==
self
.
broadcastable
def
__hash__
(
self
):
def
__hash__
(
self
):
"""Hash equal for same kinds of Tensor"""
return
hash
(
self
.
dtype
)
^
hash
(
self
.
broadcastable
)
return
hash
(
self
.
dtype
)
^
hash
(
self
.
broadcastable
)
ndim
=
property
(
lambda
self
:
len
(
self
.
broadcastable
),
doc
=
"read-only access to the number of dimensions"
)
ndim
=
property
(
lambda
self
:
len
(
self
.
broadcastable
),
doc
=
"number of dimensions"
)
"""Number of dimensions
This read-only property is the preferred way to get the number of dimensions
of a `Tensor`.
"""
def
make_result
(
self
,
name
=
None
):
def
make_result
(
self
,
name
=
None
):
"""Return a `TensorResult` of this type
:Parameters:
- `name`: str
A pretty name to identify this `Result` when printing and debugging
"""
return
TensorResult
(
self
,
name
=
name
)
return
TensorResult
(
self
,
name
=
name
)
def
__str__
(
self
):
def
__str__
(
self
):
...
@@ -148,6 +212,7 @@ class Tensor(Type):
...
@@ -148,6 +212,7 @@ class Tensor(Type):
return
"Tensor{
%
s,
%
s}"
%
(
str
(
self
.
dtype
),
str
(
self
.
broadcastable
))
return
"Tensor{
%
s,
%
s}"
%
(
str
(
self
.
dtype
),
str
(
self
.
broadcastable
))
def
c_declare
(
self
,
name
,
sub
):
def
c_declare
(
self
,
name
,
sub
):
"""Override `CLinkerOp.c_declare` """
return
"""
return
"""
PyArrayObject*
%(name)
s;
PyArrayObject*
%(name)
s;
int type_num_
%(name)
s;
int type_num_
%(name)
s;
...
@@ -155,12 +220,14 @@ class Tensor(Type):
...
@@ -155,12 +220,14 @@ class Tensor(Type):
"""
%
dict
(
sub
,
name
=
name
,
dtype
=
self
.
dtype_specs
()[
1
])
"""
%
dict
(
sub
,
name
=
name
,
dtype
=
self
.
dtype_specs
()[
1
])
def
c_init
(
self
,
name
,
sub
):
def
c_init
(
self
,
name
,
sub
):
"""Override `CLinkerOp.c_init` """
return
"""
return
"""
%(name)
s = NULL;
%(name)
s = NULL;
type_num_
%(name)
s =
%(type_num)
s;
type_num_
%(name)
s =
%(type_num)
s;
"""
%
dict
(
sub
,
name
=
name
,
type_num
=
self
.
dtype_specs
()[
2
])
"""
%
dict
(
sub
,
name
=
name
,
type_num
=
self
.
dtype_specs
()[
2
])
def
c_extract
(
self
,
name
,
sub
):
def
c_extract
(
self
,
name
,
sub
):
"""Override `CLinkerOp.c_extract` """
return
"""
return
"""
%(name)
s = NULL;
%(name)
s = NULL;
type_num_
%(name)
s =
%(type_num)
s;
type_num_
%(name)
s =
%(type_num)
s;
...
@@ -186,6 +253,7 @@ class Tensor(Type):
...
@@ -186,6 +253,7 @@ class Tensor(Type):
"""
%
dict
(
sub
,
name
=
name
,
type_num
=
self
.
dtype_specs
()[
2
])
"""
%
dict
(
sub
,
name
=
name
,
type_num
=
self
.
dtype_specs
()[
2
])
def
c_cleanup
(
self
,
name
,
sub
):
def
c_cleanup
(
self
,
name
,
sub
):
"""Override `CLinkerOp.c_cleanup` """
return
"""
return
"""
if (
%(name)
s) {
if (
%(name)
s) {
Py_XDECREF(
%(name)
s);
Py_XDECREF(
%(name)
s);
...
@@ -193,6 +261,7 @@ class Tensor(Type):
...
@@ -193,6 +261,7 @@ class Tensor(Type):
"""
%
locals
()
"""
%
locals
()
def
c_sync
(
self
,
name
,
sub
):
def
c_sync
(
self
,
name
,
sub
):
"""Override `CLinkerOp.c_sync` """
return
"""
return
"""
Py_XDECREF(py_
%(name)
s);
Py_XDECREF(py_
%(name)
s);
if (!
%(name)
s) {
if (!
%(name)
s) {
...
@@ -205,12 +274,14 @@ class Tensor(Type):
...
@@ -205,12 +274,14 @@ class Tensor(Type):
"""
%
locals
()
"""
%
locals
()
def
c_headers
(
self
):
def
c_headers
(
self
):
"""Override `CLinkerOp.c_headers` """
return
[]
return
[]
def
c_libraries
(
self
):
def
c_libraries
(
self
):
return
[]
return
[]
def
c_support_code
(
cls
):
def
c_support_code
(
cls
):
"""Override `CLinkerOp.c_support_code` """
template
=
"""
template
=
"""
struct theano_complex
%(nbits)
s : public npy_complex
%(nbits)
s
struct theano_complex
%(nbits)
s : public npy_complex
%(nbits)
s
{
{
...
@@ -344,9 +415,9 @@ class _tensor_py_operators:
...
@@ -344,9 +415,9 @@ class _tensor_py_operators:
def
__rand__
(
self
,
other
):
return
and_
(
other
,
self
)
def
__rand__
(
self
,
other
):
return
and_
(
other
,
self
)
def
__ror__
(
self
,
other
):
return
or_
(
other
,
self
)
def
__ror__
(
self
,
other
):
return
or_
(
other
,
self
)
def
__rxor__
(
self
,
other
):
return
xor
(
other
,
self
)
def
__rxor__
(
self
,
other
):
return
xor
(
other
,
self
)
def
__iand__
(
self
,
other
):
return
and_inplace
(
self
,
other
)
def
__iand__
(
self
,
other
):
return
_
and_inplace
(
self
,
other
)
def
__ior__
(
self
,
other
):
return
or_inplace
(
self
,
other
)
def
__ior__
(
self
,
other
):
return
_
or_inplace
(
self
,
other
)
def
__ixor__
(
self
,
other
):
return
xor_inplace
(
self
,
other
)
def
__ixor__
(
self
,
other
):
return
_
xor_inplace
(
self
,
other
)
#ARITHMETIC - NORMAL
#ARITHMETIC - NORMAL
def
__add__
(
self
,
other
):
return
add
(
self
,
other
)
def
__add__
(
self
,
other
):
return
add
(
self
,
other
)
...
@@ -357,11 +428,11 @@ class _tensor_py_operators:
...
@@ -357,11 +428,11 @@ class _tensor_py_operators:
def
__mod__
(
self
,
other
):
return
mod
(
self
,
other
)
def
__mod__
(
self
,
other
):
return
mod
(
self
,
other
)
#ARITHMETIC - INPLACE
#ARITHMETIC - INPLACE
def
__iadd__
(
self
,
other
):
return
add_inplace
(
self
,
other
)
def
__iadd__
(
self
,
other
):
return
_
add_inplace
(
self
,
other
)
def
__isub__
(
self
,
other
):
return
sub_inplace
(
self
,
other
)
def
__isub__
(
self
,
other
):
return
_
sub_inplace
(
self
,
other
)
def
__imul__
(
self
,
other
):
return
mul_inplace
(
self
,
other
)
def
__imul__
(
self
,
other
):
return
_
mul_inplace
(
self
,
other
)
def
__idiv__
(
self
,
other
):
return
div_inplace
(
self
,
other
)
def
__idiv__
(
self
,
other
):
return
_
div_inplace
(
self
,
other
)
def
__ipow__
(
self
,
other
):
return
pow_inplace
(
self
,
other
)
def
__ipow__
(
self
,
other
):
return
_
pow_inplace
(
self
,
other
)
#ARITHMETIC - RIGHT-OPERAND
#ARITHMETIC - RIGHT-OPERAND
def
__radd__
(
self
,
other
):
return
add
(
other
,
self
)
def
__radd__
(
self
,
other
):
return
add
(
other
,
self
)
...
@@ -406,6 +477,7 @@ class TensorConstant(Constant, _tensor_py_operators):
...
@@ -406,6 +477,7 @@ class TensorConstant(Constant, _tensor_py_operators):
class
TensorValue
(
Value
,
_tensor_py_operators
):
class
TensorValue
(
Value
,
_tensor_py_operators
):
pass
pass
#QUESTION: why are we doing this!?
elemwise
.
as_tensor
=
as_tensor
elemwise
.
as_tensor
=
as_tensor
elemwise
.
Tensor
=
Tensor
elemwise
.
Tensor
=
Tensor
elemwise
.
TensorResult
=
TensorResult
elemwise
.
TensorResult
=
TensorResult
...
@@ -418,12 +490,60 @@ elemwise.TensorValue = TensorValue
...
@@ -418,12 +490,60 @@ elemwise.TensorValue = TensorValue
# Utilities
# Utilities
#########################
#########################
def
_elemwise
(
scalar_op
,
name
):
def
_elemwise
(
scalar_op
,
name
,
doc_prefix
=
''
):
straight
=
elemwise
.
Elemwise
(
scalar_op
,
name
=
name
)
straight
=
elemwise
.
Elemwise
(
scalar_op
,
name
=
name
)
inplace_scalar_op
=
scalar_op
.
__class__
(
scal
.
transfer_type
(
0
))
inplace_scalar_op
=
scalar_op
.
__class__
(
scal
.
transfer_type
(
0
))
inplace
=
elemwise
.
Elemwise
(
inplace_scalar_op
,
{
0
:
0
},
name
=
name
+
"_inplace"
)
inplace
=
elemwise
.
Elemwise
(
inplace_scalar_op
,
{
0
:
0
},
name
=
'_'
+
name
+
"_inplace"
)
# don't add the inplace versions, they aren't supposed to be part of the user interface
_constructor_list
.
append
(
straight
)
# This is here so that gen_oplist can detect which module declared these variables.
straight
.
__module__
=
'tensor'
inplace
.
__module__
=
'tensor'
if
doc_prefix
:
straight
.
__doc__
=
doc_prefix
+
'
\n
'
+
straight
.
__doc__
return
straight
,
inplace
return
straight
,
inplace
def
_redefine
(
real_symbol_value
):
"""Replace the value associated with a function symbol.
This is useful to trick epydoc into doing what we want. It's a hack.
"""
def
decorator
(
f
):
return
real_symbol_value
return
decorator
def
_redefine_asRoutine
(
real_symbol_value
):
real_symbol_value
.
__epydoc_asRoutine
=
True
def
decorator
(
f
):
return
real_symbol_value
return
decorator
def _scal_elemwise(symbol):
    """Replace a symbol definition with an elementwise version of the corresponding scalar Op"""
    symbolname = symbol.__name__
    inplace = symbolname.endswith('_inplace')

    if inplace:
        # '_foo_inplace' -> scalar op 'foo', rebuilt so output 0 overwrites input 0
        scalar_op = getattr(scal, symbolname[1:-len('_inplace')])
        inplace_scalar_op = scalar_op.__class__(scal.transfer_type(0))
        rval = elemwise.Elemwise(inplace_scalar_op, {0: 0}, name=symbolname)
    else:
        scalar_op = getattr(scal, symbolname)
        rval = elemwise.Elemwise(scalar_op, name=symbolname)

    if getattr(symbol, '__doc__', False):
        rval.__doc__ = symbol.__doc__ + '\n' + rval.__doc__

    # for the meaning of this see the ./epydoc script
    # it makes epydoc display rval as if it were a function, not an object
    rval.__epydoc_asRoutine = symbol
    return rval
#########################
#########################
...
@@ -457,6 +577,7 @@ class ScalarFromTensor(Op):
...
@@ -457,6 +577,7 @@ class ScalarFromTensor(Op):
scalar_from_tensor
=
ScalarFromTensor
()
scalar_from_tensor
=
ScalarFromTensor
()
@constructor
def
cast
(
t
,
dtype
):
def
cast
(
t
,
dtype
):
mapping
=
{
'int8'
:
convert_to_int8
,
mapping
=
{
'int8'
:
convert_to_int8
,
'int16'
:
convert_to_int16
,
'int16'
:
convert_to_int16
,
...
@@ -468,14 +589,33 @@ def cast(t, dtype):
...
@@ -468,14 +589,33 @@ def cast(t, dtype):
'complex128'
:
convert_to_complex128
}
'complex128'
:
convert_to_complex128
}
return
mapping
[
dtype
](
t
)
return
mapping
[
dtype
](
t
)
convert_to_int8
=
elemwise
.
Elemwise
(
scal
.
Identity
(
scal
.
specific_out
(
scal
.
int8
)))
#to be removed as we get the epydoc routine-documenting thing going -JB 20080924
convert_to_int16
=
elemwise
.
Elemwise
(
scal
.
Identity
(
scal
.
specific_out
(
scal
.
int16
)))
def
_conversion
(
real_value
):
convert_to_int32
=
elemwise
.
Elemwise
(
scal
.
Identity
(
scal
.
specific_out
(
scal
.
int32
)))
return
real_value
# Elementwise identity Ops whose output dtype is pinned; each performs a cast.
convert_to_int8 = _conversion(elemwise.Elemwise(scal.Identity(scal.specific_out(scal.int8))))
"""Cast to 8-bit integer"""

convert_to_int16 = _conversion(elemwise.Elemwise(scal.Identity(scal.specific_out(scal.int16))))
"""Cast to 16-bit integer"""

convert_to_int32 = _conversion(elemwise.Elemwise(scal.Identity(scal.specific_out(scal.int32))))
"""Cast to 32-bit integer"""

convert_to_int64 = _conversion(elemwise.Elemwise(scal.Identity(scal.specific_out(scal.int64))))
"""Cast to 64-bit integer"""

convert_to_float32 = _conversion(elemwise.Elemwise(scal.Identity(scal.specific_out(scal.float32))))
"""Cast to single-precision floating point"""

convert_to_float64 = _conversion(elemwise.Elemwise(scal.Identity(scal.specific_out(scal.float64))))
"""Cast to double-precision floating point"""

convert_to_complex64 = _conversion(elemwise.Elemwise(scal.Identity(scal.specific_out(scal.complex64))))
"""Cast to single-precision complex"""

convert_to_complex128 = _conversion(elemwise.Elemwise(scal.Identity(scal.specific_out(scal.complex128))))
"""Cast to double-precision complex"""
...
@@ -496,7 +636,9 @@ class Shape(Op):
...
@@ -496,7 +636,9 @@ class Shape(Op):
out
[
0
]
=
numpy
.
asarray
(
x
.
shape
)
out
[
0
]
=
numpy
.
asarray
(
x
.
shape
)
def
grad
(
self
,
(
x
,),
(
gz
,)):
def
grad
(
self
,
(
x
,),
(
gz
,)):
return
[
None
]
return
[
None
]
# `shape` is really a Shape Op instance; the stub below only gives epydoc a
# routine-shaped symbol to document (see _redefine_asRoutine).
@_redefine_asRoutine(Shape())
def shape(a):
    pass
class
MaxAndArgmax
(
Op
):
class
MaxAndArgmax
(
Op
):
"""Calculate the max and argmax over a given axis"""
"""Calculate the max and argmax over a given axis"""
...
@@ -529,10 +671,12 @@ class MaxAndArgmax(Op):
...
@@ -529,10 +671,12 @@ class MaxAndArgmax(Op):
assert
axis
.
data
==
0
assert
axis
.
data
==
0
g_x
=
eq
(
max
(
x
,
axis
),
x
)
*
g_max
g_x
=
eq
(
max
(
x
,
axis
),
x
)
*
g_max
return
g_x
,
None
return
g_x
,
None
# `max_and_argmax` is really a MaxAndArgmax Op instance; the stub only exists
# so that epydoc documents it as a routine (see _redefine_asRoutine).
@_redefine_asRoutine(MaxAndArgmax())
def max_and_argmax(a):
    pass
@constructor
def
max
(
x
,
axis
=
None
):
def
max
(
x
,
axis
=
None
):
"""Return indexes of maximum elements obtained by iterating over given axis
"""Return indexes of maximum elements obtained by iterating over given axis
...
@@ -543,6 +687,7 @@ def max(x, axis=None):
...
@@ -543,6 +687,7 @@ def max(x, axis=None):
# but when Argmax.c_impl() is in place, it should be fine.
# but when Argmax.c_impl() is in place, it should be fine.
return
max_and_argmax
(
x
,
axis
)[
0
]
return
max_and_argmax
(
x
,
axis
)[
0
]
@constructor
def
argmax
(
x
,
axis
=
None
):
def
argmax
(
x
,
axis
=
None
):
"""Return maximum elements obtained by iterating over given axis
"""Return maximum elements obtained by iterating over given axis
...
@@ -558,58 +703,237 @@ def argmax(x, axis=None):
...
@@ -558,58 +703,237 @@ def argmax(x, axis=None):
# Comparison
# Comparison
##########################
##########################
# Elementwise comparison Ops.  Each stub is replaced by _scal_elemwise with an
# Elemwise built from the same-named scalar op; '_*_inplace' variants overwrite
# their first input.
@_scal_elemwise
def lt(a, b):
    """a < b"""

@_scal_elemwise
def _lt_inplace(a, b):
    """a < b (inplace on a)"""

@_scal_elemwise
def gt(a, b):
    """a > b"""

@_scal_elemwise
def _gt_inplace(a, b):
    """a > b (inplace on a)"""

@_scal_elemwise
def le(a, b):
    """a <= b"""

@_scal_elemwise
def _le_inplace(a, b):
    """a <= b (inplace on a)"""

@_scal_elemwise
def ge(a, b):
    """a >= b"""

@_scal_elemwise
def _ge_inplace(a, b):
    """a >= b (inplace on a)"""

@_scal_elemwise
def eq(a, b):
    """a == b"""

@_scal_elemwise
def _eq_inplace(a, b):
    """a == b (inplace on a)"""

@_scal_elemwise
def neq(a, b):
    """a != b"""

@_scal_elemwise
def _neq_inplace(a, b):
    """a != b (inplace on a)"""
##########################
# Bit-wise
##########################

@_scal_elemwise
def and_(a, b):
    """bitwise a & b"""

@_scal_elemwise
def _and__inplace(a, b):
    """bitwise a & b (inplace on a)"""

@_scal_elemwise
def or_(a, b):
    """bitwise a | b"""

@_scal_elemwise
def _or__inplace(a, b):
    """bitwise a | b (inplace on a)"""

@_scal_elemwise
def xor(a, b):
    """bitwise a ^ b"""

@_scal_elemwise
def _xor_inplace(a, b):
    """bitwise a ^ b (inplace on a)"""

@_scal_elemwise
def invert(a):
    """bitwise ~a"""

@_scal_elemwise
def _invert_inplace(a):
    """bitwise ~a (inplace on a)"""
##########################
# Math
##########################

@_scal_elemwise
def _abs(a):
    """|`a`|

    _abs has a leading underscore because abs() is a builtin.  TensorResult overloads the
    `TensorResult.__abs__` operator so that this function is called when you type abs(a).
    """

@_scal_elemwise
def __abs_inplace(a):
    """|`a`| (inplace on `a`)"""

@_scal_elemwise
def exp(a):
    """e^`a`"""

@_scal_elemwise
def _exp_inplace(a):
    """e^`a` (inplace on `a`)"""

@_scal_elemwise
def neg(a):
    """-a"""

@_scal_elemwise
def _neg_inplace(a):
    """-a (inplace on a)"""

# NOTE(review): the original docstring of `inv` wrongly said "(inplace on a)";
# the inplace variant is `_inv_inplace` below.
@_scal_elemwise
def inv(a):
    """1.0/a"""

@_scal_elemwise
def _inv_inplace(a):
    """1.0/a (inplace on a)"""

@_scal_elemwise
def log(a):
    """base e logarithm of a"""

@_scal_elemwise
def _log_inplace(a):
    """base e logarithm of a (inplace on a)"""

@_scal_elemwise
def log2(a):
    """base 2 logarithm of a"""

@_scal_elemwise
def _log2_inplace(a):
    """base 2 logarithm of a (inplace on a)"""

@_scal_elemwise
def sgn(a):
    """sign of a"""

@_scal_elemwise
def _sgn_inplace(a):
    """sign of `a` (inplace on `a`)"""

@_scal_elemwise
def sqr(a):
    """square of a"""

@_scal_elemwise
def _sqr_inplace(a):
    """square of `a` (inplace on `a`)"""

@_scal_elemwise
def sqrt(a):
    """square root of a"""

@_scal_elemwise
def _sqrt_inplace(a):
    """square root of `a` (inplace on `a`)"""

@_scal_elemwise
def cos(a):
    """cosine of a"""

@_scal_elemwise
def _cos_inplace(a):
    """cosine of `a` (inplace on `a`)"""

@_scal_elemwise
def sin(a):
    """sine of a"""

@_scal_elemwise
def _sin_inplace(a):
    """sine of `a` (inplace on `a`)"""

@_scal_elemwise
def tan(a):
    """tangent of a"""

@_scal_elemwise
def _tan_inplace(a):
    """tangent of `a` (inplace on `a`)"""

@_scal_elemwise
def cosh(a):
    """hyperbolic cosine of a"""

@_scal_elemwise
def _cosh_inplace(a):
    """hyperbolic cosine of `a` (inplace on `a`)"""

@_scal_elemwise
def sinh(a):
    """hyperbolic sine of a"""

@_scal_elemwise
def _sinh_inplace(a):
    """hyperbolic sine of `a` (inplace on `a`)"""

@_scal_elemwise
def tanh(a):
    """hyperbolic tangent of a"""

@_scal_elemwise
def _tanh_inplace(a):
    """hyperbolic tangent of `a` (inplace on `a`)"""
##########################
# Misc
##########################

#fill, _fill_inplace = _elemwise(scal.second, 'fill',
#"""fill WRITEME (elemwise)""")
@_scal_elemwise
def second(a, b):
    """Create a matrix by filling the shape of a with b"""

# NOTE(review): the stub below takes a single parameter in the original;
# _scal_elemwise discards the stub, so the signature is documentation only.
@_scal_elemwise
def _second_inplace(a):
    """Fill `a` with `b`"""

# `fill` is the historical public name for `second`.
fill = second
_fill_inplace = _second_inplace
def ones_like(model):
    """Return a tensor filled with 1.0, shaped like `model`."""
    #return Ones(model.type.ndim)(shape(model))
    return fill(model, 1.0)
def zeros_like(model):
    """Return a tensor filled with 0.0, shaped like `model`."""
    #return Zeros(model.type.ndim)(shape(model))
    return fill(model, 0.0)
class
Filler
(
gof
.
Op
):
class
Filler
(
gof
.
Op
):
"""WRITEME"""
def
__init__
(
self
,
value
,
ndim
,
dtype
=
'float64'
):
def
__init__
(
self
,
value
,
ndim
,
dtype
=
'float64'
):
self
.
value
=
value
self
.
value
=
value
self
.
ndim
=
ndim
self
.
ndim
=
ndim
...
@@ -643,25 +967,40 @@ class Filler(gof.Op):
...
@@ -643,25 +967,40 @@ class Filler(gof.Op):
return
hash
(
self
.
ndim
)
^
hash
(
self
.
dtype
)
return
hash
(
self
.
ndim
)
^
hash
(
self
.
dtype
)
Zeros = functools.partial(Filler, 0)
"""`Filler` specialized to value 0 (zero-filled tensors)."""
Ones = functools.partial(Filler, 1)
"""`Filler` specialized to value 1 (one-filled tensors)."""

def zero():
    """
    Return a scalar zero, e.g. for initializing sums.
    """
    return Zeros(0)([])

def one():
    """Return a scalar one."""
    return Ones(0)([])
# Both names are really Elemwise(identity) instances; _redefine swaps in the
# Op while keeping a routine-shaped symbol for epydoc.
@_redefine(elemwise.Elemwise(scal.identity))
def tensor_copy(a):
    """Create a duplicate of `a` (with duplicated storage)"""

@_redefine(elemwise.Elemwise(scal.identity, inplace_pattern={0: [0]}))
def view(a):
    """Create a duplicate of `a` (with shared storage)"""
def sum(input, axis=None):
    """Sum of `input` along `axis` (delegates to elemwise.Sum)."""
    return elemwise.Sum(axis)(input)
@constructor
def
mean
(
input
,
axis
=
None
):
def
mean
(
input
,
axis
=
None
):
"""WRITEME"""
s
=
sum
(
input
,
axis
)
s
=
sum
(
input
,
axis
)
shp
=
shape
(
input
)
shp
=
shape
(
input
)
if
axis
is
None
:
if
axis
is
None
:
...
@@ -697,12 +1036,47 @@ repeat = Repeat()
...
@@ -697,12 +1036,47 @@ repeat = Repeat()
# Arithmetics
# Arithmetics
##########################
##########################
@_scal_elemwise
def add(a, b):
    """elementwise addition"""

@_scal_elemwise
def _add_inplace(a, b):
    """elementwise addition (inplace on `a`)"""

@_scal_elemwise
def sub(a, b):
    """elementwise subtraction"""

@_scal_elemwise
def _sub_inplace(a, b):
    """elementwise subtraction (inplace on `a`)"""

@_scal_elemwise
def mul(a, b):
    """elementwise multiplication"""

@_scal_elemwise
def _mul_inplace(a, b):
    """elementwise multiplication (inplace on `a`)"""

@_scal_elemwise
def div(a, b):
    """elementwise division"""

@_scal_elemwise
def _div_inplace(a, b):
    """elementwise division (inplace on `a`)"""

@_scal_elemwise
def mod(a, b):
    """elementwise modulo"""

@_scal_elemwise
def _mod_inplace(a, b):
    """elementwise modulo (inplace on `a`)"""

@_scal_elemwise
def pow(a, b):
    """elementwise power"""

@_scal_elemwise
def _pow_inplace(a, b):
    """elementwise power (inplace on `a`)"""
##########################
##########################
...
@@ -734,9 +1108,12 @@ class TransposeInplace(Op):
...
@@ -734,9 +1108,12 @@ class TransposeInplace(Op):
def
__str__
(
self
):
def
__str__
(
self
):
return
"TransposeView"
return
"TransposeView"
_transpose_inplace = TransposeInplace()
"""TransposeInplace instance used internally by `transpose`."""

def transpose(x, **kwargs):
    """Return a transposed copy of `x` (copies first, then transposes the copy in place)."""
    return _transpose_inplace(tensor_copy(x), **kwargs)
...
@@ -871,6 +1248,7 @@ class Subtensor(Op):
...
@@ -871,6 +1248,7 @@ class Subtensor(Op):
class
SetSubtensor
(
Subtensor
):
class
SetSubtensor
(
Subtensor
):
"""WRITEME"""
view_map
=
{}
view_map
=
{}
destroy_map
=
{
0
:
[
0
]}
destroy_map
=
{
0
:
[
0
]}
...
@@ -923,6 +1301,7 @@ class SetSubtensor(Subtensor):
...
@@ -923,6 +1301,7 @@ class SetSubtensor(Subtensor):
class
MakeVector
(
Op
):
class
MakeVector
(
Op
):
"""WRITEME"""
def
__init__
(
self
,
stype
):
def
__init__
(
self
,
stype
):
self
.
stype
=
stype
self
.
stype
=
stype
def
make_node
(
self
,
*
inputs
):
def
make_node
(
self
,
*
inputs
):
...
@@ -935,6 +1314,7 @@ class MakeVector(Op):
...
@@ -935,6 +1314,7 @@ class MakeVector(Op):
return
[
None
]
*
len
(
inputs
)
return
[
None
]
*
len
(
inputs
)
make_lvector = MakeVector(lscalar)
"""MakeVector instance specialized to `lscalar` elements."""
class
Concatenate
(
Op
):
class
Concatenate
(
Op
):
"""
"""
...
...
tensor_opt.py
浏览文件 @
b128f2b8
...
@@ -20,7 +20,7 @@ def in2out(*local_opts):
...
@@ -20,7 +20,7 @@ def in2out(*local_opts):
# gemm: (d,a,b,c,s) -> d = d*s + a*dot(b,c)
# gemm: (d,a,b,c,s) -> d = d*s + a*dot(b,c)
# Transforms d -= a * dot(b, c) into gemm(d, -a, b, c, 1.0)
# Transforms d -= a * dot(b, c) into gemm(d, -a, b, c, 1.0)
gemm_pattern_1
=
gof
.
PatternSub
((
T
.
sub_inplace
,
gemm_pattern_1
=
gof
.
PatternSub
((
T
.
_
sub_inplace
,
'd'
,
'd'
,
(
T
.
mul
,
(
T
.
mul
,
dict
(
pattern
=
(
T
.
DimShuffle
((),
[
'x'
,
'x'
],
inplace
=
True
),
'a'
),
dict
(
pattern
=
(
T
.
DimShuffle
((),
[
'x'
,
'x'
],
inplace
=
True
),
'a'
),
...
...
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论