Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
P
pytensor
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
图表
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
testgroup
pytensor
Commits
ce7533df
提交
ce7533df
authored
9月 23, 2009
作者:
James Bergstra
浏览文件
操作
浏览文件
下载
差异文件
Merge
上级
567acc71
f7bf373e
显示空白字符变更
内嵌
并排
正在显示
19 个修改的文件
包含
514 行增加
和
155 行删除
+514
-155
builders.py
theano/compile/builders.py
+21
-1
__init__.py
theano/compile/sandbox/__init__.py
+2
-0
sharedvalue.py
theano/compile/sandbox/sharedvalue.py
+40
-7
test_nnet.py
theano/compile/sandbox/tests/test_nnet.py
+1
-0
test_pfunc.py
theano/compile/sandbox/tests/test_pfunc.py
+1
-1
test_shared.py
theano/compile/sandbox/tests/test_shared.py
+15
-5
test_builders.py
theano/compile/tests/test_builders.py
+4
-1
cc.py
theano/gof/cc.py
+11
-7
op.py
theano/gof/op.py
+20
-1
type.py
theano/gof/type.py
+10
-0
gradient.py
theano/gradient.py
+2
-1
basic.py
theano/scalar/basic.py
+94
-36
basic.py
theano/tensor/basic.py
+116
-23
blas.py
theano/tensor/blas.py
+12
-0
elemwise.py
theano/tensor/elemwise.py
+34
-3
nnet.py
theano/tensor/nnet.py
+19
-9
test_basic.py
theano/tensor/tests/test_basic.py
+0
-60
test_casting.py
theano/tensor/tests/test_casting.py
+89
-0
test_complex.py
theano/tensor/tests/test_complex.py
+23
-0
没有找到文件。
theano/compile/builders.py
浏览文件 @
ce7533df
...
@@ -35,11 +35,21 @@ class OpFromGraph(gof.Op):
...
@@ -35,11 +35,21 @@ class OpFromGraph(gof.Op):
"""
"""
def
__init__
(
self
,
inputs
,
outputs
,
grad_depth
=
1
,
**
kwargs
):
def
__init__
(
self
,
inputs
,
outputs
,
grad_depth
=
1
,
**
kwargs
):
if
not
isinstance
(
outputs
,
list
):
raise
TypeError
(
'outputs must be list'
,
outputs
)
for
i
in
inputs
+
outputs
:
if
not
isinstance
(
i
,
gof
.
Variable
):
raise
TypeError
(
'inputs and outputs must be Variable instances'
,
i
)
if
'updates'
in
kwargs
:
raise
TypeError
(
'updates are not allowed in kwargs'
)
# TODO: the graph may have implicit inputs like Value and SharedVariable instances.
# what impact to they have on the validity of this Op?
self
.
fn
=
function
(
inputs
,
outputs
,
**
kwargs
)
self
.
fn
=
function
(
inputs
,
outputs
,
**
kwargs
)
self
.
inputs
=
inputs
self
.
inputs
=
inputs
self
.
outputs
=
outputs
self
.
outputs
=
outputs
self
.
input_types
=
[
input
.
type
for
input
in
inputs
]
self
.
input_types
=
[
input
.
type
for
input
in
inputs
]
self
.
output_types
=
[
output
.
type
for
output
in
outputs
]
self
.
output_types
=
[
output
.
type
for
output
in
outputs
]
if
grad_depth
>
0
:
if
grad_depth
>
0
:
output_grads
=
[
t
()
for
t
in
self
.
output_types
]
output_grads
=
[
t
()
for
t
in
self
.
output_types
]
gd
=
G
.
grad_sources_inputs
(
zip
(
self
.
outputs
,
output_grads
),
self
.
inputs
)
gd
=
G
.
grad_sources_inputs
(
zip
(
self
.
outputs
,
output_grads
),
self
.
inputs
)
...
@@ -52,6 +62,13 @@ class OpFromGraph(gof.Op):
...
@@ -52,6 +62,13 @@ class OpFromGraph(gof.Op):
self
.
grad_ops
.
append
(
OpFromGraph
(
inputs
+
output_grads
,
self
.
grad_ops
.
append
(
OpFromGraph
(
inputs
+
output_grads
,
[
g
],
[
g
],
grad_depth
=
grad_depth
-
1
))
grad_depth
=
grad_depth
-
1
))
def
__eq__
(
self
,
other
):
#TODO: recognize a copy
return
self
is
other
def
__hash__
(
self
):
#TODO: use internal variables in hash
return
hash
(
type
(
self
))
def
make_node
(
self
,
*
inputs
):
def
make_node
(
self
,
*
inputs
):
for
input
,
type
in
zip
(
inputs
,
self
.
input_types
):
for
input
,
type
in
zip
(
inputs
,
self
.
input_types
):
...
@@ -63,8 +80,11 @@ class OpFromGraph(gof.Op):
...
@@ -63,8 +80,11 @@ class OpFromGraph(gof.Op):
def
perform
(
self
,
node
,
inputs
,
outputs
):
def
perform
(
self
,
node
,
inputs
,
outputs
):
variables
=
self
.
fn
(
*
inputs
)
variables
=
self
.
fn
(
*
inputs
)
assert
len
(
variables
)
==
len
(
outputs
)
for
output
,
variable
in
zip
(
outputs
,
variables
):
for
output
,
variable
in
zip
(
outputs
,
variables
):
output
[
0
]
=
variable
##TODO: when function's output-borrowing semantics are correct, we wont need this
# copy anymore
output
[
0
]
=
variable
.
copy
()
def
grad
(
self
,
inputs
,
output_grads
):
def
grad
(
self
,
inputs
,
output_grads
):
if
hasattr
(
self
,
'grad_ops'
):
if
hasattr
(
self
,
'grad_ops'
):
...
...
theano/compile/sandbox/__init__.py
浏览文件 @
ce7533df
from
.sharedvalue
import
shared
from
.pfunc
import
pfunc
theano/compile/sandbox/sharedvalue.py
浏览文件 @
ce7533df
"""Provide a simple user friendly API """
"""Provide a simple user friendly API """
__docformat__
=
'restructuredtext en'
__docformat__
=
'restructuredtext en'
import
traceback
import
copy
import
copy
import
numpy
import
numpy
...
@@ -10,6 +11,14 @@ from theano.tensor import TensorType
...
@@ -10,6 +11,14 @@ from theano.tensor import TensorType
from
theano.scalar
import
Scalar
from
theano.scalar
import
Scalar
from
theano.compile
import
function
from
theano.compile
import
function
import
logging
_logger
=
logging
.
getLogger
(
'theano.compile.sandbox.sharedvalue'
)
_logger
.
setLevel
(
logging
.
DEBUG
)
def
debug
(
*
msg
):
_logger
.
debug
(
' '
.
join
(
str
(
m
)
for
m
in
msg
))
def
info
(
*
msg
):
_logger
.
info
(
' '
.
join
(
str
(
m
)
for
m
in
msg
))
def
warn
(
*
msg
):
_logger
.
warn
(
' '
.
join
(
str
(
m
)
for
m
in
msg
))
def
warning
(
*
msg
):
_logger
.
warning
(
' '
.
join
(
str
(
m
)
for
m
in
msg
))
def
error
(
*
msg
):
_logger
.
error
(
' '
.
join
(
str
(
m
)
for
m
in
msg
))
class
SharedVariable
(
Variable
):
class
SharedVariable
(
Variable
):
"""
"""
...
@@ -92,6 +101,9 @@ class SharedVariable(Variable):
...
@@ -92,6 +101,9 @@ class SharedVariable(Variable):
:param update: the new value for this shared variable when updated by a pfunc.
:param update: the new value for this shared variable when updated by a pfunc.
:returns: a Variable whose value will be assigned to this SharedVariable by a pfunc.
:returns: a Variable whose value will be assigned to this SharedVariable by a pfunc.
:note: The return value of this function must match the self.type, or else pfunc()
will raise a TypeError.
"""
"""
if
not
isinstance
(
update
,
Variable
):
if
not
isinstance
(
update
,
Variable
):
# The value for the update is not a Variable: we cast it into
# The value for the update is not a Variable: we cast it into
...
@@ -148,14 +160,35 @@ def tensor_constructor(value, name=None, strict=False, broadcastable=None):
...
@@ -148,14 +160,35 @@ def tensor_constructor(value, name=None, strict=False, broadcastable=None):
type
=
TensorType
(
value
.
dtype
,
broadcastable
=
broadcastable
)
type
=
TensorType
(
value
.
dtype
,
broadcastable
=
broadcastable
)
return
TensorSharedVariable
(
type
=
type
,
value
=
value
,
name
=
name
,
strict
=
strict
)
return
TensorSharedVariable
(
type
=
type
,
value
=
value
,
name
=
name
,
strict
=
strict
)
# TensorSharedVariable brings in the tensor operators, is not ideal, but works as long as we
# dont do purely scalar-scalar operations
class
ScalarSharedVariable
(
SharedVariable
,
theano
.
tensor
.
basic
.
_tensor_py_operators
):
pass
@shared_constructor
@shared_constructor
def
scalar_constructor
(
value
,
name
=
None
,
strict
=
False
,
dtype
=
None
):
def
scalar_constructor
(
value
,
name
=
None
,
strict
=
False
,
dtype
=
None
):
"""SharedVariable constructor for scalar values. Defaults to int64 or float64"""
"""SharedVariable constructor for scalar values. Defaults to int64 or float64.
if
not
isinstance
(
value
,
(
float
,
int
)):
:note: We implement this using 0-d tensors for now.
"""
if
not
isinstance
(
value
,
(
numpy
.
number
,
float
,
int
)):
raise
TypeError
()
raise
TypeError
()
# use float64 and int64 by default, user can override
if
dtype
is
None
:
if
not
dtype
:
if
isinstance
(
value
,
float
):
dtype
=
'int64'
if
isinstance
(
value
,
int
)
else
'float64'
dtype
=
'float64'
type
=
Scalar
(
dtype
)
elif
isinstance
(
value
,
int
):
return
TensorSharedVariable
(
type
=
type
,
value
=
numpy
.
asarray
(
value
),
name
=
name
,
strict
=
strict
)
dtype
=
'int64'
else
:
dtype
=
type
(
value
)
.
__name__
type
=
TensorType
(
dtype
=
dtype
,
broadcastable
=
[])
try
:
# don't pass the dtype to asarray because we want this to fail if strict is True and the
# types do not match
rval
=
ScalarSharedVariable
(
type
=
type
,
value
=
numpy
.
asarray
(
value
),
name
=
name
,
strict
=
strict
)
return
rval
except
:
traceback
.
print_exc
()
raise
theano/compile/sandbox/tests/test_nnet.py
浏览文件 @
ce7533df
...
@@ -18,6 +18,7 @@ class NNet(object):
...
@@ -18,6 +18,7 @@ class NNet(object):
self
.
lr
=
shared
(
lr
,
'learning_rate'
)
self
.
lr
=
shared
(
lr
,
'learning_rate'
)
self
.
w1
=
shared
(
numpy
.
zeros
((
n_hidden
,
n_input
)),
'w1'
)
self
.
w1
=
shared
(
numpy
.
zeros
((
n_hidden
,
n_input
)),
'w1'
)
self
.
w2
=
shared
(
numpy
.
zeros
((
n_output
,
n_hidden
)),
'w2'
)
self
.
w2
=
shared
(
numpy
.
zeros
((
n_output
,
n_hidden
)),
'w2'
)
print
self
.
lr
.
type
self
.
hidden
=
sigmoid
(
tensor
.
dot
(
self
.
w1
,
self
.
input
))
self
.
hidden
=
sigmoid
(
tensor
.
dot
(
self
.
w1
,
self
.
input
))
self
.
output
=
tensor
.
dot
(
self
.
w2
,
self
.
hidden
)
self
.
output
=
tensor
.
dot
(
self
.
w2
,
self
.
hidden
)
...
...
theano/compile/sandbox/tests/test_pfunc.py
浏览文件 @
ce7533df
...
@@ -172,7 +172,7 @@ class Test_pfunc(unittest.TestCase):
...
@@ -172,7 +172,7 @@ class Test_pfunc(unittest.TestCase):
# Same but using a mutable constant to show how it can be used to
# Same but using a mutable constant to show how it can be used to
# modify the update value after the function is created.
# modify the update value after the function is created.
x
.
value
=
0
x
.
value
=
0
y
=
numpy
.
ones
(())
y
=
numpy
.
ones
(()
,
dtype
=
'int64'
)
assign_mutable
=
pfunc
([],
[],
updates
=
{
x
:
y
})
assign_mutable
=
pfunc
([],
[],
updates
=
{
x
:
y
})
assign_mutable
()
assign_mutable
()
self
.
failUnless
(
x
.
value
==
1
)
self
.
failUnless
(
x
.
value
==
1
)
...
...
theano/compile/sandbox/tests/test_shared.py
浏览文件 @
ce7533df
...
@@ -10,10 +10,16 @@ class Test_SharedVariable(unittest.TestCase):
...
@@ -10,10 +10,16 @@ class Test_SharedVariable(unittest.TestCase):
def
test_ctors
(
self
):
def
test_ctors
(
self
):
if
0
:
#when using an implementation that handles scalars with Scalar type
assert
shared
(
7
)
.
type
==
Scalar
(
'int64'
)
assert
shared
(
7
)
.
type
==
Scalar
(
'int64'
)
assert
shared
(
7.0
)
.
type
==
Scalar
(
'float64'
)
assert
shared
(
7.0
)
.
type
==
Scalar
(
'float64'
)
assert
shared
(
7
,
dtype
=
'float64'
)
.
type
==
Scalar
(
'float64'
)
assert
shared
(
7
,
dtype
=
'float64'
)
.
type
==
Scalar
(
'float64'
)
else
:
assert
shared
(
7
)
.
type
==
theano
.
tensor
.
lscalar
assert
shared
(
7.0
)
.
type
==
theano
.
tensor
.
dscalar
assert
shared
(
7
,
dtype
=
'float64'
)
.
type
==
theano
.
tensor
.
dscalar
# test tensor constructor
# test tensor constructor
b
=
shared
(
numpy
.
zeros
((
5
,
5
),
dtype
=
'int32'
))
b
=
shared
(
numpy
.
zeros
((
5
,
5
),
dtype
=
'int32'
))
assert
b
.
type
==
TensorType
(
'int32'
,
broadcastable
=
[
False
,
False
])
assert
b
.
type
==
TensorType
(
'int32'
,
broadcastable
=
[
False
,
False
])
...
@@ -107,13 +113,17 @@ class Test_SharedVariable(unittest.TestCase):
...
@@ -107,13 +113,17 @@ class Test_SharedVariable(unittest.TestCase):
def
test_strict
(
self
):
def
test_strict
(
self
):
def
f
(
var
,
val
):
var
.
value
=
val
def
f
(
var
,
val
):
var
.
value
=
val
b
=
shared
(
7
,
strict
=
True
)
b
=
shared
(
numpy
.
int64
(
7
),
strict
=
True
)
self
.
failUnlessRaises
(
TypeError
,
f
(
b
,
8.23
))
#assert b.type == Scalar('int64')
b
=
shared
(
7.234
,
strict
=
True
)
assert
b
.
type
==
theano
.
tensor
.
lscalar
self
.
failUnlessRaises
(
TypeError
,
f
(
b
,
8
))
self
.
failUnlessRaises
(
TypeError
,
f
,
b
,
8.23
)
b
=
shared
(
numpy
.
float64
(
7.234
),
strict
=
True
)
#assert b.type == Scalar('float64')
assert
b
.
type
==
theano
.
tensor
.
dscalar
self
.
failUnlessRaises
(
TypeError
,
f
,
b
,
8
)
c
=
shared
(
numpy
.
zeros
((
5
,
5
),
dtype
=
'float32'
))
c
=
shared
(
numpy
.
zeros
((
5
,
5
),
dtype
=
'float32'
))
self
.
failUnlessRaises
(
TypeError
,
f
(
b
,
numpy
.
random
.
rand
(
5
,
5
)
))
self
.
failUnlessRaises
(
TypeError
,
f
,
b
,
numpy
.
random
.
rand
(
5
,
5
))
theano/compile/tests/test_builders.py
浏览文件 @
ce7533df
...
@@ -20,9 +20,12 @@ class T_OpFromGraph(unittest.TestCase):
...
@@ -20,9 +20,12 @@ class T_OpFromGraph(unittest.TestCase):
x
,
y
,
z
=
T
.
matrices
(
'xyz'
)
x
,
y
,
z
=
T
.
matrices
(
'xyz'
)
e
=
x
+
y
*
z
e
=
x
+
y
*
z
op
=
OpFromGraph
([
x
,
y
,
z
],
[
e
],
mode
=
'FAST_RUN'
)
op
=
OpFromGraph
([
x
,
y
,
z
],
[
e
],
mode
=
'FAST_RUN'
)
f
=
op
(
x
,
y
,
z
)
-
op
(
y
,
z
,
x
)
f
=
op
(
x
,
y
,
z
)
-
op
(
y
,
z
,
x
)
#(1+3*5=array of 16) - (3+1*5=array of 8)
fn
=
function
([
x
,
y
,
z
],
f
)
fn
=
function
([
x
,
y
,
z
],
f
)
xv
,
yv
,
zv
=
N
.
ones
((
2
,
2
)),
N
.
ones
((
2
,
2
))
*
3
,
N
.
ones
((
2
,
2
))
*
5
xv
,
yv
,
zv
=
N
.
ones
((
2
,
2
)),
N
.
ones
((
2
,
2
))
*
3
,
N
.
ones
((
2
,
2
))
*
5
print
function
,
function
.
__module__
print
fn
.
maker
.
env
.
toposort
()
print
fn
(
xv
,
yv
,
zv
)
assert
numpy
.
all
(
8.0
==
fn
(
xv
,
yv
,
zv
))
assert
numpy
.
all
(
8.0
==
fn
(
xv
,
yv
,
zv
))
assert
numpy
.
all
(
8.0
==
fn
(
xv
,
yv
,
zv
))
assert
numpy
.
all
(
8.0
==
fn
(
xv
,
yv
,
zv
))
...
...
theano/gof/cc.py
浏览文件 @
ce7533df
...
@@ -6,9 +6,15 @@ Defines Linkers that deal with C implementations.
...
@@ -6,9 +6,15 @@ Defines Linkers that deal with C implementations.
from
copy
import
copy
from
copy
import
copy
import
re
#for set_compiledir
import
re
#for set_compiledir
import
os
,
sys
,
platform
,
StringIO
,
time
import
os
,
sys
,
platform
,
StringIO
,
time
import
md5
if
sys
.
version_info
[:
2
]
>=
(
2
,
5
):
if
sys
.
version_info
[:
2
]
>=
(
2
,
5
):
import
hashlib
import
hashlib
def
hash_from_code
(
msg
):
return
hashlib
.
md5
(
msg
)
.
hexdigest
()
else
:
import
md5
def
hash_from_code
(
msg
):
return
md5
.
new
(
struct_code
)
.
hexdigest
()
from
theano.gof.python25
import
any
,
all
from
theano.gof.python25
import
any
,
all
...
@@ -512,6 +518,8 @@ class CLinker(link.Linker):
...
@@ -512,6 +518,8 @@ class CLinker(link.Linker):
except
utils
.
MethodNotDefined
:
except
utils
.
MethodNotDefined
:
cleanup
=
""
cleanup
=
""
info
(
'compiling un-versioned Apply'
,
node
)
blocks
.
append
(
CodeBlock
(
""
,
behavior
,
cleanup
,
sub
))
blocks
.
append
(
CodeBlock
(
""
,
behavior
,
cleanup
,
sub
))
tasks
.
append
((
node
,
'code'
,
id
))
tasks
.
append
((
node
,
'code'
,
id
))
id
+=
1
id
+=
1
...
@@ -525,11 +533,7 @@ class CLinker(link.Linker):
...
@@ -525,11 +533,7 @@ class CLinker(link.Linker):
# The hash calculated on the code identifies it so weave can cache properly.
# The hash calculated on the code identifies it so weave can cache properly.
# (the hash has to be used outside of the support code because weave does not consider changes in the support code)
# (the hash has to be used outside of the support code because weave does not consider changes in the support code)
# hashlib is new to 2.5
hash
=
hash_from_code
(
struct_code
)
if
sys
.
version_info
[:
2
]
<
(
2
,
5
):
hash
=
md5
.
new
(
struct_code
)
.
hexdigest
()
else
:
hash
=
hashlib
.
md5
(
struct_code
)
.
hexdigest
()
struct_name
=
'__struct_compiled_op_
%
s'
%
hash
struct_name
=
'__struct_compiled_op_
%
s'
%
hash
#struct_code %= dict(name = struct_name)
#struct_code %= dict(name = struct_name)
...
@@ -811,7 +815,7 @@ class CLinker(link.Linker):
...
@@ -811,7 +815,7 @@ class CLinker(link.Linker):
return
(
op_pos
[
i
.
owner
],
i
.
owner
.
outputs
.
index
(
i
))
return
(
op_pos
[
i
.
owner
],
i
.
owner
.
outputs
.
index
(
i
))
for
opos
,
o
in
enumerate
(
order
):
for
opos
,
o
in
enumerate
(
order
):
version
.
append
(
o
.
op
.
c_code_cache_version
(
))
version
.
append
(
o
.
op
.
c_code_cache_version
_apply
(
o
))
for
i
in
o
.
inputs
:
for
i
in
o
.
inputs
:
version
.
append
(
i
.
type
.
c_code_cache_version
())
version
.
append
(
i
.
type
.
c_code_cache_version
())
for
i
in
o
.
outputs
:
for
i
in
o
.
outputs
:
...
...
theano/gof/op.py
浏览文件 @
ce7533df
...
@@ -106,8 +106,27 @@ class CLinkerObject(object):
...
@@ -106,8 +106,27 @@ class CLinkerObject(object):
The cache mechanism may erase cached modules that have been superceded by newer
The cache mechanism may erase cached modules that have been superceded by newer
versions. See `ModuleCache` for details.
versions. See `ModuleCache` for details.
:note: See also `c_code_cache_version_apply()`
"""
return
()
def
c_code_cache_version_apply
(
self
,
node
):
"""Return a tuple of integers indicating the version of this Op.
An empty tuple indicates an 'unversioned' Op that will not be cached between processes.
The cache mechanism may erase cached modules that have been superceded by newer
versions. See `ModuleCache` for details.
:note: See also `c_code_cache_version()`
:note: This function overrides `c_code_cache_version` unless it explicitly calls
`c_code_cache_version`. The default implementation simply calls `c_code_cache_version`
and ignores the `node` argument.
"""
"""
return
(
1
,)
return
self
.
c_code_cache_version
()
def
c_compile_args
(
self
):
def
c_compile_args
(
self
):
"""Optional: Return a list of compile args recommended to compile the
"""Optional: Return a list of compile args recommended to compile the
...
...
theano/gof/type.py
浏览文件 @
ce7533df
...
@@ -177,6 +177,16 @@ class CLinkerType(CLinkerObject):
...
@@ -177,6 +177,16 @@ class CLinkerType(CLinkerObject):
"""
"""
raise
MethodNotDefined
(
"c_sync"
,
type
(
self
),
self
.
__class__
.
__name__
)
raise
MethodNotDefined
(
"c_sync"
,
type
(
self
),
self
.
__class__
.
__name__
)
def
c_code_cache_version
(
self
):
"""Return a tuple of integers indicating the version of this Type.
An empty tuple indicates an 'unversioned' Type that will not be cached between processes.
The cache mechanism may erase cached modules that have been superceded by newer
versions. See `ModuleCache` for details.
"""
return
()
...
...
theano/gradient.py
浏览文件 @
ce7533df
...
@@ -108,7 +108,8 @@ def grad_sources_inputs(sources, graph_inputs, warn_type=True):
...
@@ -108,7 +108,8 @@ def grad_sources_inputs(sources, graph_inputs, warn_type=True):
if
g_r
and
(
getattr
(
r
,
'type'
,
0
)
!=
getattr
(
g_r
,
'type'
,
1
)):
if
g_r
and
(
getattr
(
r
,
'type'
,
0
)
!=
getattr
(
g_r
,
'type'
,
1
)):
r_type
=
getattr
(
r
,
'type'
,
None
)
r_type
=
getattr
(
r
,
'type'
,
None
)
g_r_type
=
getattr
(
g_r
,
'type'
,
None
)
g_r_type
=
getattr
(
g_r
,
'type'
,
None
)
warning
(
'
%
s.grad returned a different type for input
%
i:
%
s vs.
%
s'
%
(
node
.
op
,
ii
,
r_type
,
g_r_type
))
warning
(
'
%
s.grad returned a different type (
%
s) for input
%
i of type (
%
s)'
%
(
node
.
op
,
g_r_type
,
ii
,
r_type
))
if
g_r
and
len
(
sources
)
==
1
and
sources
[
0
][
0
]
.
name
and
r
.
name
:
if
g_r
and
len
(
sources
)
==
1
and
sources
[
0
][
0
]
.
name
and
r
.
name
:
g_r
.
name
=
"(d
%
s/d
%
s)"
%
(
sources
[
0
][
0
]
.
name
,
r
.
name
)
g_r
.
name
=
"(d
%
s/d
%
s)"
%
(
sources
[
0
][
0
]
.
name
,
r
.
name
)
if
g_r
is
not
None
:
if
g_r
is
not
None
:
...
...
theano/scalar/basic.py
浏览文件 @
ce7533df
...
@@ -61,7 +61,9 @@ class Scalar(Type):
...
@@ -61,7 +61,9 @@ class Scalar(Type):
def
filter
(
self
,
data
,
strict
=
False
):
def
filter
(
self
,
data
,
strict
=
False
):
py_type
=
self
.
dtype_specs
()[
0
]
py_type
=
self
.
dtype_specs
()[
0
]
if
strict
and
not
isinstance
(
data
,
py_type
):
if
strict
and
not
isinstance
(
data
,
py_type
):
raise
TypeError
(
"
%
s expected a
%
s"
%
(
self
,
self
.
dtype
),
data
)
raise
TypeError
(
"
%
s expected a
%
s, got
%
s of type
%
s"
%
(
self
,
py_type
,
data
,
type
(
data
)),
data
)
try
:
try
:
return
py_type
(
data
)
return
py_type
(
data
)
except
Exception
,
e
:
except
Exception
,
e
:
...
@@ -180,23 +182,44 @@ class Scalar(Type):
...
@@ -180,23 +182,44 @@ class Scalar(Type):
ret.imag = (this->imag * y.real - this->real * y.imag) / y_norm_square;
ret.imag = (this->imag * y.real - this->real * y.imag) / y_norm_square;
return ret;
return ret;
}
}
complex_type& operator =(const scalar_type& y) {
template <typename T>
this->real=y;
complex_type& operator =(const T& y);
this->imag=0;
return *this;
}
%(upcast)
s
};
};
"""
"""
operator_eq
=
"""
template <>
%(mytype)
s &
%(mytype)
s::operator =(const npy_int8 & y)
{ this->real=y; this->imag=0; return *this; }
template <>
%(mytype)
s &
%(mytype)
s::operator =(const npy_int16 & y)
{ this->real=y; this->imag=0; return *this; }
template <>
%(mytype)
s &
%(mytype)
s::operator =(const npy_int32 & y)
{ this->real=y; this->imag=0; return *this; }
template <>
%(mytype)
s &
%(mytype)
s::operator =(const npy_int64 & y)
{ this->real=y; this->imag=0; return *this; }
template <>
%(mytype)
s &
%(mytype)
s::operator =(const npy_float32 & y)
{ this->real=y; this->imag=0; return *this; }
template <>
%(mytype)
s &
%(mytype)
s::operator =(const npy_float64 & y)
{ this->real=y; this->imag=0; return *this; }
template <>
%(mytype)
s &
%(mytype)
s::operator =(const theano_complex128 & y)
{ this->real=y.real; this->imag=y.imag; return *this; }
template <>
%(mytype)
s &
%(mytype)
s::operator =(const theano_complex64 & y)
{ this->real=y.real; this->imag=y.imag; return *this; }
"""
# todo: use C templating
# todo: use C templating
return
template
%
dict
(
nbits
=
64
,
half_nbits
=
32
,
upcast
=
""
)
+
template
%
dict
(
nbits
=
128
,
half_nbits
=
64
,
upcast
=
"""
return
template
%
dict
(
nbits
=
64
,
half_nbits
=
32
)
\
complex_type& operator =(theano_complex64 y) {
+
template
%
dict
(
nbits
=
128
,
half_nbits
=
64
)
\
this->real=y.real;
+
operator_eq
%
dict
(
mytype
=
'theano_complex128'
)
\
this->imag=y.imag;
+
operator_eq
%
dict
(
mytype
=
'theano_complex64'
)
return *this;
}
"""
)
def
c_code_cache_version
(
self
):
return
(
2
,)
int8
=
Scalar
(
'int8'
)
int8
=
Scalar
(
'int8'
)
...
@@ -293,6 +316,8 @@ class transfer_type(gof.utils.object2):
...
@@ -293,6 +316,8 @@ class transfer_type(gof.utils.object2):
def
__init__
(
self
,
*
transfer
):
def
__init__
(
self
,
*
transfer
):
assert
all
(
type
(
x
)
==
int
for
x
in
transfer
)
assert
all
(
type
(
x
)
==
int
for
x
in
transfer
)
self
.
transfer
=
transfer
self
.
transfer
=
transfer
def
__str__
(
self
):
return
'transfer_type{
%
s}'
%
self
.
transfer
def
__call__
(
self
,
*
types
):
def
__call__
(
self
,
*
types
):
upcast
=
upcast_out
(
*
types
)
upcast
=
upcast_out
(
*
types
)
retval
=
[]
retval
=
[]
...
@@ -395,6 +420,9 @@ class ScalarOp(Op):
...
@@ -395,6 +420,9 @@ class ScalarOp(Op):
else
:
else
:
return
"
%
s{
%
s}"
%
(
self
.
__class__
.
__name__
,
", "
.
join
(
"
%
s=
%
s"
%
(
k
,
v
)
for
k
,
v
in
self
.
__dict__
.
items
()
if
k
!=
"name"
))
return
"
%
s{
%
s}"
%
(
self
.
__class__
.
__name__
,
", "
.
join
(
"
%
s=
%
s"
%
(
k
,
v
)
for
k
,
v
in
self
.
__dict__
.
items
()
if
k
!=
"name"
))
def
c_code_cache_version
(
self
):
return
(
2
,)
class
UnaryScalarOp
(
ScalarOp
):
class
UnaryScalarOp
(
ScalarOp
):
nin
=
1
nin
=
1
...
@@ -617,12 +645,10 @@ class Add(ScalarOp):
...
@@ -617,12 +645,10 @@ class Add(ScalarOp):
retval
=
[]
retval
=
[]
for
i
in
inputs
:
for
i
in
inputs
:
if
i
.
type
in
grad_types
:
if
i
.
type
in
grad_types
:
retval
+=
[
gz
]
retval
+=
[
cast
(
gz
,
i
.
type
.
dtype
)
]
else
:
else
:
retval
+=
[
None
]
retval
+=
[
None
]
return
retval
return
retval
#backport
#return [(gz if i.type in grad_types else None) for i in inputs]
add
=
Add
(
upcast_out
,
name
=
'add'
)
add
=
Add
(
upcast_out
,
name
=
'add'
)
class
Mul
(
ScalarOp
):
class
Mul
(
ScalarOp
):
...
@@ -658,18 +684,15 @@ class Sub(BinaryScalarOp):
...
@@ -658,18 +684,15 @@ class Sub(BinaryScalarOp):
return
"
%(z)
s =
%(x)
s -
%(y)
s;"
%
locals
()
return
"
%(z)
s =
%(x)
s -
%(y)
s;"
%
locals
()
def
grad
(
self
,
(
x
,
y
),
(
gz
,
)):
def
grad
(
self
,
(
x
,
y
),
(
gz
,
)):
if
x
.
type
in
grad_types
:
if
x
.
type
in
grad_types
:
first_part
=
gz
first_part
=
cast
(
gz
,
x
.
type
.
dtype
)
else
:
else
:
first_part
=
None
first_part
=
None
if
y
.
type
in
grad_types
:
if
y
.
type
in
grad_types
:
second_part
=
-
gz
second_part
=
cast
(
-
gz
,
y
.
type
.
dtype
)
else
:
else
:
second_part
=
None
second_part
=
None
return
first_part
,
second_part
return
first_part
,
second_part
#return gz if x.type in grad_types else None, -gz if y.type in grad_types else None
sub
=
Sub
(
upcast_out
,
name
=
'sub'
)
sub
=
Sub
(
upcast_out
,
name
=
'sub'
)
def
div_proxy
(
x
,
y
):
def
div_proxy
(
x
,
y
):
...
@@ -699,19 +722,15 @@ class TrueDiv(BinaryScalarOp):
...
@@ -699,19 +722,15 @@ class TrueDiv(BinaryScalarOp):
return
"
%(z)
s =
%(x)
s /
%(y)
s;"
%
locals
()
return
"
%(z)
s =
%(x)
s /
%(y)
s;"
%
locals
()
def
grad
(
self
,
(
x
,
y
),
(
gz
,
)):
def
grad
(
self
,
(
x
,
y
),
(
gz
,
)):
if
x
.
type
in
grad_types
:
if
x
.
type
in
grad_types
:
first_part
=
gz
/
y
first_part
=
cast
(
gz
/
y
,
x
.
type
.
dtype
)
else
:
else
:
first_part
=
None
first_part
=
None
if
y
.
type
in
grad_types
:
if
y
.
type
in
grad_types
:
second_part
=
-
(
gz
*
x
)
/
(
y
*
y
)
second_part
=
cast
(
-
(
gz
*
x
)
/
(
y
*
y
),
y
.
type
.
dtype
)
else
:
else
:
second_part
=
None
second_part
=
None
return
first_part
,
second_part
return
(
first_part
,
second_part
)
#return (gz / y if x.type in grad_types else None,
# -(gz * x) / (y * y) if y.type in grad_types else None)
true_div
=
TrueDiv
(
upcast_out
,
name
=
'true_div'
)
true_div
=
TrueDiv
(
upcast_out
,
name
=
'true_div'
)
class
IntDiv
(
BinaryScalarOp
):
class
IntDiv
(
BinaryScalarOp
):
...
@@ -811,8 +830,8 @@ second = Second(transfer_type(1), name = 'second')
...
@@ -811,8 +830,8 @@ second = Second(transfer_type(1), name = 'second')
class
Identity
(
UnaryScalarOp
):
class
Identity
(
UnaryScalarOp
):
def
impl
(
self
,
x
):
def
impl
(
self
,
input
):
return
x
return
input
def
c_code
(
self
,
node
,
name
,
(
x
,
),
(
z
,
),
sub
):
def
c_code
(
self
,
node
,
name
,
(
x
,
),
(
z
,
),
sub
):
return
"
%(z)
s =
%(x)
s;"
%
locals
()
return
"
%(z)
s =
%(x)
s;"
%
locals
()
def
grad
(
self
,
(
x
,
),
(
gz
,
)):
def
grad
(
self
,
(
x
,
),
(
gz
,
)):
...
@@ -820,11 +839,52 @@ class Identity(UnaryScalarOp):
...
@@ -820,11 +839,52 @@ class Identity(UnaryScalarOp):
return
gz
,
return
gz
,
else
:
else
:
return
None
,
return
None
,
#backport
#return gz if x.type in grad_types else None,
identity
=
Identity
(
same_out
,
name
=
'identity'
)
identity
=
Identity
(
same_out
,
name
=
'identity'
)
#### CASTING OPERATIONS
class
Cast
(
UnaryScalarOp
):
def
__init__
(
self
,
o_type
,
name
=
None
):
if
not
isinstance
(
o_type
,
Scalar
):
raise
TypeError
(
o_type
)
super
(
Cast
,
self
)
.
__init__
(
specific_out
(
o_type
),
name
=
name
)
self
.
o_type
=
o_type
self
.
ctor
=
getattr
(
numpy
,
o_type
.
dtype
)
def
impl
(
self
,
input
):
return
self
.
ctor
(
input
)
def
c_code
(
self
,
node
,
name
,
(
x
,
),
(
z
,
),
sub
):
return
"
%(z)
s =
%(x)
s;"
%
locals
()
def
grad
(
self
,
(
x
,
),
(
gz
,
)):
if
x
.
type
in
grad_types
:
return
[
cast
(
gz
,
x
.
type
.
dtype
)]
else
:
return
None
,
convert_to_int8
=
Cast
(
int8
,
name
=
'convert_to_int8'
)
convert_to_int16
=
Cast
(
int16
,
name
=
'convert_to_int16'
)
convert_to_int32
=
Cast
(
int32
,
name
=
'convert_to_int32'
)
convert_to_int64
=
Cast
(
int64
,
name
=
'convert_to_int64'
)
convert_to_float32
=
Cast
(
float32
,
name
=
'convert_to_float32'
)
convert_to_float64
=
Cast
(
float64
,
name
=
'convert_to_float64'
)
convert_to_complex64
=
Cast
(
complex64
,
name
=
'convert_to_complex64'
)
convert_to_complex128
=
Cast
(
complex128
,
name
=
'convert_to_complex128'
)
_cast_mapping
=
{
'int8'
:
convert_to_int8
,
'int16'
:
convert_to_int16
,
'int32'
:
convert_to_int32
,
'int64'
:
convert_to_int64
,
'float32'
:
convert_to_float32
,
'float64'
:
convert_to_float64
,
'complex64'
:
convert_to_complex64
,
'complex128'
:
convert_to_complex128
}
def
cast
(
x
,
dtype
):
"""Symbolically cast `x` to a Scalar of given `dtype`."""
_x
=
as_scalar
(
x
)
if
_x
.
type
.
dtype
==
dtype
:
return
_x
if
_x
.
type
.
dtype
.
startswith
(
'complex'
)
and
not
dtype
.
startswith
(
'complex'
):
raise
TypeError
(
'Casting from complex to real is ambiguous: consider real(), imag(), angle() or abs()'
)
return
_cast_mapping
[
dtype
](
_x
)
class
Abs
(
UnaryScalarOp
):
class
Abs
(
UnaryScalarOp
):
def
make_node
(
self
,
x
):
def
make_node
(
self
,
x
):
inputs
=
[
as_scalar
(
input
)
for
input
in
[
x
]]
inputs
=
[
as_scalar
(
input
)
for
input
in
[
x
]]
...
@@ -883,8 +943,6 @@ class Neg(UnaryScalarOp):
...
@@ -883,8 +943,6 @@ class Neg(UnaryScalarOp):
return
-
gz
,
return
-
gz
,
else
:
else
:
return
None
,
return
None
,
#backport
#return -gz if x.type in grad_types else None,
def
c_code
(
self
,
node
,
name
,
(
x
,
),
(
z
,
),
sub
):
def
c_code
(
self
,
node
,
name
,
(
x
,
),
(
z
,
),
sub
):
return
"
%(z)
s = -
%(x)
s;"
%
locals
()
return
"
%(z)
s = -
%(x)
s;"
%
locals
()
neg
=
Neg
(
same_out
,
name
=
'neg'
)
neg
=
Neg
(
same_out
,
name
=
'neg'
)
...
...
theano/tensor/basic.py
浏览文件 @
ce7533df
...
@@ -465,9 +465,16 @@ class TensorType(Type):
...
@@ -465,9 +465,16 @@ class TensorType(Type):
def
c_libraries
(
self
):
def
c_libraries
(
self
):
return
[]
return
[]
def
c_support_code
(
cls
):
def
c_support_code
(
self
):
"""Override `CLinkerOp.c_support_code` """
"""Override `CLinkerOp.c_support_code` """
return
scal
.
Scalar
(
"int8"
)
.
c_support_code
()
return
scal
.
Scalar
(
self
.
dtype
)
.
c_support_code
()
def
c_code_cache_version
(
self
):
scalar_version
=
scal
.
Scalar
(
self
.
dtype
)
.
c_code_cache_version
()
if
scalar_version
:
return
(
1
,)
+
scalar_version
else
:
return
()
# Easy constructors
# Easy constructors
...
@@ -887,18 +894,6 @@ class ScalarFromTensor(Op):
...
@@ -887,18 +894,6 @@ class ScalarFromTensor(Op):
scalar_from_tensor
=
ScalarFromTensor
()
scalar_from_tensor
=
ScalarFromTensor
()
@constructor
def
cast
(
t
,
dtype
):
mapping
=
{
'int8'
:
convert_to_int8
,
'int16'
:
convert_to_int16
,
'int32'
:
convert_to_int32
,
'int64'
:
convert_to_int64
,
'float32'
:
convert_to_float32
,
'float64'
:
convert_to_float64
,
'complex64'
:
convert_to_complex64
,
'complex128'
:
convert_to_complex128
}
return
mapping
[
dtype
](
t
)
#to be removed as we get the epydoc routine-documenting thing going -JB 20080924
#to be removed as we get the epydoc routine-documenting thing going -JB 20080924
def
_conversion
(
real_value
,
name
):
def
_conversion
(
real_value
,
name
):
__oplist_tag
(
real_value
,
'casting'
)
__oplist_tag
(
real_value
,
'casting'
)
...
@@ -906,30 +901,52 @@ def _conversion(real_value, name):
...
@@ -906,30 +901,52 @@ def _conversion(real_value, name):
pprint
.
assign
(
real_value
,
printing
.
FunctionPrinter
(
name
))
pprint
.
assign
(
real_value
,
printing
.
FunctionPrinter
(
name
))
return
real_value
return
real_value
convert_to_int8
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
Identity
(
scal
.
specific_out
(
scal
.
int8
))),
'int8'
)
#
# These _conver_to_<type> functions have leading underscores to indicate that they should not
# be called directly. They do not perform sanity checks about what types you are casting to
# what. That logic is implemented by the `cast()` function below.
#
_convert_to_int8
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
convert_to_int8
),
'int8'
)
"""Cast to 8-bit integer"""
"""Cast to 8-bit integer"""
convert_to_int16
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
Identity
(
scal
.
specific_out
(
scal
.
int16
))
),
'int16'
)
_convert_to_int16
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
convert_to_int16
),
'int16'
)
"""Cast to 16-bit integer"""
"""Cast to 16-bit integer"""
convert_to_int32
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
Identity
(
scal
.
specific_out
(
scal
.
int32
))
),
'int32'
)
_convert_to_int32
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
convert_to_int32
),
'int32'
)
"""Cast to 32-bit integer"""
"""Cast to 32-bit integer"""
convert_to_int64
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
Identity
(
scal
.
specific_out
(
scal
.
int64
))
),
'int64'
)
_convert_to_int64
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
convert_to_int64
),
'int64'
)
"""Cast to 64-bit integer"""
"""Cast to 64-bit integer"""
convert_to_float32
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
Identity
(
scal
.
specific_out
(
scal
.
float32
))
),
'float32'
)
_convert_to_float32
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
convert_to_float32
),
'float32'
)
"""Cast to single-precision floating point"""
"""Cast to single-precision floating point"""
convert_to_float64
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
Identity
(
scal
.
specific_out
(
scal
.
float64
))
),
'float64'
)
_convert_to_float64
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
convert_to_float64
),
'float64'
)
"""Cast to double-precision floating point"""
"""Cast to double-precision floating point"""
convert_to_complex64
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
Identity
(
scal
.
specific_out
(
scal
.
complex64
))
),
'complex64'
)
_convert_to_complex64
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
convert_to_complex64
),
'complex64'
)
"""Cast to single-precision complex"""
"""Cast to single-precision complex"""
convert_to_complex128
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
Identity
(
scal
.
specific_out
(
scal
.
complex128
))
),
'complex128'
)
_convert_to_complex128
=
_conversion
(
elemwise
.
Elemwise
(
scal
.
convert_to_complex128
),
'complex128'
)
"""Cast to double-precision complex"""
"""Cast to double-precision complex"""
_cast_mapping
=
{
'int8'
:
_convert_to_int8
,
'int16'
:
_convert_to_int16
,
'int32'
:
_convert_to_int32
,
'int64'
:
_convert_to_int64
,
'float32'
:
_convert_to_float32
,
'float64'
:
_convert_to_float64
,
'complex64'
:
_convert_to_complex64
,
'complex128'
:
_convert_to_complex128
}
@constructor
def cast(x, dtype):
    """Symbolically cast `x` to a Tensor of type `dtype`."""
    # Silently dropping the imaginary part would lose information, so a
    # complex -> real cast is refused and the caller is pointed at the
    # explicit extraction ops instead.
    src_is_complex = x.type.dtype.startswith('complex')
    dst_is_complex = dtype.startswith('complex')
    if src_is_complex and not dst_is_complex:
        raise TypeError('Casting from complex to real is ambiguous: consider real(), imag(), angle() or abs()')
    return _cast_mapping[dtype](x)
##########################
##########################
...
@@ -1145,7 +1162,6 @@ def abs_(a):
...
@@ -1145,7 +1162,6 @@ def abs_(a):
pprint
.
assign
(
abs_
,
printing
.
PatternPrinter
((
'|
%(0)
s|'
,
-
1000
)))
pprint
.
assign
(
abs_
,
printing
.
PatternPrinter
((
'|
%(0)
s|'
,
-
1000
)))
@_scal_elemwise
@_scal_elemwise
def
exp
(
a
):
def
exp
(
a
):
"""e^`a`"""
"""e^`a`"""
...
@@ -1210,6 +1226,83 @@ def sinh(a):
...
@@ -1210,6 +1226,83 @@ def sinh(a):
def
tanh
(
a
):
def
tanh
(
a
):
"""hyperbolic tangent of a"""
"""hyperbolic tangent of a"""
class
Real
(
Op
):
"""Extract the real elements of a complex ndarray"""
view_map
=
{
0
:[
0
]}
def
__eq__
(
self
,
other
):
return
type
(
self
)
==
type
(
other
)
def
__hash__
(
self
):
return
hash
(
type
(
self
))
def
make_node
(
self
,
x
):
_x
=
as_tensor
(
x
)
y_dtype
=
_x
.
type
.
dtype
if
y_dtype
==
'complex64'
:
y_dtype
=
'float32'
if
y_dtype
==
'complex128'
:
y_dtype
=
'float64'
_y
=
Tensor
(
y_dtype
,
_x
.
type
.
broadcastable
)()
return
Apply
(
self
,
[
_x
],
[
_y
])
def
perform
(
self
,
node
,
(
x
,),
(
y
,)):
if
str
(
x
.
dtype
)
.
startswith
(
'complex'
):
y
[
0
]
=
x
.
real
else
:
y
[
0
]
=
x
def
grad
(
self
,
inputs
,
(
g_y
,)):
#TODO: waiting on a Complex(real=, imag=) op that can merge
#things back into a complex tensor
raise
NotImplementedError
()
_real
=
Real
()
@constructor
def real(x):
    """Return the real part of real or complex-valued `x`

    For real-valued `x`, `x` itself is returned.
    """
    _x = as_tensor_variable(x)
    if not _x.type.dtype.startswith('complex'):
        # Already real: nothing to extract.
        return _x
    return _real(x)
class
Imag
(
Op
):
"""Extract the imaginary elements of a complex ndarray"""
view_map
=
{
0
:[
0
]}
def
__eq__
(
self
,
other
):
return
type
(
self
)
==
type
(
other
)
def
__hash__
(
self
):
return
hash
(
type
(
self
))
def
make_node
(
self
,
x
):
_x
=
as_tensor_variable
(
x
)
if
not
_x
.
type
.
dtype
.
startswith
(
'complex'
):
raise
TypeError
(
'Imag(x) requires complex x'
,
x
)
if
_x
.
type
.
dtype
==
'complex64'
:
y_dtype
=
'float32'
elif
_x
.
type
.
dtype
==
'complex128'
:
y_dtype
=
'float64'
else
:
raise
NotImplementedError
(
'what is this?'
,
y_dtype
)
_y
=
Tensor
(
y_dtype
,
_x
.
type
.
broadcastable
)()
return
Apply
(
self
,
[
_x
],
[
_y
])
def
perform
(
self
,
node
,
(
x
,),
(
y
,)):
if
str
(
x
.
dtype
)
.
startswith
(
'complex'
):
y
[
0
]
=
x
.
imag
else
:
y
[
0
]
=
x
*
0
def
grad
(
self
,
inputs
,
(
g_y
,)):
# TODO: waiting on a complex(real=, imag=) op that can merge
# things back into a complex tensor
raise
NotImplementedError
()
_imag
=
Imag
()
@constructor
def imag(x):
    """Return the imaginary part of real or complex-valued `x`

    For real-valued 'x' this returns `zeros_like(x)`.
    """
    _x = as_tensor_variable(x)
    if not _x.type.dtype.startswith('complex'):
        # A real tensor has an identically-zero imaginary part.
        return zeros_like(x)
    return _imag(x)
##########################
##########################
# Misc
# Misc
...
...
theano/tensor/blas.py
浏览文件 @
ce7533df
...
@@ -255,6 +255,8 @@ class GemmRelated(Op):
...
@@ -255,6 +255,8 @@ class GemmRelated(Op):
self
.
case_double_gemm
,
self
.
case_double_gemm
,
self
.
end_switch_typenum
),
''
)
self
.
end_switch_typenum
),
''
)
def
build_gemm_version
(
self
):
return
(
1
,)
class
Gemm
(
GemmRelated
):
class
Gemm
(
GemmRelated
):
"""In-place version of matrix-matrix multiplication (with accumulation):
"""In-place version of matrix-matrix multiplication (with accumulation):
...
@@ -363,8 +365,15 @@ class Gemm(GemmRelated):
...
@@ -363,8 +365,15 @@ class Gemm(GemmRelated):
def
c_code
(
self
,
node
,
name
,
(
_z
,
_a
,
_x
,
_y
,
_b
),
(
_zout
,
),
sub
):
#DEBUG
def
c_code
(
self
,
node
,
name
,
(
_z
,
_a
,
_x
,
_y
,
_b
),
(
_zout
,
),
sub
):
#DEBUG
full_code
=
self
.
build_gemm_call
()
%
dict
(
locals
(),
**
sub
)
full_code
=
self
.
build_gemm_call
()
%
dict
(
locals
(),
**
sub
)
return
full_code
return
full_code
def
c_code_cache_version
(
self
):
return
(
1
,)
+
self
.
build_gemm_version
()
gemm
=
Gemm
()
gemm
=
Gemm
()
pprint
.
assign
(
gemm
,
FunctionPrinter
(
'gemm'
))
pprint
.
assign
(
gemm
,
FunctionPrinter
(
'gemm'
))
def
res_is_a
(
node
,
op
,
maxclients
=
None
):
def
res_is_a
(
node
,
op
,
maxclients
=
None
):
if
maxclients
is
not
None
:
if
maxclients
is
not
None
:
...
@@ -635,6 +644,9 @@ class Dot22(GemmRelated):
...
@@ -635,6 +644,9 @@ class Dot22(GemmRelated):
def
c_code
(
self
,
node
,
name
,
(
_x
,
_y
),
(
_z
,
),
sub
):
#DEBUG
def
c_code
(
self
,
node
,
name
,
(
_x
,
_y
),
(
_z
,
),
sub
):
#DEBUG
full_code
=
self
.
build_gemm_call
()
%
dict
(
locals
(),
**
sub
)
full_code
=
self
.
build_gemm_call
()
%
dict
(
locals
(),
**
sub
)
return
full_code
return
full_code
def
c_code_cache_version
(
self
):
return
(
1
,)
+
self
.
build_gemm_version
()
_dot22
=
Dot22
()
_dot22
=
Dot22
()
@local_optimizer
([
T
.
dot
])
@local_optimizer
([
T
.
dot
])
...
...
theano/tensor/elemwise.py
浏览文件 @
ce7533df
...
@@ -295,6 +295,9 @@ class DimShuffle(Op):
...
@@ -295,6 +295,9 @@ class DimShuffle(Op):
return
full_code
%
dict
(
locals
(),
**
sub
)
return
full_code
%
dict
(
locals
(),
**
sub
)
def
c_code_cache_version
(
self
):
return
(
1
,)
def
grad
(
self
,
(
x
,
),
(
gz
,
)):
def
grad
(
self
,
(
x
,
),
(
gz
,
)):
gz
=
as_tensor_variable
(
gz
)
gz
=
as_tensor_variable
(
gz
)
grad_order
=
[
'x'
]
*
len
(
x
.
type
.
broadcastable
)
grad_order
=
[
'x'
]
*
len
(
x
.
type
.
broadcastable
)
...
@@ -487,7 +490,8 @@ class Elemwise(Op):
...
@@ -487,7 +490,8 @@ class Elemwise(Op):
return
self
.
name
return
self
.
name
def
grad
(
self
,
inputs
,
ograds
):
def
grad
(
self
,
inputs
,
ograds
):
ograds
=
map
(
as_tensor_variable
,
ograds
)
# this shouldn't be necessary...
# Gradients (especially on the final costs) don't have to be symbolic
ograds
=
map
(
as_tensor_variable
,
ograds
)
scalar_inputs
=
[
Scalar
(
dtype
=
t
.
type
.
dtype
)()
for
t
in
inputs
]
scalar_inputs
=
[
Scalar
(
dtype
=
t
.
type
.
dtype
)()
for
t
in
inputs
]
scalar_ograds
=
[
Scalar
(
dtype
=
ograd
.
type
.
dtype
)()
for
ograd
in
ograds
]
scalar_ograds
=
[
Scalar
(
dtype
=
ograd
.
type
.
dtype
)()
for
ograd
in
ograds
]
scalar_igrads
=
self
.
scalar_op
.
grad
(
scalar_inputs
,
scalar_ograds
)
scalar_igrads
=
self
.
scalar_op
.
grad
(
scalar_inputs
,
scalar_ograds
)
...
@@ -695,8 +699,20 @@ class Elemwise(Op):
...
@@ -695,8 +699,20 @@ class Elemwise(Op):
def
c_support_code
(
self
):
def
c_support_code
(
self
):
return
self
.
scalar_op
.
c_support_code
()
return
self
.
scalar_op
.
c_support_code
()
def
c_code_cache_version
(
self
):
def
c_code_cache_version_apply
(
self
,
node
):
return
(
4
,)
version
=
[
4
]
# the version corresponding to the c code in this Op
# now we insert versions for the ops on which we depend...
scalar_node
=
Apply
(
self
.
scalar_op
,
[
Scalar
(
dtype
=
input
.
type
.
dtype
)()
for
input
in
node
.
inputs
],
[
Scalar
(
dtype
=
output
.
type
.
dtype
)()
for
output
in
node
.
outputs
])
version
.
extend
(
self
.
scalar_op
.
c_code_cache_version_apply
(
scalar_node
))
for
i
in
node
.
inputs
+
node
.
outputs
:
version
.
extend
(
Scalar
(
dtype
=
i
.
type
.
dtype
)
.
c_code_cache_version
())
if
all
(
version
):
return
tuple
(
version
)
else
:
return
()
# def elemwise_to_scal(env):
# def elemwise_to_scal(env):
# mapping = {}
# mapping = {}
...
@@ -884,6 +900,21 @@ class CAReduce(Op):
...
@@ -884,6 +900,21 @@ class CAReduce(Op):
code
=
"
\n
"
.
join
(
self
.
_c_all
(
node
,
name
,
inames
,
onames
,
sub
))
code
=
"
\n
"
.
join
(
self
.
_c_all
(
node
,
name
,
inames
,
onames
,
sub
))
return
code
return
code
def
c_code_cache_version_apply
(
self
,
node
):
version
=
[
2
]
# the version corresponding to the c code in this Op
# now we insert versions for the ops on which we depend...
scalar_node
=
Apply
(
self
.
scalar_op
,
[
Scalar
(
dtype
=
input
.
type
.
dtype
)()
for
input
in
node
.
inputs
],
[
Scalar
(
dtype
=
output
.
type
.
dtype
)()
for
output
in
node
.
outputs
])
version
.
extend
(
self
.
scalar_op
.
c_code_cache_version_apply
(
scalar_node
))
for
i
in
node
.
inputs
+
node
.
outputs
:
version
.
extend
(
Scalar
(
dtype
=
i
.
type
.
dtype
)
.
c_code_cache_version
())
if
all
(
version
):
return
tuple
(
version
)
else
:
return
()
class
Sum
(
CAReduce
):
class
Sum
(
CAReduce
):
"""
"""
...
...
theano/tensor/nnet.py
浏览文件 @
ce7533df
## This file contain ops that are not currently integrated in the core of threano.
"""Provides neural-network specific Ops.
## Not all of those ops have been thoroughly tested.
:note: TODO: factor this out into a neural-network toolbox.
"""
#from theano import tensor, scalar
from
theano
import
gof
from
theano
import
gof
from
theano
import
scalar
from
theano
import
scalar
from
theano
import
printing
from
theano
import
printing
...
@@ -39,6 +40,8 @@ class ScalarSigmoid(scalar.UnaryScalarOp):
...
@@ -39,6 +40,8 @@ class ScalarSigmoid(scalar.UnaryScalarOp):
? 1.0
? 1.0
: 1.0 /(1.0+exp(-
%(x)
s));"""
%
locals
()
: 1.0 /(1.0+exp(-
%(x)
s));"""
%
locals
()
raise
NotImplementedError
(
'only floatingpoint is implemented'
)
raise
NotImplementedError
(
'only floatingpoint is implemented'
)
def
c_code_cache_version
(
self
):
return
(
1
,)
scalar_sigmoid
=
ScalarSigmoid
(
scalar
.
upgrade_to_float
,
name
=
'scalar_sigmoid'
)
scalar_sigmoid
=
ScalarSigmoid
(
scalar
.
upgrade_to_float
,
name
=
'scalar_sigmoid'
)
sigmoid
=
elemwise
.
Elemwise
(
scalar_sigmoid
,
name
=
'sigmoid'
)
sigmoid
=
elemwise
.
Elemwise
(
scalar_sigmoid
,
name
=
'sigmoid'
)
...
@@ -66,6 +69,8 @@ class ScalarSoftplus(scalar.UnaryScalarOp):
...
@@ -66,6 +69,8 @@ class ScalarSoftplus(scalar.UnaryScalarOp):
?
%(x)
s
?
%(x)
s
: log1p(exp(
%(x)
s));"""
%
locals
()
: log1p(exp(
%(x)
s));"""
%
locals
()
raise
NotImplementedError
(
'only floating point x is implemented'
)
raise
NotImplementedError
(
'only floating point x is implemented'
)
def
c_code_cache_version
(
self
):
return
(
1
,)
scalar_softplus
=
ScalarSoftplus
(
scalar
.
upgrade_to_float
,
name
=
'scalar_softplus'
)
scalar_softplus
=
ScalarSoftplus
(
scalar
.
upgrade_to_float
,
name
=
'scalar_softplus'
)
softplus
=
elemwise
.
Elemwise
(
scalar_softplus
,
name
=
'softplus'
)
softplus
=
elemwise
.
Elemwise
(
scalar_softplus
,
name
=
'softplus'
)
...
@@ -133,7 +138,7 @@ class SoftmaxWithBias(gof.Op):
...
@@ -133,7 +138,7 @@ class SoftmaxWithBias(gof.Op):
return
[
'<iostream>'
,
'<cmath>'
]
return
[
'<iostream>'
,
'<cmath>'
]
def
c_code_cache_version
(
self
):
def
c_code_cache_version
(
self
):
return
()
return
(
3
,
)
@staticmethod
@staticmethod
def
c_code_template
():
def
c_code_template
():
# this implementation was lifted from
# this implementation was lifted from
...
@@ -294,7 +299,7 @@ class SoftmaxGrad(gof.Op):
...
@@ -294,7 +299,7 @@ class SoftmaxGrad(gof.Op):
raise
NotImplementedError
()
raise
NotImplementedError
()
def
c_code_cache_version
(
self
):
def
c_code_cache_version
(
self
):
return
()
return
(
3
,
)
def
c_code
(
self
,
node
,
name
,
(
dy
,
sm
),
(
dx
,),
sub
):
def
c_code
(
self
,
node
,
name
,
(
dy
,
sm
),
(
dx
,),
sub
):
return
'''
return
'''
if ((
%(dy)
s->descr->type_num != PyArray_DOUBLE) && (
%(dy)
s->descr->type_num != PyArray_FLOAT))
if ((
%(dy)
s->descr->type_num != PyArray_DOUBLE) && (
%(dy)
s->descr->type_num != PyArray_FLOAT))
...
@@ -402,10 +407,15 @@ def local_softmax_with_bias(node):
...
@@ -402,10 +407,15 @@ def local_softmax_with_bias(node):
non_vectors
=
[]
non_vectors
=
[]
for
x_in
in
x
.
owner
.
inputs
:
for
x_in
in
x
.
owner
.
inputs
:
if
list
(
x_in
.
type
.
broadcastable
)
==
[
True
,
False
]:
if
list
(
x_in
.
type
.
broadcastable
)
==
[
True
,
False
]:
if
x_in
.
owner
and
isinstance
(
x_in
.
owner
.
op
,
tensor
.
DimShuffle
):
print
isinstance
(
x_in
.
owner
.
op
,
tensor
.
DimShuffle
)
assert
len
(
x_in
.
owner
.
inputs
)
==
1
#since specialization comes relatively late in optimization,
# we don't want to put in extra DimShuffles un-necessarily.
if
x_in
.
owner
and
isinstance
(
x_in
.
owner
.
op
,
tensor
.
DimShuffle
)
\
and
list
(
x_in
.
owner
.
inputs
[
0
]
.
type
.
broadcastable
)
==
[
False
]:
# cut out the DimShuffle that was broadcasting a vector
vectors
.
append
(
x_in
.
owner
.
inputs
[
0
])
vectors
.
append
(
x_in
.
owner
.
inputs
[
0
])
else
:
else
:
# insert an extra DimShuffle to correct the old one
vectors
.
append
(
tensor
.
DimShuffle
((
True
,
False
),
(
1
,))(
x_in
))
vectors
.
append
(
tensor
.
DimShuffle
((
True
,
False
),
(
1
,))(
x_in
))
else
:
else
:
non_vectors
.
append
(
x_in
)
non_vectors
.
append
(
x_in
)
...
@@ -627,7 +637,7 @@ class CrossentropySoftmaxArgmax1HotWithBias(gof.Op):
...
@@ -627,7 +637,7 @@ class CrossentropySoftmaxArgmax1HotWithBias(gof.Op):
def
c_code_cache_version
(
self
):
def
c_code_cache_version
(
self
):
return
()
return
(
2
,
)
def
c_code
(
self
,
node
,
name
,
(
x
,
b
,
y_idx
),
(
nll
,
sm
,
am
),
sub
):
def
c_code
(
self
,
node
,
name
,
(
x
,
b
,
y_idx
),
(
nll
,
sm
,
am
),
sub
):
y_idx_type
=
node
.
inputs
[
2
]
.
type
.
dtype_specs
()[
1
]
y_idx_type
=
node
.
inputs
[
2
]
.
type
.
dtype_specs
()[
1
]
am_type
=
y_idx_type
am_type
=
y_idx_type
...
@@ -659,7 +669,7 @@ class CrossentropySoftmax1HotWithBiasDx (gof.Op):
...
@@ -659,7 +669,7 @@ class CrossentropySoftmax1HotWithBiasDx (gof.Op):
def
grad
(
self
,
*
args
):
def
grad
(
self
,
*
args
):
raise
NotImplementedError
()
raise
NotImplementedError
()
def
c_code_cache_version
(
self
):
def
c_code_cache_version
(
self
):
return
()
return
(
2
,
)
def
c_code
(
self
,
node
,
name
,
(
dnll
,
sm
,
y_idx
),
(
dx
,),
sub
):
def
c_code
(
self
,
node
,
name
,
(
dnll
,
sm
,
y_idx
),
(
dx
,),
sub
):
y_idx_type
=
node
.
inputs
[
2
]
.
type
.
dtype_specs
()[
1
]
y_idx_type
=
node
.
inputs
[
2
]
.
type
.
dtype_specs
()[
1
]
return
"""
return
"""
...
...
theano/tensor/tests/test_basic.py
浏览文件 @
ce7533df
...
@@ -594,19 +594,6 @@ class T_Shape(unittest.TestCase):
...
@@ -594,19 +594,6 @@ class T_Shape(unittest.TestCase):
s
=
shape
(
numpy
.
ones
((
5
,
3
,
10
)))
s
=
shape
(
numpy
.
ones
((
5
,
3
,
10
)))
self
.
failUnless
((
eval_outputs
([
s
])
==
[
5
,
3
,
10
])
.
all
())
self
.
failUnless
((
eval_outputs
([
s
])
==
[
5
,
3
,
10
])
.
all
())
class
T_Cast
(
unittest
.
TestCase
):
def
test_basic
(
self
):
for
type1
in
[
'int8'
,
'int16'
,
'int32'
,
'int64'
,
'float32'
,
'float64'
]:
x
=
TensorType
(
dtype
=
type1
,
broadcastable
=
(
False
,
))
.
make_variable
()
for
type2
,
converter
in
zip
([
'int8'
,
'int16'
,
'int32'
,
'int64'
,
'float32'
,
'float64'
],
[
convert_to_int8
,
convert_to_int16
,
convert_to_int32
,
convert_to_int64
,
convert_to_float32
,
convert_to_float64
]):
y
=
converter
(
x
)
f
=
inplace_func
([
compile
.
In
(
x
,
strict
=
True
)],
y
)
a
=
numpy
.
arange
(
10
,
dtype
=
type1
)
b
=
f
(
a
)
self
.
failUnless
(
numpy
.
all
(
b
==
numpy
.
arange
(
10
,
dtype
=
type2
)))
class
T_max_and_argmax
(
unittest
.
TestCase
):
class
T_max_and_argmax
(
unittest
.
TestCase
):
def
setUp
(
self
):
def
setUp
(
self
):
utt
.
seed_rng
()
utt
.
seed_rng
()
...
@@ -1920,43 +1907,6 @@ def test_sum_overflow():
...
@@ -1920,43 +1907,6 @@ def test_sum_overflow():
f
=
function
([
a
],
sum
(
a
))
f
=
function
([
a
],
sum
(
a
))
assert
f
([
1
]
*
300
)
==
300
assert
f
([
1
]
*
300
)
==
300
def
test_convert_to_complex
():
a
=
value
(
numpy
.
ones
(
3
,
dtype
=
'complex64'
)
+
0.5
j
)
b
=
value
(
numpy
.
ones
(
3
,
dtype
=
'complex128'
)
+
0.5
j
)
f
=
function
([
a
],
basic
.
convert_to_complex128
(
a
))
#we need to compare with the same type.
assert
a
.
type
.
values_eq_approx
(
b
.
data
,
f
(
a
.
data
))
f
=
function
([
b
],
basic
.
convert_to_complex128
(
b
))
assert
b
.
type
.
values_eq_approx
(
b
.
data
,
f
(
b
.
data
))
f
=
function
([
a
],
basic
.
convert_to_complex64
(
a
))
assert
a
.
type
.
values_eq_approx
(
a
.
data
,
f
(
a
.
data
))
#down cast don,t work for now
#f = function([b],basic.convert_to_complex64(b))
#assert b.type.values_eq_approx(b.data, f(b.data))
for
nbits
in
(
64
,
128
):
for
t
in
[
'int8'
,
'int16'
,
'int32'
,
'int64'
,
'float32'
,
'float64'
]:
a
=
value
(
numpy
.
ones
(
3
,
dtype
=
t
))
b
=
value
(
numpy
.
ones
(
3
,
dtype
=
'complex128'
))
f
=
function
([
a
],
basic
.
convert_to_complex128
(
a
))
assert
a
.
type
.
values_eq_approx
(
b
.
data
,
f
(
a
.
data
))
for
t
in
[
'int8'
,
'int16'
,
'int32'
,
'int64'
,
'float32'
]:
a
=
value
(
numpy
.
ones
(
3
,
dtype
=
t
))
b
=
value
(
numpy
.
ones
(
3
,
dtype
=
'complex64'
))
f
=
function
([
a
],
basic
.
convert_to_complex64
(
a
))
assert
a
.
type
.
values_eq_approx
(
b
.
data
,
f
(
a
.
data
))
#this work, but should we allow it? How well it is implemented?
for
t
in
[
'float64'
]:
a
=
value
(
numpy
.
ones
(
3
,
dtype
=
t
))
b
=
value
(
numpy
.
ones
(
3
,
dtype
=
'complex64'
))
f
=
function
([
a
],
basic
.
convert_to_complex64
(
a
))
assert
a
.
type
.
values_eq_approx
(
b
.
data
,
f
(
a
.
data
))
def
test_default
():
def
test_default
():
x
,
y
=
dscalars
(
'xy'
)
x
,
y
=
dscalars
(
'xy'
)
z
=
default
(
x
,
y
)
z
=
default
(
x
,
y
)
...
@@ -1975,16 +1925,6 @@ def test_default_state():
...
@@ -1975,16 +1925,6 @@ def test_default_state():
assert
f
(
1
)
==
4.8
assert
f
(
1
)
==
4.8
assert
f
(
2.2
)
==
7
assert
f
(
2.2
)
==
7
def
test_bug_complext_10_august_09
():
v0
=
dmatrix
()
v1
=
basic
.
convert_to_complex128
(
v0
)
inputs
=
[
v0
]
outputs
=
[
v1
]
f
=
function
(
inputs
,
outputs
)
i
=
numpy
.
zeros
((
2
,
2
))
assert
(
f
(
i
)
==
numpy
.
zeros
((
2
,
2
)))
.
all
()
if
__name__
==
'__main__'
:
if
__name__
==
'__main__'
:
if
len
(
sys
.
argv
)
>=
2
and
sys
.
argv
[
1
]
==
'OPT'
:
if
len
(
sys
.
argv
)
>=
2
and
sys
.
argv
[
1
]
==
'OPT'
:
default_mode
=
compile
.
Mode
(
linker
=
'c&py'
,
default_mode
=
compile
.
Mode
(
linker
=
'c&py'
,
...
...
theano/tensor/tests/test_casting.py
0 → 100644
浏览文件 @
ce7533df
import
unittest
from
theano
import
function
from
theano.tensor.basic
import
(
_convert_to_int32
,
_convert_to_int8
,
_convert_to_int16
,
_convert_to_int64
,
_convert_to_float32
,
_convert_to_float64
)
from
theano.tensor
import
*
class test_casting(unittest.TestCase):
    """Unit tests for the private _convert_to_* conversion Ops and `cast`."""

    def test_0(self):
        # The output dtype must be exactly what the converter's scalar op
        # advertises, regardless of the input dtype.
        for op_fn in _convert_to_int32, _convert_to_float32, _convert_to_float64:
            for type_fn in bvector, ivector, fvector, dvector:
                x = type_fn()
                f = function([x], op_fn(x))
                xval = numpy.asarray(numpy.random.rand(10) * 10, dtype=type_fn.dtype)
                yval = f(xval)
                assert str(yval.dtype) == op_fn.scalar_op.output_types_preference.spec[0].dtype

    def test_illegal(self):
        # Casting a complex matrix down to a real dtype must raise TypeError.
        try:
            x = zmatrix()
            function([x], cast(x, 'float64'))(numpy.ones((2, 3), dtype='complex128'))
        except TypeError:
            return
        assert 0

    def test_basic(self):
        # Round-trip each int/float dtype through each converter and check
        # that the values are preserved.
        for type1 in ['int8', 'int16', 'int32', 'int64', 'float32', 'float64']:
            x = TensorType(dtype=type1, broadcastable=(False, )).make_variable()
            for type2, converter in zip(
                    ['int8', 'int16', 'int32', 'int64', 'float32', 'float64'],
                    [_convert_to_int8, _convert_to_int16, _convert_to_int32,
                        _convert_to_int64, _convert_to_float32, _convert_to_float64]):
                y = converter(x)
                f = function([compile.In(x, strict=True)], y)
                a = numpy.arange(10, dtype=type1)
                b = f(a)
                self.failUnless(numpy.all(b == numpy.arange(10, dtype=type2)))

    def test_convert_to_complex(self):
        a = value(numpy.ones(3, dtype='complex64') + 0.5j)
        b = value(numpy.ones(3, dtype='complex128') + 0.5j)
        f = function([a], basic._convert_to_complex128(a))
        #we need to compare with the same type.
        assert a.type.values_eq_approx(b.data, f(a.data))
        f = function([b], basic._convert_to_complex128(b))
        assert b.type.values_eq_approx(b.data, f(b.data))
        f = function([a], basic._convert_to_complex64(a))
        assert a.type.values_eq_approx(a.data, f(a.data))
        f = function([b], basic._convert_to_complex64(b))
        assert b.type.values_eq_approx(a.data, f(b.data))
        # NOTE(review): `nbits` is never used inside this loop, so the checks
        # below simply run twice — confirm whether it was meant to select the
        # target complex width.
        for nbits in (64, 128):
            # upcasting to complex128
            for t in ['int8', 'int16', 'int32', 'int64', 'float32', 'float64']:
                a = value(numpy.ones(3, dtype=t))
                b = value(numpy.ones(3, dtype='complex128'))
                f = function([a], basic._convert_to_complex128(a))
                assert a.type.values_eq_approx(b.data, f(a.data))
            # upcasting to complex64
            for t in ['int8', 'int16', 'int32', 'int64', 'float32']:
                a = value(numpy.ones(3, dtype=t))
                b = value(numpy.ones(3, dtype='complex64'))
                f = function([a], basic._convert_to_complex64(a))
                assert a.type.values_eq_approx(b.data, f(a.data))
            # downcast to complex64
            for t in ['float64']:
                a = value(numpy.ones(3, dtype=t))
                b = value(numpy.ones(3, dtype='complex64'))
                f = function([a], basic._convert_to_complex64(a))
                assert a.type.values_eq_approx(b.data, f(a.data))

    def test_bug_complext_10_august_09(self):
        # Regression test named after a 2009-08-10 complex-cast issue:
        # casting a real float64 matrix to complex128 must produce zeros
        # for a zero input.
        v0 = dmatrix()
        v1 = basic._convert_to_complex128(v0)
        inputs = [v0]
        outputs = [v1]
        f = function(inputs, outputs)
        i = numpy.zeros((2, 2))
        assert (f(i) == numpy.zeros((2, 2))).all()
theano/tensor/tests/test_complex.py
0 → 100644
浏览文件 @
ce7533df
import
unittest
import
theano
from
theano.tensor
import
*
class TestRealImag(unittest.TestCase):
    """Behavioral checks for the real()/imag() graph constructors."""

    def test0(self):
        # On complex input, real()/imag() must agree with numpy's
        # .real/.imag attributes.
        z = zvector()
        rng = numpy.random.RandomState(23)
        zval = numpy.asarray(list(numpy.complex(rng.randn(), rng.randn())
            for i in xrange(10)))
        assert numpy.all(zval.real == theano.function([z], real(z))(zval))
        assert numpy.all(zval.imag == theano.function([z], imag(z))(zval))

    def test_on_real_input(self):
        # real() is the identity on real tensors; imag() is identically zero.
        v = dvector()
        rng = numpy.random.RandomState(23)
        vval = rng.randn(10)
        assert numpy.all(vval == theano.function([v], real(v))(vval))
        assert numpy.all(0 == theano.function([v], imag(v))(vval))

    def test_cast(self):
        # complex -> int casts are ambiguous and must raise TypeError.
        z = zvector()
        self.failUnlessRaises(TypeError, cast, z, 'int32')
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论