testgroup / pytensor / Commits / 73c28a05

Commit 73c28a05, authored Feb 26, 2016 by Caglar
Parent: 6f97e51d

    added the only_process_constants.

Showing 6 changed files with 195 additions and 38 deletions.
Changed files:

  theano/compile/tests/test_debugprint.py   +32  -0   (new file)
  theano/sandbox/cuda/check.py               +1  -0   (new file)
  theano/sandbox/cuda/test_gpuarray.py      +75  -0   (new file)
  theano/tensor/opt.py                      +59 -38
  theano/tests/test_debugprint.py           +15  -0   (new file)
  theano/tests/test_scanmode.py             +13  -0   (new file)
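The change threads a new only_process_constants keyword through the constant-extraction helpers used by the rewrites in theano/tensor/opt.py (get_scalar_constant_value and T.extract_constant). As the flag name suggests, it makes these helpers accept only literal constants rather than digging through the surrounding graph, which keeps the optimizer checks cheap. Below is a minimal sketch of the guard pattern the opt.py hunks repeat; the helper name is_zero_constant is illustrative and not part of the commit:

    from theano.tensor.basic import (get_scalar_constant_value,
                                     NotScalarConstantError)

    def is_zero_constant(var):
        # Cheap check mirroring the pattern used throughout opt.py after this
        # change: only a literal constant counts as zero; anything symbolic
        # raises NotScalarConstantError and is treated as "not a constant".
        try:
            return get_scalar_constant_value(var, only_process_constants=True) == 0
        except NotScalarConstantError:
            return False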
theano/compile/tests/test_debugprint.py  (new file, 0 → 100644)

import theano
import theano.tensor as T


def test_debugprint():
    k = T.iscalar("k")
    A = T.vector("A")

    # Symbolic description of the result
    result, updates = theano.scan(fn=lambda prior_result, A: prior_result * A,
                                  outputs_info=T.ones_like(A),
                                  non_sequences=A,
                                  n_steps=k,
                                  name="scan")

    final_result = result[-1]

    # compiled function that returns A**k
    power = theano.function(inputs=[A, k], outputs=final_result,
                            updates=updates, mode='DebugMode')

    #a = theano.printing.debugprint(power, file="str")
    #a = theano.compile.debugmode.debugprint(power,
    #                                        prefix="test")
    #print(a)
    theano.printing.debugprint(power)
    print power(range(10), 2)
    print power(range(10), 4)


test_debugprint()
theano/sandbox/cuda/check.py  (new file, 0 → 100644)

import theano; import numpy; a = theano.shared(numpy.zeros(100000).astype('float32'))
theano/sandbox/cuda/test_gpuarray.py  (new file, 0 → 100644)

from theano.sandbox.cuda.cula import gpu_solve
import numpy as np
import theano.tensor as TT
import theano


def thrash():
    import numpy as np
    A_val = np.asarray([[2, 0, 0], [0, 1, 0], [0, 0, 1]], dtype="float32")
    #b_val = np.asarray([[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5]], dtype="float32")
    b_val = np.asarray([[0.5], [0.5], [0.5]], dtype="float32")

    A_empty = np.zeros((3, 3)).astype("float32")
    b_empty = np.zeros((3, 1)).astype("float32")

    import theano
    A = TT.matrix("A", dtype="float32")
    b = TT.matrix("b", dtype="float32")

    #theano.config.compute_test_value = 'warn'
    #A.tag.test_value = A_val
    #b.tag.test_value = b_val
    #A = theano.shared(A_val)
    #b = theano.shared(b_val)
    from theano.misc.pycuda_utils import to_gpuarray

    solver = gpu_solve(A, b)
    fn = theano.function([A, b], [solver])
    res = fn(A_val, b_val)
    print(np.asarray(res[0]))
    #import ipdb; ipdb.set_trace()


def thrash2():
    import numpy as np
    A_val = np.asarray([[2, 0, 0], [0, 1, 0], [0, 0, 1]], dtype="float32")
    #A_val = np.random.uniform(-0.01, 0.01, (10, 10)).astype("float32")
    #A_val +=1
    #A_val = np.linalg.svd(A_val)[0]
    #A_val = (A_val + A_val.T) / 2.0
    x_val = np.random.uniform(-0.4, 0.4, (A_val.shape[1], 1)).astype("float32")
    b_val = np.dot(A_val, x_val)
    #b_val = np.asarray([[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5]], dtype="float32")
    #b_val = np.asarray([[0.5], [0.5], [0.5]], dtype="float32")
    #A_empty = np.zeros((A_val.shape[1], A_val.shape[1])).astype("float32")
    x_res = np.zeros((A_val.shape[1], 1)).astype("float32")

    import theano
    A = TT.matrix("A", dtype="float32")
    b = TT.matrix("b", dtype="float32")

    #theano.config.compute_test_value = 'warn'
    #A.tag.test_value = A_val
    #b.tag.test_value = b_val
    #A = theano.shared(A_val)
    #b = theano.shared(b_val)
    from theano.misc.pycuda_utils import to_gpuarray

    solver = gpu_solve(A, b)
    fn = theano.function([A, b], [solver])
    res = fn(A_val, b_val)
    res[0].get(x_res)
    print(np.allclose(x_res, x_val))
    import ipdb; ipdb.set_trace()


thrash2()
theano/tensor/opt.py  (+59 -38)

@@ -479,13 +479,13 @@ def local_0_dot_x(node):
     y = node.inputs[1]
     replace = False
     try:
-        if get_scalar_constant_value(x) == 0:
+        if get_scalar_constant_value(x, only_process_constants=True) == 0:
             replace = True
     except NotScalarConstantError:
         pass

     try:
-        if get_scalar_constant_value(y) == 0:
+        if get_scalar_constant_value(y, only_process_constants=True) == 0:
             replace = True
     except NotScalarConstantError:
         pass

@@ -988,7 +988,7 @@ class ShapeFeature(object):
             # Do not call make_node for test_value
             s = Shape_i(i)(r)
             try:
-                s = get_scalar_constant_value(s)
+                s = get_scalar_constant_value(s, only_process_constants=True)
             except NotScalarConstantError:
                 pass
             return s

@@ -1058,7 +1058,7 @@ class ShapeFeature(object):
             assert len(idx) == 1
             idx = idx[0]
             try:
-                i = get_scalar_constant_value(idx)
+                i = get_scalar_constant_value(idx, only_process_constants=True)
             except NotScalarConstantError:
                 pass
             else:

@@ -1117,7 +1117,7 @@ class ShapeFeature(object):
                     # But we never timed this speed optimization!
                     self.lscalar_one.equals(shape_vars[i]) or
                     self.lscalar_one.equals(
-                        T.extract_constant(shape_vars[i]))
+                        T.extract_constant(shape_vars[i], only_process_constants=True))
                     for i in xrange(r.ndim)])
         self.shape_of[r] = tuple(shape_vars)
         for sv in shape_vars:

@@ -1196,7 +1196,7 @@ class ShapeFeature(object):
                     # But we never timed this speed optimization!
                     self.lscalar_one.equals(merged_shape[i]) or
                     self.lscalar_one.equals(
-                        T.extract_constant(merged_shape[i]))
+                        T.extract_constant(merged_shape[i], only_process_constants=True))
                     for i in xrange(r.ndim)])
         self.shape_of[r] = tuple(merged_shape)
         for sv in self.shape_of[r]:

@@ -1219,7 +1219,8 @@ class ShapeFeature(object):
                     # The two following comparison are a speed optimization
                     # But we never timed this speed optimization!
                     self.lscalar_one.equals(new_shape[idx]) or
-                    self.lscalar_one.equals(T.extract_constant(new_shape[idx]))
+                    self.lscalar_one.equals(T.extract_constant(
+                        new_shape[idx], only_process_constants=True))
                     for idx in xrange(r.ndim)])
         self.shape_of[r] = tuple(new_shape)
         for sv in self.shape_of[r]:

@@ -1893,7 +1894,7 @@ def local_subtensor_make_vector(node):
     if idx.ndim == 0:
         # if it is a constant we can do something with it
         try:
-            v = get_scalar_constant_value(idx)
+            v = get_scalar_constant_value(idx, only_process_constants=True)
             if isinstance(v, numpy.integer):
                 # Python 2.4 wants to index only with Python integers
                 v = int(v)

@@ -1998,7 +1999,7 @@ def local_useless_elemwise(node):
                 len(node.inputs) == 2):
             if isinstance(node.inputs[0], T.TensorConstant):
-                const_val = T.extract_constant(node.inputs[0])
+                const_val = T.extract_constant(node.inputs[0], only_process_constants=True)
                 if not isinstance(const_val, Variable):
                     if const_val == 0:
                         return zeros_like(node, 1)

@@ -2006,7 +2007,7 @@ def local_useless_elemwise(node):
                         return [node.inputs[1]]
             if isinstance(node.inputs[1], T.TensorConstant):
-                const_val = T.extract_constant(node.inputs[1])
+                const_val = T.extract_constant(node.inputs[1], only_process_constants=True)
                 if not isinstance(const_val, Variable):
                     if const_val == 0:
                         return zeros_like(node, 0)

@@ -2017,7 +2018,7 @@ def local_useless_elemwise(node):
                 len(node.inputs) == 2):
             if isinstance(node.inputs[0], T.TensorConstant):
-                const_val = T.extract_constant(node.inputs[0])
+                const_val = T.extract_constant(node.inputs[0], only_process_constants=True)
                 if not isinstance(const_val, Variable):
                     if const_val == 0:
                         return [node.inputs[1]]

@@ -2025,7 +2026,7 @@ def local_useless_elemwise(node):
                         return ones_like(node, 1)
             if isinstance(node.inputs[1], T.TensorConstant):
-                const_val = T.extract_constant(node.inputs[1])
+                const_val = T.extract_constant(node.inputs[1], only_process_constants=True)
                 if not isinstance(const_val, Variable):
                     if const_val == 0:
                         return [node.inputs[0]]

@@ -2317,7 +2318,8 @@ def local_upcast_elemwise_constant_inputs(node):
                 else:
                     try:
                         # works only for scalars
-                        cval_i = get_scalar_constant_value(i, elemwise=False)
+                        cval_i = get_scalar_constant_value(i, elemwise=False,
+                                                           only_process_constants=True)
                         if all(i.broadcastable):
                             new_inputs.append(T.shape_padleft(
                                 T.cast(cval_i, output_dtype),

@@ -2372,7 +2374,8 @@ def local_useless_inc_subtensor(node):
     if node.op.set_instead_of_inc is False:
         # This is an IncSubtensor, so the init value must be zeros
         try:
-            c = get_scalar_constant_value(node.inputs[0])
+            c = get_scalar_constant_value(node.inputs[0],
+                                          only_process_constants=True)
             if c != 0:
                 return
         except NotScalarConstantError:

@@ -2389,7 +2392,8 @@ def local_useless_inc_subtensor(node):
     # Put the constant inputs in the slice.
     idx_cst = get_idx_list(node.inputs[1:], node.op.idx_list)
     if all(isinstance(e, slice) and e.start is None and
-           e.stop is None and (e.step is None or T.extract_constant(e.step) == -1)
+           e.stop is None and (e.step is None or T.extract_constant(
+               e.step, only_process_constants=True) == -1)
            for e in idx_cst):
         # IncSubtensor broadcast node.inputs[1] on node.inputs[0]
         # based on run time shapes, so we must check they are the same.

@@ -2459,7 +2463,8 @@ def local_useless_slice(node):
         for s in slices[::-1]:
             # check if slice and then check slice indices
             if (isinstance(s, slice) and s.start is None and s.stop is None and
-                    (s.step is None or T.extract_constant(s.step) == 1)):
+                    (s.step is None or T.extract_constant(
+                        s.step, only_process_constants=True) == 1)):
                 last_slice -= 1
             else:
                 break

@@ -2515,7 +2520,8 @@ def local_useless_subtensor(node):
             if isinstance(idx.stop, (integer_types, numpy.integer)):
                 length_pos_data = sys.maxsize
                 try:
-                    length_pos_data = get_scalar_constant_value(length_pos)
+                    length_pos_data = get_scalar_constant_value(
+                        length_pos, only_process_constants=True)
                 except NotScalarConstantError:
                     pass

@@ -2555,7 +2561,8 @@ def local_useless_subtensor(node):
     elif isinstance(node.op, AdvancedSubtensor1):
         # get length of the indexed tensor along the first axis
         try:
-            length = get_scalar_constant_value(shape_of[node.inputs[0]][0])
+            length = get_scalar_constant_value(shape_of[node.inputs[0]][0],
+                                               only_process_constants=True)
         except NotScalarConstantError:
             return False

@@ -2572,7 +2579,8 @@ def local_useless_subtensor(node):
                 return False
         elif idx.owner is not None and isinstance(idx.owner.op, T.ARange):
             try:
-                start, stop, step = map(get_scalar_constant_value,
+                start, stop, step = map(
+                    lambda x: get_scalar_constant_value(x, only_process_constants=True),
                                         idx.owner.inputs)
             except NotScalarConstantError:
                 return False

@@ -3197,7 +3205,7 @@ def local_incsubtensor_of_zeros(node):
         y = node.inputs[1]
         replace = False
         try:
-            if get_scalar_constant_value(y) == 0:
+            if get_scalar_constant_value(y, only_process_constants=True) == 0:
                 replace = True
         except NotScalarConstantError:
             pass

@@ -3227,12 +3235,12 @@ def local_setsubtensor_of_constants(node):
         replace_y = None

         try:
-            replace_x = get_scalar_constant_value(x)
+            replace_x = get_scalar_constant_value(x, only_process_constants=True)
         except NotScalarConstantError:
             pass

         try:
-            replace_y = get_scalar_constant_value(y)
+            replace_y = get_scalar_constant_value(y, only_process_constants=True)
         except NotScalarConstantError:
             pass

@@ -3276,7 +3284,7 @@ def local_adv_sub1_adv_inc_sub1(node):
     if idx is not idx2:
         return
     if (not inp.owner.op.set_instead_of_inc and
-            T.extract_constant(x) != 0):
+            T.extract_constant(x, only_process_constants=True) != 0):
         return
     cond = [T.all(T.and_(T.lt(idx, x.shape[0]), T.ge(idx, -x.shape[0])))]
     if not node.fgraph.shape_feature.same_shape(idx, y, 0, 0):

@@ -3568,7 +3576,8 @@ def local_join_empty(node):
         return
     new_inputs = []
     try:
-        join_idx = get_scalar_constant_value(node.inputs[0])
+        join_idx = get_scalar_constant_value(node.inputs[0],
+                                             only_process_constants=True)
     except NotScalarConstantError:
         return
     for idx in xrange(1, len(node.inputs)):

@@ -3727,7 +3736,8 @@ def local_useless_switch(node):
     """
     if (isinstance(node.op, T.Elemwise) and
             isinstance(node.op.scalar_op, scalar.basic.Switch)):
-        cond = T.extract_constant(node.inputs[0], elemwise=False)
+        cond = T.extract_constant(node.inputs[0], elemwise=False,
+                                  only_process_constants=True)
         if type(cond) is numpy.ndarray and cond.ndim == 0:
             if cond == 0:
                 correct_out = node.inputs[2]

@@ -3775,8 +3785,8 @@ def local_useless_switch(node):
            isinstance(cond_var.owner.op.scalar_op, scalar.LE) and \
            cond_var.owner.inputs[0].owner and \
            isinstance(cond_var.owner.inputs[0].owner.op, Shape_i) and \
-           T.extract_constant(cond_var.owner.inputs[1]) == 0 and \
-           T.extract_constant(left) == 0 and \
+           T.extract_constant(cond_var.owner.inputs[1], only_process_constants=True) == 0 and \
+           T.extract_constant(left, only_process_constants=True) == 0 and \
            right is cond_var.owner.inputs[0]:
             assert right.type == node.outputs[0].type
             # No need to copy over stacktrace, because the right input node

@@ -3889,7 +3899,8 @@ def local_div_switch_sink(node):
     if node.inputs[0].owner and node.inputs[0].owner.op == T.switch:
         switch = node.inputs[0].owner
         try:
-            if get_scalar_constant_value(switch.inputs[1]) == 0.:
+            if get_scalar_constant_value(switch.inputs[1],
+                                         only_process_constants=True) == 0.:
                 fdiv = op(switch.inputs[2], node.inputs[1])
                 # Copy over stacktrace for elementwise division op
                 # from previous elementwise multiplication op.

@@ -3911,7 +3922,8 @@ def local_div_switch_sink(node):
         except NotScalarConstantError:
             pass
         try:
-            if get_scalar_constant_value(switch.inputs[2]) == 0.:
+            if get_scalar_constant_value(switch.inputs[2],
+                                         only_process_constants=True) == 0.:
                 fdiv = op(switch.inputs[1], node.inputs[1])
                 # Copy over stacktrace for elementwise division op
                 # from previous elementwise multiplication op.

@@ -3976,7 +3988,8 @@ def local_useless_tile(node):
     """
     if isinstance(node.op, T.Tile):
         try:
-            a = T.get_scalar_constant_value(node.inputs[1])
+            a = T.get_scalar_constant_value(node.inputs[1],
+                                            only_process_constants=True)
             if a == 1:
                 try:
                     l = T.get_vector_length(node.inputs[1])

@@ -4159,7 +4172,8 @@ if 0:
     def tmp(thing):
         try:
-            return T.get_scalar_constant_value(thing)
+            return T.get_scalar_constant_value(thing,
+                                               only_process_constants=True)
         except (TypeError, ValueError) as e:
             print(e, thing.owner.inputs[0])
             return None

@@ -5156,7 +5170,7 @@ def local_reduce_join(node):
             node.inputs[0].owner and
             isinstance(node.inputs[0].owner.op, T.Join)):
         join = node.inputs[0].owner
-        if T.extract_constant(join.inputs[0]) != 0:
+        if T.extract_constant(join.inputs[0], only_process_constants=True) != 0:
             return
         if isinstance(node.op.scalar_op, (scalar.Maximum, scalar.Minimum)):

@@ -5206,7 +5220,9 @@ def local_reduce_join(node):
         # We add the new check late to don't add extra warning.
         try:
-            join_axis = get_scalar_constant_value(join.inputs[0])
+            join_axis = get_scalar_constant_value(
+                join.inputs[0],
+                only_process_constants=True)
             if join_axis != reduce_axis[0]:
                 return
         except NotScalarConstantError:

@@ -5288,7 +5304,8 @@ def local_opt_alloc(node):
         if (node.op.axis is None or
                 node.op.axis == tuple(range(input.ndim))):
             try:
-                val = get_scalar_constant_value(input)
+                val = get_scalar_constant_value(input,
+                                                only_process_constants=True)
                 assert val.size == 1
                 # check which type of op
                 casted = T.mul(*shapes).astype(str(input.dtype))

@@ -5302,7 +5319,8 @@ def local_opt_alloc(node):
                 pass
         else:
             try:
-                val = get_scalar_constant_value(input)
+                val = get_scalar_constant_value(input,
+                                                only_process_constants=True)
                 assert val.size == 1
                 val = val.reshape(1)[0]
                 to_prod = [shapes[i] for i in xrange(len(shapes))

@@ -5746,7 +5764,8 @@ def local_abs_merge(node):
             inputs.append(i.owner.inputs[0])
         elif isinstance(i, Constant):
             try:
-                const = get_scalar_constant_value(i)
+                const = get_scalar_constant_value(i,
+                                                  only_process_constants=True)
             except NotScalarConstantError:
                 return False
             if not (const >= 0).all():

@@ -6328,7 +6347,8 @@ def local_grad_log_erfc_neg(node):
         mul_neg = T.mul(*mul_inputs)
         try:
-            cst2 = get_scalar_constant_value(mul_neg.owner.inputs[0])
+            cst2 = get_scalar_constant_value(mul_neg.owner.inputs[0],
+                                             only_process_constants=True)
         except NotScalarConstantError:
             return False

@@ -6355,7 +6375,8 @@ def local_grad_log_erfc_neg(node):
             x = erfc_x
             try:
-                cst = get_scalar_constant_value(erfc_x.owner.inputs[0])
+                cst = get_scalar_constant_value(erfc_x.owner.inputs[0],
+                                                only_process_constants=True)
             except NotScalarConstantError:
                 return False
             if cst2 != -cst * 2:
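For context, here is an illustrative end-to-end sketch (not part of the commit) of one of the rewrites touched above, local_useless_switch, which folds a switch whose condition is a literal constant. The exact optimized graph depends on the Theano version and compilation mode:

    import theano
    import theano.tensor as T

    a = T.vector('a')
    b = T.vector('b')
    out = T.switch(T.constant(0), a, b)   # condition is a TensorConstant

    f = theano.function([a, b], out)
    # With the constant condition recognised, the compiled graph should reduce
    # to (a broadcast of) the "false" branch b.
    theano.printing.debugprint(f)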
theano/tests/test_debugprint.py  (new file, 0 → 100644)

import theano
import theano.tensor as T
from theano import printing

X = T.matrix('X')

results, updates = theano.scan(fn=lambda x: 2 * x.sum() + 1,
                               outputs_info=None,
                               sequences=[X],
                               non_sequences=None)

printing.debugprint(theano.function([X], results))
theano/tests/test_scanmode.py  (new file, 0 → 100644)

import theano
import theano.tensor as T
from theano import printing

X = T.matrix('X')

results, updates = theano.scan(fn=lambda x: 2 * x.sum() + 3,
                               outputs_info=None,
                               sequences=[X],
                               non_sequences=None)

printing.debugprint(results)