testgroup / pytensor · commit 6937f122
Authored Sep 30, 2013 by Pascal Lamblin
Merge pull request #1524 from nouiz/fix_cycle
Fix cycle
Parents: 88157f68 7efba27f
Showing 10 changed files with 134 additions and 63 deletions (+134 −63).
theano/gof/tests/test_lazy.py                         +5   −5
theano/gof/tests/test_op.py                          +40  −38
theano/gof/tests/test_opt.py                          +5   −5
theano/sandbox/cuda/cuda_ndarray.cu                  +32   −2
theano/sandbox/cuda/cuda_ndarray.cuh                  +1   −1
theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py   +2   −2
theano/sandbox/cuda/tests/test_cuda_ndarray.py       +11   −0
theano/tensor/basic.py                                +4   −0
theano/tensor/opt.py                                 +27   −9
theano/tensor/tests/test_basic.py                     +7   −1
theano/gof/tests/test_lazy.py
@@ -117,17 +117,17 @@ def test_ifelse():
                      mode=Mode(linker=linker, optimizer='fast_run'))
         try:
-            print "case 1"
+            # print "case 1"
             f(1, 'a', 'b')
             assert False
         except NotImplementedOp.E:
             pass
-        print "... passed"
+        # print "... passed"

-        print "case 2"
-        print f(0, 'a', 'b')
+        # print "case 2"
+        # print f(0, 'a', 'b')
         assert f(0, 'a', 'b') == 'b'
-        print "... passed"
+        # print "... passed"


 def more_complex_test():
theano/gof/tests/test_op.py

 from copy import copy
 import unittest

 import numpy

@@ -45,6 +44,7 @@ class MyType(Type):
             raise ValueError("Invalid value")
         return x

 class MyOp(Op):

     def make_node(self, *inputs):

@@ -81,14 +81,16 @@ class TestOp:

     def test_sanity_0(self):
         r1, r2 = MyType(1)(), MyType(2)()
         node = MyOp.make_node(r1, r2)
-        assert [x for x in node.inputs] == [r1, r2]  # Are the inputs what I provided?
-        assert [x.type for x in node.outputs] == [MyType(3)]  # Are the outputs what I expect?
+        # Are the inputs what I provided?
+        assert [x for x in node.inputs] == [r1, r2]
+        # Are the outputs what I expect?
+        assert [x.type for x in node.outputs] == [MyType(3)]
         assert node.outputs[0].owner is node and node.outputs[0].index == 0

     # validate
     def test_validate(self):
         try:
             MyOp(Generic()(), MyType(1)())  # MyOp requires MyType instances
             raise Exception("Expected an exception")
         except Exception, e:
             if str(e) != "Error 1":

@@ -100,6 +102,7 @@ class TestOp:
         rval = f()
         assert rval == 'test Op no input'

 class TestMakeThunk(unittest.TestCase):
     def test_no_c_code(self):
         class IncOnePython(Op):

@@ -121,28 +124,25 @@ class TestMakeThunk(unittest.TestCase):
                 output, = outputs
                 output[0] = input + 1

         i = scalar.int32('i')
         o = IncOnePython()(i)

         # Check that the c_code function is not implemented
         self.assertRaises((NotImplementedError, utils.MethodNotDefined),
                           o.owner.op.c_code,
                           o.owner, 'o', ['x'], 'z', {'fail': ''})

         storage_map = {i: [numpy.int32(3)], o: [None]}
         compute_map = {i: [True], o: [False]}

         thunk = o.owner.op.make_thunk(o.owner, storage_map, compute_map,
                                       no_recycling=[])

         required = thunk()
         # Check everything went OK
         assert not required  # We provided all inputs
         assert compute_map[o][0]
         assert storage_map[o][0] == 4

@@ -166,28 +166,25 @@ class TestMakeThunk(unittest.TestCase):
                 z, = outputs
                 return "%(z)s = %(x)s + 1;" % locals()

         i = scalar.int32('i')
         o = IncOneC()(i)

         # Check that the perform function is not implemented
         self.assertRaises((NotImplementedError, utils.MethodNotDefined),
                           o.owner.op.perform,
                           o.owner, 0, [None])

         storage_map = {i: [numpy.int32(3)], o: [None]}
         compute_map = {i: [True], o: [False]}

         thunk = o.owner.op.make_thunk(o.owner, storage_map, compute_map,
                                       no_recycling=[])

         if theano.config.cxx:
             required = thunk()
             # Check everything went OK
             assert not required  # We provided all inputs
             assert compute_map[o][0]
             assert storage_map[o][0] == 4
         else:

@@ -201,30 +198,33 @@ def test_test_value_python_objects():

 def test_test_value_ndarray():
     x = numpy.zeros((5, 5))
     v = op.get_test_value(x)
     assert (v == x).all()


 def test_test_value_constant():
     x = T.as_tensor_variable(numpy.zeros((5, 5)))
     v = op.get_test_value(x)
     assert numpy.all(v == numpy.zeros((5, 5)))


 def test_test_value_shared():
     x = shared(numpy.zeros((5, 5)))
     v = op.get_test_value(x)
     assert numpy.all(v == numpy.zeros((5, 5)))


 def test_test_value_op():
     try:
         prev_value = config.compute_test_value
         config.compute_test_value = 'raise'
         x = T.log(numpy.ones((5, 5)))
         v = op.get_test_value(x)
         assert numpy.allclose(v, numpy.zeros((5, 5)))
     finally:
         config.compute_test_value = prev_value

@@ -244,11 +244,11 @@ def test_get_debug_values_no_debugger():
     finally:
         config.compute_test_value = prev_value


 def test_get_det_debug_values_ignore():
     """get_debug_values should return [] when debugger is ignore
    and some values are missing """

     prev_value = config.compute_test_value
     try:
         config.compute_test_value = 'ignore'

@@ -267,21 +267,21 @@ def test_get_debug_values_success():
    (and the debugger is on)"""

     prev_value = config.compute_test_value

     for mode in ['ignore', 'warn', 'raise']:
         try:
             config.compute_test_value = mode

             x = T.vector()
             x.tag.test_value = numpy.zeros((4,), dtype=config.floatX)
             y = numpy.zeros((5, 5))

             iters = 0
             for x_val, y_val in op.get_debug_values(x, y):
                 assert x_val.shape == (4,)
                 assert y_val.shape == (5, 5)
                 iters += 1

@@ -290,6 +290,7 @@ def test_get_debug_values_success():
         finally:
             config.compute_test_value = prev_value

 def test_get_debug_values_exc():
     """tests that get_debug_value raises an exception when
    debugger is set to raise and a value is missing """

@@ -317,13 +318,14 @@ def test_get_debug_values_exc():
     finally:
         config.compute_test_value = prev_value

 def test_debug_error_message():
     """tests that debug_error_message raises an
    exception when it should."""

     prev_value = config.compute_test_value

     for mode in ['ignore', 'raise']:
         try:
             config.compute_test_value = mode
theano/gof/tests/test_opt.py
@@ -360,7 +360,7 @@ class TestEquilibrium(object):
         x, y, z = map(MyVariable, 'xyz')
         e = op3(op4(x, y))
         g = Env([x, y, z], [e])
-        print g
+        # print g
         opt = EquilibriumOptimizer(
             [PatternSub((op1, 'x', 'y'), (op2, 'x', 'y')),
              PatternSub((op4, 'x', 'y'), (op1, 'x', 'y')),

@@ -368,14 +368,14 @@ class TestEquilibrium(object):
             ],
             max_use_ratio=10)
         opt.optimize(g)
-        print g
+        # print g
         assert str(g) == '[Op2(x, y)]'

     def test_2(self):
         x, y, z = map(MyVariable, 'xyz')
         e = op1(op1(op3(x, y)))
         g = Env([x, y, z], [e])
-        print g
+        # print g
         opt = EquilibriumOptimizer(
             [PatternSub((op1, (op2, 'x', 'y')), (op4, 'x', 'y')),
              PatternSub((op3, 'x', 'y'), (op4, 'x', 'y')),

@@ -391,7 +391,7 @@ class TestEquilibrium(object):
         x, y, z = map(MyVariable, 'xyz')
         e = op3(op4(x, y))
         g = Env([x, y, z], [e])
-        print 'before', g
+        # print 'before', g
         # display pesky warnings along with stdout
         # also silence logger for 'theano.gof.opt'
         _logger = logging.getLogger('theano.gof.opt')

@@ -407,5 +407,5 @@ class TestEquilibrium(object):
             opt.optimize(g)
         finally:
             _logger.setLevel(oldlevel)
-        print 'after', g
+        # print 'after', g
         assert str(g) == '[Op1(x, y)]'
theano/sandbox/cuda/cuda_ndarray.cu
@@ -422,8 +422,38 @@ static PyMemberDef CudaNdarray_members[] =
     {NULL}  /* Sentinel */
 };

-PyObject * CudaNdarray_CreateArrayObj(CudaNdarray * self)
+PyObject * CudaNdarray_CreateArrayObj(CudaNdarray * self, PyObject * args)
 {
+    PyObject * dtype = NULL;
+    if (args && !PyArg_ParseTuple(args, "|O", &dtype))
+        return NULL;
+    if (dtype) {
+        PyArray_Descr* dtype2;
+        // PyArray_DescrConverter try to convert anything to a PyArray_Descr.
+        if (!PyArray_DescrConverter(dtype, &dtype2))
+        {
+            PyObject * str = PyObject_Repr(dtype);
+            PyErr_Format(PyExc_TypeError,
+                         "CudaNdarray dtype parameter not understood: %s",
+                         PyString_AsString(str)
+            );
+            Py_CLEAR(str);
+            return NULL;
+        }
+        int typeNum = dtype2->type_num;
+        Py_DECREF(dtype2);
+        if (typeNum != NPY_FLOAT32)
+        {
+            PyObject * str = PyObject_Repr(dtype);
+            PyErr_Format(PyExc_TypeError,
+                         "CudaNdarray support only support float32 dtype, provided: %d",
+                         typeNum
+            );
+            Py_CLEAR(str);
+            return NULL;
+        }
+    }
     int verbose = 0;
     if (self->nd >= 0 && CudaNdarray_SIZE(self) == 0) {
         npy_intp * npydims = (npy_intp*)malloc(self->nd * sizeof(npy_intp));

@@ -1291,7 +1321,7 @@ CudaNdarray_exp(CudaNdarray* self)
 static PyMethodDef CudaNdarray_methods[] =
 {
-    {"__array__", (PyCFunction)CudaNdarray_CreateArrayObj, METH_NOARGS,
+    {"__array__", (PyCFunction)CudaNdarray_CreateArrayObj, METH_VARARGS,
      "Copy from the device to a numpy ndarray"},
     {"__copy__", (PyCFunction)CudaNdarray_View, METH_NOARGS,
theano/sandbox/cuda/cuda_ndarray.cuh
@@ -473,7 +473,7 @@ DllExport int CudaNdarray_CopyFromCudaNdarray(CudaNdarray * self,
  * Transfer the contents of CudaNdarray `self` to a new numpy ndarray.
  */
 DllExport PyObject *
-CudaNdarray_CreateArrayObj(CudaNdarray * self);
+CudaNdarray_CreateArrayObj(CudaNdarray * self, PyObject * args=NULL);

 DllExport PyObject * CudaNdarray_ZEROS(int n, int * dims);
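Taken together, the cuda_ndarray.cu and cuda_ndarray.cuh hunks above let CudaNdarray's __array__ method accept an optional dtype argument (hence the switch from METH_NOARGS to METH_VARARGS): float32 is accepted, anything else raises TypeError. A minimal usage sketch of the behaviour this enables, assuming a working theano.sandbox.cuda build and an available GPU (it mirrors the new test added to test_cuda_ndarray.py further down):

    import numpy
    import theano.sandbox.cuda as cuda

    a = numpy.random.rand(4, 5).astype('float32')
    b = cuda.CudaNdarray(a)                  # device-side copy of `a`

    c = numpy.asarray(b)                     # __array__() with no dtype, as before
    d = numpy.asarray(b, dtype='float32')    # now routed through __array__(dtype)
    assert numpy.all(c == a) and numpy.all(d == a)

    try:
        numpy.asarray(b, dtype='int8')       # any non-float32 dtype is rejected
    except TypeError:
        pass                                 # expected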
theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py
@@ -726,7 +726,7 @@ class TestConv2DGPU(unittest.TestCase):
         featshp_logical = (featshp[0], featshp[1],
                            featshp[2] * stride, featshp[3] * stride)
         kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
-        print featshp, kshp_rotated, featshp_logical[1:], kshp[2:]
+        # print featshp, kshp_rotated, featshp_logical[1:], kshp[2:]
         image_estimate = tensor.nnet.conv2d(a, kernel_rotated,
                                             border_mode='full',
                                             image_shape=featshp,

@@ -735,7 +735,7 @@ class TestConv2DGPU(unittest.TestCase):
                                             kshp_logical=kshp[2:])

         func = theano.function([a, A], image_estimate, mode=theano_mode)
-        theano.printing.debugprint(func,)
+        # theano.printing.debugprint(func,)

         assert any([isinstance(node.op, theano.sandbox.cuda.blas.GpuConv)
                     for node in func.maker.fgraph.toposort()])
theano/sandbox/cuda/tests/test_cuda_ndarray.py
@@ -38,6 +38,17 @@ def test_host_to_device():
         c = numpy.asarray(b)
         assert numpy.all(a == c)

+        # test with float32 dtype
+        d = numpy.asarray(b, dtype='float32')
+        assert numpy.all(a == d)
+
+        # test with not float32 dtype
+        try:
+            numpy.asarray(b, dtype='int8')
+            assert False
+        except TypeError:
+            pass
+

 def test_add_iadd_idiv():
     for shapes in (
theano/tensor/basic.py
@@ -3425,6 +3425,10 @@ class Join(Op):
             bcastable = [False] * len(as_tensor_variable_args[0].type.broadcastable)
+        if not python_all([x.ndim == len(bcastable)
+                           for x in as_tensor_variable_args[1:]]):
+            raise TypeError(
+                "Join() can only join tensor with the same number of dimensions.")
         inputs = [as_tensor_variable(axis)] + list(as_tensor_variable_args)
         if inputs[0].type not in int_types:
             raise TypeError('Axis could not be cast to an integer type',
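The four added lines make Join.make_node reject inputs whose number of dimensions differs from the first input, instead of building a graph that only fails later. A small sketch of what the check enforces, assuming the usual theano.tensor import (the new test_mixed_ndim_error test in test_basic.py below exercises the same path through shared variables):

    import theano.tensor as T

    v = T.vector()       # ndim == 1
    m = T.matrix()       # ndim == 2

    try:
        T.join(0, v, m)  # mixed ndim is now rejected at graph-construction time
    except TypeError:
        pass             # "Join() can only join tensor with the same number of dimensions."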
theano/tensor/opt.py
@@ -395,9 +395,9 @@ def local_dimshuffle_lift(node):
     inode = input.owner
     if inode and isinstance(inode.op, Elemwise) and (len(input.clients) == 1):
         # Don't use make_node to have tag.test_value set.
-        ret = inode.op(*[DimShuffle(input.type.broadcastable,
+        ret = inode.op(*[DimShuffle(inp.type.broadcastable,
                                     op.new_order,
-                                    op.inplace)(input) for input in
+                                    op.inplace)(inp) for inp in
                          inode.inputs],
                        **dict(return_list=True))
         return ret
     if inode and isinstance(inode.op, DimShuffle):

@@ -943,12 +943,12 @@ class ShapeFeature(object):
             else:
                 new_shape.append(s_j)
         assert all([not hasattr(r.type, "broadcastable") or
-                    not r.type.broadcastable[i] or
+                    not r.type.broadcastable[idx] or
                     # The two following comparison are a speed optimization
                     # But we never timed this speed optimization!
-                    self.lscalar_one.equals(new_shape[i]) or
-                    self.lscalar_one.equals(T.extract_constant(new_shape[i]))
-                    for i in range(r.ndim)])
+                    self.lscalar_one.equals(new_shape[idx]) or
+                    self.lscalar_one.equals(T.extract_constant(new_shape[idx]))
+                    for idx in range(r.ndim)])
         self.shape_of[r] = tuple(new_shape)
         for sv in self.shape_of[r]:
             self.shape_of_reverse_index.setdefault(sv, set()).add(r)

@@ -1041,13 +1041,13 @@ class ShapeFeature(object):
         # Ensure shapes are in 'int64'. This is to make sure the assert
         # found in the `local_useless_subtensor` optimization does not fail.
-        new_shape = []
         for sh_idx, sh in enumerate(o_shapes):
             if sh is None:
                 continue
             if not isinstance(sh, (list, tuple)):
                 raise ValueError("infer_shape of %s didn't return a list of"
                                  " list. It returned '%s'" %
                                  (str(node), str(o_shapes)))
+            new_shape = []
             for i, d in enumerate(sh):
                 # Note: we ignore any shape element that is not typed (i.e.,
                 # does not have a 'dtype' attribute). This means there may

@@ -1064,7 +1064,6 @@ class ShapeFeature(object):
                 # 'int64'.
                 new_shape += sh[len(new_shape):]
             o_shapes[sh_idx] = tuple(new_shape)
-            new_shape = []

         for r, s in izip(node.outputs, o_shapes):
             self.set_shape(r, s)

@@ -1091,6 +1090,23 @@ class ShapeFeature(object):
         # At that point, node is no longer a client of r, but of new_r
         for (shpnode, idx) in (r.clients + [(node, i)]):
             if isinstance(getattr(shpnode, 'op', None), Shape_i):
                 idx = shpnode.op.i
                 repl = self.shape_of[new_r][idx]
+                if repl.owner is shpnode:
+                    # This mean the replacement shape object is
+                    # exactly the same as the current shape object. So
+                    # no need for replacement. This happen for example
+                    # with the InputToGpuOptimizer optimizer.
+                    continue
+                if (repl.owner and
+                        repl.owner.inputs[0] is shpnode.inputs[0] and
+                        isinstance(repl.owner.op, Shape_i) and
+                        repl.owner.op.i == shpnode.op.i):
+                    # The replacement is a shape_i of the same
+                    # input. So no need to do this equivalent
+                    # replacement.
+                    continue
                 self.scheduled[shpnode] = new_r
         # In case 2, if r is a variable that we've scheduled for shape update,
         # then we should cancel it.

@@ -1228,6 +1244,9 @@ def local_track_shape_i(node):
     except AttributeError:
         return
     if node in shape_feature.scheduled:
+        # Don't unschedule node as it could be reinserted in the
+        # fgraph as we don't change it in the shapefeature internal
+        # structure.
         assert isinstance(node.op, Shape_i)
         replacement = shape_feature.scheduled[node]
         return [shape_feature.shape_of[replacement][node.op.i]]

@@ -2271,7 +2290,6 @@ def local_join_1(node):
     """
     if not isinstance(node.op, T.Join):
         return
     axis = node.inputs[0]
     tensors = node.inputs[1:]
     if len(tensors) == 1:
         return [tensors[0]]
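A side note on the first opt.py hunk: the list-comprehension variable in local_dimshuffle_lift is renamed from input to inp. Under Python 2, which this code targets, a list-comprehension variable leaks into the enclosing scope, so the old spelling silently rebound the surrounding input name as a side effect of building the list. A standalone illustration of that pitfall, using hypothetical names rather than Theano code:

    # Python 2 semantics: the comprehension variable leaks into the enclosing scope.
    def shadowed(items, input):
        sizes = [len(input) for input in items]  # rebinds `input` to the last item, if any
        return sizes, input                      # `input` may no longer be the argument

    def fixed(items, input):
        sizes = [len(inp) for inp in items]      # distinct name, nothing is rebound
        return sizes, input                      # `input` is still the original argument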
theano/tensor/tests/test_basic.py
@@ -3317,6 +3317,12 @@ class T_Join_and_Split(unittest.TestCase):
                 numpy.concatenate([T_shared.get_value(),
                                    T_shared.get_value()]))

+    def test_mixed_ndim_error(self):
+        rng = numpy.random.RandomState(seed=utt.fetch_seed())
+        v = self.shared(rng.rand(4).astype(self.floatX))
+        m = self.shared(rng.rand(4, 4).astype(self.floatX))
+        self.assertRaises(TypeError, self.join_op(), 0, v, m)
+

 class test_comparison(unittest.TestCase):
     """Test <, >, <=, >=, == and !=

@@ -5694,7 +5700,7 @@ class T_get_scalar_constant_value(unittest.TestCase):
         # For now get_scalar_constant_value goes through only MakeVector and Join of
         # scalars.
         v = tensor.ivector()
-        a = tensor.stack(v, 2, 3)
+        a = tensor.stack(v, [2], [3])
         self.assertRaises(tensor.NotScalarConstantError,
                           get_scalar_constant_value, a[0])
         self.assertRaises(tensor.NotScalarConstantError,
                           get_scalar_constant_value, a[1])
         self.assertRaises(tensor.NotScalarConstantError,
                           get_scalar_constant_value, a[2])
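The one-line change in T_get_scalar_constant_value looks like a follow-up to the new Join ndim check in basic.py: v is an ivector, so stacking it together with the Python scalars 2 and 3 gives the stacked pieces different numbers of dimensions, while [2] and [3] keep every piece one-dimensional like v. A short sketch of the distinction, assuming the usual imports (this assumes stack lowers to a Join for non-scalar arguments, as the comment in the test suggests):

    import theano.tensor as T

    v = T.ivector()
    a = T.stack(v, [2], [3])   # every stacked piece is 1-d: accepted
    # The previous spelling mixed ndims and is expected to trip the new check:
    # a = T.stack(v, 2, 3)     # would raise TypeError from Join.make_node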