提交 147bcead authored 作者: Frederic Bastien's avatar Frederic Bastien

Do review

上级 3173f02c
......@@ -257,7 +257,7 @@ class GpuGer(BlasOp):
x = as_gpuarray_variable(x, ctx_name)
y = as_gpuarray_variable(y, ctx_name)
alpha = as_tensor_variable(alpha)
if len(set([A.dtype, alpha.dtype, x.dtype, y.dtype])) != 1:
if not(A.dtype == x.dtype == y.dtype == alpha.dtype):
raise TypeError('ger requires matching dtypes',
(A.dtype, alpha.dtype, x.dtype, y.dtype))
......@@ -265,7 +265,6 @@ class GpuGer(BlasOp):
assert A.ndim == 2
assert x.ndim == 1
assert y.ndim == 1
assert A.dtype == x.dtype == y.dtype
return Apply(self, [A, alpha, x, y], [A.type()])
def perform(self, node, inp, out):
......
......@@ -24,7 +24,7 @@ from ..blas import (gpugemv_inplace, gpugemv_no_inplace,
GpuGemvTester = makeTester(
'GpuGemvTester',
op=gemv_inplace, gpu_op=gpugemv_inplace,
# It don't support float16
# It doesn't support float16
cases=dict(dot_vv=[rand(1), 1, rand(1, 2), rand(2), 0],
dot_vm=[rand(3), 1, rand(3, 2), rand(2), 0],
float32=[rand(3).astype('float32'), np.float32(1),
......
......@@ -495,7 +495,7 @@ def test_many_arg_elemwise():
def test_not_useless_scalar_gpuelemwise():
# We don't want to move elemwise on scalar on the GPU when the
# result will be used on the GPU!
# result will not be used on the GPU!
with theano.configparser.change_flags(warn_float64='ignore'):
X = tensor.fmatrix()
......
......@@ -24,7 +24,7 @@ except ImportError:
_context_reg = {}
def do_gpu_support(data):
def gpu_supported(data):
"""
Is the following data supported on the GPU?
......@@ -50,7 +50,7 @@ def move_to_gpu(data):
(it must have dtype and ndim parameter)
"""
# We don't support complex on the GPU
if not do_gpu_support(data):
if not gpu_supported(data):
return False
# We don't want scalars on the GPU.
if data.ndim == 0:
......@@ -651,7 +651,7 @@ def gpuarray_shared_constructor(value, name=None, strict=False,
if target is notset:
target = None
if not do_gpu_support(value):
if not gpu_supported(value):
raise TypeError('We do not move that data by default to the GPU')
try:
get_context(target)
......
......@@ -317,7 +317,7 @@ class Ger(Op):
y = T.as_tensor_variable(y)
x = T.as_tensor_variable(x)
alpha = T.as_tensor_variable(alpha)
if len(set([A.dtype, alpha.dtype, x.dtype, y.dtype])) != 1:
if not(A.dtype == x.dtype == y.dtype == alpha.dtype):
raise TypeError('ger requires matching dtypes',
(A.dtype, alpha.dtype, x.dtype, y.dtype))
if alpha.ndim != 0:
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论