提交 05c424e7 authored 作者: Frederic Bastien's avatar Frederic Bastien

flake8, docstring and small crash fix

上级 a7b4a79e
......@@ -170,7 +170,7 @@ class GpuGemm(BlasOp):
beta = as_tensor_variable(beta)
if not (A.dtype == B.dtype == C.dtype):
raise TypeError(Gemm.E_mixed,
raise TypeError(theano.tensor.blas.Gemm.E_mixed,
(A.dtype, B.dtype, C.dtype,
alpha.dtype, beta.dtype))
if A.dtype == 'float16':
......@@ -178,7 +178,7 @@ class GpuGemm(BlasOp):
else:
assert alpha.dtype == beta.dtype == A.dtype
if not A.dtype.startswith('float'):
raise TypeError(Gemm.E_float, (A.dtype))
raise TypeError(theano.tensor.blas.Gemm.E_float, (A.dtype))
assert alpha.ndim == 0
assert beta.ndim == 0
assert A.ndim == 2
......
......@@ -25,9 +25,9 @@ GpuGemvTester = makeTester(
op=gemv_inplace, gpu_op=gpugemv_inplace,
cases=dict(dot_vv=[rand(1), 1, rand(1, 2), rand(2), 0],
dot_vm=[rand(3), 1, rand(3, 2), rand(2), 0],
# float16=[rand(3).astype('float16'), np.float32(1),
# rand(3, 2).astype('float16'),
# rand(2).astype('float16'), np.float32(0)],
# float16=[rand(3).astype('float16'), np.float32(1),
# rand(3, 2).astype('float16'),
# rand(2).astype('float16'), np.float32(0)],
float32=[rand(3).astype('float32'), np.float32(1),
rand(3, 2).astype('float32'),
rand(2).astype('float32'), np.float32(0)],
......@@ -42,6 +42,21 @@ GpuGemvTester = makeTester(
)
def test_gemv_float16():
    """Check that gemv works on float16 inputs with float32 alpha/beta.

    Builds y' = alpha * dot(A, x) + beta * y with float16 operands on the
    GPU and compares against the NumPy reference computation.
    """
    float16 = [rand(3).astype('float16'),
               np.asarray(1, dtype=np.float32),
               rand(3, 2).astype('float16'),
               rand(2).astype('float16'),
               np.asarray(0.5, dtype=np.float32)]
    float16 = [gpuarray_shared_constructor(val)
               for val in float16]
    o = gpugemv_no_inplace(*float16)
    f = theano.function([], o)
    y, alpha, A, x, beta = float16
    out = f()
    # Bug fix: utt has no `assert_asclose`; the helper is `assert_allclose`
    # (the original raised AttributeError).  Also compare against the
    # numeric values of the shared variables, not the symbolic variables
    # themselves, so the reference expression is an actual ndarray.
    utt.assert_allclose(
        out,
        alpha.get_value() * np.dot(A.get_value(), x.get_value()) +
        beta.get_value() * y.get_value())
class TestGpuSgemv(TestCase, BaseGemv, utt.TestOptimizationMixin):
mode = mode_with_gpu
dtype = 'float32'
......@@ -68,9 +83,9 @@ GpuGemmTester = makeTester(
test6=[rand(3, 4), 0.0, rand(3, 5), rand(5, 4), -1.0],
test7=[rand(3, 4), -1.0, rand(3, 5), rand(5, 4), 0.0],
test8=[rand(3, 4), -1.0, rand(3, 5), rand(5, 4), 1.1],
# float16=[rand(3, 4).astype('float16'), np.float32(-1.0),
# rand(3, 5).astype('float16'),
# rand(5, 4).astype('float16'), np.float32(-1.1)],
# float16=[rand(3, 4).astype('float16'), np.float32(-1.0),
# rand(3, 5).astype('float16'),
# rand(5, 4).astype('float16'), np.float32(-1.1)],
float32=[rand(3, 4).astype('float32'), np.float32(-1.0),
rand(3, 5).astype('float32'),
rand(5, 4).astype('float32'), np.float32(-1.1)],
......
......@@ -25,13 +25,24 @@ _context_reg = {}
def do_gpu_support(data):
    """
    Is the following data supported on the GPU?

    Currently, only complex dtypes aren't supported.

    Parameters
    ----------
    data : numpy.ndarray or TensorVariable
        (it must have a ``dtype`` and ``ndim`` attribute; only
        ``dtype`` is actually read here)

    """
    # Supported iff the dtype is not one of the complex dtypes.
    return str(data.dtype) not in tensor.basic.complex_dtypes
def move_to_gpu(data):
"""
Do we want to move this computation to the GPU?
Currently, we don't move complex and scalar int.
Currently, we don't move complex and scalar.
Parameters
----------
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论