提交 e90987e8 authored 作者: Frederic Bastien's avatar Frederic Bastien

allow float16 and flake8

上级 6101a0ea
......@@ -1175,7 +1175,7 @@ def local_gpua_gemv(op, context_name, inputs, outputs):
@op_lifter([tensor.blas.Gemm])
@register_opt2([tensor.blas.Gemm], 'fast_compile')
def local_gpua_gemm(op, context_name, inputs, outputs):
if inputs[0].dtype not in ['float32', 'float64']:
if inputs[0].dtype not in ['float16', 'float32', 'float64']:
return
if op.inplace:
return gpugemm_inplace
......
......@@ -26,7 +26,6 @@ from ..subtensor import GpuSubtensor
from .config import mode_with_gpu, mode_without_gpu, test_ctx_name
import pygpu
from pygpu import gpuarray
utt.seed_rng()
......@@ -236,7 +235,7 @@ def gpu_alloc_expected(x, *shp):
GpuAllocTester = makeTester(
name="GpuAllocTester",
# The +1 is there to allow the lift to the GPU.
op=lambda *args: alloc(*args)+1,
op=lambda *args: alloc(*args) + 1,
gpu_op=GpuAlloc(test_ctx_name),
cases=dict(
correct01=(rand(), np.int32(7)),
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论