提交 99363898 authored 作者: Frederic Bastien's avatar Frederic Bastien

Enable float16 ops on ops that don't care about dtype or call copy that both back-end support.

上级 a0c90645
......@@ -50,6 +50,7 @@ class ViewOp(gof.Op):
# the output variable is %(oname)s.
c_code_and_version = {}
__props__ = ()
_f16_ok = True
def make_node(self, x):
return gof.Apply(self, [x], [x.type()])
......@@ -151,6 +152,7 @@ class DeepCopyOp(gof.Op):
check_input = False
__props__ = ()
_f16_ok = True
def __init__(self):
pass
......@@ -659,6 +661,7 @@ class Rebroadcast(gof.Op):
check_input = False
__props__ = ("axis",)
_f16_ok = True
def __init__(self, *axis):
# Sort them to make sure we merge all possible case.
......@@ -820,6 +823,7 @@ class SpecifyShape(gof.Op):
# the output variable is %(oname)s.
c_code_and_version = {}
__props__ = ()
_f16_ok = True
def make_node(self, x, shape):
if not isinstance(x, gof.Variable):
......
......@@ -7,7 +7,7 @@ import numpy as np
import theano
from theano.compat import PY3
from theano import config
from theano.compile import DeepCopyOp
from theano.compile import DeepCopyOp, Rebroadcast, ViewOp
from theano.misc.pkl_utils import CompatUnpickler
# Disabled for now
......@@ -21,16 +21,45 @@ import pygpu
def test_deep_copy():
    # Check that DeepCopyOp round-trips a GPU array unchanged for both
    # float16 and float32 (float16 newly supported via _f16_ok = True).
    for dtype in ['float16', 'float32']:
        a = rand_gpuarray(20, dtype=dtype)
        g = GpuArrayType(dtype=dtype, broadcastable=(False,))('g')

        # An identity function compiles down to a single DeepCopyOp node.
        f = theano.function([g], g)

        assert isinstance(f.maker.fgraph.toposort()[0].op, DeepCopyOp)
        res = f(a)
        assert GpuArrayType.values_eq(res, a)
def test_view():
    # Check that an explicit ViewOp returns a value equal to its input
    # for both float16 and float32 (float16 newly supported via _f16_ok).
    for dtype in ['float16', 'float32']:
        a = rand_gpuarray(20, dtype=dtype)
        g = GpuArrayType(dtype=dtype, broadcastable=(False,))('g')

        # Apply ViewOp directly so it is not optimized away.
        f = theano.function([g], ViewOp()(g))

        assert isinstance(f.maker.fgraph.toposort()[0].op, ViewOp)
        res = f(a)
        assert GpuArrayType.values_eq(res, a)
def test_rebroadcast():
    # Check that Rebroadcast works on the GPU for both float16 and
    # float32 (float16 newly supported via _f16_ok = True).
    for dtype in ['float16', 'float32']:
        # Length-1 input so marking axis 0 as broadcastable is valid.
        a = rand_gpuarray(1, dtype=dtype)
        g = GpuArrayType(dtype=dtype, broadcastable=(False,))('g')

        f = theano.function([g], Rebroadcast((0, True))(g))

        assert isinstance(f.maker.fgraph.toposort()[0].op, Rebroadcast)
        res = f(a)
        assert GpuArrayType.values_eq(res, a)
def test_values_eq_approx():
......@@ -45,10 +74,11 @@ def test_values_eq_approx():
def test_specify_shape():
    # Check that specify_shape compiles and runs on GPU arrays for both
    # float16 and float32 (float16 newly supported via _f16_ok = True).
    for dtype in ['float16', 'float32']:
        a = rand_gpuarray(20, dtype=dtype)
        g = GpuArrayType(dtype=dtype, broadcastable=(False,))('g')

        # Shape [20] matches the input; execution should not raise.
        f = theano.function([g], theano.tensor.specify_shape(g, [20]))
        f(a)
def test_filter_float():
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论