testgroup / pytensor · Commits

Commit 234ffeab
Authored April 20, 2015 by Arnaud Bergeron

Add the float16 dtype and make sure it isn't lost to casting.

Parent: fd7655aa

Showing 7 changed files with 52 additions and 8 deletions.
theano/sandbox/gpuarray/elemwise.py    +2  -1
theano/sandbox/gpuarray/fp16_help.py   +9  -0
theano/sandbox/gpuarray/opt.py         +17 -1
theano/sandbox/gpuarray/type.py        +1  -0
theano/scalar/basic.py                 +4  -0
theano/tensor/basic.py                 +18 -6
theano/tensor/elemwise.py              +1  -0
theano/sandbox/gpuarray/elemwise.py

@@ -142,7 +142,7 @@ class GpuElemwise(HideC, Elemwise):
             code.append('ga_float %s;' % (f[0],))
         # XXX: The replace is an ugly hack to make sure temp
         # variables in the middle are float32
-        code.append(kop.replace('npy_uint16', 'ga_float'))
+        code.append(kop.replace('npy_float16', 'ga_float'))
         for f in scal_f16:
             code.append('%s[i] = __float2half_rn(%s);' % (f[1].name, f[0]))
         code.append('}')

@@ -195,6 +195,7 @@ class GpuElemwise(HideC, Elemwise):
                 ("npy_int16", "ga_short"),
                 ("npy_int32", "ga_int"),
                 ("npy_int64", "ga_long"),
+                ("npy_float16", "ga_half"),
                 ("npy_float32", "ga_float"),
                 ("npy_float64", "ga_double"),
                 ]:
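A note on the first hunk: the generated kernel computes in float32 (ga_float) and only narrows to half on store via __float2half_rn, and the string-level replace rewrites the scalar C code so float16 temporaries become float32. A hedged sketch of that substitution trick (the `kop` string here is a made-up example, not the commit's code):

# Sketch of the replace-based hack above: scalar C code is generated with
# npy_* dtype names, then rewritten so float16 temporaries are float32
# inside the kernel. `kop` is an illustrative stand-in string.
kop = "npy_float16 tmp = x; z = tmp * tmp;"
kernel_line = kop.replace('npy_float16', 'ga_float')
print(kernel_line)  # ga_float tmp = x; z = tmp * tmp;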
theano/sandbox/gpuarray/fp16_help.py

+from theano import scalar
+
 def work_dtype(dtype):
     if dtype == 'float16':

@@ -5,14 +7,21 @@ def work_dtype(dtype):
     else:
         return dtype


 def load_w(dtype):
     if dtype == 'float16':
         return '__half2float'
     else:
         return ''


 def write_w(dtype):
     if dtype == 'float16':
         return '__float2half_rn'
+    else:
+        return ''
+
+
+class Cast16(scalar.Cast):
+    def c_code(self, node, name, inputs, outputs, sub):
+        return "%s = %s;\n" % (outputs[0], inputs[0])
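A minimal usage sketch for these helpers (assumed, not part of the commit): the empty-string fallbacks let callers wrap expressions unconditionally, because '' followed by a parenthesized expression is still valid C.

# Assumed usage sketch for load_w/write_w (redefined here so the snippet
# runs standalone); the generated strings are CUDA source text.
def load_w(dtype):
    # Wrap loads of half-precision data so arithmetic happens in float32.
    return '__half2float' if dtype == 'float16' else ''

def write_w(dtype):
    # Narrow float32 results back to half with round-to-nearest on store.
    return '__float2half_rn' if dtype == 'float16' else ''

for dt in ('float16', 'float32'):
    line = 'z[i] = %s(%s(x[i]) * 2.0f);' % (write_w(dt), load_w(dt))
    print(line)
# float16: z[i] = __float2half_rn(__half2float(x[i]) * 2.0f);
# float32: z[i] = ((x[i]) * 2.0f);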
theano/sandbox/gpuarray/opt.py

@@ -31,6 +31,7 @@ from .nnet import (GpuCrossentropySoftmaxArgmax1HotWithBias,
 from .elemwise import (GpuElemwise, _is_scalar,
                        GpuDimShuffle, GpuCAReduceCuda,
                        GpuCAReduceCPY)
+from . import fp16_help
 from .subtensor import (GpuIncSubtensor, GpuSubtensor,
                         GpuAdvancedIncSubtensor1,
                         GpuAdvancedIncSubtensor1_dev20)

@@ -253,10 +254,25 @@ def local_gpuflatten(node):
 @op_lifter([tensor.Elemwise])
 def local_gpu_elemwise(node):
     op = node.op
+    scal_op = op.scalar_op
     name = op.name
     if name:
         name = 'Gpu' + name
-    res = GpuElemwise(op.scalar_op, name=name,
+    if (type(scal_op) == scalar.Cast and
+            (node.inputs[0].dtype == 'float16' or
+             node.outputs[0].dtype == 'float16')):
+        scal_op = fp16_help.Cast16(scal_op.o_type, name=scal_op.name)
+    if (type(scal_op) == scalar.Composite and
+            True):
+        inputs, outputs = gof.graph.clone(scal_op.inputs, scal_op.outputs)
+        for v in variables(inputs, outputs):
+            if (type(v.op) == scalar.Cast and
+                    (v.inputs[0].dtype == 'float16' or
+                     v.outputs[0].dtype == 'float16')):
+                # We cloned the graph before so this is ok
+                v.op = fp16_help.Cast16(v.op.o_type, name=v.op.name)
+        scal_op = scalar.Composite(inputs, outputs)
+    res = GpuElemwise(scal_op, name=name,
                       inplace_pattern=copy.copy(op.inplace_pattern),
                       nfunc_spec=op.nfunc_spec)
     return res
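The shape of this rewrite, in brief: plain scalar.Cast now refuses to emit C code for float16 (see theano/scalar/basic.py below), so the lifter substitutes fp16_help.Cast16; for Composite scalar ops the inner graph is cloned first, so patching Cast nodes never mutates the original graph. A toy, self-contained illustration of that clone-before-patch pattern (stand-in classes, not Theano's API):

import copy

class Node(object):
    # Minimal stand-in for a scalar graph node.
    def __init__(self, op, inputs=()):
        self.op, self.inputs = op, list(inputs)

graph = Node('Cast', [Node('x')])
patched = copy.deepcopy(graph)   # clone first...
patched.op = 'Cast16'            # ...then rewrite ops on the clone only
assert graph.op == 'Cast'        # the original graph is untouched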
theano/sandbox/gpuarray/type.py

@@ -172,6 +172,7 @@ class GpuArrayType(Type):
         # complex64, etc.
         try:
             return {
+                'float16': (float, 'npy_float16', 'NPY_FLOAT16'),
                 'float32': (float, 'npy_float32', 'NPY_FLOAT32'),
                 'float64': (float, 'npy_float64', 'NPY_FLOAT64'),
                 'uint8': (int, 'npy_uint8', 'NPY_UINT8'),
theano/scalar/basic.py

@@ -1972,6 +1972,8 @@ class Cast(UnaryScalarOp):
     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
         (z,) = outputs
+        if node.inputs[0].dtype == 'float16' or node.outputs[0] == 'float16':
+            raise NotImplementedError("C code doesn't work for float16")
         return "%s = (%s)%s;" % (z, node.outputs[0].type.dtype_specs()[1], x)

     def grad(self, inputs, gout):

@@ -1997,6 +1999,7 @@ convert_to_uint8 = Cast(uint8, name='convert_to_uint8')
 convert_to_uint16 = Cast(uint16, name='convert_to_uint16')
 convert_to_uint32 = Cast(uint32, name='convert_to_uint32')
 convert_to_uint64 = Cast(uint64, name='convert_to_uint64')
+convert_to_float16 = Cast(float16, name='convert_to_float16')
 convert_to_float32 = Cast(float32, name='convert_to_float32')
 convert_to_float64 = Cast(float64, name='convert_to_float64')
 convert_to_complex64 = Cast(complex64, name='convert_to_complex64')

@@ -2011,6 +2014,7 @@ _cast_mapping = {
     'uint16': convert_to_uint16,
     'uint32': convert_to_uint32,
     'uint64': convert_to_uint64,
+    'float16': convert_to_float16,
     'float32': convert_to_float32,
     'float64': convert_to_float64,
     'complex64': convert_to_complex64,
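For context on how the new convert_to_float16 entry gets used (a hedged sketch; the real call path goes through tensor.cast and Elemwise): _cast_mapping keys are target dtype names, so a cast request is a dictionary lookup of a pre-instantiated Cast op.

# Hedged sketch of dict-based cast dispatch, mirroring _cast_mapping above
# (stand-in callables; the real table maps dtype names to Cast instances).
convert_to_float16 = lambda x: ('Cast{float16}', x)
convert_to_float32 = lambda x: ('Cast{float32}', x)
_cast_mapping = {'float16': convert_to_float16,
                 'float32': convert_to_float32}

def cast(x, dtype):
    # Look up the pre-built op for the requested target dtype and apply it.
    return _cast_mapping[dtype](x)

print(cast('x', 'float16'))  # ('Cast{float16}', 'x')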
theano/tensor/basic.py

@@ -277,10 +277,8 @@ class NumpyAutocaster(object):
         # unsafe downcast of float64 variables when config.floatX == 'float32'
         # recall: float is numpy.float
         if ((isinstance(x, float) and
-             config.floatX in self.dtypes and
-             config.floatX == 'float32')):
-            return theano._asarray(x, dtype='float32')
+             config.floatX in self.dtypes)):
+            return theano._asarray(x, dtype=config.floatX)
         for dtype in self.dtypes:
             x_ = theano._asarray(x, dtype=dtype)
@@ -290,7 +288,7 @@ class NumpyAutocaster(object):
             return x_

 autocast_int = NumpyAutocaster(('int8', 'int16', 'int32', 'int64'))
-autocast_float = NumpyAutocaster(('float32', 'float64'))
+autocast_float = NumpyAutocaster(('float16', 'float32', 'float64'))
 # autocast_float dtypes might be manipulated in tensor.__init__
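Together these two hunks mean a plain Python float can now settle on float16: the floatX shortcut returns whatever config.floatX is rather than hard-coded float32, and the fallback loop tries float16 first. A self-contained NumPy sketch of that "first dtype that preserves the value" loop (simplified; not Theano's exact code):

import numpy as np

def autocast(x, dtypes=('float16', 'float32', 'float64')):
    # Try dtypes narrowest-first and keep the first exact representation,
    # mirroring NumpyAutocaster's fallback loop above.
    for dtype in dtypes:
        x_ = np.asarray(x, dtype=dtype)
        if float(x_) == float(x):
            return x_
    return x_

print(autocast(0.5).dtype)  # float16: 0.5 is exact in half precision
print(autocast(0.1).dtype)  # float64: 0.1 is exact in none of the others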
@@ -313,7 +311,7 @@ class autocast_float_as(object):
     If `config.cast_policy` is not 'custom', an exception is raised.

     For example:
-    >>> with autocast_float_as('float32') as _dummy:
+    >>> with autocast_float_as('float32'):
     ...     assert (fvector() + 1.1).dtype == 'float32'  # temporary downcasting
     >>> assert (fvector() + 1.1).dtype == 'float64'  # back to default behaviour
@@ -1137,6 +1135,10 @@ _convert_to_uint64 = _conversion(
     elemwise.Elemwise(scal.convert_to_uint64), 'uint64')
 """Cast to unsigned 64-bit integer"""

+_convert_to_float16 = _conversion(
+    elemwise.Elemwise(scal.convert_to_float16), 'float16')
+"""Cast to half-precision floating point"""
+
 _convert_to_float32 = _conversion(
     elemwise.Elemwise(scal.convert_to_float32), 'float32')
 """Cast to single-precision floating point"""
@@ -1162,6 +1164,7 @@ _cast_mapping = {
     'uint16': _convert_to_uint16,
     'uint32': _convert_to_uint32,
     'uint64': _convert_to_uint64,
+    'float16': _convert_to_float16,
     'float32': _convert_to_float32,
     'float64': _convert_to_float64,
     'complex64': _convert_to_complex64,
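With this entry in place, tensor.cast(x, 'float16') resolves to the new Elemwise conversion. A quick sketch of the user-visible effect (assumes a Theano build containing this commit; fvector and cast are standard theano.tensor API):

# Sketch (assumes this commit): 'float16' is now a valid cast target.
import theano.tensor as tensor

x = tensor.fvector()           # float32 vector
h = tensor.cast(x, 'float16')  # resolved via _cast_mapping above
print(h.dtype)                 # 'float16'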
@@ -2752,6 +2755,9 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False,
         out = makeKeepDims(input, out, axis)
         return out

+    # float16 has very low precision so we do some things differently
+    f16 = (input.dtype == 'float16')
+
     if dtype is not None:
         # The summation will be done with the specified dtype.
         # sum() will complain if it is not suitable.

@@ -2760,6 +2766,9 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False,
         # Let sum() infer the appropriate dtype.
         sum_dtype = None

+    if f16 and sum_dtype is None and acc_dtype != 'float16':
+        sum_dtype = 'float32'
+
     s = sum(input, axis=axis, dtype=sum_dtype,
             keepdims=keepdims, acc_dtype=acc_dtype)
     shp = shape(input)

@@ -2785,6 +2794,9 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False,
         for i in axis:
             s = true_div(s, shp[i])

+    if f16:
+        s = cast(s, 'float16')
+
     return s
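The numerical motivation for summing float16 inputs in float32 and casting the result back, shown with NumPy standing in for the symbolic ops (values are illustrative):

import numpy as np

x = np.ones(100000, dtype=np.float16)
# Accumulating in float16 overflows: the running sum passes float16's
# maximum of 65504 and becomes inf.
naive = x.sum(dtype=np.float16) / x.size
# What mean() now does for float16 input: sum in float32, divide,
# then cast the result back to float16.
better = (x.sum(dtype=np.float32) / x.size).astype(np.float16)
print(naive, better)  # inf 1.0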
theano/tensor/elemwise.py

@@ -1806,6 +1806,7 @@ class CAReduceDtype(CAReduce):
             uint8='uint64',
             uint16='uint64',
             uint32='uint64',
+            float16='float32',
             float32='float64',
             complex64='complex128',
         ).get(idtype, idtype)
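This table gives reductions a wider default accumulator, so with the new entry float16 reductions accumulate in float32 by default. As a plain-dict sketch of the lookup (copying only the float entries):

# Sketch of the default accumulator upcast lookup above.
upcast = dict(float16='float32', float32='float64')
idtype = 'float16'
acc = upcast.get(idtype, idtype)  # dtypes without an entry accumulate as-is
print(acc)  # float32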