testgroup / pytensor · Commits

Commit 9c989bb2, authored May 30, 2016 by slefrancois

add rfft grad for even transforms

Parent: 33ce980e
Showing 2 changed files with 379 additions and 81 deletions (+379, −81):

theano/gpuarray/fft.py  +236 −1
theano/gpuarray/tests/test_fft.py  +143 −80
theano/gpuarray/fft.py
@@ -128,6 +128,14 @@ class CuRFFTOp(Op):
        thunk.lazy = False

        return thunk

    def grad(self, inputs, output_grads):
        gout, = output_grads
        # gout = theano.printing.Print('aVANT')(gout)
        gout = T.set_subtensor(gout[:, 1:-1, :], gout[:, 1:-1, :] * 0.5)
        # gout = theano.printing.Print('apres')(gout)
        return [cuirfft_op(gout)]


curfft_op = CuRFFTOp()
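The grad added here is the point of the commit: for an even transform length n, every interior bin of the rfft output (indices 1 through n/2 − 1) stands for a conjugate pair of bins in the full-length FFT, so the adjoint of the forward transform weights those bins by 0.5 and then applies the unnormalized inverse real transform. A minimal NumPy sketch, not part of the commit, that checks this recipe against a central-difference gradient of a linear loss (the loss and variable names are illustrative only):

import numpy as np

n = 8                                  # even transform length, as in the commit title
rng = np.random.RandomState(0)
x = rng.rand(n)
g = rng.rand(n // 2 + 1, 2)            # gradient w.r.t. the stacked (real, imag) rfft output
g[0, 1] = g[-1, 1] = 0.0               # imag parts of the DC/Nyquist bins are identically zero

def loss(x):
    y = np.fft.rfft(x)
    return np.sum(g[:, 0] * y.real + g[:, 1] * y.imag)

# CuRFFTOp.grad recipe: halve the interior bins, then take an unnormalized inverse rfft.
gc = g[:, 0] + 1j * g[:, 1]
gc[1:-1] *= 0.5
grad_fft = np.fft.irfft(gc, n) * n     # numpy's irfft divides by n; the GPU op does not

eps = 1e-6
grad_fd = np.array([(loss(x + eps * e) - loss(x - eps * e)) / (2 * eps) for e in np.eye(n)])

assert np.allclose(grad_fft, grad_fd, atol=1e-5)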
@@ -195,6 +203,8 @@ class CuIRFFTOp(Op):
            output_shape = list(input_shape[:-1])
            # restore full signal length
            output_shape[-1] = (output_shape[-1] - 1) * 2
            # if inputs[0][0][0,-1,1] != 0:
            #     output_shape[-1] += 1
            output_shape = tuple(output_shape)

            z = outputs[0]
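The commented-out lines above touch the ambiguity that restricts this gradient to even transforms: a half-spectrum of length k = n//2 + 1 is produced both by an even length n = 2·(k − 1) and by the odd length n = 2·(k − 1) + 1, so the original length cannot be recovered from the stored shape alone. A short NumPy illustration of the same convention (assuming numpy.fft mirrors the GPU ops on this point):

import numpy as np

even = np.random.rand(8)   # n = 8 -> 5 half-spectrum bins
odd = np.random.rand(9)    # n = 9 -> also 5 half-spectrum bins

assert np.fft.rfft(even).shape == np.fft.rfft(odd).shape == (5,)
assert np.allclose(np.fft.irfft(np.fft.rfft(even)), even)      # default assumes even length
assert np.allclose(np.fft.irfft(np.fft.rfft(odd), 9), odd)     # odd length must be given explicitly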
@@ -235,8 +245,234 @@ class CuIRFFTOp(Op):
        thunk.lazy = False

        return thunk

    def grad(self, inputs, output_grads):
        gout, = output_grads
        gf = curfft_op(gout)
        gf = T.set_subtensor(gf[:, 1:-1, :], gf[:, 1:-1, :] * 2)
        return [gf]


cuirfft_op = CuIRFFTOp()
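CuIRFFTOp.grad mirrors the change above: the forward op implicitly expands each interior half-spectrum bin into a conjugate pair, so its adjoint is a forward real FFT of the output gradient with the interior bins doubled. A NumPy sanity check of that identity, again only a sketch with illustrative names and an unnormalized numpy inverse standing in for cuirfft_op:

import numpy as np

n = 8                                   # even output length of the inverse transform
rng = np.random.RandomState(1)
h = rng.rand(n // 2 + 1, 2)             # half-spectrum input, stacked (real, imag)
h[0, 1] = h[-1, 1] = 0.0                # DC/Nyquist bins are real for a real signal
gout = rng.rand(n)                      # gradient w.r.t. the real-valued output

def loss(h):
    hc = h[:, 0] + 1j * h[:, 1]
    return np.sum(gout * np.fft.irfft(hc, n) * n)   # unnormalized inverse, like cuirfft_op

# Recipe from the grad above: forward rfft of gout, interior bins doubled.
gf = np.fft.rfft(gout)
gf[1:-1] *= 2
grad_fft = np.stack([gf.real, gf.imag], axis=-1)

eps = 1e-6
grad_fd = np.zeros_like(h)
for idx in np.ndindex(*h.shape):
    d = np.zeros_like(h)
    d[idx] = eps
    grad_fd[idx] = (loss(h + d) - loss(h - d)) / (2 * eps)

assert np.allclose(grad_fft, grad_fd, atol=1e-4)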
class CuFFTOp(Op):

    """
    Operator for the fast Fourier transform of a real-valued output on the GPU
    using the scikits CUDA FFT through the gpuarray backend.

    The input must be a real-valued float32 variable of dimensions (m, n). It
    performs m 1-D FFTs of size n each.

    The output is a GpuArray of dimensions (m, n/2+1, 2). The output contains
    the n//2+1 non-trivial elements of the m real-valued FFTs. The real
    and imaginary parts are stored as two float32 arrays, emulating complex64.
    Since theano does not support complex number operations, care must be
    taken to manually implement operators such as multiplication.

    The module provides the convenience function curfft(input).

    """

    __props__ = ()

    def output_type(self, inp):
        # add one extra dim for real/imag
        return GpuArrayType(inp.dtype,
                            broadcastable=[False] * (inp.type.ndim + 1),
                            context_name=inp.type.context_name)

    def make_node(self, inp):
        if not scikits_cuda_available:
            raise RuntimeError("scikits.cuda is needed for CuFFTOp")

        if not pygpu_available:
            raise RuntimeError("pygpu is needed for CuFFTOp")

        if not pycuda_available:
            raise RuntimeError("pycuda is needed for CuFFTOp")

        print(inp)
        inp = basic_ops.gpu_contiguous(
            basic_ops.as_gpuarray_variable(inp,
                                           basic_ops.infer_context_name(inp)))

        assert inp.dtype == "float32"

        return theano.Apply(self, [inp], [self.output_type(inp)()])

    def make_thunk(self, node, storage_map, _, _2):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]

        # Initialize cuda context to the input's.
        with node.inputs[0].type.context:
            scikits.cuda.misc.init()

        plan_input_shape = [None]
        plan = [None]

        def thunk():
            input_shape = inputs[0][0].shape

            # construct output shape
            output_shape = list(input_shape)
            # DFT of real input is symmetric, no need to store
            # redundant coefficients
            # output_shape[-1] = output_shape[-1] // 2 + 1
            # extra dimension with length 2 for real/imag
            output_shape += [2]
            output_shape = tuple(output_shape)

            z = outputs[0]

            # only allocate if there is no previous allocation of the
            # right size.
            if z[0] is None or z[0].shape != output_shape:
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context,
                                   dtype='float32')

            input_pycuda = T.stack(inputs[0][0], T.zeros_like(inputs[0][0]))
            # I thought we'd need to change the type on output_pycuda
            # so it is complex64, but as it turns out scikits.cuda.fft
            # doesn't really care either way and treats the array as
            # if it is complex64 anyway.
            output_pycuda = z[0]

            with input_pycuda.context:
                # only initialise plan if necessary
                if plan[0] is None or plan_input_shape[0] != input_shape:
                    plan_input_shape[0] = input_shape
                    plan[0] = fft.Plan(input_shape[1:-1], np.complex64,
                                       np.complex64, batch=input_shape[0])

                # Sync GPU variables before computation
                input_pycuda.sync()
                output_pycuda.sync()

                fft.fft(input_pycuda, output_pycuda, plan[0])

                # Sync results to ensure output contains completed computation
                pycuda.driver.Context.synchronize()

        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False

        return thunk

    def grad(self, inputs, output_grads):
        gout, = output_grads
        return [cuifft_op(gout)]

cufft_op = CuFFTOp()
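As the docstring notes, the real and imaginary parts travel as a trailing length-2 float32 axis rather than as a true complex dtype, so complex arithmetic on these outputs has to be written out by hand. A hedged NumPy sketch of element-wise complex multiplication in that layout; cmul and the shapes are illustrative only, and the same index expressions carry over to Theano tensor variables:

import numpy as np

def cmul(a, b):
    # a, b: float32 arrays of shape (..., 2) holding (real, imag) pairs.
    re = a[..., 0] * b[..., 0] - a[..., 1] * b[..., 1]
    im = a[..., 0] * b[..., 1] + a[..., 1] * b[..., 0]
    return np.stack([re, im], axis=-1)

m, n = 4, 64
a = np.random.rand(m, n // 2 + 1, 2).astype('float32')
b = np.random.rand(m, n // 2 + 1, 2).astype('float32')

ref = (a[..., 0] + 1j * a[..., 1]) * (b[..., 0] + 1j * b[..., 1])
out = cmul(a, b)
assert np.allclose(out[..., 0], ref.real, atol=1e-5)
assert np.allclose(out[..., 1], ref.imag, atol=1e-5)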
class CuIFFTOp(Op):

    """
    Operator for the inverse fast Fourier transform with real-valued output
    on the GPU using the scikits CUDA FFT through the gpuarray backend.

    The input is a variable of dimensions (m, n/2+1, 2) with
    type float32 representing the n/2+1 non-trivial elements of m
    real-valued Fourier transforms of initial size n. The real and imaginary
    parts are stored as two float32 arrays, emulating complex64 given that
    Theano does not support complex numbers.

    The output is a real-valued float32 variable of dimensions (m, n)
    giving the m inverse FFTs. *The output is NOT normalized*. You can
    manually divide by the size of the output array to normalize.

    The module provides the convenience function cuirfft(input).

    """

    __props__ = ()

    def output_type(self, inp):
        # add one extra dim for real/imag
        return GpuArrayType(inp.dtype,
                            broadcastable=[False] * (inp.type.ndim),
                            context_name=inp.type.context_name)

    def make_node(self, inp):
        if not scikits_cuda_available:
            raise RuntimeError("scikits.cuda is needed for CuIFFTOp")

        if not pygpu_available:
            raise RuntimeError("pygpu is needed for CuIFFTOp")

        if not pycuda_available:
            raise RuntimeError("pycuda is needed for CuIFFTOp")

        inp = basic_ops.gpu_contiguous(
            basic_ops.as_gpuarray_variable(inp,
                                           basic_ops.infer_context_name(inp)))

        assert inp.dtype == "float32"

        return theano.Apply(self, [inp], [self.output_type(inp)()])

    def make_thunk(self, node, storage_map, _, _2):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]

        # Initialize cuda context to the input's.
        with node.inputs[0].type.context:
            scikits.cuda.misc.init()

        plan_input_shape = [None]
        plan = [None]

        def thunk():
            input_shape = inputs[0][0].shape

            # construct output shape
            # chop off the extra length-2 dimension for real/imag
            output_shape = list(input_shape)
            # restore full signal length
            # output_shape[-1] = (output_shape[-1] - 1) * 2
            output_shape = tuple(output_shape)

            z = outputs[0]

            # only allocate if there is no previous allocation of the
            # right size.
            if z[0] is None or z[0].shape != output_shape:
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context,
                                   dtype='float32')

            input_pycuda = inputs[0][0]
            # input_pycuda is a float32 array with an extra dimension,
            # but will be interpreted by scikits.cuda as a complex64
            # array instead.
            output_pycuda = z[0]

            with input_pycuda.context:
                # only initialise plan if necessary
                if plan[0] is None or plan_input_shape[0] != input_shape:
                    plan_input_shape[0] = input_shape
                    plan[0] = fft.Plan(input_shape[1:-1], np.complex64,
                                       np.complex64, batch=output_shape[0])

                # Sync GPU variables before computation
                input_pycuda.sync()
                output_pycuda.sync()

                fft.ifft(input_pycuda, output_pycuda, plan[0])
                # strangely enough, enabling rescaling here makes it run
                # very, very slowly. so do this rescaling manually
                # afterwards!

                # Sync results to ensure output contains completed computation
                pycuda.driver.Context.synchronize()

        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False

        return thunk

    def grad(self, inputs, output_grads):
        gout, = output_grads
        return [cufft_op(gout)]

cuifft_op = CuIFFTOp()
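The docstring's warning that the raw inverse is not normalized matches the commented-out test expectation in the second file (inputs_val ≈ res_ifft / N for cuifft_op). A NumPy sketch of that convention, assuming the GPU inverse behaves like an unnormalized numpy.fft.ifft:

import numpy as np

m, n = 4, 64
x = np.random.rand(m, n).astype('float32')

y = np.fft.fft(x)             # forward transform, as cufft_op would compute it
x_raw = np.fft.ifft(y) * n    # unnormalized inverse, the convention of cuifft_op

assert np.allclose(x, x_raw.real / n, atol=1e-4)   # divide by the transform size to recover x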
def curfft(inputs, norm=None):
    """
    ...

@@ -293,7 +529,6 @@ def cuirfft(inputs, norm=None):
    if cond_norm == "no_norm":
        return cuirfft_op(inputs)


def _unitary(norm):
    if norm not in (None, "ortho", "no_norm"):
        raise ValueError("Invalid value %s for norm, must be None, 'ortho' or "
    ...
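The norm argument shared by curfft and cuirfft accepts None, 'ortho' and 'no_norm' (anything else raises the ValueError above); the tests in the second file pin down how the two explicit modes relate: the unnormalized forward transform equals the unitary one scaled by sqrt(N). A NumPy sketch of that relationship, mirroring the assertions in test_norm rather than the collapsed wrapper code:

import numpy as np

N = 64
inputs_val = np.random.rand(1, N).astype('float32')

rfft_no_norm = np.fft.rfft(inputs_val, N, 1)                 # like norm='no_norm'
rfft_ortho = np.fft.rfft(inputs_val, N, 1, norm='ortho')     # like norm='ortho'

assert np.allclose(rfft_no_norm, rfft_ortho * np.sqrt(N), atol=1e-4)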
theano/gpuarray/tests/test_fft.py
@@ -3,7 +3,7 @@ import unittest

import numpy as np

import theano
import theano.tensor as T
from theano.tests import unittest_tools as utt

import theano.gpuarray.fft
@@ -22,91 +22,154 @@ if not scikits_cuda_available:  # noqa

if not pycuda_available:  # noqa
    raise SkipTest('Optional package pycuda not available')

import theano.gpuarray.cuda_fft

# Transform sizes
N = 64


class TestFFT(unittest.TestCase):

    def test_rfft(self):
    # def test_rfft(self):
        inputs_val = np.random.random((1, N)).astype('float32')
        # inputs_val = np.random.random((1, N)).astype('float32')
        inputs = theano.shared(inputs_val)
        # inputs = theano.shared(inputs_val)
        #
        rfft = theano.gpuarray.fft.curfft(inputs)
        # rfft = theano.gpuarray.fft.curfft(inputs)
        f_rfft = theano.function([], rfft, mode=mode_with_gpu)
        # f_rfft = theano.function([], rfft, mode=mode_with_gpu)
        res_rfft = f_rfft()
        # res_rfft = f_rfft()
        res_rfft_comp = (np.asarray(res_rfft[:, :, 0]) +
        # res_rfft_comp = (np.asarray(res_rfft[:, :, 0]) +
                         1j * np.asarray(res_rfft[:, :, 1]))
        # 1j * np.asarray(res_rfft[:, :, 1]))
        #
        rfft_ref = numpy.fft.rfft(inputs_val, N, 1)
        # rfft_ref = numpy.fft.rfft(inputs_val, N, 1)
        #
        utt.assert_allclose(rfft_ref, res_rfft_comp)
        # utt.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4)

    def test_irfft(self):
    # def test_irfft(self):
        # inputs_val = np.random.random((1, N)).astype('float32')
        # inputs = theano.shared(inputs_val)
        #
        # fft = theano.gpuarray.fft.curfft(inputs)
        # f_fft = theano.function([], fft, mode=mode_with_gpu)
        # res_fft = f_fft()
        #
        # m = fft.type()
        # ifft = theano.gpuarray.fft.cuirfft(m)
        # f_ifft = theano.function([m], ifft, mode=mode_with_gpu)
        # res_ifft = f_ifft(res_fft)
        #
        # utt.assert_allclose(inputs_val, np.asarray(res_ifft))
        #
        # def test_type(self):
        # inputs_val = np.random.random((1, N)).astype('float64')
        # inputs = theano.shared(inputs_val)
        #
        # with self.assertRaises(AssertionError):
        # theano.gpuarray.fft.curfft(inputs)
        # with self.assertRaises(AssertionError):
        # theano.gpuarray.fft.cuirfft(inputs)
        #
        # def test_norm(self):
        # inputs_val = np.random.random((1, N)).astype('float32')
        # inputs = theano.shared(inputs_val)
        #
        # # Unitary normalization
        # rfft = theano.gpuarray.fft.curfft(inputs, norm='ortho')
        # f_rfft = theano.function([], rfft, mode=mode_with_gpu)
        # res_rfft = f_rfft()
        # res_rfft_comp = (np.asarray(res_rfft[:, :, 0]) +
        # 1j * np.asarray(res_rfft[:, :, 1]))
        #
        # rfft_ref_ortho = numpy.fft.rfft(inputs_val, N, 1, norm='ortho')
        #
        # utt.assert_allclose(rfft_ref_ortho, res_rfft_comp)
        #
        # # No normalization
        # rfft = theano.gpuarray.fft.curfft(inputs, norm='no_norm')
        # f_rfft = theano.function([], rfft, mode=mode_with_gpu)
        # res_rfft = f_rfft()
        # res_rfft_comp = (np.asarray(res_rfft[:, :, 0]) +
        # 1j * np.asarray(res_rfft[:, :, 1]))
        #
        # utt.assert_allclose(rfft_ref_ortho * np.sqrt(N), res_rfft_comp)
        #
        # # Inverse FFT inputs
        # inputs_val = np.random.random((1, N // 2 + 1, 2)).astype('float32')
        # inputs = theano.shared(inputs_val)
        # inputs_ref = inputs_val[:, :, 0] + 1j * inputs_val[:, :, 1]
        #
        # # Unitary normalization inverse FFT
        # irfft = theano.gpuarray.fft.cuirfft(inputs, norm='ortho')
        # f_irfft = theano.function([], irfft, mode=mode_with_gpu)
        # res_irfft = f_irfft()
        #
        # irfft_ref_ortho = numpy.fft.irfft(inputs_ref, norm='ortho')
        #
        # utt.assert_allclose(irfft_ref_ortho, res_irfft)
        #
        # # No normalization inverse FFT
        # irfft = theano.gpuarray.fft.cuirfft(inputs, norm='no_norm')
        # f_irfft = theano.function([], irfft, mode=mode_with_gpu)
        # res_irfft = f_irfft()
        #
        # utt.assert_allclose(irfft_ref_ortho * np.sqrt(N), res_irfft)

    # def test_fft(self):
    # # inputs_val = np.random.random((1, N, N)).astype('float32')
    # # inputs = theano.shared(inputs_val)
    # #
    # # fft = theano.gpuarray.fft.cufft_op(inputs)
    # # f_fft = theano.function([], fft, mode=mode_with_gpu)
    # # res_fft = f_fft()
    # # res_fft_comp = (np.asarray(res_fft[:, :,:, 0]) +
    # # 1j * np.asarray(res_fft[:, :,:, 1]))
    # #
    # # # inputs_ref = inputs_val[:,:,:,0] + 1j*inputs_val[:,:,:,1]
    # # fft_ref = numpy.fft.fftn(inputs_val, (N,N), axes=(1,2))
    #
    # inputs_val = np.random.random((1, N, 2)).astype('float32')
    # # inputs = theano.shared(inputs_val)
    # inputs = T.tensor3('inputs', dtype='float32')
    #
    # fft = theano.gpuarray.fft.cufft_op(inputs)
    # f_fft = theano.function([inputs], fft, mode=mode_with_gpu)
    # res_fft = f_fft(inputs_val)
    # res_fft_comp = (np.asarray(res_fft[:, :, 0]) +
    # 1j * np.asarray(res_fft[:, :, 1]))
    #
    # inputs_ref = inputs_val[:,:,0] + 1j*inputs_val[:,:,1]
    # fft_ref = numpy.fft.fft(inputs_ref, N, 1)
    #
    # utt.assert_allclose(fft_ref, res_fft_comp, atol=1e-4, rtol=1e-4)

    # def test_ifft(self):
    # inputs_val = np.random.random((1, N, 2)).astype('float32')
    # inputs = theano.shared(inputs_val)
    #
    # fft = theano.gpuarray.fft.cufft_op(inputs)
    # f_fft = theano.function([], fft, mode=mode_with_gpu)
    # res_fft = f_fft()
    #
    # m = fft.type()
    # ifft = theano.gpuarray.fft.cuifft_op(m)
    # f_ifft = theano.function([m], ifft, mode=mode_with_gpu)
    # res_ifft = f_ifft(res_fft)
    #
    # utt.assert_allclose(inputs_val, np.asarray(res_ifft) / N)

    def test_grad(self):
        # The numerical gradient of the FFT is sensitive, must set large
        # enough epsilon to get good accuracy.
        eps = 1e-1

        inputs_val = np.random.random((1, N)).astype('float32')
        inputs = theano.shared(inputs_val)

        utt.verify_grad(theano.gpuarray.fft.curfft_op, [inputs_val], eps=eps)

        fft = theano.gpuarray.fft.curfft(inputs)
        f_fft = theano.function([], fft, mode=mode_with_gpu)
        res_fft = f_fft()

        m = fft.type()
        ifft = theano.gpuarray.fft.cuirfft(m)
        f_ifft = theano.function([m], ifft, mode=mode_with_gpu)
        res_ifft = f_ifft(res_fft)

        utt.assert_allclose(inputs_val, np.asarray(res_ifft))

    def test_type(self):
        inputs_val = np.random.random((1, N)).astype('float64')
        inputs = theano.shared(inputs_val)

        with self.assertRaises(AssertionError):
            theano.gpuarray.fft.curfft(inputs)
        with self.assertRaises(AssertionError):
            theano.gpuarray.fft.cuirfft(inputs)

    def test_norm(self):
        inputs_val = np.random.random((1, N)).astype('float32')
        inputs = theano.shared(inputs_val)

        # Unitary normalization
        rfft = theano.gpuarray.fft.curfft(inputs, norm='ortho')
        f_rfft = theano.function([], rfft, mode=mode_with_gpu)
        res_rfft = f_rfft()
        res_rfft_comp = (np.asarray(res_rfft[:, :, 0]) +
                         1j * np.asarray(res_rfft[:, :, 1]))

        rfft_ref_ortho = numpy.fft.rfft(inputs_val, N, 1, norm='ortho')

        utt.assert_allclose(rfft_ref_ortho, res_rfft_comp)

        # No normalization
        rfft = theano.gpuarray.fft.curfft(inputs, norm='no_norm')
        f_rfft = theano.function([], rfft, mode=mode_with_gpu)
        res_rfft = f_rfft()
        res_rfft_comp = (np.asarray(res_rfft[:, :, 0]) +
                         1j * np.asarray(res_rfft[:, :, 1]))

        utt.assert_allclose(rfft_ref_ortho * np.sqrt(N), res_rfft_comp)

        # Inverse FFT inputs
        inputs_val = np.random.random((1, N // 2 + 1, 2)).astype('float32')
        inputs = theano.shared(inputs_val)

        utt.verify_grad(theano.gpuarray.fft.cuirfft_op, [inputs_val], eps=eps)

        inputs_ref = inputs_val[:, :, 0] + 1j * inputs_val[:, :, 1]

        # M = 61
        # Unitary normalization inverse FFT
        # inputs_val = np.random.random((1, M)).astype('float32')
        irfft = theano.gpuarray.fft.cuirfft(inputs, norm='ortho')
        # utt.verify_grad(theano.gpuarray.fft.curfft_op, [inputs_val], eps=eps)
        f_irfft = theano.function([], irfft, mode=mode_with_gpu)
        #
        res_irfft = f_irfft()
        # inputs_val = np.random.random((1, M // 2 + 1, 2)).astype('float32')
        # utt.verify_grad(theano.gpuarray.fft.cuirfft_op, [inputs_val], eps=eps)
        irfft_ref_ortho = numpy.fft.irfft(inputs_ref, norm='ortho')

        utt.assert_allclose(irfft_ref_ortho, res_irfft)

        # No normalization inverse FFT
        irfft = theano.gpuarray.fft.cuirfft(inputs, norm='no_norm')
        f_irfft = theano.function([], irfft, mode=mode_with_gpu)
        res_irfft = f_irfft()

        utt.assert_allclose(irfft_ref_ortho * np.sqrt(N), res_irfft)