Move the UHJ phase shifter to a common header
parent 819e0297ff
commit 8ab5e5dba2
@@ -627,6 +627,7 @@ set(COMMON_OBJS
     common/intrusive_ptr.h
     common/math_defs.h
     common/opthelpers.h
+    common/phase_shifter.h
     common/polyphase_resampler.cpp
     common/polyphase_resampler.h
     common/pragmadefs.h
@@ -2001,7 +2001,7 @@ static ALCenum UpdateDeviceParams(ALCdevice *device, const int *attrList)

     nanoseconds::rep sample_delay{0};
     if(device->Uhj_Encoder)
-        sample_delay += Uhj2Encoder::sFilterSize;
+        sample_delay += Uhj2Encoder::sFilterDelay;
     if(device->mHrtfState)
         sample_delay += HrtfDirectDelay;
     if(auto *ambidec = device->AmbiDecoder.get())
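The accumulated sample_delay above feeds the device's fixed-latency figure. As a rough sketch of what that sample count turns into, assuming the conversion happens at the device sample rate (the fixed_latency helper and its parameters are illustrative assumptions, not code from this commit):

#include <chrono>

using nanoseconds = std::chrono::nanoseconds;

/* Hypothetical helper: convert an accumulated per-stage sample delay into a
 * latency duration. Scale to nanoseconds before dividing so the integer math
 * keeps sub-second precision.
 */
nanoseconds fixed_latency(nanoseconds::rep sample_delay, unsigned sample_rate)
{
    using seconds = std::chrono::duration<nanoseconds::rep>;
    return std::chrono::duration_cast<nanoseconds>(seconds{sample_delay}) / sample_rate;
}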
common/phase_shifter.h (new file, +347 lines)
@@ -0,0 +1,347 @@
#ifndef PHASE_SHIFTER_H
#define PHASE_SHIFTER_H

#ifdef HAVE_SSE_INTRINSICS
#include <xmmintrin.h>
#elif defined(HAVE_NEON)
#include <arm_neon.h>
#endif

#include <algorithm>
#include <array>
#include <memory>
#include <stddef.h>

#include "alcomplex.h"
#include "alspan.h"


/* Implements a wide-band +90 degree phase-shift. Note that this should be
 * given one sample less of a delay (FilterSize/2 - 1) compared to the direct
 * signal delay (FilterSize/2) to properly align.
 */
template<size_t FilterSize>
struct PhaseShifterT {
    static_assert(FilterSize >= 16, "FilterSize needs to be at least 16");
    static_assert((FilterSize&(FilterSize-1)) == 0, "FilterSize needs to be power-of-two");

    alignas(16) std::array<float,FilterSize/2> mCoeffs{};

    /* Some notes on this filter construction.
     *
     * A wide-band phase-shift filter needs a delay to maintain linearity. A
     * dirac impulse in the center of a time-domain buffer represents a filter
     * passing all frequencies through as-is with a pure delay. Converting that
     * to the frequency domain, adjusting the phase of each frequency bin by
     * +90 degrees, then converting back to the time domain, results in a FIR
     * filter that applies a +90 degree wide-band phase-shift.
     *
     * A particularly notable aspect of the time-domain filter response is that
     * every other coefficient is 0. This allows doubling the effective size of
     * the filter, by storing only the non-0 coefficients and double-stepping
     * over the input to apply it.
     *
     * Additionally, the resulting filter is independent of the sample rate.
     * The same filter can be applied regardless of the device's sample rate
     * and achieve the same effect.
     */
    PhaseShifterT()
    {
        using complex_d = std::complex<double>;
        constexpr size_t fft_size{FilterSize};
        constexpr size_t half_size{fft_size / 2};

        auto fftBuffer = std::make_unique<complex_d[]>(fft_size);
        std::fill_n(fftBuffer.get(), fft_size, complex_d{});
        fftBuffer[half_size] = 1.0;

        forward_fft({fftBuffer.get(), fft_size});
        for(size_t i{0};i < half_size+1;++i)
            fftBuffer[i] = complex_d{-fftBuffer[i].imag(), fftBuffer[i].real()};
        for(size_t i{half_size+1};i < fft_size;++i)
            fftBuffer[i] = std::conj(fftBuffer[fft_size - i]);
        inverse_fft({fftBuffer.get(), fft_size});

        auto fftiter = fftBuffer.get() + half_size + (FilterSize/2 - 1);
        for(float &coeff : mCoeffs)
        {
            coeff = static_cast<float>(fftiter->real() / double{fft_size});
            fftiter -= 2;
        }
    }

    void process(al::span<float> dst, const float *RESTRICT src) const;
    void processAccum(al::span<float> dst, const float *RESTRICT src) const;
};

template<size_t S>
inline void PhaseShifterT<S>::process(al::span<float> dst, const float *RESTRICT src) const
{
#ifdef HAVE_SSE_INTRINSICS
    if(size_t todo{dst.size()>>1})
    {
        auto *out = reinterpret_cast<__m64*>(dst.data());
        do {
            __m128 r04{_mm_setzero_ps()};
            __m128 r14{_mm_setzero_ps()};
            for(size_t j{0};j < mCoeffs.size();j+=4)
            {
                const __m128 coeffs{_mm_load_ps(&mCoeffs[j])};
                const __m128 s0{_mm_loadu_ps(&src[j*2])};
                const __m128 s1{_mm_loadu_ps(&src[j*2 + 4])};

                __m128 s{_mm_shuffle_ps(s0, s1, _MM_SHUFFLE(2, 0, 2, 0))};
                r04 = _mm_add_ps(r04, _mm_mul_ps(s, coeffs));

                s = _mm_shuffle_ps(s0, s1, _MM_SHUFFLE(3, 1, 3, 1));
                r14 = _mm_add_ps(r14, _mm_mul_ps(s, coeffs));
            }
            src += 2;

            __m128 r4{_mm_add_ps(_mm_unpackhi_ps(r04, r14), _mm_unpacklo_ps(r04, r14))};
            r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));

            _mm_storel_pi(out, r4);
            ++out;
        } while(--todo);
    }
    if((dst.size()&1))
    {
        __m128 r4{_mm_setzero_ps()};
        for(size_t j{0};j < mCoeffs.size();j+=4)
        {
            const __m128 coeffs{_mm_load_ps(&mCoeffs[j])};
            const __m128 s{_mm_setr_ps(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
            r4 = _mm_add_ps(r4, _mm_mul_ps(s, coeffs));
        }
        r4 = _mm_add_ps(r4, _mm_shuffle_ps(r4, r4, _MM_SHUFFLE(0, 1, 2, 3)));
        r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));

        dst.back() = _mm_cvtss_f32(r4);
    }

#elif defined(HAVE_NEON)

    size_t pos{0};
    if(size_t todo{dst.size()>>1})
    {
        /* There don't seem to be NEON intrinsics to do this kind of stipple
         * shuffling, so there are two custom methods for it.
         */
        auto shuffle_2020 = [](float32x4_t a, float32x4_t b)
        {
            float32x4_t ret{vmovq_n_f32(vgetq_lane_f32(a, 0))};
            ret = vsetq_lane_f32(vgetq_lane_f32(a, 2), ret, 1);
            ret = vsetq_lane_f32(vgetq_lane_f32(b, 0), ret, 2);
            ret = vsetq_lane_f32(vgetq_lane_f32(b, 2), ret, 3);
            return ret;
        };
        auto shuffle_3131 = [](float32x4_t a, float32x4_t b)
        {
            float32x4_t ret{vmovq_n_f32(vgetq_lane_f32(a, 1))};
            ret = vsetq_lane_f32(vgetq_lane_f32(a, 3), ret, 1);
            ret = vsetq_lane_f32(vgetq_lane_f32(b, 1), ret, 2);
            ret = vsetq_lane_f32(vgetq_lane_f32(b, 3), ret, 3);
            return ret;
        };
        auto unpacklo = [](float32x4_t a, float32x4_t b)
        {
            float32x2x2_t result{vzip_f32(vget_low_f32(a), vget_low_f32(b))};
            return vcombine_f32(result.val[0], result.val[1]);
        };
        auto unpackhi = [](float32x4_t a, float32x4_t b)
        {
            float32x2x2_t result{vzip_f32(vget_high_f32(a), vget_high_f32(b))};
            return vcombine_f32(result.val[0], result.val[1]);
        };
        do {
            float32x4_t r04{vdupq_n_f32(0.0f)};
            float32x4_t r14{vdupq_n_f32(0.0f)};
            for(size_t j{0};j < mCoeffs.size();j+=4)
            {
                const float32x4_t coeffs{vld1q_f32(&mCoeffs[j])};
                const float32x4_t s0{vld1q_f32(&src[j*2])};
                const float32x4_t s1{vld1q_f32(&src[j*2 + 4])};

                r04 = vmlaq_f32(r04, shuffle_2020(s0, s1), coeffs);
                r14 = vmlaq_f32(r14, shuffle_3131(s0, s1), coeffs);
            }
            src += 2;

            float32x4_t r4{vaddq_f32(unpackhi(r04, r14), unpacklo(r04, r14))};
            float32x2_t r2{vadd_f32(vget_low_f32(r4), vget_high_f32(r4))};

            vst1_f32(&dst[pos], r2);
            pos += 2;
        } while(--todo);
    }
    if((dst.size()&1))
    {
        auto load4 = [](float32_t a, float32_t b, float32_t c, float32_t d)
        {
            float32x4_t ret{vmovq_n_f32(a)};
            ret = vsetq_lane_f32(b, ret, 1);
            ret = vsetq_lane_f32(c, ret, 2);
            ret = vsetq_lane_f32(d, ret, 3);
            return ret;
        };
        float32x4_t r4{vdupq_n_f32(0.0f)};
        for(size_t j{0};j < mCoeffs.size();j+=4)
        {
            const float32x4_t coeffs{vld1q_f32(&mCoeffs[j])};
            const float32x4_t s{load4(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
            r4 = vmlaq_f32(r4, s, coeffs);
        }
        r4 = vaddq_f32(r4, vrev64q_f32(r4));
        dst[pos] = vget_lane_f32(vadd_f32(vget_low_f32(r4), vget_high_f32(r4)), 0);
    }

#else

    for(float &output : dst)
    {
        float ret{0.0f};
        for(size_t j{0};j < mCoeffs.size();++j)
            ret += src[j*2] * mCoeffs[j];

        output = ret;
        ++src;
    }
#endif
}

template<size_t S>
inline void PhaseShifterT<S>::processAccum(al::span<float> dst, const float *RESTRICT src) const
{
#ifdef HAVE_SSE_INTRINSICS
    if(size_t todo{dst.size()>>1})
    {
        auto *out = reinterpret_cast<__m64*>(dst.data());
        do {
            __m128 r04{_mm_setzero_ps()};
            __m128 r14{_mm_setzero_ps()};
            for(size_t j{0};j < mCoeffs.size();j+=4)
            {
                const __m128 coeffs{_mm_load_ps(&mCoeffs[j])};
                const __m128 s0{_mm_loadu_ps(&src[j*2])};
                const __m128 s1{_mm_loadu_ps(&src[j*2 + 4])};

                __m128 s{_mm_shuffle_ps(s0, s1, _MM_SHUFFLE(2, 0, 2, 0))};
                r04 = _mm_add_ps(r04, _mm_mul_ps(s, coeffs));

                s = _mm_shuffle_ps(s0, s1, _MM_SHUFFLE(3, 1, 3, 1));
                r14 = _mm_add_ps(r14, _mm_mul_ps(s, coeffs));
            }
            src += 2;

            __m128 r4{_mm_add_ps(_mm_unpackhi_ps(r04, r14), _mm_unpacklo_ps(r04, r14))};
            r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));

            _mm_storel_pi(out, _mm_add_ps(_mm_loadl_pi(_mm_undefined_ps(), out), r4));
            ++out;
        } while(--todo);
    }
    if((dst.size()&1))
    {
        __m128 r4{_mm_setzero_ps()};
        for(size_t j{0};j < mCoeffs.size();j+=4)
        {
            const __m128 coeffs{_mm_load_ps(&mCoeffs[j])};
            /* NOTE: This could alternatively be done with two unaligned loads
             * and a shuffle. Which would be better?
             */
            const __m128 s{_mm_setr_ps(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
            r4 = _mm_add_ps(r4, _mm_mul_ps(s, coeffs));
        }
        r4 = _mm_add_ps(r4, _mm_shuffle_ps(r4, r4, _MM_SHUFFLE(0, 1, 2, 3)));
        r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));

        dst.back() += _mm_cvtss_f32(r4);
    }

#elif defined(HAVE_NEON)

    size_t pos{0};
    if(size_t todo{dst.size()>>1})
    {
        auto shuffle_2020 = [](float32x4_t a, float32x4_t b)
        {
            float32x4_t ret{vmovq_n_f32(vgetq_lane_f32(a, 0))};
            ret = vsetq_lane_f32(vgetq_lane_f32(a, 2), ret, 1);
            ret = vsetq_lane_f32(vgetq_lane_f32(b, 0), ret, 2);
            ret = vsetq_lane_f32(vgetq_lane_f32(b, 2), ret, 3);
            return ret;
        };
        auto shuffle_3131 = [](float32x4_t a, float32x4_t b)
        {
            float32x4_t ret{vmovq_n_f32(vgetq_lane_f32(a, 1))};
            ret = vsetq_lane_f32(vgetq_lane_f32(a, 3), ret, 1);
            ret = vsetq_lane_f32(vgetq_lane_f32(b, 1), ret, 2);
            ret = vsetq_lane_f32(vgetq_lane_f32(b, 3), ret, 3);
            return ret;
        };
        auto unpacklo = [](float32x4_t a, float32x4_t b)
        {
            float32x2x2_t result{vzip_f32(vget_low_f32(a), vget_low_f32(b))};
            return vcombine_f32(result.val[0], result.val[1]);
        };
        auto unpackhi = [](float32x4_t a, float32x4_t b)
        {
            float32x2x2_t result{vzip_f32(vget_high_f32(a), vget_high_f32(b))};
            return vcombine_f32(result.val[0], result.val[1]);
        };
        do {
            float32x4_t r04{vdupq_n_f32(0.0f)};
            float32x4_t r14{vdupq_n_f32(0.0f)};
            for(size_t j{0};j < mCoeffs.size();j+=4)
            {
                const float32x4_t coeffs{vld1q_f32(&mCoeffs[j])};
                const float32x4_t s0{vld1q_f32(&src[j*2])};
                const float32x4_t s1{vld1q_f32(&src[j*2 + 4])};

                r04 = vmlaq_f32(r04, shuffle_2020(s0, s1), coeffs);
                r14 = vmlaq_f32(r14, shuffle_3131(s0, s1), coeffs);
            }
            src += 2;

            float32x4_t r4{vaddq_f32(unpackhi(r04, r14), unpacklo(r04, r14))};
            float32x2_t r2{vadd_f32(vget_low_f32(r4), vget_high_f32(r4))};

            vst1_f32(&dst[pos], vadd_f32(vld1_f32(&dst[pos]), r2));
            pos += 2;
        } while(--todo);
    }
    if((dst.size()&1))
    {
        auto load4 = [](float32_t a, float32_t b, float32_t c, float32_t d)
        {
            float32x4_t ret{vmovq_n_f32(a)};
            ret = vsetq_lane_f32(b, ret, 1);
            ret = vsetq_lane_f32(c, ret, 2);
            ret = vsetq_lane_f32(d, ret, 3);
            return ret;
        };
        float32x4_t r4{vdupq_n_f32(0.0f)};
        for(size_t j{0};j < mCoeffs.size();j+=4)
        {
            const float32x4_t coeffs{vld1q_f32(&mCoeffs[j])};
            const float32x4_t s{load4(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
            r4 = vmlaq_f32(r4, s, coeffs);
        }
        r4 = vaddq_f32(r4, vrev64q_f32(r4));
        dst[pos] += vget_lane_f32(vadd_f32(vget_low_f32(r4), vget_high_f32(r4)), 0);
    }

#else

    for(float &output : dst)
    {
        float ret{0.0f};
        for(size_t j{0};j < mCoeffs.size();++j)
            ret += src[j*2] * mCoeffs[j];

        output += ret;
        ++src;
    }
#endif
}

#endif /* PHASE_SHIFTER_H */
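For reference, the coefficient construction the header's comment describes can be reproduced standalone. Below is a minimal sketch using a naive O(N^2) DFT in place of the codebase's forward_fft/inverse_fft, with FilterSize 256 to match the PhaseShifterT<sFilterDelay*2> instantiations later in this commit; the dft helper and the printing main are illustrative assumptions, not part of the commit:

#include <array>
#include <complex>
#include <cstdio>
#include <stddef.h>
#include <vector>

/* Naive O(N^2) DFT; fine for a one-time table build. Stand-in for the
 * library's forward_fft/inverse_fft helpers (unnormalized, like them).
 */
std::vector<std::complex<double>> dft(const std::vector<std::complex<double>> &in, bool inverse)
{
    constexpr double pi{3.14159265358979323846};
    const size_t n{in.size()};
    std::vector<std::complex<double>> out(n);
    const double sign{inverse ? 1.0 : -1.0};
    for(size_t k{0};k < n;++k)
    {
        std::complex<double> sum{};
        for(size_t i{0};i < n;++i)
            sum += in[i] * std::polar(1.0, sign * 2.0*pi * double(k*i) / double(n));
        out[k] = sum;
    }
    return out;
}

int main()
{
    constexpr size_t FilterSize{256};
    constexpr size_t half_size{FilterSize / 2};

    /* A dirac impulse centered in the buffer: an all-pass with a pure delay. */
    std::vector<std::complex<double>> buf(FilterSize);
    buf[half_size] = 1.0;

    /* Rotate each positive-frequency bin by +90 degrees (multiply by j), then
     * rebuild the negative frequencies as the conjugate mirror so the
     * time-domain result stays (essentially) real.
     */
    buf = dft(buf, false);
    for(size_t i{0};i < half_size+1;++i)
        buf[i] = {-buf[i].imag(), buf[i].real()};
    for(size_t i{half_size+1};i < FilterSize;++i)
        buf[i] = std::conj(buf[FilterSize - i]);
    buf = dft(buf, true);

    /* Every other coefficient is ~0; keep the non-zero ones in reverse order,
     * normalizing by the unnormalized inverse DFT's gain, exactly as the
     * constructor above does.
     */
    std::array<float,FilterSize/2> coeffs{};
    size_t idx{half_size + FilterSize/2 - 1};
    for(float &coeff : coeffs)
    {
        coeff = static_cast<float>(buf[idx].real() / double(FilterSize));
        idx -= 2;
    }

    for(size_t i{0};i < 4;++i)
        std::printf("coeffs[%zu] = %+.6f\n", i, coeffs[i]);
}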
@@ -15,202 +15,14 @@
 #include "alcomplex.h"
 #include "alnumeric.h"
 #include "opthelpers.h"
+#include "phase_shifter.h"


 namespace {

-using complex_d = std::complex<double>;
-
-struct PhaseShifterT {
-    alignas(16) std::array<float,Uhj2Encoder::sFilterSize> Coeffs;
-
-    /* Some notes on this filter construction.
-     *
-     * A wide-band phase-shift filter needs a delay to maintain linearity. A
-     * dirac impulse in the center of a time-domain buffer represents a filter
-     * passing all frequencies through as-is with a pure delay. Converting that
-     * to the frequency domain, adjusting the phase of each frequency bin by
-     * +90 degrees, then converting back to the time domain, results in a FIR
-     * filter that applies a +90 degree wide-band phase-shift.
-     *
-     * A particularly notable aspect of the time-domain filter response is that
-     * every other coefficient is 0. This allows doubling the effective size of
-     * the filter, by storing only the non-0 coefficients and double-stepping
-     * over the input to apply it.
-     *
-     * Additionally, the resulting filter is independent of the sample rate.
-     * The same filter can be applied regardless of the device's sample rate
-     * and achieve the same effect.
-     */
-    PhaseShifterT()
-    {
-        constexpr size_t fft_size{Uhj2Encoder::sFilterSize * 2};
-        constexpr size_t half_size{fft_size / 2};
-
-        /* Generate a frequency domain impulse with a +90 degree phase offset.
-         * Reconstruct the mirrored frequencies to convert to the time domain.
-         */
-        auto fftBuffer = std::make_unique<complex_d[]>(fft_size);
-        std::fill_n(fftBuffer.get(), fft_size, complex_d{});
-        fftBuffer[half_size] = 1.0;
-
-        forward_fft({fftBuffer.get(), fft_size});
-        for(size_t i{0};i < half_size+1;++i)
-            fftBuffer[i] = complex_d{-fftBuffer[i].imag(), fftBuffer[i].real()};
-        for(size_t i{half_size+1};i < fft_size;++i)
-            fftBuffer[i] = std::conj(fftBuffer[fft_size - i]);
-        inverse_fft({fftBuffer.get(), fft_size});
-
-        /* Reverse the filter for simpler processing, and store only the non-0
-         * coefficients.
-         */
-        auto fftiter = fftBuffer.get() + half_size + (Uhj2Encoder::sFilterSize-1);
-        for(float &coeff : Coeffs)
-        {
-            coeff = static_cast<float>(fftiter->real() / double{fft_size});
-            fftiter -= 2;
-        }
-    }
-};
-const PhaseShifterT PShift{};
-
-void allpass_process(al::span<float> dst, const float *RESTRICT src)
-{
-#ifdef HAVE_SSE_INTRINSICS
-    if(size_t todo{dst.size()>>1})
-    {
-        auto *out = reinterpret_cast<__m64*>(dst.data());
-        do {
-            __m128 r04{_mm_setzero_ps()};
-            __m128 r14{_mm_setzero_ps()};
-            for(size_t j{0};j < PShift.Coeffs.size();j+=4)
-            {
-                const __m128 coeffs{_mm_load_ps(&PShift.Coeffs[j])};
-                const __m128 s0{_mm_loadu_ps(&src[j*2])};
-                const __m128 s1{_mm_loadu_ps(&src[j*2 + 4])};
-
-                __m128 s{_mm_shuffle_ps(s0, s1, _MM_SHUFFLE(2, 0, 2, 0))};
-                r04 = _mm_add_ps(r04, _mm_mul_ps(s, coeffs));
-
-                s = _mm_shuffle_ps(s0, s1, _MM_SHUFFLE(3, 1, 3, 1));
-                r14 = _mm_add_ps(r14, _mm_mul_ps(s, coeffs));
-            }
-            src += 2;
-
-            __m128 r4{_mm_add_ps(_mm_unpackhi_ps(r04, r14), _mm_unpacklo_ps(r04, r14))};
-            r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));
-
-            _mm_storel_pi(out, _mm_add_ps(_mm_loadl_pi(_mm_undefined_ps(), out), r4));
-            ++out;
-        } while(--todo);
-    }
-    if((dst.size()&1))
-    {
-        __m128 r4{_mm_setzero_ps()};
-        for(size_t j{0};j < PShift.Coeffs.size();j+=4)
-        {
-            const __m128 coeffs{_mm_load_ps(&PShift.Coeffs[j])};
-            /* NOTE: This could alternatively be done with two unaligned loads
-             * and a shuffle. Which would be better?
-             */
-            const __m128 s{_mm_setr_ps(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
-            r4 = _mm_add_ps(r4, _mm_mul_ps(s, coeffs));
-        }
-        r4 = _mm_add_ps(r4, _mm_shuffle_ps(r4, r4, _MM_SHUFFLE(0, 1, 2, 3)));
-        r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));
-
-        dst.back() += _mm_cvtss_f32(r4);
-    }
-
-#elif defined(HAVE_NEON)
-
-    size_t pos{0};
-    if(size_t todo{dst.size()>>1})
-    {
-        /* There doesn't seem to be NEON intrinsics to do this kind of stipple
-         * shuffling, so there's two custom methods for it.
-         */
-        auto shuffle_2020 = [](float32x4_t a, float32x4_t b)
-        {
-            float32x4_t ret{vmovq_n_f32(vgetq_lane_f32(a, 0))};
-            ret = vsetq_lane_f32(vgetq_lane_f32(a, 2), ret, 1);
-            ret = vsetq_lane_f32(vgetq_lane_f32(b, 0), ret, 2);
-            ret = vsetq_lane_f32(vgetq_lane_f32(b, 2), ret, 3);
-            return ret;
-        };
-        auto shuffle_3131 = [](float32x4_t a, float32x4_t b)
-        {
-            float32x4_t ret{vmovq_n_f32(vgetq_lane_f32(a, 1))};
-            ret = vsetq_lane_f32(vgetq_lane_f32(a, 3), ret, 1);
-            ret = vsetq_lane_f32(vgetq_lane_f32(b, 1), ret, 2);
-            ret = vsetq_lane_f32(vgetq_lane_f32(b, 3), ret, 3);
-            return ret;
-        };
-        auto unpacklo = [](float32x4_t a, float32x4_t b)
-        {
-            float32x2x2_t result{vzip_f32(vget_low_f32(a), vget_low_f32(b))};
-            return vcombine_f32(result.val[0], result.val[1]);
-        };
-        auto unpackhi = [](float32x4_t a, float32x4_t b)
-        {
-            float32x2x2_t result{vzip_f32(vget_high_f32(a), vget_high_f32(b))};
-            return vcombine_f32(result.val[0], result.val[1]);
-        };
-        do {
-            float32x4_t r04{vdupq_n_f32(0.0f)};
-            float32x4_t r14{vdupq_n_f32(0.0f)};
-            for(size_t j{0};j < PShift.Coeffs.size();j+=4)
-            {
-                const float32x4_t coeffs{vld1q_f32(&PShift.Coeffs[j])};
-                const float32x4_t s0{vld1q_f32(&src[j*2])};
-                const float32x4_t s1{vld1q_f32(&src[j*2 + 4])};
-
-                r04 = vmlaq_f32(r04, shuffle_2020(s0, s1), coeffs);
-                r14 = vmlaq_f32(r14, shuffle_3131(s0, s1), coeffs);
-            }
-            src += 2;
-
-            float32x4_t r4{vaddq_f32(unpackhi(r04, r14), unpacklo(r04, r14))};
-            float32x2_t r2{vadd_f32(vget_low_f32(r4), vget_high_f32(r4))};
-
-            vst1_f32(&dst[pos], vadd_f32(vld1_f32(&dst[pos]), r2));
-            pos += 2;
-        } while(--todo);
-    }
-    if((dst.size()&1))
-    {
-        auto load4 = [](float32_t a, float32_t b, float32_t c, float32_t d)
-        {
-            float32x4_t ret{vmovq_n_f32(a)};
-            ret = vsetq_lane_f32(b, ret, 1);
-            ret = vsetq_lane_f32(c, ret, 2);
-            ret = vsetq_lane_f32(d, ret, 3);
-            return ret;
-        };
-        float32x4_t r4{vdupq_n_f32(0.0f)};
-        for(size_t j{0};j < PShift.Coeffs.size();j+=4)
-        {
-            const float32x4_t coeffs{vld1q_f32(&PShift.Coeffs[j])};
-            const float32x4_t s{load4(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
-            r4 = vmlaq_f32(r4, s, coeffs);
-        }
-        r4 = vaddq_f32(r4, vrev64q_f32(r4));
-        dst[pos] += vget_lane_f32(vadd_f32(vget_low_f32(r4), vget_high_f32(r4)), 0);
-    }
-
-#else
-
-    for(float &output : dst)
-    {
-        float ret{0.0f};
-        for(size_t j{0};j < PShift.Coeffs.size();++j)
-            ret += src[j*2] * PShift.Coeffs[j];
-
-        output += ret;
-        ++src;
-    }
-#endif
-}
+const PhaseShifterT<Uhj2Encoder::sFilterDelay*2> PShift{};

 } // namespace

@@ -247,13 +59,13 @@ void Uhj2Encoder::encode(const FloatBufferSpan LeftOut, const FloatBufferSpan Ri
     /* Combine the previously delayed mid/side signal with the input. */

     /* S = 0.9396926*W + 0.1855740*X */
-    auto miditer = mMid.begin() + sFilterSize;
+    auto miditer = mMid.begin() + sFilterDelay;
     std::transform(winput, winput+SamplesToDo, xinput, miditer,
         [](const float w, const float x) noexcept -> float
         { return 0.9396926f*w + 0.1855740f*x; });

     /* D = 0.6554516*Y */
-    auto sideiter = mSide.begin() + sFilterSize;
+    auto sideiter = mSide.begin() + sFilterDelay;
     std::transform(yinput, yinput+SamplesToDo, sideiter,
         [](const float y) noexcept -> float { return 0.6554516f*y; });

@@ -271,7 +83,7 @@ void Uhj2Encoder::encode(const FloatBufferSpan LeftOut, const FloatBufferSpan Ri
         [](const float w, const float x) noexcept -> float
         { return -0.3420201f*w + 0.5098604f*x; });
     std::copy_n(mTemp.cbegin()+SamplesToDo, mSideHistory.size(), mSideHistory.begin());
-    allpass_process({mSide.data(), SamplesToDo}, mTemp.data());
+    PShift.processAccum({mSide.data(), SamplesToDo}, mTemp.data());

     /* Left = (S + D)/2.0 */
     for(size_t i{0};i < SamplesToDo;i++)
@@ -281,6 +93,6 @@ void Uhj2Encoder::encode(const FloatBufferSpan LeftOut, const FloatBufferSpan Ri
         right[i] = (mMid[i] - mSide[i]) * 0.5f;

     /* Copy the future samples to the front for next time. */
-    std::copy(mMid.cbegin()+SamplesToDo, mMid.cbegin()+SamplesToDo+sFilterSize, mMid.begin());
-    std::copy(mSide.cbegin()+SamplesToDo, mSide.cbegin()+SamplesToDo+sFilterSize, mSide.begin());
+    std::copy(mMid.cbegin()+SamplesToDo, mMid.cbegin()+SamplesToDo+sFilterDelay, mMid.begin());
+    std::copy(mSide.cbegin()+SamplesToDo, mSide.cbegin()+SamplesToDo+sFilterDelay, mSide.begin());
 }
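Written out per sample, the encode path above computes the classic UHJ sums. A minimal scalar sketch using the coefficients from this diff, assuming the +90 degree phase-shifted term has already been produced and delay-aligned by the PhaseShifterT (the function and variable names are illustrative, not from the commit):

struct StereoSample { float left, right; };

/* One UHJ-encoded sample pair from delay-aligned B-Format inputs. The jwx
 * argument stands for j(-0.3420201*W + 0.5098604*X), the wide-band +90 degree
 * phase-shifted term that PShift.processAccum mixes into the side signal.
 */
StereoSample uhj_encode_sample(float w, float x, float y, float jwx)
{
    const float s{0.9396926f*w + 0.1855740f*x}; /* S (sum/mid) */
    const float d{jwx + 0.6554516f*y};          /* D (difference/side) */
    return {(s + d) * 0.5f, (s - d) * 0.5f};    /* Left = (S+D)/2, Right = (S-D)/2 */
}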
@@ -8,20 +8,19 @@


 struct Uhj2Encoder {
-    /* A particular property of the filter allows it to cover nearly twice its
-     * length, so the filter size is also the effective delay (despite being
-     * center-aligned).
+    /* The filter delay is half its effective size, so a delay of 128 has a
+     * FIR length of 256.
      */
-    constexpr static size_t sFilterSize{128};
+    constexpr static size_t sFilterDelay{128};

     /* Delays and processing storage for the unfiltered signal. */
-    alignas(16) std::array<float,BufferLineSize+sFilterSize> mMid{};
-    alignas(16) std::array<float,BufferLineSize+sFilterSize> mSide{};
+    alignas(16) std::array<float,BufferLineSize+sFilterDelay> mMid{};
+    alignas(16) std::array<float,BufferLineSize+sFilterDelay> mSide{};

     /* History for the FIR filter. */
-    alignas(16) std::array<float,sFilterSize*2 - 1> mSideHistory{};
+    alignas(16) std::array<float,sFilterDelay*2 - 1> mSideHistory{};

-    alignas(16) std::array<float,BufferLineSize + sFilterSize*2> mTemp{};
+    alignas(16) std::array<float,BufferLineSize + sFilterDelay*2> mTemp{};

     /**
      * Encodes a 2-channel UHJ (stereo-compatible) signal from a B-Format input
@@ -46,6 +46,7 @@
 #include "alspan.h"
 #include "vector.h"
 #include "opthelpers.h"
+#include "phase_shifter.h"

 #include "sndfile.h"

@@ -117,18 +118,18 @@ using FloatBufferSpan = al::span<float,BufferLineSize>;


 struct UhjDecoder {
-    constexpr static size_t sFilterSize{128};
+    constexpr static size_t sFilterDelay{128};

-    alignas(16) std::array<float,BufferLineSize+sFilterSize> mS{};
-    alignas(16) std::array<float,BufferLineSize+sFilterSize> mD{};
-    alignas(16) std::array<float,BufferLineSize+sFilterSize> mT{};
-    alignas(16) std::array<float,BufferLineSize+sFilterSize> mQ{};
+    alignas(16) std::array<float,BufferLineSize+sFilterDelay> mS{};
+    alignas(16) std::array<float,BufferLineSize+sFilterDelay> mD{};
+    alignas(16) std::array<float,BufferLineSize+sFilterDelay> mT{};
+    alignas(16) std::array<float,BufferLineSize+sFilterDelay> mQ{};

     /* History for the FIR filter. */
-    alignas(16) std::array<float,sFilterSize-1> mDTHistory{};
-    alignas(16) std::array<float,sFilterSize-1> mSHistory{};
+    alignas(16) std::array<float,sFilterDelay-1> mDTHistory{};
+    alignas(16) std::array<float,sFilterDelay-1> mSHistory{};

-    alignas(16) std::array<float,BufferLineSize + sFilterSize*2> mTemp{};
+    alignas(16) std::array<float,BufferLineSize + sFilterDelay*2> mTemp{};

     void decode(const float *RESTRICT InSamples, const size_t InChannels,
         const al::span<FloatBufferLine> OutSamples, const size_t SamplesToDo);
@@ -138,173 +139,7 @@ struct UhjDecoder {
     DEF_NEWDEL(UhjDecoder)
 };

-/* Same basic filter design as in core/uhjfilter.cpp. */
-template<size_t FilterSize>
-struct PhaseShifterT {
-    static_assert((FilterSize&(FilterSize-1)) == 0, "FilterSize needs to be power-of-two");
-
-    alignas(16) std::array<float,FilterSize> Coeffs{};
-
-    PhaseShifterT()
-    {
-        constexpr size_t fft_size{FilterSize * 2};
-        constexpr size_t half_size{fft_size / 2};
-
-        auto fftBuffer = std::make_unique<complex_d[]>(fft_size);
-        std::fill_n(fftBuffer.get(), fft_size, complex_d{});
-        fftBuffer[half_size] = 1.0;
-
-        forward_fft({fftBuffer.get(), fft_size});
-        for(size_t i{0};i < half_size+1;++i)
-            fftBuffer[i] = complex_d{-fftBuffer[i].imag(), fftBuffer[i].real()};
-        for(size_t i{half_size+1};i < fft_size;++i)
-            fftBuffer[i] = std::conj(fftBuffer[fft_size - i]);
-        inverse_fft({fftBuffer.get(), fft_size});
-
-        auto fftiter = fftBuffer.get() + half_size + (FilterSize-1);
-        for(float &coeff : Coeffs)
-        {
-            coeff = static_cast<float>(fftiter->real() / double{fft_size});
-            fftiter -= 2;
-        }
-    }
-};
-const PhaseShifterT<UhjDecoder::sFilterSize> PShift{};
-
-/* Mostly the same as in core/uhjfilter.cpp, except this overwrites the output
- * instead of adding to it.
- */
-void allpass_process(al::span<float> dst, const float *RESTRICT src)
-{
-#ifdef HAVE_SSE_INTRINSICS
-    if(size_t todo{dst.size()>>1})
-    {
-        auto *out = reinterpret_cast<__m64*>(dst.data());
-        do {
-            __m128 r04{_mm_setzero_ps()};
-            __m128 r14{_mm_setzero_ps()};
-            for(size_t j{0};j < PShift.Coeffs.size();j+=4)
-            {
-                const __m128 coeffs{_mm_load_ps(&PShift.Coeffs[j])};
-                const __m128 s0{_mm_loadu_ps(&src[j*2])};
-                const __m128 s1{_mm_loadu_ps(&src[j*2 + 4])};
-
-                __m128 s{_mm_shuffle_ps(s0, s1, _MM_SHUFFLE(2, 0, 2, 0))};
-                r04 = _mm_add_ps(r04, _mm_mul_ps(s, coeffs));
-
-                s = _mm_shuffle_ps(s0, s1, _MM_SHUFFLE(3, 1, 3, 1));
-                r14 = _mm_add_ps(r14, _mm_mul_ps(s, coeffs));
-            }
-            src += 2;
-
-            __m128 r4{_mm_add_ps(_mm_unpackhi_ps(r04, r14), _mm_unpacklo_ps(r04, r14))};
-            r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));
-
-            _mm_storel_pi(out, r4);
-            ++out;
-        } while(--todo);
-    }
-    if((dst.size()&1))
-    {
-        __m128 r4{_mm_setzero_ps()};
-        for(size_t j{0};j < PShift.Coeffs.size();j+=4)
-        {
-            const __m128 coeffs{_mm_load_ps(&PShift.Coeffs[j])};
-            const __m128 s{_mm_setr_ps(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
-            r4 = _mm_add_ps(r4, _mm_mul_ps(s, coeffs));
-        }
-        r4 = _mm_add_ps(r4, _mm_shuffle_ps(r4, r4, _MM_SHUFFLE(0, 1, 2, 3)));
-        r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));
-
-        dst.back() = _mm_cvtss_f32(r4);
-    }
-
-#elif defined(HAVE_NEON)
-
-    size_t pos{0};
-    if(size_t todo{dst.size()>>1})
-    {
-        auto shuffle_2020 = [](float32x4_t a, float32x4_t b)
-        {
-            float32x4_t ret{vmovq_n_f32(vgetq_lane_f32(a, 0))};
-            ret = vsetq_lane_f32(vgetq_lane_f32(a, 2), ret, 1);
-            ret = vsetq_lane_f32(vgetq_lane_f32(b, 0), ret, 2);
-            ret = vsetq_lane_f32(vgetq_lane_f32(b, 2), ret, 3);
-            return ret;
-        };
-        auto shuffle_3131 = [](float32x4_t a, float32x4_t b)
-        {
-            float32x4_t ret{vmovq_n_f32(vgetq_lane_f32(a, 1))};
-            ret = vsetq_lane_f32(vgetq_lane_f32(a, 3), ret, 1);
-            ret = vsetq_lane_f32(vgetq_lane_f32(b, 1), ret, 2);
-            ret = vsetq_lane_f32(vgetq_lane_f32(b, 3), ret, 3);
-            return ret;
-        };
-        auto unpacklo = [](float32x4_t a, float32x4_t b)
-        {
-            float32x2x2_t result{vzip_f32(vget_low_f32(a), vget_low_f32(b))};
-            return vcombine_f32(result.val[0], result.val[1]);
-        };
-        auto unpackhi = [](float32x4_t a, float32x4_t b)
-        {
-            float32x2x2_t result{vzip_f32(vget_high_f32(a), vget_high_f32(b))};
-            return vcombine_f32(result.val[0], result.val[1]);
-        };
-        do {
-            float32x4_t r04{vdupq_n_f32(0.0f)};
-            float32x4_t r14{vdupq_n_f32(0.0f)};
-            for(size_t j{0};j < PShift.Coeffs.size();j+=4)
-            {
-                const float32x4_t coeffs{vld1q_f32(&PShift.Coeffs[j])};
-                const float32x4_t s0{vld1q_f32(&src[j*2])};
-                const float32x4_t s1{vld1q_f32(&src[j*2 + 4])};
-
-                r04 = vmlaq_f32(r04, shuffle_2020(s0, s1), coeffs);
-                r14 = vmlaq_f32(r14, shuffle_3131(s0, s1), coeffs);
-            }
-            src += 2;
-
-            float32x4_t r4{vaddq_f32(unpackhi(r04, r14), unpacklo(r04, r14))};
-            float32x2_t r2{vadd_f32(vget_low_f32(r4), vget_high_f32(r4))};
-
-            vst1_f32(&dst[pos], r2);
-            pos += 2;
-        } while(--todo);
-    }
-    if((dst.size()&1))
-    {
-        auto load4 = [](float32_t a, float32_t b, float32_t c, float32_t d)
-        {
-            float32x4_t ret{vmovq_n_f32(a)};
-            ret = vsetq_lane_f32(b, ret, 1);
-            ret = vsetq_lane_f32(c, ret, 2);
-            ret = vsetq_lane_f32(d, ret, 3);
-            return ret;
-        };
-        float32x4_t r4{vdupq_n_f32(0.0f)};
-        for(size_t j{0};j < PShift.Coeffs.size();j+=4)
-        {
-            const float32x4_t coeffs{vld1q_f32(&PShift.Coeffs[j])};
-            const float32x4_t s{load4(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
-            r4 = vmlaq_f32(r4, s, coeffs);
-        }
-        r4 = vaddq_f32(r4, vrev64q_f32(r4));
-        dst[pos] = vget_lane_f32(vadd_f32(vget_low_f32(r4), vget_high_f32(r4)), 0);
-    }
-
-#else
-
-    for(float &output : dst)
-    {
-        float ret{0.0f};
-        for(size_t j{0};j < PShift.Coeffs.size();++j)
-            ret += src[j*2] * PShift.Coeffs[j];
-
-        output = ret;
-        ++src;
-    }
-#endif
-}
+const PhaseShifterT<UhjDecoder::sFilterDelay*2> PShift{};


 /* Decoding UHJ is done as:
@@ -395,31 +230,31 @@ void UhjDecoder::decode(const float *RESTRICT InSamples, const size_t InChannels

     /* S = Left + Right */
     for(size_t i{0};i < SamplesToDo;++i)
-        mS[sFilterSize+i] = InSamples[i*InChannels + 0] + InSamples[i*InChannels + 1];
+        mS[sFilterDelay+i] = InSamples[i*InChannels + 0] + InSamples[i*InChannels + 1];

     /* D = Left - Right */
     for(size_t i{0};i < SamplesToDo;++i)
-        mD[sFilterSize+i] = InSamples[i*InChannels + 0] - InSamples[i*InChannels + 1];
+        mD[sFilterDelay+i] = InSamples[i*InChannels + 0] - InSamples[i*InChannels + 1];

     if(InChannels > 2)
     {
         /* T */
         for(size_t i{0};i < SamplesToDo;++i)
-            mT[sFilterSize+i] = InSamples[i*InChannels + 2];
+            mT[sFilterDelay+i] = InSamples[i*InChannels + 2];
     }
     if(InChannels > 3)
     {
         /* Q */
         for(size_t i{0};i < SamplesToDo;++i)
-            mQ[sFilterSize+i] = InSamples[i*InChannels + 3];
+            mQ[sFilterDelay+i] = InSamples[i*InChannels + 3];
     }

     /* Precompute j(0.828347*D + 0.767835*T) and store in xoutput. */
     auto tmpiter = std::copy(mDTHistory.cbegin(), mDTHistory.cend(), mTemp.begin());
-    std::transform(mD.cbegin(), mD.cbegin()+SamplesToDo+sFilterSize, mT.cbegin(), tmpiter,
+    std::transform(mD.cbegin(), mD.cbegin()+SamplesToDo+sFilterDelay, mT.cbegin(), tmpiter,
         [](const float d, const float t) noexcept { return 0.828347f*d + 0.767835f*t; });
     std::copy_n(mTemp.cbegin()+SamplesToDo, mDTHistory.size(), mDTHistory.begin());
-    allpass_process({xoutput, SamplesToDo}, mTemp.data());
+    PShift.process({xoutput, SamplesToDo}, mTemp.data());

     for(size_t i{0};i < SamplesToDo;++i)
     {
@@ -431,9 +266,9 @@ void UhjDecoder::decode(const float *RESTRICT InSamples, const size_t InChannels

     /* Precompute j*S and store in youtput. */
     tmpiter = std::copy(mSHistory.cbegin(), mSHistory.cend(), mTemp.begin());
-    std::copy_n(mS.cbegin(), SamplesToDo+sFilterSize, tmpiter);
+    std::copy_n(mS.cbegin(), SamplesToDo+sFilterDelay, tmpiter);
     std::copy_n(mTemp.cbegin()+SamplesToDo, mSHistory.size(), mSHistory.begin());
-    allpass_process({youtput, SamplesToDo}, mTemp.data());
+    PShift.process({youtput, SamplesToDo}, mTemp.data());

     for(size_t i{0};i < SamplesToDo;++i)
     {
@@ -449,10 +284,10 @@ void UhjDecoder::decode(const float *RESTRICT InSamples, const size_t InChannels
         zoutput[i] = 1.023332f*mQ[i];
     }

-    std::copy(mS.begin()+SamplesToDo, mS.begin()+SamplesToDo+sFilterSize, mS.begin());
-    std::copy(mD.begin()+SamplesToDo, mD.begin()+SamplesToDo+sFilterSize, mD.begin());
-    std::copy(mT.begin()+SamplesToDo, mT.begin()+SamplesToDo+sFilterSize, mT.begin());
-    std::copy(mQ.begin()+SamplesToDo, mQ.begin()+SamplesToDo+sFilterSize, mQ.begin());
+    std::copy(mS.begin()+SamplesToDo, mS.begin()+SamplesToDo+sFilterDelay, mS.begin());
+    std::copy(mD.begin()+SamplesToDo, mD.begin()+SamplesToDo+sFilterDelay, mD.begin());
+    std::copy(mT.begin()+SamplesToDo, mT.begin()+SamplesToDo+sFilterDelay, mT.begin());
+    std::copy(mQ.begin()+SamplesToDo, mQ.begin()+SamplesToDo+sFilterDelay, mQ.begin());
 }
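Both the encoder and the decoder above follow the same block-processing pattern: keep sFilterDelay samples of history at the head of each work buffer, write the incoming block after it, filter, then copy the tail back to the front for the next call. A minimal sketch of that pattern in isolation (the BlockBuffer type and its callback are illustrative assumptions, not from this commit):

#include <algorithm>
#include <array>
#include <stddef.h>

constexpr size_t BufferLineSize{1024}; /* assumed block size, for illustration */
constexpr size_t sFilterDelay{128};

struct BlockBuffer {
    std::array<float,BufferLineSize+sFilterDelay> mWork{};

    template<typename F>
    void feed(const float *input, size_t todo, F &&process)
    {
        /* New samples land after the history kept from the previous call. */
        std::copy_n(input, todo, mWork.begin()+sFilterDelay);
        process(mWork.data(), todo);
        /* Save the last sFilterDelay samples as the next call's history. */
        std::copy(mWork.begin()+todo, mWork.begin()+todo+sFilterDelay, mWork.begin());
    }
};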
 /* This is an alternative equation for decoding 2-channel UHJ. Not sure what
@@ -485,17 +320,17 @@ void UhjDecoder::decode2(const float *RESTRICT InSamples,

     /* S = Left + Right */
     for(size_t i{0};i < SamplesToDo;++i)
-        mS[sFilterSize+i] = InSamples[i*2 + 0] + InSamples[i*2 + 1];
+        mS[sFilterDelay+i] = InSamples[i*2 + 0] + InSamples[i*2 + 1];

     /* D = Left - Right */
     for(size_t i{0};i < SamplesToDo;++i)
-        mD[sFilterSize+i] = InSamples[i*2 + 0] - InSamples[i*2 + 1];
+        mD[sFilterDelay+i] = InSamples[i*2 + 0] - InSamples[i*2 + 1];

     /* Precompute j*D and store in xoutput. */
     auto tmpiter = std::copy(mDTHistory.cbegin(), mDTHistory.cend(), mTemp.begin());
-    std::copy_n(mD.cbegin(), SamplesToDo+sFilterSize, tmpiter);
+    std::copy_n(mD.cbegin(), SamplesToDo+sFilterDelay, tmpiter);
     std::copy_n(mTemp.cbegin()+SamplesToDo, mDTHistory.size(), mDTHistory.begin());
-    allpass_process({xoutput, SamplesToDo}, mTemp.data());
+    PShift.process({xoutput, SamplesToDo}, mTemp.data());

     for(size_t i{0};i < SamplesToDo;++i)
     {
@@ -507,9 +342,9 @@ void UhjDecoder::decode2(const float *RESTRICT InSamples,

     /* Precompute j*S and store in youtput. */
     tmpiter = std::copy(mSHistory.cbegin(), mSHistory.cend(), mTemp.begin());
-    std::copy_n(mS.cbegin(), SamplesToDo+sFilterSize, tmpiter);
+    std::copy_n(mS.cbegin(), SamplesToDo+sFilterDelay, tmpiter);
     std::copy_n(mTemp.cbegin()+SamplesToDo, mSHistory.size(), mSHistory.begin());
-    allpass_process({youtput, SamplesToDo}, mTemp.data());
+    PShift.process({youtput, SamplesToDo}, mTemp.data());

     for(size_t i{0};i < SamplesToDo;++i)
     {
@@ -517,8 +352,8 @@ void UhjDecoder::decode2(const float *RESTRICT InSamples,
         youtput[i] = 0.762956f*mD[i] + 0.384230f*youtput[i];
     }

-    std::copy(mS.begin()+SamplesToDo, mS.begin()+SamplesToDo+sFilterSize, mS.begin());
-    std::copy(mD.begin()+SamplesToDo, mD.begin()+SamplesToDo+sFilterSize, mD.begin());
+    std::copy(mS.begin()+SamplesToDo, mS.begin()+SamplesToDo+sFilterDelay, mS.begin());
+    std::copy(mD.begin()+SamplesToDo, mD.begin()+SamplesToDo+sFilterDelay, mD.begin());
 }
@@ -643,7 +478,7 @@ int main(int argc, char **argv)
      * additional 255 samples of silence need to be fed through the decoder
      * for it to finish.
      */
-    sf_count_t LeadOut{UhjDecoder::sFilterSize*2 - 1};
+    sf_count_t LeadOut{UhjDecoder::sFilterDelay*2 - 1};
     while(LeadOut > 0)
     {
         sf_count_t sgot{sf_readf_float(infile.get(), inmem.get(), BufferLineSize)};