#include "config.h"

#include "voice.h"

#include <algorithm>
#include <array>
#include <atomic>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <memory>
#include <new>
#include <stdlib.h>
#include <utility>
#include <vector>

#include "albyte.h"
#include "alnumeric.h"
#include "aloptional.h"
#include "alspan.h"
#include "alstring.h"
#include "ambidefs.h"
#include "async_event.h"
#include "buffer_storage.h"
#include "context.h"
#include "cpu_caps.h"
#include "devformat.h"
#include "device.h"
#include "filters/biquad.h"
#include "filters/nfc.h"
#include "filters/splitter.h"
#include "fmt_traits.h"
#include "logging.h"
#include "mixer.h"
#include "mixer/defs.h"
#include "mixer/hrtfdefs.h"
#include "opthelpers.h"
#include "resampler_limits.h"
#include "ringbuffer.h"
#include "vector.h"
#include "voice_change.h"

struct CTag;
#ifdef HAVE_SSE
struct SSETag;
#endif
#ifdef HAVE_NEON
struct NEONTag;
#endif


static_assert(!(sizeof(DeviceBase::MixerBufferLine)&15),
    "DeviceBase::MixerBufferLine must be a multiple of 16 bytes");
static_assert(!(MaxResamplerEdge&3), "MaxResamplerEdge is not a multiple of 4");

static_assert((BufferLineSize-1)/MaxPitch > 0, "MaxPitch is too large for BufferLineSize!");
static_assert((INT_MAX>>MixerFracBits)/MaxPitch > BufferLineSize,
    "MaxPitch and/or BufferLineSize are too large for MixerFracBits!");

Resampler ResamplerDefault{Resampler::Cubic};

namespace {

using uint = unsigned int;
using namespace std::chrono;

using HrtfMixerFunc = void(*)(const float *InSamples, float2 *AccumSamples, const uint IrSize,
    const MixHrtfFilter *hrtfparams, const size_t BufferSize);
using HrtfMixerBlendFunc = void(*)(const float *InSamples, float2 *AccumSamples,
    const uint IrSize, const HrtfFilter *oldparams, const MixHrtfFilter *newparams,
    const size_t BufferSize);

HrtfMixerFunc MixHrtfSamples{MixHrtf_<CTag>};
HrtfMixerBlendFunc MixHrtfBlendSamples{MixHrtfBlend_<CTag>};

inline MixerOutFunc SelectMixer()
{
#ifdef HAVE_NEON
    if((CPUCapFlags&CPU_CAP_NEON))
        return Mix_<NEONTag>;
#endif
#ifdef HAVE_SSE
    if((CPUCapFlags&CPU_CAP_SSE))
        return Mix_<SSETag>;
#endif
    return Mix_<CTag>;
}

inline MixerOneFunc SelectMixerOne()
{
#ifdef HAVE_NEON
    if((CPUCapFlags&CPU_CAP_NEON))
        return Mix_<NEONTag>;
#endif
#ifdef HAVE_SSE
    if((CPUCapFlags&CPU_CAP_SSE))
        return Mix_<SSETag>;
#endif
    return Mix_<CTag>;
}

inline HrtfMixerFunc SelectHrtfMixer()
{
#ifdef HAVE_NEON
    if((CPUCapFlags&CPU_CAP_NEON))
        return MixHrtf_<NEONTag>;
#endif
#ifdef HAVE_SSE
    if((CPUCapFlags&CPU_CAP_SSE))
        return MixHrtf_<SSETag>;
#endif
    return MixHrtf_<CTag>;
}

inline HrtfMixerBlendFunc SelectHrtfBlendMixer()
{
#ifdef HAVE_NEON
    if((CPUCapFlags&CPU_CAP_NEON))
        return MixHrtfBlend_<NEONTag>;
#endif
#ifdef HAVE_SSE
    if((CPUCapFlags&CPU_CAP_SSE))
        return MixHrtfBlend_<SSETag>;
#endif
    return MixHrtfBlend_<CTag>;
}

} // namespace


void Voice::InitMixer(al::optional<std::string> resampler)
{
    if(resampler)
    {
        struct ResamplerEntry {
            const char name[16];
            const Resampler resampler;
        };
        constexpr ResamplerEntry ResamplerList[]{
            { "none", Resampler::Point },
            { "point", Resampler::Point },
            { "linear", Resampler::Linear },
            { "cubic", Resampler::Cubic },
            { "bsinc12", Resampler::BSinc12 },
            { "fast_bsinc12", Resampler::FastBSinc12 },
            { "bsinc24", Resampler::BSinc24 },
            { "fast_bsinc24", Resampler::FastBSinc24 },
        };
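        /* Note (added; an assumption not stated in this file): the string is
         * typically supplied from the "resampler" config option (e.g.
         * "resampler = linear" in the config file or via ALSOFT_CONF), and
         * the entries above are the accepted values.
         */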

        const char *str{resampler->c_str()};
        if(al::strcasecmp(str, "bsinc") == 0)
        {
            WARN("Resampler option \"%s\" is deprecated, using bsinc12\n", str);
            str = "bsinc12";
        }
        else if(al::strcasecmp(str, "sinc4") == 0 || al::strcasecmp(str, "sinc8") == 0)
        {
            WARN("Resampler option \"%s\" is deprecated, using cubic\n", str);
            str = "cubic";
        }

        auto iter = std::find_if(std::begin(ResamplerList), std::end(ResamplerList),
            [str](const ResamplerEntry &entry) -> bool
            { return al::strcasecmp(str, entry.name) == 0; });
        if(iter == std::end(ResamplerList))
            ERR("Invalid resampler: %s\n", str);
        else
            ResamplerDefault = iter->resampler;
    }

    MixSamplesOut = SelectMixer();
    MixSamplesOne = SelectMixerOne();
    MixHrtfBlendSamples = SelectHrtfBlendMixer();
    MixHrtfSamples = SelectHrtfMixer();
}


namespace {

/* IMA ADPCM Stepsize table */
constexpr int IMAStep_size[89] = {
       7,    8,    9,   10,   11,   12,   13,   14,   16,   17,   19,
      21,   23,   25,   28,   31,   34,   37,   41,   45,   50,   55,
      60,   66,   73,   80,   88,   97,  107,  118,  130,  143,  157,
     173,  190,  209,  230,  253,  279,  307,  337,  371,  408,  449,
     494,  544,  598,  658,  724,  796,  876,  963, 1060, 1166, 1282,
    1411, 1552, 1707, 1878, 2066, 2272, 2499, 2749, 3024, 3327, 3660,
    4026, 4428, 4871, 5358, 5894, 6484, 7132, 7845, 8630, 9493,10442,
   11487,12635,13899,15289,16818,18500,20350,22358,24633,27086,29794,
   32767
};

/* IMA4 ADPCM Codeword decode table */
constexpr int IMA4Codeword[16] = {
    1, 3, 5, 7, 9, 11, 13, 15,
   -1,-3,-5,-7,-9,-11,-13,-15,
};

/* IMA4 ADPCM Step index adjust decode table */
constexpr int IMA4Index_adjust[16] = {
   -1,-1,-1,-1, 2, 4, 6, 8,
   -1,-1,-1,-1, 2, 4, 6, 8
};

/* MSADPCM Adaption table */
constexpr int MSADPCMAdaption[16] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    768, 614, 512, 409, 307, 230, 230, 230
};

/* MSADPCM Adaption Coefficient tables */
constexpr int MSADPCMAdaptionCoeff[7][2] = {
    { 256,    0 },
    { 512, -256 },
    {   0,    0 },
    { 192,   64 },
    { 240,    0 },
    { 460, -208 },
    { 392, -232 }
};


void SendSourceStoppedEvent(ContextBase *context, uint id)
{
    RingBuffer *ring{context->mAsyncEvents.get()};
    auto evt_vec = ring->getWriteVector();
    if(evt_vec.first.len < 1) return;

    AsyncEvent *evt{al::construct_at(reinterpret_cast<AsyncEvent*>(evt_vec.first.buf),
        AsyncEvent::SourceStateChange)};
    evt->u.srcstate.id = id;
    evt->u.srcstate.state = AsyncEvent::SrcState::Stop;

    ring->writeAdvance(1);
}


const float *DoFilters(BiquadFilter &lpfilter, BiquadFilter &hpfilter, float *dst,
    const al::span<const float> src, int type)
{
    switch(type)
    {
    case AF_None:
        lpfilter.clear();
        hpfilter.clear();
        break;

    case AF_LowPass:
        lpfilter.process(src, dst);
        hpfilter.clear();
        return dst;
    case AF_HighPass:
        lpfilter.clear();
        hpfilter.process(src, dst);
        return dst;

    case AF_BandPass:
        DualBiquad{lpfilter, hpfilter}.process(src, dst);
        return dst;
    }
    return src.data();
}


template<FmtType Type>
inline void LoadSamples(float *RESTRICT dstSamples, const al::byte *src, const size_t srcChan,
    const size_t srcOffset, const size_t srcStep, const size_t /*samplesPerBlock*/,
    const size_t samplesToLoad) noexcept
{
    constexpr size_t sampleSize{sizeof(typename al::FmtTypeTraits<Type>::Type)};
    auto s = src + (srcOffset*srcStep + srcChan)*sampleSize;

    al::LoadSampleArray<Type>(dstSamples, s, srcStep, samplesToLoad);
}

template<>
inline void LoadSamples<FmtIMA4>(float *RESTRICT dstSamples, const al::byte *src,
    const size_t srcChan, const size_t srcOffset, const size_t srcStep,
    const size_t samplesPerBlock, const size_t samplesToLoad) noexcept
{
    const size_t blockBytes{((samplesPerBlock-1)/2 + 4)*srcStep};
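    /* Illustrative example (added, not from the original source): a typical
     * IMA4 block holds 65 samples per channel, so for a stereo buffer
     * (srcStep == 2) each block takes ((65-1)/2 + 4)*2 = 72 bytes.
     */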

    /* Skip to the ADPCM block containing the srcOffset sample. */
    src += srcOffset/samplesPerBlock*blockBytes;
    /* Calculate how many samples need to be skipped in the block. */
    size_t skip{srcOffset % samplesPerBlock};

    /* NOTE: This could probably be optimized better. */
    size_t wrote{0};
    do {
        /* Each IMA4 block starts with a signed 16-bit sample, and a signed
         * 16-bit table index. The table index needs to be clamped.
         */
        int sample{src[srcChan*4] | (src[srcChan*4 + 1] << 8)};
        int index{src[srcChan*4 + 2] | (src[srcChan*4 + 3] << 8)};

        sample = (sample^0x8000) - 32768;
        index = clampi((index^0x8000) - 32768, 0, al::size(IMAStep_size)-1);

        if(skip == 0)
        {
            dstSamples[wrote++] = static_cast<float>(sample) / 32768.0f;
            if(wrote == samplesToLoad) return;
        }
        else
            --skip;

        auto decode_sample = [&sample,&index](const uint nibble)
        {
            sample += IMA4Codeword[nibble] * IMAStep_size[index] / 8;
            sample = clampi(sample, -32768, 32767);

            index += IMA4Index_adjust[nibble];
            index = clampi(index, 0, al::size(IMAStep_size)-1);

            return sample;
        };

        /* The rest of the block is arranged as a series of nibbles, contained
         * in 4 *bytes* per channel interleaved. So every 8 nibbles we need to
         * skip 4 bytes per channel to get the next nibbles for this channel.
         *
         * First, decode the samples that need to be skipped in the block
         * (always fewer than the block size). Even though their values are
         * ignored, they have to be decoded to keep the decoder state correct
         * for the remaining samples.
         */
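        /* Layout illustration (added, not from the original source): for a
         * stereo block, after the two 4-byte channel headers the data is
         * grouped as [4 bytes ch0][4 bytes ch1][4 bytes ch0]... Each byte
         * holds two nibbles, low nibble first, so one 4-byte group supplies 8
         * consecutive samples for its channel; the nibbleData pointer
         * computed below points at this channel's first 4-byte data group.
         */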
        const al::byte *nibbleData{src + (srcStep+srcChan)*4};
        size_t nibbleOffset{0};
        const size_t startOffset{skip + 1};
        for(;skip;--skip)
        {
            const size_t byteShift{(nibbleOffset&1) * 4};
            const size_t wordOffset{(nibbleOffset>>1) & ~size_t{3}};
            const size_t byteOffset{wordOffset*srcStep + ((nibbleOffset>>1)&3u)};
            ++nibbleOffset;

            std::ignore = decode_sample((nibbleData[byteOffset]>>byteShift) & 15u);
        }

        /* Second, decode the rest of the block and write to the output, until
         * the end of the block or the end of output.
         */
        const size_t todo{minz(samplesPerBlock-startOffset, samplesToLoad-wrote)};
        for(size_t i{0};i < todo;++i)
        {
            const size_t byteShift{(nibbleOffset&1) * 4};
            const size_t wordOffset{(nibbleOffset>>1) & ~size_t{3}};
            const size_t byteOffset{wordOffset*srcStep + ((nibbleOffset>>1)&3u)};
            ++nibbleOffset;

            const int result{decode_sample((nibbleData[byteOffset]>>byteShift) & 15u)};
            dstSamples[wrote++] = static_cast<float>(result) / 32768.0f;
        }
        if(wrote == samplesToLoad)
            return;

        src += blockBytes;
    } while(true);
}

template<>
inline void LoadSamples<FmtMSADPCM>(float *RESTRICT dstSamples, const al::byte *src,
    const size_t srcChan, const size_t srcOffset, const size_t srcStep,
    const size_t samplesPerBlock, const size_t samplesToLoad) noexcept
{
    const size_t blockBytes{((samplesPerBlock-2)/2 + 7)*srcStep};
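    /* Illustrative example (added, not from the original source): a common
     * mono MSADPCM block stores 500 samples, so it occupies
     * ((500-2)/2 + 7)*1 = 256 bytes: a 7-byte header (predictor, delta, two
     * history samples) followed by 249 bytes of nibble data.
     */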

    src += srcOffset/samplesPerBlock*blockBytes;
    size_t skip{srcOffset % samplesPerBlock};

    size_t wrote{0};
    do {
        /* Each MS ADPCM block starts with an 8-bit block predictor, used to
         * dictate how the two sample history values are mixed with the decoded
         * sample, and an initial signed 16-bit delta value which scales the
         * nibble sample value. This is followed by the two initial 16-bit
         * sample history values.
         */
        const al::byte *input{src};
        const uint8_t blockpred{std::min(input[srcChan], uint8_t{6})};
        input += srcStep;
        int delta{input[2*srcChan + 0] | (input[2*srcChan + 1] << 8)};
        input += srcStep*2;

        int sampleHistory[2]{};
        sampleHistory[0] = input[2*srcChan + 0] | (input[2*srcChan + 1]<<8);
        input += srcStep*2;
        sampleHistory[1] = input[2*srcChan + 0] | (input[2*srcChan + 1]<<8);
        input += srcStep*2;

        const auto coeffs = al::as_span(MSADPCMAdaptionCoeff[blockpred]);
        delta = (delta^0x8000) - 32768;
        sampleHistory[0] = (sampleHistory[0]^0x8000) - 32768;
        sampleHistory[1] = (sampleHistory[1]^0x8000) - 32768;

        /* The second history sample is "older", so it's the first to be
         * written out.
         */
        if(skip == 0)
        {
            dstSamples[wrote++] = static_cast<float>(sampleHistory[1]) / 32768.0f;
            if(wrote == samplesToLoad) return;
            dstSamples[wrote++] = static_cast<float>(sampleHistory[0]) / 32768.0f;
            if(wrote == samplesToLoad) return;
        }
        else if(skip == 1)
        {
            --skip;
            dstSamples[wrote++] = static_cast<float>(sampleHistory[0]) / 32768.0f;
            if(wrote == samplesToLoad) return;
        }
        else
            skip -= 2;

        auto decode_sample = [&sampleHistory,&delta,coeffs](const int nibble)
        {
            int pred{(sampleHistory[0]*coeffs[0] + sampleHistory[1]*coeffs[1]) / 256};
            pred += ((nibble^0x08) - 0x08) * delta;
            pred = clampi(pred, -32768, 32767);

            sampleHistory[1] = sampleHistory[0];
            sampleHistory[0] = pred;

            delta = (MSADPCMAdaption[nibble] * delta) / 256;
            delta = maxi(16, delta);

            return pred;
        };
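        /* Worked example (added, not from the original source): with
         * coefficients {256, 0}, sampleHistory {1000, 900}, delta 16 and
         * nibble 0x9 (signed value -7), the prediction is
         * (1000*256 + 900*0)/256 + (-7*16) = 888, and delta becomes
         * max(16, 614*16/256) = 38 for the next nibble.
         */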

        /* The rest of the block is a series of nibbles, interleaved per-
         * channel. First, skip samples.
         */
        const size_t startOffset{skip + 2};
        size_t nibbleOffset{srcChan};
        for(;skip;--skip)
        {
            const size_t byteOffset{nibbleOffset>>1};
            const size_t byteShift{((nibbleOffset&1)^1) * 4};
            nibbleOffset += srcStep;

            std::ignore = decode_sample((input[byteOffset]>>byteShift) & 15);
        }

        /* Now decode the rest of the block, until the end of the block or the
         * dst buffer is filled.
         */
        const size_t todo{minz(samplesPerBlock-startOffset, samplesToLoad-wrote)};
        for(size_t j{0};j < todo;++j)
        {
            const size_t byteOffset{nibbleOffset>>1};
            const size_t byteShift{((nibbleOffset&1)^1) * 4};
            nibbleOffset += srcStep;

            const int sample{decode_sample((input[byteOffset]>>byteShift) & 15)};
            dstSamples[wrote++] = static_cast<float>(sample) / 32768.0f;
        }
        if(wrote == samplesToLoad)
            return;

        src += blockBytes;
    } while(true);
}

void LoadSamples(float *dstSamples, const al::byte *src, const size_t srcChan,
    const size_t srcOffset, const FmtType srcType, const size_t srcStep,
    const size_t samplesPerBlock, const size_t samplesToLoad) noexcept
{
#define HANDLE_FMT(T) case T: \
    LoadSamples<T>(dstSamples, src, srcChan, srcOffset, srcStep, \
        samplesPerBlock, samplesToLoad); \
    break

    switch(srcType)
    {
    HANDLE_FMT(FmtUByte);
    HANDLE_FMT(FmtShort);
    HANDLE_FMT(FmtFloat);
    HANDLE_FMT(FmtDouble);
    HANDLE_FMT(FmtMulaw);
    HANDLE_FMT(FmtAlaw);
    HANDLE_FMT(FmtIMA4);
    HANDLE_FMT(FmtMSADPCM);
    }
#undef HANDLE_FMT
}


void LoadBufferStatic(VoiceBufferItem *buffer, VoiceBufferItem *bufferLoopItem,
    const size_t dataPosInt, const FmtType sampleType, const size_t srcChannel,
    const size_t srcStep, size_t samplesLoaded, const size_t samplesToLoad,
    float *voiceSamples)
{
    if(!bufferLoopItem)
    {
        /* Load what's left to play from the buffer */
        if(buffer->mSampleLen > dataPosInt) LIKELY
        {
            const size_t buffer_remaining{buffer->mSampleLen - dataPosInt};
            const size_t remaining{minz(samplesToLoad-samplesLoaded, buffer_remaining)};
            LoadSamples(voiceSamples+samplesLoaded, buffer->mSamples, srcChannel, dataPosInt,
                sampleType, srcStep, buffer->mBlockAlign, remaining);
            samplesLoaded += remaining;
        }

        if(const size_t toFill{samplesToLoad - samplesLoaded})
        {
            auto srcsamples = voiceSamples + samplesLoaded;
            std::fill_n(srcsamples, toFill, *(srcsamples-1));
        }
    }
    else
    {
        const size_t loopStart{buffer->mLoopStart};
        const size_t loopEnd{buffer->mLoopEnd};
        ASSUME(loopEnd > loopStart);

        const size_t intPos{(dataPosInt < loopEnd) ? dataPosInt
            : (((dataPosInt-loopStart)%(loopEnd-loopStart)) + loopStart)};
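        /* Worked example (added, not from the original source): with
         * loopStart 100, loopEnd 350 and dataPosInt 500, the position wraps
         * to ((500-100) % 250) + 100 = 250, i.e. back inside the loop.
         */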

        /* Load what's left of this loop iteration */
        const size_t remaining{minz(samplesToLoad-samplesLoaded, loopEnd-dataPosInt)};
        LoadSamples(voiceSamples+samplesLoaded, buffer->mSamples, srcChannel, intPos, sampleType,
            srcStep, buffer->mBlockAlign, remaining);
        samplesLoaded += remaining;

        /* Load repeats of the loop to fill the buffer. */
        const size_t loopSize{loopEnd - loopStart};
        while(const size_t toFill{minz(samplesToLoad - samplesLoaded, loopSize)})
        {
            LoadSamples(voiceSamples+samplesLoaded, buffer->mSamples, srcChannel, loopStart,
                sampleType, srcStep, buffer->mBlockAlign, toFill);
            samplesLoaded += toFill;
        }
    }
}

void LoadBufferCallback(VoiceBufferItem *buffer, const size_t dataPosInt,
    const size_t numCallbackSamples, const FmtType sampleType, const size_t srcChannel,
    const size_t srcStep, size_t samplesLoaded, const size_t samplesToLoad, float *voiceSamples)
{
    /* Load what's left to play from the buffer */
    if(numCallbackSamples > dataPosInt) LIKELY
    {
        const size_t remaining{minz(samplesToLoad-samplesLoaded, numCallbackSamples-dataPosInt)};
        LoadSamples(voiceSamples+samplesLoaded, buffer->mSamples, srcChannel, dataPosInt,
            sampleType, srcStep, buffer->mBlockAlign, remaining);
        samplesLoaded += remaining;
    }

    if(const size_t toFill{samplesToLoad - samplesLoaded})
    {
        auto srcsamples = voiceSamples + samplesLoaded;
        std::fill_n(srcsamples, toFill, *(srcsamples-1));
    }
}

void LoadBufferQueue(VoiceBufferItem *buffer, VoiceBufferItem *bufferLoopItem,
    size_t dataPosInt, const FmtType sampleType, const size_t srcChannel,
    const size_t srcStep, size_t samplesLoaded, const size_t samplesToLoad,
    float *voiceSamples)
{
    /* Crawl the buffer queue to fill in the temp buffer */
    while(buffer && samplesLoaded != samplesToLoad)
    {
        if(dataPosInt >= buffer->mSampleLen)
        {
            dataPosInt -= buffer->mSampleLen;
            buffer = buffer->mNext.load(std::memory_order_acquire);
            if(!buffer) buffer = bufferLoopItem;
            continue;
        }

        const size_t remaining{minz(samplesToLoad-samplesLoaded, buffer->mSampleLen-dataPosInt)};
        LoadSamples(voiceSamples+samplesLoaded, buffer->mSamples, srcChannel, dataPosInt,
            sampleType, srcStep, buffer->mBlockAlign, remaining);

        samplesLoaded += remaining;
        if(samplesLoaded == samplesToLoad)
            break;

        dataPosInt = 0;
        buffer = buffer->mNext.load(std::memory_order_acquire);
        if(!buffer) buffer = bufferLoopItem;
    }
    if(const size_t toFill{samplesToLoad - samplesLoaded})
    {
        auto srcsamples = voiceSamples + samplesLoaded;
        std::fill_n(srcsamples, toFill, *(srcsamples-1));
    }
}


void DoHrtfMix(const float *samples, const uint DstBufferSize, DirectParams &parms,
    const float TargetGain, const uint Counter, uint OutPos, const bool IsPlaying,
    DeviceBase *Device)
{
    const uint IrSize{Device->mIrSize};
    auto &HrtfSamples = Device->HrtfSourceData;
    auto &AccumSamples = Device->HrtfAccumData;

    /* Copy the HRTF history and new input samples into a temp buffer. */
    auto src_iter = std::copy(parms.Hrtf.History.begin(), parms.Hrtf.History.end(),
        std::begin(HrtfSamples));
    std::copy_n(samples, DstBufferSize, src_iter);
    /* Copy the last used samples back into the history buffer for later. */
    if(IsPlaying) LIKELY
        std::copy_n(std::begin(HrtfSamples) + DstBufferSize, parms.Hrtf.History.size(),
            parms.Hrtf.History.begin());

    /* If fading and this is the first mixing pass, fade between the IRs. */
    uint fademix{0u};
    if(Counter && OutPos == 0)
    {
        fademix = minu(DstBufferSize, Counter);

        float gain{TargetGain};

        /* The new coefficients need to fade in completely since they're
         * replacing the old ones. To keep the gain fading consistent,
         * interpolate between the old and new target gains given how much of
         * the fade time this mix handles.
         */
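        /* Worked example (added, not from the original source): with a fade
         * Counter of 64 and a fademix of 32 samples this pass, a = 0.5, so an
         * old gain of 0.2 and a target gain of 0.8 blend to an endpoint gain
         * of 0.5 for this pass.
         */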
        if(Counter > fademix)
        {
            const float a{static_cast<float>(fademix) / static_cast<float>(Counter)};
            gain = lerpf(parms.Hrtf.Old.Gain, TargetGain, a);
        }

        MixHrtfFilter hrtfparams{
            parms.Hrtf.Target.Coeffs,
            parms.Hrtf.Target.Delay,
            0.0f, gain / static_cast<float>(fademix)};
        MixHrtfBlendSamples(HrtfSamples, AccumSamples+OutPos, IrSize, &parms.Hrtf.Old, &hrtfparams,
            fademix);

        /* Update the old parameters with the result. */
        parms.Hrtf.Old = parms.Hrtf.Target;
        parms.Hrtf.Old.Gain = gain;
        OutPos += fademix;
    }

    if(fademix < DstBufferSize)
    {
        const uint todo{DstBufferSize - fademix};
        float gain{TargetGain};

        /* Interpolate the target gain if the gain fading lasts longer than
         * this mix.
         */
        if(Counter > DstBufferSize)
        {
            const float a{static_cast<float>(todo) / static_cast<float>(Counter-fademix)};
            gain = lerpf(parms.Hrtf.Old.Gain, TargetGain, a);
        }

        MixHrtfFilter hrtfparams{
            parms.Hrtf.Target.Coeffs,
            parms.Hrtf.Target.Delay,
            parms.Hrtf.Old.Gain,
            (gain - parms.Hrtf.Old.Gain) / static_cast<float>(todo)};
        MixHrtfSamples(HrtfSamples+fademix, AccumSamples+OutPos, IrSize, &hrtfparams, todo);

        /* Store the now-current gain for next time. */
        parms.Hrtf.Old.Gain = gain;
    }
}

void DoNfcMix(const al::span<const float> samples, FloatBufferLine *OutBuffer, DirectParams &parms,
    const float *TargetGains, const uint Counter, const uint OutPos, DeviceBase *Device)
{
    using FilterProc = void (NfcFilter::*)(const al::span<const float>, float*);
    static constexpr FilterProc NfcProcess[MaxAmbiOrder+1]{
        nullptr, &NfcFilter::process1, &NfcFilter::process2, &NfcFilter::process3};

    float *CurrentGains{parms.Gains.Current.data()};
    MixSamples(samples, {OutBuffer, 1u}, CurrentGains, TargetGains, Counter, OutPos);
    ++OutBuffer;
    ++CurrentGains;
    ++TargetGains;

    const al::span<float> nfcsamples{Device->NfcSampleData, samples.size()};
    size_t order{1};
    while(const size_t chancount{Device->NumChannelsPerOrder[order]})
    {
        (parms.NFCtrlFilter.*NfcProcess[order])(samples, nfcsamples.data());
        MixSamples(nfcsamples, {OutBuffer, chancount}, CurrentGains, TargetGains, Counter, OutPos);
        OutBuffer += chancount;
        CurrentGains += chancount;
        TargetGains += chancount;
        if(++order == MaxAmbiOrder+1)
            break;
    }
}

} // namespace

void Voice::mix(const State vstate, ContextBase *Context, const nanoseconds deviceTime,
    const uint SamplesToDo)
{
    static constexpr std::array<float,MAX_OUTPUT_CHANNELS> SilentTarget{};

    ASSUME(SamplesToDo > 0);

    DeviceBase *Device{Context->mDevice};
    const uint NumSends{Device->NumAuxSends};

    /* Get voice info */
    int DataPosInt{mPosition.load(std::memory_order_relaxed)};
    uint DataPosFrac{mPositionFrac.load(std::memory_order_relaxed)};
    VoiceBufferItem *BufferListItem{mCurrentBuffer.load(std::memory_order_relaxed)};
    VoiceBufferItem *BufferLoopItem{mLoopBuffer.load(std::memory_order_relaxed)};
    const uint increment{mStep};
    if(increment < 1) UNLIKELY
    {
        /* If the voice is supposed to be stopping but can't be mixed, just
         * stop it before bailing.
         */
        if(vstate == Stopping)
            mPlayState.store(Stopped, std::memory_order_release);
        return;
    }

    /* If the static voice's current position is beyond the buffer loop end
     * position, disable looping.
     */
    if(mFlags.test(VoiceIsStatic) && BufferLoopItem)
    {
        if(DataPosInt >= 0 && static_cast<uint>(DataPosInt) >= BufferListItem->mLoopEnd)
            BufferLoopItem = nullptr;
    }

    uint OutPos{0u};

    /* Check if we're doing a delayed start, and we start in this update. */
    if(mStartTime > deviceTime) UNLIKELY
    {
        /* If the voice is supposed to be stopping but hasn't actually started
         * yet, make sure it's stopped.
         */
        if(vstate == Stopping)
        {
            mPlayState.store(Stopped, std::memory_order_release);
            return;
        }

        /* If the start time is too far ahead, don't bother. */
        auto diff = mStartTime - deviceTime;
        if(diff >= seconds{1})
            return;

        /* Get the number of samples ahead of the current time that output
         * should start at. Skip this update if it's beyond the output sample
         * count.
         *
         * Round the start position to a multiple of 4, which some mixers want.
         * This makes the start time accurate to 4 samples. This could be made
         * sample-accurate by forcing non-SIMD functions on the first run.
         */
        seconds::rep sampleOffset{duration_cast<seconds>(diff * Device->Frequency).count()};
        sampleOffset = (sampleOffset+2) & ~seconds::rep{3};
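        /* Worked example (added, not from the original source): a start time
         * 1ms ahead on a 48kHz device gives 48 samples, already a multiple of
         * 4; values of 47 or 49 would likewise round to 48.
         */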
        if(sampleOffset >= SamplesToDo)
            return;

        OutPos = static_cast<uint>(sampleOffset);
    }

    /* Calculate the number of samples to mix, and the number of (resampled)
     * samples that need to be loaded (mixing samples and decoder padding).
     */
    const uint samplesToMix{SamplesToDo - OutPos};
    const uint samplesToLoad{samplesToMix + mDecoderPadding};

    /* Get a span of pointers to hold the floating point, deinterlaced,
     * resampled buffer data to be mixed.
     */
    std::array<float*,DeviceBase::MixerChannelsMax> SamplePointers;
    const al::span<float*> MixingSamples{SamplePointers.data(), mChans.size()};
    auto get_bufferline = [](DeviceBase::MixerBufferLine &bufline) noexcept -> float*
    { return bufline.data(); };
    std::transform(Device->mSampleData.end() - mChans.size(), Device->mSampleData.end(),
        MixingSamples.begin(), get_bufferline);

    /* If there's a matching sample step and no phase offset, use a simple copy
     * for resampling.
     */
    const ResamplerFunc Resample{(increment == MixerFracOne && DataPosFrac == 0)
        ? ResamplerFunc{[](const InterpState*, const float *RESTRICT src, uint, const uint,
            const al::span<float> dst) { std::copy_n(src, dst.size(), dst.begin()); }}
        : mResampler};

    /* UHJ2 and SuperStereo only have 2 buffer channels, but 3 mixing channels
     * (3rd channel is generated from decoding).
     */
    const size_t realChannels{(mFmtChannels == FmtUHJ2 || mFmtChannels == FmtSuperStereo) ? 2u
        : MixingSamples.size()};
    for(size_t chan{0};chan < realChannels;++chan)
    {
        using ResBufType = decltype(DeviceBase::mResampleData);
        static constexpr uint srcSizeMax{static_cast<uint>(ResBufType{}.size()-MaxResamplerEdge)};

        const auto prevSamples = al::as_span(mPrevSamples[chan]);
        const auto resampleBuffer = std::copy(prevSamples.cbegin(), prevSamples.cend(),
            Device->mResampleData.begin()) - MaxResamplerEdge;
        int intPos{DataPosInt};
        uint fracPos{DataPosFrac};

        /* Load samples for this channel from the available buffer(s), with
         * resampling.
         */
        for(uint samplesLoaded{0};samplesLoaded < samplesToLoad;)
        {
            /* Calculate the number of dst samples that can be loaded this
             * iteration, given the available resampler buffer size, and the
             * number of src samples that are needed to load it.
             */
            auto calc_buffer_sizes = [fracPos,increment](uint dstBufferSize)
            {
                /* If ext=true, calculate the last written dst pos from the dst
                 * count, convert to the last read src pos, then add one to get
                 * the src count.
                 *
                 * If ext=false, convert the dst count to src count directly.
                 *
                 * Without this, the src count could be short by one when
                 * increment < 1.0, or not have a full src at the end when
                 * increment > 1.0.
                 */
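                /* Worked example (added, not from the original source): with
                 * increment at half of MixerFracOne (pitch 0.5), fracPos 0 and
                 * dstBufferSize 9, ext is true, so the last dst sample reads
                 * src position (9-1)*0.5 = 4; the src count is then 4 + 1 = 5
                 * plus MaxResamplerEdge samples of padding.
                 */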
                const bool ext{increment <= MixerFracOne};
                uint64_t dataSize64{dstBufferSize - ext};
                dataSize64 = (dataSize64*increment + fracPos) >> MixerFracBits;
                /* Also include resampler padding. */
                dataSize64 += ext + MaxResamplerEdge;

                if(dataSize64 <= srcSizeMax)
                    return std::make_pair(dstBufferSize, static_cast<uint>(dataSize64));

                /* If the source size got saturated, we can't fill the desired
                 * dst size. Figure out how many dst samples we can fill.
                 */
                dataSize64 = srcSizeMax - MaxResamplerEdge;
                dataSize64 = ((dataSize64<<MixerFracBits) - fracPos) / increment;
                if(dataSize64 < dstBufferSize)
                {
                    /* Some resamplers require the destination being 16-byte
                     * aligned, so limit to a multiple of 4 samples to maintain
                     * alignment if we need to do another iteration after this.
                     */
                    dstBufferSize = static_cast<uint>(dataSize64) & ~3u;
                }
                return std::make_pair(dstBufferSize, srcSizeMax);
            };
            const auto bufferSizes = calc_buffer_sizes(samplesToLoad - samplesLoaded);
            const auto dstBufferSize = bufferSizes.first;
            const auto srcBufferSize = bufferSizes.second;

            /* Load the necessary samples from the given buffer(s). */
            if(!BufferListItem)
            {
                const uint avail{minu(srcBufferSize, MaxResamplerEdge)};
                const uint tofill{maxu(srcBufferSize, MaxResamplerEdge)};

                /* When loading from a voice that ended prematurely, only take
                 * the samples that get closest to 0 amplitude. This helps
                 * certain sounds fade out better.
                 */
                auto abs_lt = [](const float lhs, const float rhs) noexcept -> bool
                { return std::abs(lhs) < std::abs(rhs); };
                auto srciter = std::min_element(resampleBuffer, resampleBuffer+avail, abs_lt);

                std::fill(srciter+1, resampleBuffer+tofill, *srciter);
            }
            else
            {
                size_t srcSampleDelay{0};
                if(intPos < 0) UNLIKELY
                {
                    /* If the current position is negative, there's that many
                     * silent samples to load before using the buffer.
                     */
                    srcSampleDelay = static_cast<uint>(-intPos);
                    if(srcSampleDelay >= srcBufferSize)
                    {
                        /* If the number of silent source samples exceeds the
                         * number to load, the output will be silent.
                         */
                        std::fill_n(MixingSamples[chan]+samplesLoaded, dstBufferSize, 0.0f);
                        std::fill_n(resampleBuffer, srcBufferSize, 0.0f);
                        goto skip_resample;
                    }

                    std::fill_n(resampleBuffer, srcSampleDelay, 0.0f);
                }
                const uint uintPos{static_cast<uint>(maxi(intPos, 0))};

                if(mFlags.test(VoiceIsStatic))
                    LoadBufferStatic(BufferListItem, BufferLoopItem, uintPos, mFmtType, chan,
                        mFrameStep, srcSampleDelay, srcBufferSize, al::to_address(resampleBuffer));
                else if(mFlags.test(VoiceIsCallback))
                {
                    const uint callbackBase{mCallbackBlockBase * mSamplesPerBlock};
                    const size_t bufferOffset{uintPos - callbackBase};
                    const size_t needSamples{bufferOffset + srcBufferSize - srcSampleDelay};
                    const size_t needBlocks{(needSamples + mSamplesPerBlock-1) / mSamplesPerBlock};
                    if(!mFlags.test(VoiceCallbackStopped) && needBlocks > mNumCallbackBlocks)
                    {
                        const size_t byteOffset{mNumCallbackBlocks*mBytesPerBlock};
                        const size_t needBytes{(needBlocks-mNumCallbackBlocks)*mBytesPerBlock};

                        const int gotBytes{BufferListItem->mCallback(BufferListItem->mUserData,
                            &BufferListItem->mSamples[byteOffset], static_cast<int>(needBytes))};
                        if(gotBytes < 0)
                            mFlags.set(VoiceCallbackStopped);
                        else if(static_cast<uint>(gotBytes) < needBytes)
                        {
                            mFlags.set(VoiceCallbackStopped);
                            mNumCallbackBlocks += static_cast<uint>(gotBytes) / mBytesPerBlock;
                        }
                        else
                            mNumCallbackBlocks = static_cast<uint>(needBlocks);
                    }
                    const size_t numSamples{uint{mNumCallbackBlocks} * mSamplesPerBlock};
                    LoadBufferCallback(BufferListItem, bufferOffset, numSamples, mFmtType, chan,
                        mFrameStep, srcSampleDelay, srcBufferSize, al::to_address(resampleBuffer));
                }
                else
                    LoadBufferQueue(BufferListItem, BufferLoopItem, uintPos, mFmtType, chan,
                        mFrameStep, srcSampleDelay, srcBufferSize, al::to_address(resampleBuffer));
            }

            Resample(&mResampleState, al::to_address(resampleBuffer), fracPos, increment,
                {MixingSamples[chan]+samplesLoaded, dstBufferSize});

            /* Store the last source samples used for next time. */
            if(vstate == Playing) LIKELY
            {
                /* Only store samples for the end of the mix, excluding what
                 * gets loaded for decoder padding.
                 */
                const uint loadEnd{samplesLoaded + dstBufferSize};
                if(samplesToMix > samplesLoaded && samplesToMix <= loadEnd) LIKELY
                {
                    const size_t dstOffset{samplesToMix - samplesLoaded};
                    const size_t srcOffset{(dstOffset*increment + fracPos) >> MixerFracBits};
                    std::copy_n(resampleBuffer-MaxResamplerEdge+srcOffset, prevSamples.size(),
                        prevSamples.begin());
                }
            }

        skip_resample:
            samplesLoaded += dstBufferSize;
            if(samplesLoaded < samplesToLoad)
            {
                fracPos += dstBufferSize*increment;
                const uint srcOffset{fracPos >> MixerFracBits};
                fracPos &= MixerFracMask;
                intPos += srcOffset;

                /* If more samples need to be loaded, copy the back of the
                 * resampleBuffer to the front to reuse it. prevSamples isn't
                 * reliable since it's only updated for the end of the mix.
                 */
                std::copy(resampleBuffer-MaxResamplerEdge+srcOffset,
                    resampleBuffer+MaxResamplerEdge+srcOffset, resampleBuffer-MaxResamplerEdge);
            }
        }
    }
    for(auto &samples : MixingSamples.subspan(realChannels))
        std::fill_n(samples, samplesToLoad, 0.0f);

    if(mDecoder)
        mDecoder->decode(MixingSamples, samplesToMix, (vstate==Playing));

    if(mFlags.test(VoiceIsAmbisonic))
    {
        auto voiceSamples = MixingSamples.begin();
        for(auto &chandata : mChans)
        {
            chandata.mAmbiSplitter.processScale({*voiceSamples, samplesToMix},
                chandata.mAmbiHFScale, chandata.mAmbiLFScale);
            ++voiceSamples;
        }
    }

    const uint Counter{mFlags.test(VoiceIsFading) ? minu(samplesToMix, 64u) : 0u};
    if(!Counter)
    {
        /* No fading, just overwrite the old/current params. */
        for(auto &chandata : mChans)
        {
            {
                DirectParams &parms = chandata.mDryParams;
                if(!mFlags.test(VoiceHasHrtf))
                    parms.Gains.Current = parms.Gains.Target;
                else
                    parms.Hrtf.Old = parms.Hrtf.Target;
            }
            for(uint send{0};send < NumSends;++send)
            {
                if(mSend[send].Buffer.empty())
                    continue;

                SendParams &parms = chandata.mWetParams[send];
                parms.Gains.Current = parms.Gains.Target;
            }
        }
    }

    auto voiceSamples = MixingSamples.begin();
    for(auto &chandata : mChans)
    {
        /* Now filter and mix to the appropriate outputs. */
        const al::span<float,BufferLineSize> FilterBuf{Device->FilteredData};
        {
            DirectParams &parms = chandata.mDryParams;
            const float *samples{DoFilters(parms.LowPass, parms.HighPass, FilterBuf.data(),
                {*voiceSamples, samplesToMix}, mDirect.FilterType)};

            if(mFlags.test(VoiceHasHrtf))
            {
                const float TargetGain{parms.Hrtf.Target.Gain * (vstate == Playing)};
                DoHrtfMix(samples, samplesToMix, parms, TargetGain, Counter, OutPos,
                    (vstate == Playing), Device);
            }
            else
            {
                const float *TargetGains{(vstate == Playing) ? parms.Gains.Target.data()
                    : SilentTarget.data()};
                if(mFlags.test(VoiceHasNfc))
                    DoNfcMix({samples, samplesToMix}, mDirect.Buffer.data(), parms,
                        TargetGains, Counter, OutPos, Device);
                else
                    MixSamples({samples, samplesToMix}, mDirect.Buffer,
                        parms.Gains.Current.data(), TargetGains, Counter, OutPos);
            }
        }

        for(uint send{0};send < NumSends;++send)
        {
            if(mSend[send].Buffer.empty())
                continue;

            SendParams &parms = chandata.mWetParams[send];
            const float *samples{DoFilters(parms.LowPass, parms.HighPass, FilterBuf.data(),
                {*voiceSamples, samplesToMix}, mSend[send].FilterType)};

            const float *TargetGains{(vstate == Playing) ? parms.Gains.Target.data()
                : SilentTarget.data()};
            MixSamples({samples, samplesToMix}, mSend[send].Buffer,
                parms.Gains.Current.data(), TargetGains, Counter, OutPos);
        }

        ++voiceSamples;
    }

    mFlags.set(VoiceIsFading);

    /* Don't update positions and buffers if we were stopping. */
    if(vstate == Stopping) UNLIKELY
    {
        mPlayState.store(Stopped, std::memory_order_release);
        return;
    }

    /* Update voice positions and buffers as needed. */
    DataPosFrac += increment*samplesToMix;
    const uint SrcSamplesDone{DataPosFrac>>MixerFracBits};
    DataPosInt += SrcSamplesDone;
    DataPosFrac &= MixerFracMask;
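    /* Worked example (added, not from the original source): with increment at
     * 1.5x MixerFracOne (pitch 1.5), samplesToMix of 100 and a zero starting
     * fraction, the fractional position advances by 150*MixerFracOne, so
     * SrcSamplesDone is 150 and the remaining fraction is 0.
     */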

    uint buffers_done{0u};
    if(BufferListItem && DataPosInt >= 0) LIKELY
    {
        if(mFlags.test(VoiceIsStatic))
        {
            if(BufferLoopItem)
            {
                /* Handle looping static source */
                const uint LoopStart{BufferListItem->mLoopStart};
                const uint LoopEnd{BufferListItem->mLoopEnd};
                uint DataPosUInt{static_cast<uint>(DataPosInt)};
                if(DataPosUInt >= LoopEnd)
                {
                    assert(LoopEnd > LoopStart);
                    DataPosUInt = ((DataPosUInt-LoopStart)%(LoopEnd-LoopStart)) + LoopStart;
                    DataPosInt = static_cast<int>(DataPosUInt);
                }
            }
            else
            {
                /* Handle non-looping static source */
                if(static_cast<uint>(DataPosInt) >= BufferListItem->mSampleLen)
                    BufferListItem = nullptr;
            }
        }
        else if(mFlags.test(VoiceIsCallback))
        {
            /* Handle callback buffer source */
            const uint currentBlock{static_cast<uint>(DataPosInt) / mSamplesPerBlock};
            const uint blocksDone{currentBlock - mCallbackBlockBase};
            if(blocksDone < mNumCallbackBlocks)
            {
                const size_t byteOffset{blocksDone*mBytesPerBlock};
                const size_t byteEnd{mNumCallbackBlocks*mBytesPerBlock};
                al::byte *data{BufferListItem->mSamples};
                std::copy(data+byteOffset, data+byteEnd, data);
                mNumCallbackBlocks -= blocksDone;
                mCallbackBlockBase += blocksDone;
            }
            else
            {
                BufferListItem = nullptr;
                mNumCallbackBlocks = 0;
                mCallbackBlockBase += blocksDone;
            }
        }
        else
        {
            /* Handle streaming source */
            do {
                if(BufferListItem->mSampleLen > static_cast<uint>(DataPosInt))
                    break;

                DataPosInt -= BufferListItem->mSampleLen;

                ++buffers_done;
                BufferListItem = BufferListItem->mNext.load(std::memory_order_relaxed);
                if(!BufferListItem) BufferListItem = BufferLoopItem;
            } while(BufferListItem);
        }
    }

    /* Capture the source ID in case it gets reset for stopping. */
    const uint SourceID{mSourceID.load(std::memory_order_relaxed)};

    /* Update voice info */
    mPosition.store(DataPosInt, std::memory_order_relaxed);
    mPositionFrac.store(DataPosFrac, std::memory_order_relaxed);
    mCurrentBuffer.store(BufferListItem, std::memory_order_relaxed);
    if(!BufferListItem)
    {
        mLoopBuffer.store(nullptr, std::memory_order_relaxed);
        mSourceID.store(0u, std::memory_order_relaxed);
    }
    std::atomic_thread_fence(std::memory_order_release);

    /* Send any events now, after the position/buffer info was updated. */
    const auto enabledevt = Context->mEnabledEvts.load(std::memory_order_acquire);
    if(buffers_done > 0 && enabledevt.test(AsyncEvent::BufferCompleted))
    {
        RingBuffer *ring{Context->mAsyncEvents.get()};
        auto evt_vec = ring->getWriteVector();
        if(evt_vec.first.len > 0)
        {
            AsyncEvent *evt{al::construct_at(reinterpret_cast<AsyncEvent*>(evt_vec.first.buf),
                AsyncEvent::BufferCompleted)};
            evt->u.bufcomp.id = SourceID;
            evt->u.bufcomp.count = buffers_done;
            ring->writeAdvance(1);
        }
    }

    if(!BufferListItem)
    {
        /* If the voice just ended, set it to Stopping so the next render
         * ensures any residual noise fades to 0 amplitude.
         */
        mPlayState.store(Stopping, std::memory_order_release);
        if(enabledevt.test(AsyncEvent::SourceStateChange))
            SendSourceStoppedEvent(Context, SourceID);
    }
}

void Voice::prepare(DeviceBase *device)
{
    /* Even if storing really high order ambisonics, we only mix channels for
     * orders up to the device order. The rest are simply dropped.
     */
    uint num_channels{(mFmtChannels == FmtUHJ2 || mFmtChannels == FmtSuperStereo) ? 3 :
        ChannelsFromFmt(mFmtChannels, minu(mAmbiOrder, device->mAmbiOrder))};
    if(num_channels > device->mSampleData.size()) UNLIKELY
    {
        ERR("Unexpected channel count: %u (limit: %zu, %d:%d)\n", num_channels,
            device->mSampleData.size(), mFmtChannels, mAmbiOrder);
        num_channels = static_cast<uint>(device->mSampleData.size());
    }
    if(mChans.capacity() > 2 && num_channels < mChans.capacity())
    {
        decltype(mChans){}.swap(mChans);
        decltype(mPrevSamples){}.swap(mPrevSamples);
    }
    mChans.reserve(maxu(2, num_channels));
    mChans.resize(num_channels);
    mPrevSamples.reserve(maxu(2, num_channels));
    mPrevSamples.resize(num_channels);

    mDecoder = nullptr;
    mDecoderPadding = 0;
    if(mFmtChannels == FmtSuperStereo)
    {
        switch(UhjDecodeQuality)
        {
        case UhjQualityType::IIR:
            mDecoder = std::make_unique<UhjStereoDecoderIIR>();
            mDecoderPadding = UhjStereoDecoderIIR::sInputPadding;
            break;
        case UhjQualityType::FIR256:
            mDecoder = std::make_unique<UhjStereoDecoder<UhjLength256>>();
            mDecoderPadding = UhjStereoDecoder<UhjLength256>::sInputPadding;
            break;
        case UhjQualityType::FIR512:
            mDecoder = std::make_unique<UhjStereoDecoder<UhjLength512>>();
            mDecoderPadding = UhjStereoDecoder<UhjLength512>::sInputPadding;
            break;
        }
    }
    else if(IsUHJ(mFmtChannels))
    {
        switch(UhjDecodeQuality)
        {
        case UhjQualityType::IIR:
            mDecoder = std::make_unique<UhjDecoderIIR>();
            mDecoderPadding = UhjDecoderIIR::sInputPadding;
            break;
        case UhjQualityType::FIR256:
            mDecoder = std::make_unique<UhjDecoder<UhjLength256>>();
            mDecoderPadding = UhjDecoder<UhjLength256>::sInputPadding;
            break;
        case UhjQualityType::FIR512:
            mDecoder = std::make_unique<UhjDecoder<UhjLength512>>();
            mDecoderPadding = UhjDecoder<UhjLength512>::sInputPadding;
            break;
        }
    }

    /* Clear the stepping value explicitly so the mixer knows not to mix this
     * until the update gets applied.
     */
    mStep = 0;

    /* Make sure the sample history is cleared. */
    std::fill(mPrevSamples.begin(), mPrevSamples.end(), HistoryLine{});

    if(mFmtChannels == FmtUHJ2 && !device->mUhjEncoder)
    {
        /* 2-channel UHJ needs different shelf filters. However, we can't just
         * use different shelf filters after mixing it, given any old speaker
         * setup the user has. To make this work, we apply the expected shelf
         * filters for decoding UHJ2 to quad (only needs LF scaling), and act
         * as if those 4 quad channels are encoded right back into B-Format.
         *
         * This isn't perfect, but without an entirely separate and limited
         * UHJ2 path, it's better than nothing.
         *
         * Note this isn't needed with UHJ output (UHJ2->B-Format->UHJ2 is
         * identity, so don't mess with it).
         */
        const BandSplitter splitter{device->mXOverFreq / static_cast<float>(device->Frequency)};
        for(auto &chandata : mChans)
        {
            chandata.mAmbiHFScale = 1.0f;
            chandata.mAmbiLFScale = 1.0f;
            chandata.mAmbiSplitter = splitter;
            chandata.mDryParams = DirectParams{};
            chandata.mDryParams.NFCtrlFilter = device->mNFCtrlFilter;
            std::fill_n(chandata.mWetParams.begin(), device->NumAuxSends, SendParams{});
        }
        mChans[0].mAmbiLFScale = DecoderBase::sWLFScale;
        mChans[1].mAmbiLFScale = DecoderBase::sXYLFScale;
        mChans[2].mAmbiLFScale = DecoderBase::sXYLFScale;
        mFlags.set(VoiceIsAmbisonic);
    }
    /* Don't need to set the VoiceIsAmbisonic flag if the device is not higher
     * order than the voice. No HF scaling is necessary to mix it.
     */
    else if(mAmbiOrder && device->mAmbiOrder > mAmbiOrder)
    {
        const uint8_t *OrderFromChan{Is2DAmbisonic(mFmtChannels) ?
            AmbiIndex::OrderFrom2DChannel().data() : AmbiIndex::OrderFromChannel().data()};
        const auto scales = AmbiScale::GetHFOrderScales(mAmbiOrder, device->mAmbiOrder,
            device->m2DMixing);

        const BandSplitter splitter{device->mXOverFreq / static_cast<float>(device->Frequency)};
        for(auto &chandata : mChans)
        {
            chandata.mAmbiHFScale = scales[*(OrderFromChan++)];
            chandata.mAmbiLFScale = 1.0f;
            chandata.mAmbiSplitter = splitter;
            chandata.mDryParams = DirectParams{};
            chandata.mDryParams.NFCtrlFilter = device->mNFCtrlFilter;
            std::fill_n(chandata.mWetParams.begin(), device->NumAuxSends, SendParams{});
        }
        mFlags.set(VoiceIsAmbisonic);
    }
    else
    {
        for(auto &chandata : mChans)
        {
            chandata.mDryParams = DirectParams{};
            chandata.mDryParams.NFCtrlFilter = device->mNFCtrlFilter;
            std::fill_n(chandata.mWetParams.begin(), device->NumAuxSends, SendParams{});
        }
        mFlags.reset(VoiceIsAmbisonic);
    }
}
|