improve server audio mix processing for better clipping behavior

Volker Fischer 2020-07-23 17:31:21 +02:00
parent 1fd3cc977f
commit 9c5a77940f
3 changed files with 38 additions and 25 deletions
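
Note on the change: the server mix previously converted the running sum back to 16 bit with Double2Short() after every client that was added, so the partially built mix was clipped again and again before all channels were in. The new code accumulates the whole frame in a double precision intermediate buffer and clips only once, when the finished mix is converted to int16_t. The following standalone sketch (not Jamulus code; clip_to_int16() is only a stand-in for the project's Double2Short() helper) illustrates why that matters when loud signals partially cancel:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // placeholder for the project's Double2Short() clipping helper
    static int16_t clip_to_int16 ( double dIn )
    {
        if ( dIn > 32767.0 )  return 32767;
        if ( dIn < -32768.0 ) return -32768;
        return static_cast<int16_t> ( dIn );
    }

    int main()
    {
        // one sample from three clients: two loud positive sources and one
        // strongly negative source that should largely cancel them
        const std::vector<int16_t> vecSources { 30000, 30000, -28000 };

        // old behaviour: clip back to 16 bit after every accumulation step
        int16_t iOldMix = 0;
        for ( const int16_t iSample : vecSources )
        {
            iOldMix = clip_to_int16 ( static_cast<double> ( iOldMix ) + iSample );
        }

        // new behaviour: accumulate in double precision, clip once at the end
        double dNewMix = 0.0;
        for ( const int16_t iSample : vecSources )
        {
            dNewMix += iSample;
        }
        const int16_t iNewMix = clip_to_int16 ( dNewMix );

        // prints 4767 for per-step clipping (the second source already
        // saturated the sum at 32767) and 32000 for the single final clip
        std::printf ( "per-step clipping: %d, single final clip: %d\n", iOldMix, iNewMix );
        return 0;
    }

With per-step clipping the first two sources saturate the accumulator and the cancelling third source can no longer recover the lost headroom; with a single final clip the correct sum survives.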

View file

@@ -11,6 +11,8 @@
 - improve compact skin by using smaller font size if the name is too long
+- improve server audio mix processing for better clipping behavior
 - bug fix: --showallservers ping column sort is alphabetic (#201)

View file

@@ -338,6 +338,7 @@ CServer::CServer ( const int iNewMaxNumChan,
     vecvecdPannings.Init ( iMaxNumChannels );
     vecvecsData.Init ( iMaxNumChannels );
     vecvecsSendData.Init ( iMaxNumChannels );
+    vecvecsIntermediateProcBuf.Init ( iMaxNumChannels );
     vecvecbyCodedData.Init ( iMaxNumChannels );
     vecNumAudioChannels.Init ( iMaxNumChannels );
     vecNumFrameSizeConvBlocks.Init ( iMaxNumChannels );
@@ -357,6 +358,9 @@ CServer::CServer ( const int iNewMaxNumChan,
         // and coded data because of the OMP implementation)
         vecvecsSendData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ );
 
+        // allocate worst case memory for intermediate processing buffers in double precision
+        vecvecsIntermediateProcBuf[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ );
+
         // allocate worst case memory for the coded data
         vecvecbyCodedData[i].Init ( MAX_SIZE_BYTES_NETW_BUF );
     }
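
The constructor hunks above allocate one double precision intermediate buffer per channel slot, sized for the worst case (stereo at the double system frame size), next to the existing per-channel send and coded-data buffers whose comment ties them to the OMP implementation. A rough sketch of that pattern, with hypothetical names and constants standing in for the real Jamulus types, is:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // hypothetical stand-ins for the real constants and members
    constexpr int kNumClients             = 10;
    constexpr int kDoubleFrameSizeSamples = 128;

    int main()
    {
        // one worst-case (stereo, double frame size) buffer per channel slot,
        // allocated once so the real-time mix loop never has to reallocate
        std::vector<std::vector<double>>  mixBufs  ( kNumClients );
        std::vector<std::vector<int16_t>> sendBufs ( kNumClients );

        for ( int i = 0; i < kNumClients; i++ )
        {
            mixBufs[i].resize  ( 2 /* stereo */ * kDoubleFrameSizeSamples );
            sendBufs[i].resize ( 2 /* stereo */ * kDoubleFrameSizeSamples );
        }

        // because every channel index owns its buffers, the per-channel mix can
        // run in parallel (e.g. an OpenMP parallel for) without shared writable state
        #pragma omp parallel for
        for ( int i = 0; i < kNumClients; i++ )
        {
            std::fill ( mixBufs[i].begin(), mixBufs[i].end(), 0.0 );
            // ... mix the other channels into mixBufs[i], then clip into sendBufs[i]
        }

        return 0;
    }

Pre-allocating the worst case up front keeps the real-time mix loop free of allocations, and giving every channel index its own scratch buffer avoids any shared writable state between parallel iterations.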
@@ -1031,6 +1035,7 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE
                           vecvecdGains[i],
                           vecvecdPannings[i],
                           vecNumAudioChannels,
+                          vecvecsIntermediateProcBuf[i],
                           vecvecsSendData[i],
                           iCurNumAudChan,
                           iNumClients );
@@ -1136,14 +1141,15 @@ void CServer::ProcessData ( const CVector<CVector<int16_t> >& vecvecsData,
                             const CVector<double>& vecdGains,
                             const CVector<double>& vecdPannings,
                             const CVector<int>& vecNumAudioChannels,
+                            CVector<double>& vecdIntermProcBuf,
                             CVector<int16_t>& vecsOutData,
                             const int iCurNumAudChan,
                             const int iNumClients )
 {
     int i, j, k;
 
-    // init return vector with zeros since we mix all channels on that vector
-    vecsOutData.Reset ( 0 );
+    // init intermediate processing vector with zeros since we mix all channels on that vector
+    vecdIntermProcBuf.Reset ( 0 );
 
     // distinguish between stereo and mono mode
     if ( iCurNumAudChan == 1 )
@@ -1163,8 +1169,7 @@ void CServer::ProcessData ( const CVector<CVector<int16_t> >& vecvecsData,
                     // mono
                     for ( i = 0; i < iServerFrameSizeSamples; i++ )
                     {
-                        vecsOutData[i] = Double2Short (
-                            static_cast<double> ( vecsOutData[i] ) + vecsData[i] );
+                        vecdIntermProcBuf[i] += vecsData[i];
                     }
                 }
                 else
@@ -1172,9 +1177,8 @@ void CServer::ProcessData ( const CVector<CVector<int16_t> >& vecvecsData,
                     // stereo: apply stereo-to-mono attenuation
                     for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 )
                     {
-                        vecsOutData[i] =
-                            Double2Short ( vecsOutData[i] +
-                            ( static_cast<double> ( vecsData[k] ) + vecsData[k + 1] ) / 2 );
+                        vecdIntermProcBuf[i] +=
+                            ( static_cast<double> ( vecsData[k] ) + vecsData[k + 1] ) / 2;
                     }
                 }
             }
@@ -1185,8 +1189,7 @@ void CServer::ProcessData ( const CVector<CVector<int16_t> >& vecvecsData,
                     // mono
                     for ( i = 0; i < iServerFrameSizeSamples; i++ )
                     {
-                        vecsOutData[i] = Double2Short (
-                            vecsOutData[i] + vecsData[i] * dGain );
+                        vecdIntermProcBuf[i] += vecsData[i] * dGain;
                     }
                 }
                 else
@@ -1194,13 +1197,18 @@ void CServer::ProcessData ( const CVector<CVector<int16_t> >& vecvecsData,
                     // stereo: apply stereo-to-mono attenuation
                     for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 )
                     {
-                        vecsOutData[i] =
-                            Double2Short ( vecsOutData[i] + dGain *
-                            ( static_cast<double> ( vecsData[k] ) + vecsData[k + 1] ) / 2 );
+                        vecdIntermProcBuf[i] += dGain *
+                            ( static_cast<double> ( vecsData[k] ) + vecsData[k + 1] ) / 2;
                     }
                 }
             }
         }
+
+        // convert from double to short with clipping
+        for ( i = 0; i < iServerFrameSizeSamples; i++ )
+        {
+            vecsOutData[i] = Double2Short ( vecdIntermProcBuf[i] );
+        }
     }
     else
     {
@@ -1225,13 +1233,9 @@ void CServer::ProcessData ( const CVector<CVector<int16_t> >& vecvecsData,
                     // mono: copy same mono data in both out stereo audio channels
                     for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 )
                     {
-                        // left channel
-                        vecsOutData[k] = Double2Short (
-                            static_cast<double> ( vecsOutData[k] ) + vecsData[i] );
-
-                        // right channel
-                        vecsOutData[k + 1] = Double2Short (
-                            static_cast<double> ( vecsOutData[k + 1] ) + vecsData[i] );
+                        // left/right channel
+                        vecdIntermProcBuf[k]     += vecsData[i];
+                        vecdIntermProcBuf[k + 1] += vecsData[i];
                     }
                 }
                 else
@@ -1239,8 +1243,7 @@ void CServer::ProcessData ( const CVector<CVector<int16_t> >& vecvecsData,
                     // stereo
                     for ( i = 0; i < ( 2 * iServerFrameSizeSamples ); i++ )
                     {
-                        vecsOutData[i] = Double2Short (
-                            static_cast<double> ( vecsOutData[i] ) + vecsData[i] );
+                        vecdIntermProcBuf[i] += vecsData[i];
                     }
                 }
             }
@@ -1252,8 +1255,8 @@ void CServer::ProcessData ( const CVector<CVector<int16_t> >& vecvecsData,
                     for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 )
                     {
                         // left/right channel
-                        vecsOutData[k]     = Double2Short ( vecsOutData[k] + vecsData[i] * dGainL );
-                        vecsOutData[k + 1] = Double2Short ( vecsOutData[k + 1] + vecsData[i] * dGainR );
+                        vecdIntermProcBuf[k]     += vecsData[i] * dGainL;
+                        vecdIntermProcBuf[k + 1] += vecsData[i] * dGainR;
                     }
                 }
                 else
@@ -1262,12 +1265,18 @@ void CServer::ProcessData ( const CVector<CVector<int16_t> >& vecvecsData,
                     for ( i = 0; i < ( 2 * iServerFrameSizeSamples ); i += 2 )
                     {
                         // left/right channel
-                        vecsOutData[i]     = Double2Short ( vecsOutData[i] + vecsData[i] * dGainL );
-                        vecsOutData[i + 1] = Double2Short ( vecsOutData[i + 1] + vecsData[i + 1] * dGainR );
+                        vecdIntermProcBuf[i]     += vecsData[i] * dGainL;
+                        vecdIntermProcBuf[i + 1] += vecsData[i + 1] * dGainR;
                     }
                 }
             }
         }
+
+        // convert from double to short with clipping
+        for ( i = 0; i < ( 2 * iServerFrameSizeSamples ); i++ )
+        {
+            vecsOutData[i] = Double2Short ( vecdIntermProcBuf[i] );
+        }
     }
 }
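
Taken together, the changes above reshape each output path into two passes: accumulate all clients into the double precision buffer, then run one clipping conversion over the finished frame. A condensed, self-contained sketch of the mono path (illustration only: the real ProcessData() also handles panning, stereo inputs, the gain-equals-one fast path and the OMP loop, and clip_to_int16() merely approximates the project's Double2Short()):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // rough stand-in for the project's Double2Short() clipping helper
    static int16_t clip_to_int16 ( double dIn )
    {
        if ( dIn > 32767.0 )  return 32767;
        if ( dIn < -32768.0 ) return -32768;
        return static_cast<int16_t> ( dIn );
    }

    void MixMonoFrame ( const std::vector<std::vector<int16_t>>& vecvecsData,       // per-client input frames
                        const std::vector<double>&               vecdGains,         // per-client gains
                        std::vector<double>&                     vecdIntermProcBuf, // double precision scratch buffer
                        std::vector<int16_t>&                    vecsOutData,       // clipped 16 bit output frame
                        const int                                iFrameSizeSamples )
    {
        // pass 1: accumulate every client in double precision, no clipping yet
        std::fill ( vecdIntermProcBuf.begin(), vecdIntermProcBuf.end(), 0.0 );

        for ( std::size_t j = 0; j < vecvecsData.size(); j++ )
        {
            for ( int i = 0; i < iFrameSizeSamples; i++ )
            {
                vecdIntermProcBuf[i] += vecvecsData[j][i] * vecdGains[j];
            }
        }

        // pass 2: convert from double to short with clipping, once per frame
        for ( int i = 0; i < iFrameSizeSamples; i++ )
        {
            vecsOutData[i] = clip_to_int16 ( vecdIntermProcBuf[i] );
        }
    }

A side effect of this shape is that the clipping helper now runs once per output sample per frame rather than once per client per sample, so the new code should also be marginally cheaper; the main point, though, is that clipping is applied only to the completed mix.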

View file

@@ -305,6 +305,7 @@ protected:
                         const CVector<double>& vecdGains,
                         const CVector<double>& vecdPannings,
                         const CVector<int>& vecNumAudioChannels,
+                        CVector<double>& vecdIntermProcBuf,
                         CVector<int16_t>& vecsOutData,
                         const int iCurNumAudChan,
                         const int iNumClients );
@@ -353,6 +354,7 @@ protected:
     CVector<int>                vecUseDoubleSysFraSizeConvBuf;
     CVector<EAudComprType>      vecAudioComprType;
     CVector<CVector<int16_t> >  vecvecsSendData;
+    CVector<CVector<double> >   vecvecsIntermediateProcBuf;
     CVector<CVector<uint8_t> >  vecvecbyCodedData;
 
     // Channel levels