jamulus/windows/sound.cpp


/******************************************************************************\
* Copyright (c) 2004-2019
*
* Author(s):
* Volker Fischer
*
* Description:
* Sound card interface for Windows operating systems
*
******************************************************************************
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
\******************************************************************************/
#include "sound.h"
/* Implementation *************************************************************/
// external references
extern AsioDrivers* asioDrivers;
bool loadAsioDriver ( char* name );
// pointer to our sound object
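// (the ASIO C callbacks used below carry no user context argument, so the
// static callback functions reach the CSound instance through this module
// level pointer)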
CSound* pSound;
/******************************************************************************\
* Common *
\******************************************************************************/
QString CSound::LoadAndInitializeDriver ( int iDriverIdx )
{
// load driver
loadAsioDriver ( cDriverNames[iDriverIdx] );
if ( ASIOInit ( &driverInfo ) != ASE_OK )
{
// clean up and return error string
asioDrivers->removeCurrentDriver();
return tr ( "The audio driver could not be initialized." );
}
// check whether the device capabilities fulfill our requirements
const QString strStat = CheckDeviceCapabilities();
// check if device is capable
if ( strStat.isEmpty() )
{
// the device has changed; by definition we reset the channel
// mapping to the defaults (first two available channels)
ResetChannelMapping();
// store ID of selected driver if initialization was successful
lCurDev = iDriverIdx;
}
else
{
// driver cannot be used, clean up
asioDrivers->removeCurrentDriver();
}
return strStat;
}
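// Illustrative driver lifecycle (a sketch, not code that runs here): callers
// typically do
//   LoadAndInitializeDriver ( iIdx );  // select and init the ASIO driver
//   Init ( iPrefMonoBufferSize );      // negotiate buffer size, create buffers
//   Start();                           // begin bufferSwitch() callbacks
// and later Stop() and, when switching devices, UnloadCurrentDriver().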
void CSound::UnloadCurrentDriver()
{
// clean up ASIO stuff
ASIOStop();
ASIODisposeBuffers();
ASIOExit();
asioDrivers->removeCurrentDriver();
}
QString CSound::CheckDeviceCapabilities()
{
// This function checks if our required input/output channel
// properties are supported by the selected device. If the return
// string is empty, the device can be used, otherwise the error
// message is returned.
// check the sample rate
const ASIOError CanSaRateReturn = ASIOCanSampleRate ( SYSTEM_SAMPLE_RATE_HZ );
if ( ( CanSaRateReturn == ASE_NoClock ) ||
( CanSaRateReturn == ASE_NotPresent ) )
{
// return error string
return tr ( "The audio device does not support the "
"required sample rate. The required sample rate is: " ) +
QString().setNum ( SYSTEM_SAMPLE_RATE_HZ ) + " Hz";
}
// check the number of available channels
ASIOGetChannels ( &lNumInChan, &lNumOutChan );
if ( ( lNumInChan < NUM_IN_OUT_CHANNELS ) ||
( lNumOutChan < NUM_IN_OUT_CHANNELS ) )
{
// return error string
return tr ( "The audio device does not support the "
"required number of channels. The required number of channels "
"for input and output is: " ) +
QString().setNum ( NUM_IN_OUT_CHANNELS );
}
// clip number of input/output channels to our maximum
if ( lNumInChan > MAX_NUM_IN_OUT_CHANNELS )
{
lNumInChan = MAX_NUM_IN_OUT_CHANNELS;
}
if ( lNumOutChan > MAX_NUM_IN_OUT_CHANNELS )
{
lNumOutChan = MAX_NUM_IN_OUT_CHANNELS;
}
// query channel infos for all available input channels
bool bInputChMixingSupported = true;
for ( int i = 0; i < lNumInChan; i++ )
{
// setup for input channels
channelInfosInput[i].isInput = ASIOTrue;
channelInfosInput[i].channel = i;
ASIOGetChannelInfo ( &channelInfosInput[i] );
// Check supported sample formats.
// Actually, it would be enough to have at least two channels which
// support the required sample format. But since we support all known
// sample types, the following check should always pass and we therefore
// return the error message for any channel which does not fulfill the
// sample format requirement (quick hack solution).
if ( !CheckSampleTypeSupported ( channelInfosInput[i].type ) )
{
// return error string
return tr ( "Required audio sample format not available." );
}
// store the name of the channel and check if channel mixing is supported
channelInputName[i] = channelInfosInput[i].name;
if ( !CheckSampleTypeSupportedForCHMixing ( channelInfosInput[i].type ) )
{
bInputChMixingSupported = false;
}
}
// query channel infos for all available output channels
for ( int i = 0; i < lNumOutChan; i++ )
{
// setup for output channels
channelInfosOutput[i].isInput = ASIOFalse;
channelInfosOutput[i].channel = i;
ASIOGetChannelInfo ( &channelInfosOutput[i] );
// Check supported sample formats.
// Actually, it would be enough to have at least two channels which
// support the required sample format. But since we support all known
// sample types, the following check should always pass and we therefore
// return the error message for any channel which does not fulfill the
// sample format requirement (quick hack solution).
if ( !CheckSampleTypeSupported ( channelInfosOutput[i].type ) )
{
// return error string
return tr ( "Required audio sample format not available." );
}
}
// special case with 4 input channels: support adding channels
if ( ( lNumInChan == 4 ) && bInputChMixingSupported )
{
// add four mixed channels (i.e. 4 normal, 4 mixed channels)
lNumInChanPlusAddChan = 8;
for ( int iCh = 0; iCh < lNumInChanPlusAddChan; iCh++ )
{
int iSelCH, iSelAddCH;
GetSelCHAndAddCH ( iCh, lNumInChan, iSelCH, iSelAddCH );
if ( iSelAddCH >= 0 )
{
// for mixed channels, show both audio channel names to be mixed
channelInputName[iCh] =
channelInputName[iSelCH] + " + " + channelInputName[iSelAddCH];
}
}
}
else
{
// regular case: no mixing input channels used
lNumInChanPlusAddChan = lNumInChan;
}
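// Illustrative example: for a 4-input interface named "In 1".."In 4" the
// channel list then has 8 entries, the 4 physical channels followed by 4
// two-channel mixes such as "In 1 + In 3" (the exact pairing is defined by
// GetSelCHAndAddCH()).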
// everything is ok, return empty string for "no error" case
return "";
}
void CSound::SetLeftInputChannel ( const int iNewChan )
{
// apply parameter after input parameter check
if ( ( iNewChan >= 0 ) && ( iNewChan < lNumInChanPlusAddChan ) )
{
vSelectedInputChannels[0] = iNewChan;
}
}
void CSound::SetRightInputChannel ( const int iNewChan )
{
// apply parameter after input parameter check
if ( ( iNewChan >= 0 ) && ( iNewChan < lNumInChanPlusAddChan ) )
{
vSelectedInputChannels[1] = iNewChan;
}
}
void CSound::SetLeftOutputChannel ( const int iNewChan )
{
// apply parameter after input parameter check
if ( ( iNewChan >= 0 ) && ( iNewChan < lNumOutChan ) )
{
vSelectedOutputChannels[0] = iNewChan;
}
}
void CSound::SetRightOutputChannel ( const int iNewChan )
{
// apply parameter after input parameter check
if ( ( iNewChan >= 0 ) && ( iNewChan < lNumOutChan ) )
{
vSelectedOutputChannels[1] = iNewChan;
}
}
int CSound::GetActualBufferSize ( const int iDesiredBufferSizeMono )
{
int iActualBufferSizeMono;
// query the usable buffer sizes
ASIOGetBufferSize ( &HWBufferInfo.lMinSize,
&HWBufferInfo.lMaxSize,
&HWBufferInfo.lPreferredSize,
&HWBufferInfo.lGranularity );
// calculate "nearest" buffer size and set internal parameter accordingly
// first check minimum and maximum values
if ( iDesiredBufferSizeMono <= HWBufferInfo.lMinSize )
{
iActualBufferSizeMono = HWBufferInfo.lMinSize;
}
else
{
if ( iDesiredBufferSizeMono >= HWBufferInfo.lMaxSize )
{
iActualBufferSizeMono = HWBufferInfo.lMaxSize;
}
else
{
// ASIO SDK 2.2: "Notes: When minimum and maximum buffer size are
// equal, the preferred buffer size has to be the same value as
// well; granularity should be 0 in this case."
if ( HWBufferInfo.lMinSize == HWBufferInfo.lMaxSize )
{
iActualBufferSizeMono = HWBufferInfo.lMinSize;
}
else
{
if ( ( HWBufferInfo.lGranularity < -1 ) ||
( HWBufferInfo.lGranularity == 0 ) )
{
// Special case (seen for EMU audio cards): granularity is
// zero or less than zero (make sure to exclude the special
// case of -1).
// There is no definition of this case in the ASIO SDK
// document. We assume here that all buffer sizes in between
// minimum and maximum buffer sizes are allowed.
iActualBufferSizeMono = iDesiredBufferSizeMono;
}
else
{
// General case --------------------------------------------
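// Worked example (illustrative): with lMinSize = 64, lGranularity = 64 and
// a desired size of 140, the loop tries 64, 128, 192; 192 is the first value
// >= 140, but 192 - 140 = 52 is larger than 140 - 128 = 12, so the previous
// trial value 128 is chosen as the nearest supported buffer size.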
// initialization
int iTrialBufSize = HWBufferInfo.lMinSize;
int iLastTrialBufSize = HWBufferInfo.lMinSize;
bool bSizeFound = false;
// test loop
while ( ( iTrialBufSize <= HWBufferInfo.lMaxSize ) && ( !bSizeFound ) )
{
if ( iTrialBufSize >= iDesiredBufferSizeMono )
{
// test which buffer size fits better: the old one or the
// current one
if ( ( iTrialBufSize - iDesiredBufferSizeMono ) >
( iDesiredBufferSizeMono - iLastTrialBufSize ) )
{
iTrialBufSize = iLastTrialBufSize;
}
// exit while loop
bSizeFound = true;
}
if ( !bSizeFound )
{
// store old trial buffer size
iLastTrialBufSize = iTrialBufSize;
// increment trial buffer size (check for special
// case first)
if ( HWBufferInfo.lGranularity == -1 )
{
// special case: buffer sizes are a power of 2
iTrialBufSize *= 2;
}
else
{
iTrialBufSize += HWBufferInfo.lGranularity;
}
}
}
// clip the trial buffer size (it may happen in the while
// loop that "iTrialBufSize" ends up larger than "lMaxSize" in
// case "lMaxSize - lMinSize" is not divisible by the
// granularity)
if ( iTrialBufSize > HWBufferInfo.lMaxSize )
{
iTrialBufSize = HWBufferInfo.lMaxSize;
}
// set ASIO buffer size
iActualBufferSizeMono = iTrialBufSize;
}
}
}
}
return iActualBufferSizeMono;
}
int CSound::Init ( const int iNewPrefMonoBufferSize )
{
ASIOMutex.lock(); // get mutex lock
{
// get the actual sound card buffer size which is supported
// by the audio hardware
iASIOBufferSizeMono = GetActualBufferSize ( iNewPrefMonoBufferSize );
// init base class
CSoundBase::Init ( iASIOBufferSizeMono );
// set internal buffer size value and calculate stereo buffer size
iASIOBufferSizeStereo = 2 * iASIOBufferSizeMono;
// set the sample rate
ASIOSetSampleRate ( SYSTEM_SAMPLE_RATE_HZ );
// create memory for intermediate audio buffer
vecsTmpAudioSndCrdStereo.Init ( iASIOBufferSizeStereo );
// create and activate ASIO buffers (buffer size in samples),
// dispose old buffers (if any)
ASIODisposeBuffers();
// prepare input channels
for ( int i = 0; i < lNumInChan; i++ )
{
bufferInfos[i].isInput = ASIOTrue;
bufferInfos[i].channelNum = i;
bufferInfos[i].buffers[0] = 0;
bufferInfos[i].buffers[1] = 0;
}
// prepare output channels
for ( int i = 0; i < lNumOutChan; i++ )
{
bufferInfos[lNumInChan + i].isInput = ASIOFalse;
bufferInfos[lNumInChan + i].channelNum = i;
bufferInfos[lNumInChan + i].buffers[0] = 0;
bufferInfos[lNumInChan + i].buffers[1] = 0;
}
ASIOCreateBuffers ( bufferInfos, lNumInChan + lNumOutChan,
iASIOBufferSizeMono, &asioCallbacks );
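// Note on the buffer layout used above: bufferInfos[0 .. lNumInChan - 1] hold
// the input channels and the following lNumOutChan entries the outputs; for
// each entry the driver provides buffers[0] and buffers[1] as a double buffer,
// and the "index" argument of bufferSwitch() selects which half to use.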
// query the latency of the driver
long lInputLatency = 0;
long lOutputLatency = 0;
if ( ASIOGetLatencies ( &lInputLatency, &lOutputLatency ) != ASE_NotPresent )
{
// add the input and output latencies (returned in number of
// samples) and calculate the time in ms
dInOutLatencyMs =
( static_cast<double> ( lInputLatency ) + lOutputLatency ) *
1000 / SYSTEM_SAMPLE_RATE_HZ;
}
else
{
// no latency available
dInOutLatencyMs = 0.0;
}
// check whether the driver requires the ASIOOutputReady() optimization
// (can be used by the driver to reduce output latency by one block)
bASIOPostOutput = ( ASIOOutputReady() == ASE_OK );
}
ASIOMutex.unlock();
return iASIOBufferSizeMono;
}
void CSound::Start()
{
// start audio
ASIOStart();
// call base class
CSoundBase::Start();
}
void CSound::Stop()
{
// stop audio
ASIOStop();
// call base class
CSoundBase::Stop();
// make sure the working thread is actually done
// (by checking the locked state)
if ( ASIOMutex.tryLock ( 5000 ) )
{
ASIOMutex.unlock();
}
}
CSound::CSound ( void (*fpNewCallback) ( CVector<int16_t>& psData, void* arg ),
void* arg,
const int iCtrlMIDIChannel ) :
CSoundBase ( "ASIO", true, fpNewCallback, arg, iCtrlMIDIChannel ),
vSelectedInputChannels ( NUM_IN_OUT_CHANNELS ),
vSelectedOutputChannels ( NUM_IN_OUT_CHANNELS ),
lNumInChan ( 0 ),
lNumInChanPlusAddChan ( 0 ),
lNumOutChan ( 0 ),
dInOutLatencyMs ( 0.0 ) // "0.0" means that no latency value is available
{
int i;
// init pointer to our sound object
pSound = this;
// get available ASIO driver names in system
for ( i = 0; i < MAX_NUMBER_SOUND_CARDS; i++ )
{
// allocate memory for driver names
cDriverNames[i] = new char[32];
}
char cDummyName[] = "dummy";
loadAsioDriver ( cDummyName ); // to initialize external object
lNumDevs = asioDrivers->getDriverNames ( cDriverNames, MAX_NUMBER_SOUND_CARDS );
// in case we do not have a driver available, throw error
if ( lNumDevs == 0 )
{
throw CGenErr ( tr ( "<b>No ASIO audio device (driver) found.</b><br><br>"
"The " ) + APP_NAME + tr ( " software requires the low latency audio "
"interface <b>ASIO</b> to work properly. This is no standard "
"Windows audio interface and therefore a special audio driver is "
"required. Either your sound card has a native ASIO driver (which "
"is recommended) or you might want to use alternative drivers like "
"the ASIO4All or kX driver." ) );
}
asioDrivers->removeCurrentDriver();
// copy driver names to base class but internally we still have to use
// the char* variable because of the ASIO API :-(
for ( i = 0; i < lNumDevs; i++ )
{
strDriverNames[i] = cDriverNames[i];
}
// init device index as not initialized (invalid)
lCurDev = INVALID_SNC_CARD_DEVICE;
// init channel mapping
ResetChannelMapping();
// set up the asioCallback structure
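// note: these are static functions invoked by the ASIO driver from its own
// real-time thread; they access the object via the global pSound pointer and
// bufferSwitch() serializes against Init()/Stop() with ASIOMutex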
asioCallbacks.bufferSwitch = &bufferSwitch;
asioCallbacks.sampleRateDidChange = &sampleRateChanged;
asioCallbacks.asioMessage = &asioMessages;
asioCallbacks.bufferSwitchTimeInfo = &bufferSwitchTimeInfo;
}
void CSound::ResetChannelMapping()
{
// init selected channel numbers with defaults: use first available
// channels for input and output
vSelectedInputChannels[0] = 0;
vSelectedInputChannels[1] = 1;
vSelectedOutputChannels[0] = 0;
vSelectedOutputChannels[1] = 1;
}
// ASIO callbacks -------------------------------------------------------------
ASIOTime* CSound::bufferSwitchTimeInfo ( ASIOTime*,
long index,
ASIOBool processNow )
{
bufferSwitch ( index, processNow );
return 0L;
}
bool CSound::CheckSampleTypeSupported ( const ASIOSampleType SamType )
{
// check for supported sample types
return ( ( SamType == ASIOSTInt16LSB ) ||
( SamType == ASIOSTInt24LSB ) ||
( SamType == ASIOSTInt32LSB ) ||
( SamType == ASIOSTFloat32LSB ) ||
( SamType == ASIOSTFloat64LSB ) ||
( SamType == ASIOSTInt32LSB16 ) ||
( SamType == ASIOSTInt32LSB18 ) ||
( SamType == ASIOSTInt32LSB20 ) ||
( SamType == ASIOSTInt32LSB24 ) ||
( SamType == ASIOSTInt16MSB ) ||
( SamType == ASIOSTInt24MSB ) ||
( SamType == ASIOSTInt32MSB ) ||
( SamType == ASIOSTFloat32MSB ) ||
( SamType == ASIOSTFloat64MSB ) ||
( SamType == ASIOSTInt32MSB16 ) ||
( SamType == ASIOSTInt32MSB18 ) ||
( SamType == ASIOSTInt32MSB20 ) ||
( SamType == ASIOSTInt32MSB24 ) );
}
bool CSound::CheckSampleTypeSupportedForCHMixing ( const ASIOSampleType SamType )
{
// check for supported sample types for audio channel mixing (see bufferSwitch)
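// (the additive mixing path in bufferSwitch() is only implemented for these
// three little-endian integer formats, hence the narrower check here)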
return ( ( SamType == ASIOSTInt16LSB ) ||
( SamType == ASIOSTInt24LSB ) ||
( SamType == ASIOSTInt32LSB ) );
}
void CSound::bufferSwitch ( long index, ASIOBool )
{
int iCurSample;
// get references to class members
int& iASIOBufferSizeMono = pSound->iASIOBufferSizeMono;
CVector<int16_t>& vecsTmpAudioSndCrdStereo = pSound->vecsTmpAudioSndCrdStereo;
// perform the processing for input and output
pSound->ASIOMutex.lock(); // get mutex lock
{
// CAPTURE -------------------------------------------------------------
for ( int i = 0; i < NUM_IN_OUT_CHANNELS; i++ )
{
int iSelCH, iSelAddCH;
GetSelCHAndAddCH ( pSound->vSelectedInputChannels[i], pSound->lNumInChan,
iSelCH, iSelAddCH );
// copy the newly captured block into the thread transfer buffer
// (interleave the mono channel data into the stereo buffer)
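// (layout of the transfer buffer: left samples at even indices, right
// samples at odd indices, i.e. [L0, R0, L1, R1, ...])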
switch ( pSound->channelInfosInput[iSelCH].type )
{
case ASIOSTInt16LSB:
{
// no type conversion required, just copy operation
int16_t* pASIOBuf = static_cast<int16_t*> ( pSound->bufferInfos[iSelCH].buffers[index] );
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] = pASIOBuf[iCurSample];
}
if ( iSelAddCH >= 0 )
{
// mix input channels case:
int16_t* pASIOBufAdd = static_cast<int16_t*> ( pSound->bufferInfos[iSelAddCH].buffers[index] );
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
Double2Short ( (double) vecsTmpAudioSndCrdStereo[2 * iCurSample + i] +
(double) pASIOBufAdd[iCurSample] );
}
}
break;
}
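// The 24 bit case below copies the 3 little-endian bytes of each sample into
// the low bytes of a zeroed int and shifts right by 8, keeping the most
// significant 16 bits (e.g. 0x123456 becomes 0x1234); this relies on a
// little-endian host, which is the case on Windows.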
case ASIOSTInt24LSB:
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
int iCurSam = 0;
memcpy ( &iCurSam, ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, 3 );
iCurSam >>= 8;
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] = static_cast<int16_t> ( iCurSam );
}
if ( iSelAddCH >= 0 )
{
// mix input channels case:
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
int iCurSam = 0;
memcpy ( &iCurSam, ( (char*) pSound->bufferInfos[iSelAddCH].buffers[index] ) + iCurSample * 3, 3 );
iCurSam >>= 8;
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
Double2Short ( (double) vecsTmpAudioSndCrdStereo[2 * iCurSample + i] +
(double) static_cast<int16_t> ( iCurSam ) );
}
}
break;
case ASIOSTInt32LSB:
{
int32_t* pASIOBuf = static_cast<int32_t*> ( pSound->bufferInfos[iSelCH].buffers[index] );
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( pASIOBuf[iCurSample] >> 16 );
}
if ( iSelAddCH >= 0 )
{
// mix input channels case:
int32_t* pASIOBufAdd = static_cast<int32_t*> ( pSound->bufferInfos[iSelAddCH].buffers[index] );
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
Double2Short ( (double) vecsTmpAudioSndCrdStereo[2 * iCurSample + i] +
(double) static_cast<int16_t> ( pASIOBufAdd[iCurSample] >> 16 ) );
}
}
break;
}
case ASIOSTFloat32LSB: // IEEE 754 32 bit float, as found on Intel x86 architecture
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( static_cast<float*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] * _MAXSHORT );
}
break;
case ASIOSTFloat64LSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( static_cast<double*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] * _MAXSHORT );
}
break;
case ASIOSTInt32LSB16: // 32 bit data with 16 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( static_cast<int32_t*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0xFFFF );
}
break;
case ASIOSTInt32LSB18: // 32 bit data with 18 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( ( static_cast<int32_t*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0x3FFFF ) >> 2 );
}
break;
case ASIOSTInt32LSB20: // 32 bit data with 20 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( ( static_cast<int32_t*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0xFFFFF ) >> 4 );
}
break;
case ASIOSTInt32LSB24: // 32 bit data with 24 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( ( static_cast<int32_t*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0xFFFFFF ) >> 8 );
}
break;
case ASIOSTInt16MSB:
// NOT YET TESTED
// flip bits
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
Flip16Bits ( ( static_cast<int16_t*> (
pSound->bufferInfos[iSelCH].buffers[index] ) )[iCurSample] );
}
break;
case ASIOSTInt24MSB:
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// because the bits are flipped, we do not have to perform the
// shift by 8 bits
int iCurSam = 0;
memcpy ( &iCurSam, ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, 3 );
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
Flip16Bits ( static_cast<int16_t> ( iCurSam ) );
}
break;
case ASIOSTInt32MSB:
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// flip bits and convert to 16 bit
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( Flip32Bits ( static_cast<int32_t*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) >> 16 );
}
break;
case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( static_cast<float> (
Flip32Bits ( static_cast<int32_t*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) ) * _MAXSHORT );
}
break;
case ASIOSTFloat64MSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( static_cast<double> (
Flip64Bits ( static_cast<int64_t*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) ) * _MAXSHORT );
}
break;
case ASIOSTInt32MSB16: // 32 bit data with 16 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( Flip32Bits ( static_cast<int32_t*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0xFFFF );
}
break;
case ASIOSTInt32MSB18: // 32 bit data with 18 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( ( Flip32Bits ( static_cast<int32_t*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0x3FFFF ) >> 2 );
}
break;
case ASIOSTInt32MSB20: // 32 bit data with 20 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( ( Flip32Bits ( static_cast<int32_t*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0xFFFFF ) >> 4 );
}
break;
case ASIOSTInt32MSB24: // 32 bit data with 24 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] =
static_cast<int16_t> ( ( Flip32Bits ( static_cast<int32_t*> (
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0xFFFFFF ) >> 8 );
}
break;
}
}
// call processing callback function
pSound->ProcessCallback ( vecsTmpAudioSndCrdStereo );
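// (ProcessCallback() hands the captured interleaved block to the application
// and overwrites vecsTmpAudioSndCrdStereo with the audio to be played back,
// which is why the same vector feeds the playback loop below)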
// PLAYBACK ------------------------------------------------------------
for ( int i = 0; i < NUM_IN_OUT_CHANNELS; i++ )
{
const int iSelCH = pSound->lNumInChan + pSound->vSelectedOutputChannels[i];
// copy data from the intermediate buffer to the sound card output
// buffer (de-interleave the stereo data into the mono sound card buffers)
switch ( pSound->channelInfosOutput[pSound->vSelectedOutputChannels[i]].type )
{
case ASIOSTInt16LSB:
{
// no type conversion required, just copy operation
int16_t* pASIOBuf = static_cast<int16_t*> ( pSound->bufferInfos[iSelCH].buffers[index] );
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
pASIOBuf[iCurSample] = vecsTmpAudioSndCrdStereo[2 * iCurSample + i];
}
break;
}
case ASIOSTInt24LSB:
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// convert current sample in 24 bit format
int32_t iCurSam = static_cast<int32_t> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
iCurSam <<= 8;
memcpy ( ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, &iCurSam, 3 );
}
break;
case ASIOSTInt32LSB:
{
int32_t* pASIOBuf = static_cast<int32_t*> ( pSound->bufferInfos[iSelCH].buffers[index] );
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// convert to 32 bit
const int32_t iCurSam = static_cast<int32_t> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
pASIOBuf[iCurSample] = ( iCurSam << 16 );
}
break;
}
case ASIOSTFloat32LSB: // IEEE 754 32 bit float, as found on Intel x86 architecture
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
const float fCurSam = static_cast<float> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<float*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
fCurSam / _MAXSHORT;
}
break;
case ASIOSTFloat64LSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
const double fCurSam = static_cast<double> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<double*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
fCurSam / _MAXSHORT;
}
break;
case ASIOSTInt32LSB16: // 32 bit data with 16 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// convert to 32 bit
const int32_t iCurSam = static_cast<int32_t> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<int32_t*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
iCurSam;
}
break;
case ASIOSTInt32LSB18: // 32 bit data with 18 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// convert to 32 bit
const int32_t iCurSam = static_cast<int32_t> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<int32_t*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
( iCurSam << 2 );
}
break;
case ASIOSTInt32LSB20: // 32 bit data with 20 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// convert to 32 bit
const int32_t iCurSam = static_cast<int32_t> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<int32_t*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
( iCurSam << 4 );
}
break;
case ASIOSTInt32LSB24: // 32 bit data with 24 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// convert to 32 bit
const int32_t iCurSam = static_cast<int32_t> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<int32_t*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
( iCurSam << 8 );
}
break;
case ASIOSTInt16MSB:
// NOT YET TESTED
// flip bits
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
( (int16_t*) pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
Flip16Bits ( vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
}
break;
case ASIOSTInt24MSB:
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// because the bits are flipped, we do not have to perform the
// shift by 8 bits
int32_t iCurSam = static_cast<int32_t> ( Flip16Bits (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] ) );
memcpy ( ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, &iCurSam, 3 );
}
break;
case ASIOSTInt32MSB:
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// convert to 32 bit and flip bits
int iCurSam = static_cast<int32_t> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<int32_t*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
Flip32Bits ( iCurSam << 16 );
}
break;
case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
const float fCurSam = static_cast<float> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<float*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
static_cast<float> ( Flip32Bits ( static_cast<int32_t> (
fCurSam / _MAXSHORT ) ) );
}
break;
case ASIOSTFloat64MSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
const double fCurSam = static_cast<double> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<double*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
static_cast<double> ( Flip64Bits ( static_cast<int64_t> (
fCurSam / _MAXSHORT ) ) );
}
break;
case ASIOSTInt32MSB16: // 32 bit data with 16 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// convert to 32 bit
const int32_t iCurSam = static_cast<int32_t> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<int32_t*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
Flip32Bits ( iCurSam );
}
break;
case ASIOSTInt32MSB18: // 32 bit data with 18 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// convert to 32 bit
const int32_t iCurSam = static_cast<int32_t> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<int32_t*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
Flip32Bits ( iCurSam << 2 );
}
break;
case ASIOSTInt32MSB20: // 32 bit data with 20 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// convert to 32 bit
const int32_t iCurSam = static_cast<int32_t> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<int32_t*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
Flip32Bits ( iCurSam << 4 );
}
break;
case ASIOSTInt32MSB24: // 32 bit data with 24 bit alignment
// NOT YET TESTED
for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ )
{
// convert to 32 bit
const int32_t iCurSam = static_cast<int32_t> (
vecsTmpAudioSndCrdStereo[2 * iCurSample + i] );
static_cast<int32_t*> ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] =
Flip32Bits ( iCurSam << 8 );
}
break;
}
}
// Finally, if the driver supports the ASIOOutputReady() optimization,
// call it here now that all output data is in place ---------------------
if ( pSound->bASIOPostOutput )
{
ASIOOutputReady();
}
}
pSound->ASIOMutex.unlock();
}
long CSound::asioMessages ( long selector,
long,
void*,
double* )
{
long ret = 0;
switch ( selector )
{
case kAsioEngineVersion:
// return the supported ASIO version of the host application
ret = 2L; // Host ASIO implementation version, 2 or higher
break;
// both messages might be sent if the buffer size changes
case kAsioBufferSizeChange:
pSound->EmitReinitRequestSignal ( RS_ONLY_RESTART_AND_INIT );
ret = 1L; // 1L if request is accepted or 0 otherwise
break;
case kAsioResetRequest:
pSound->EmitReinitRequestSignal ( RS_RELOAD_RESTART_AND_INIT );
ret = 1L; // 1L if request is accepted or 0 otherwise
break;
}
return ret;
}
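// The FlipNBits() helpers below are intended to reverse the bit order of a
// sample word for the big-endian ("...MSB") sample formats; all of those
// code paths are marked as not yet tested above.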
int16_t CSound::Flip16Bits ( const int16_t iIn )
{
uint16_t iMask = 1; // walk the input bits from LSB to MSB
uint16_t iOut = 0;
for ( unsigned int i = 0; i < 16; i++ )
{
// make room for the next bit, then copy the current input bit so that
// the bit order of the word ends up reversed
iOut <<= 1;
iOut |= ( iIn & iMask ) ? 1 : 0;
iMask <<= 1;
}
return static_cast<int16_t> ( iOut );
}
int32_t CSound::Flip32Bits ( const int32_t iIn )
{
uint32_t iMask = 1; // walk the input bits from LSB to MSB
uint32_t iOut = 0;
for ( unsigned int i = 0; i < 32; i++ )
{
// make room for the next bit, then copy the current input bit so that
// the bit order of the word ends up reversed
iOut <<= 1;
iOut |= ( iIn & iMask ) ? 1 : 0;
iMask <<= 1;
}
return static_cast<int32_t> ( iOut );
}
int64_t CSound::Flip64Bits ( const int64_t iIn )
{
uint64_t iMask = 1; // walk the input bits from LSB to MSB
uint64_t iOut = 0;
for ( unsigned int i = 0; i < 64; i++ )
{
// make room for the next bit, then copy the current input bit so that
// the bit order of the word ends up reversed
iOut <<= 1;
iOut |= ( iIn & iMask ) ? 1 : 0;
iMask <<= 1;
}
return static_cast<int64_t> ( iOut );
}