// jamulus/mac/sound.cpp -- CoreAudio sound interface implementation for macOS
/******************************************************************************\
 * Copyright (c) 2004-2020
 *
 * Author(s):
 *  Volker Fischer
 *
 ******************************************************************************
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
\******************************************************************************/
#include "sound.h"
/* Implementation *************************************************************/
2020-04-30 22:37:48 +02:00
CSound::CSound ( void (*fpNewProcessCallback) ( CVector<short>& psData, void* arg ),
void* arg,
const int iCtrlMIDIChannel,
const bool ,
const QString& ) :
2020-04-30 22:18:11 +02:00
CSoundBase ( "CoreAudio", true, fpNewProcessCallback, arg, iCtrlMIDIChannel ),
midiInPortRef ( static_cast<MIDIPortRef> ( NULL ) )
2012-01-28 12:51:14 +01:00
{
// Apple Mailing Lists: Subject: GUI Apps should set kAudioHardwarePropertyRunLoop
// in the HAL, From: Jeff Moore, Date: Fri, 6 Dec 2002
// Most GUI applciations have several threads on which they receive
// notifications already, so the having the HAL's thread around is wasteful.
// Here is what you should do: On the thread you want the HAL to use for
// notifications (for most apps, this will be the main thread), add the
// following lines of code:
// tell the HAL to use the current thread as it's run loop
CFRunLoopRef theRunLoop = CFRunLoopGetCurrent();
AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster };
AudioObjectSetPropertyData ( kAudioObjectSystemObject,
&property,
0,
NULL,
sizeof ( CFRunLoopRef ),
&theRunLoop );
2012-01-28 12:51:14 +01:00
// Get available input/output devices --------------------------------------
UInt32 iPropertySize = 0;
2015-11-13 22:41:07 +01:00
AudioObjectPropertyAddress stPropertyAddress;
2012-01-28 12:51:14 +01:00
stPropertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
stPropertyAddress.mElement = kAudioObjectPropertyElementMaster;
2012-01-28 12:51:14 +01:00
// first get property size of devices array and allocate memory
stPropertyAddress.mSelector = kAudioHardwarePropertyDevices;
AudioObjectGetPropertyDataSize ( kAudioObjectSystemObject,
&stPropertyAddress,
0,
NULL,
&iPropertySize );
2012-01-28 12:51:14 +01:00
CVector<AudioDeviceID> vAudioDevices ( iPropertySize );
2012-01-28 12:51:14 +01:00
// now actually query all devices present in the system
AudioObjectGetPropertyData ( kAudioObjectSystemObject,
&stPropertyAddress,
0,
NULL,
&iPropertySize,
&vAudioDevices[0] );
2012-01-28 12:51:14 +01:00
// calculate device count based on size of returned data array
const UInt32 iDeviceCount = iPropertySize / sizeof ( AudioDeviceID );
2012-01-28 12:51:14 +01:00
// always add system default devices for input and output as first entry
2015-11-13 22:13:59 +01:00
lNumDevs = 0;
2012-01-28 12:51:14 +01:00
strDriverNames[lNumDevs] = "System Default In/Out Devices";
2015-11-13 22:41:07 +01:00
iPropertySize = sizeof ( AudioDeviceID );
stPropertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice;
2015-11-13 22:13:59 +01:00
if ( AudioObjectGetPropertyData ( kAudioObjectSystemObject,
2015-11-13 22:41:07 +01:00
&stPropertyAddress,
2015-11-13 22:13:59 +01:00
0,
NULL,
&iPropertySize,
&audioInputDevice[lNumDevs] ) )
2012-01-28 12:51:14 +01:00
{
throw CGenErr ( tr ( "CoreAudio input AudioHardwareGetProperty call failed. "
"It seems that no sound card is available in the system." ) );
}
2015-11-13 22:41:07 +01:00
iPropertySize = sizeof ( AudioDeviceID );
stPropertyAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
2015-11-13 22:13:59 +01:00
if ( AudioObjectGetPropertyData ( kAudioObjectSystemObject,
2015-11-13 22:41:07 +01:00
&stPropertyAddress,
2015-11-13 22:13:59 +01:00
0,
NULL,
&iPropertySize,
&audioOutputDevice[lNumDevs] ) )
2012-01-28 12:51:14 +01:00
{
throw CGenErr ( tr ( "CoreAudio output AudioHardwareGetProperty call failed. "
"It seems that no sound card is available in the system." ) );
}
lNumDevs++; // next device
// add detected devices
2012-01-28 12:51:14 +01:00
//
// we add combined entries for input and output for each device so that we
// do not need two combo boxes in the GUI for input and output (therefore
// all possible combinations are required which can be a large number)
for ( UInt32 i = 0; i < iDeviceCount; i++ )
2012-01-28 12:51:14 +01:00
{
for ( UInt32 j = 0; j < iDeviceCount; j++ )
2012-01-28 12:51:14 +01:00
{
// get device infos for both current devices
QString strDeviceName_i;
QString strDeviceName_j;
bool bIsInput_i;
bool bIsInput_j;
bool bIsOutput_i;
bool bIsOutput_j;
GetAudioDeviceInfos ( vAudioDevices[i],
2012-01-28 12:51:14 +01:00
strDeviceName_i,
bIsInput_i,
bIsOutput_i );
GetAudioDeviceInfos ( vAudioDevices[j],
2012-01-28 12:51:14 +01:00
strDeviceName_j,
bIsInput_j,
bIsOutput_j );
// check if i device is input and j device is output and that we are
// in range
if ( bIsInput_i && bIsOutput_j && ( lNumDevs < MAX_NUMBER_SOUND_CARDS ) )
2012-01-28 12:51:14 +01:00
{
strDriverNames[lNumDevs] = "in: " +
strDeviceName_i + "/out: " +
strDeviceName_j;
// store audio device IDs
audioInputDevice[lNumDevs] = vAudioDevices[i];
audioOutputDevice[lNumDevs] = vAudioDevices[j];
2012-01-28 12:51:14 +01:00
lNumDevs++; // next device
}
}
}
// init device index as not initialized (invalid)
lCurDev = INVALID_INDEX;
CurrentAudioInputDeviceID = 0;
CurrentAudioOutputDeviceID = 0;
iNumInChan = 0;
2020-05-01 16:04:04 +02:00
iNumInChanPlusAddChan = 0;
iNumOutChan = 0;
iSelInputLeftChannel = 0;
iSelInputRightChannel = 0;
iSelOutputLeftChannel = 0;
iSelOutputRightChannel = 0;
// Optional MIDI initialization --------------------------------------------
if ( iCtrlMIDIChannel != INVALID_MIDI_CH )
{
// create client and ports
MIDIClientRef midiClient = static_cast<MIDIClientRef> ( NULL );
MIDIClientCreate ( CFSTR ( APP_NAME ), NULL, NULL, &midiClient );
MIDIInputPortCreate ( midiClient, CFSTR ( "Input port" ), callbackMIDI, this, &midiInPortRef );
// open connections from all sources
const int iNMIDISources = MIDIGetNumberOfSources();
for ( int i = 0; i < iNMIDISources; i++ )
{
MIDIEndpointRef src = MIDIGetSource ( i );
2019-01-12 15:34:35 +01:00
MIDIPortConnectSource ( midiInPortRef, src, NULL ) ;
}
}
2012-01-28 12:51:14 +01:00
}
void CSound::GetAudioDeviceInfos ( const AudioDeviceID DeviceID,
QString& strDeviceName,
bool& bIsInput,
bool& bIsOutput )
{
UInt32 iPropertySize;
AudioObjectPropertyAddress stPropertyAddress;
// init return values
bIsInput = false;
bIsOutput = false;
2015-11-13 22:41:07 +01:00
// check if device is input or output or both (is that possible?)
stPropertyAddress.mSelector = kAudioDevicePropertyStreams;
stPropertyAddress.mElement = kAudioObjectPropertyElementMaster;
// input check
iPropertySize = 0;
stPropertyAddress.mScope = kAudioDevicePropertyScopeInput;
AudioObjectGetPropertyDataSize ( DeviceID,
&stPropertyAddress,
0,
NULL,
&iPropertySize );
bIsInput = ( iPropertySize > 0 ); // check if any input streams are available
// output check
iPropertySize = 0;
stPropertyAddress.mScope = kAudioDevicePropertyScopeOutput;
AudioObjectGetPropertyDataSize ( DeviceID,
&stPropertyAddress,
0,
NULL,
&iPropertySize );
bIsOutput = ( iPropertySize > 0 ); // check if any output streams are available
2015-11-13 22:41:07 +01:00
2012-01-28 12:51:14 +01:00
// get property name
CFStringRef sPropertyStringValue = NULL;
2012-01-28 12:51:14 +01:00
2015-11-13 22:41:07 +01:00
stPropertyAddress.mSelector = kAudioObjectPropertyName;
stPropertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
iPropertySize = sizeof ( CFStringRef );
2015-11-13 22:41:07 +01:00
AudioObjectGetPropertyData ( DeviceID,
&stPropertyAddress,
0,
NULL,
&iPropertySize,
&sPropertyStringValue );
2012-01-28 12:51:14 +01:00
// convert string
if ( !ConvertCFStringToQString ( sPropertyStringValue, strDeviceName ) )
{
// use a default name in case the conversion did not succeed
strDeviceName = "UNKNOWN";
}
2012-01-28 12:51:14 +01:00
}
// Counts the total number of channels a device offers in the given direction
// and, as a side effect, records the per-buffer channel counts in
// vecNumInBufChan (input) or vecNumOutBufChan (output). Returns 0 if the
// stream configuration cannot be queried or memory allocation fails.
int CSound::CountChannels ( AudioDeviceID devID,
                            bool          isInput )
{
    OSStatus err;
    UInt32   propSize;
    int      result = 0;

    // reset the per-buffer channel count vector for the queried direction
    if ( isInput )
    {
        vecNumInBufChan.Init ( 0 );
    }
    else
    {
        vecNumOutBufChan.Init ( 0 );
    }

    // it seems we have multiple buffers where each buffer has only one channel,
    // in that case we assume that each input channel has its own buffer
    AudioObjectPropertyScope theScope = isInput ? kAudioDevicePropertyScopeInput : kAudioDevicePropertyScopeOutput;

    AudioObjectPropertyAddress theAddress = { kAudioDevicePropertyStreamConfiguration,
                                              theScope,
                                              0 };

    AudioObjectGetPropertyDataSize ( devID, &theAddress, 0, NULL, &propSize );

    AudioBufferList* buflist = (AudioBufferList*) malloc ( propSize );

    // guard against allocation failure (would otherwise dereference NULL below)
    if ( buflist == NULL )
    {
        return 0;
    }

    err = AudioObjectGetPropertyData ( devID, &theAddress, 0, NULL, &propSize, buflist );

    if ( !err )
    {
        for ( UInt32 i = 0; i < buflist->mNumberBuffers; ++i )
        {
            // The correct value mNumberChannels for an AudioBuffer can be derived from the mChannelsPerFrame
            // and the interleaved flag. For non interleaved formats, mNumberChannels is always 1.
            // For interleaved formats, mNumberChannels is equal to mChannelsPerFrame.
            result += buflist->mBuffers[i].mNumberChannels;

            if ( isInput )
            {
                vecNumInBufChan.Add ( buflist->mBuffers[i].mNumberChannels );
            }
            else
            {
                vecNumOutBufChan.Add ( buflist->mBuffers[i].mNumberChannels );
            }
        }
    }

    free ( buflist );
    return result;
}
// Selects the device pair at iDriverIdx after verifying its capabilities.
// On success (empty return string) the current device IDs are stored and the
// channel mapping is reset to the first two channels; on failure the error
// text from CheckDeviceCapabilities is returned unchanged.
QString CSound::LoadAndInitializeDriver ( int iDriverIdx, bool )
{
    // check device capabilities if it fulfills our requirements
    const QString strStat = CheckDeviceCapabilities ( iDriverIdx );

    // check if device is capable
    if ( strStat.isEmpty() )
    {
        // store ID of selected driver if initialization was successful
        lCurDev                    = iDriverIdx;
        CurrentAudioInputDeviceID  = audioInputDevice[iDriverIdx];
        CurrentAudioOutputDeviceID = audioOutputDevice[iDriverIdx];

        // the device has changed, per definition we reset the channel
        // mapping to the defaults (first two available channels)
        SetLeftInputChannel   ( 0 );
        SetRightInputChannel  ( 1 );
        SetLeftOutputChannel  ( 0 );
        SetRightOutputChannel ( 1 );
    }

    return strStat;
}
// Verifies that the device pair at iDriverIdx can be used by this software:
// - both devices run at (or can be switched to) SYSTEM_SAMPLE_RATE_HZ,
// - both stream formats are packed 32 bit float linear PCM with one frame
//   per packet.
// On success it also caches the channel counts and human-readable channel
// names, and (for exactly 4 input channels) adds 4 extra "mixed" input
// channel entries. Returns an empty string on success, otherwise a
// translated error description.
QString CSound::CheckDeviceCapabilities ( const int iDriverIdx )
{
    UInt32                      iPropertySize;
    AudioStreamBasicDescription CurDevStreamFormat;
    Float64                     inputSampleRate   = 0;
    Float64                     outputSampleRate  = 0;
    const Float64               fSystemSampleRate = static_cast<Float64> ( SYSTEM_SAMPLE_RATE_HZ );
    AudioObjectPropertyAddress  stPropertyAddress;

    stPropertyAddress.mScope   = kAudioObjectPropertyScopeGlobal;
    stPropertyAddress.mElement = kAudioObjectPropertyElementMaster;

    // check input device sample rate
    stPropertyAddress.mSelector = kAudioDevicePropertyNominalSampleRate;
    iPropertySize               = sizeof ( Float64 );

    AudioObjectGetPropertyData ( audioInputDevice[iDriverIdx],
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &inputSampleRate );

    if ( inputSampleRate != fSystemSampleRate )
    {
        // try to change the sample rate
        if ( AudioObjectSetPropertyData ( audioInputDevice[iDriverIdx],
                                          &stPropertyAddress,
                                          0,
                                          NULL,
                                          sizeof ( Float64 ),
                                          &fSystemSampleRate ) != noErr )
        {
            return QString ( tr ( "Current system audio input device sample "
                "rate of %1 Hz is not supported. Please open the Audio-MIDI-Setup in "
                "Applications->Utilities and try to set a sample rate of %2 Hz." ) ).arg (
                static_cast<int> ( inputSampleRate ) ).arg ( SYSTEM_SAMPLE_RATE_HZ );
        }
    }

    // check output device sample rate
    iPropertySize = sizeof ( Float64 );

    AudioObjectGetPropertyData ( audioOutputDevice[iDriverIdx],
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &outputSampleRate );

    if ( outputSampleRate != fSystemSampleRate )
    {
        // try to change the sample rate
        if ( AudioObjectSetPropertyData ( audioOutputDevice[iDriverIdx],
                                          &stPropertyAddress,
                                          0,
                                          NULL,
                                          sizeof ( Float64 ),
                                          &fSystemSampleRate ) != noErr )
        {
            return QString ( tr ( "Current system audio output device sample "
                "rate of %1 Hz is not supported. Please open the Audio-MIDI-Setup in "
                "Applications->Utilities and try to set a sample rate of %2 Hz." ) ).arg (
                static_cast<int> ( outputSampleRate ) ).arg ( SYSTEM_SAMPLE_RATE_HZ );
        }
    }

    // get the stream ID of the input device (at least one stream must always exist)
    iPropertySize               = 0;
    stPropertyAddress.mSelector = kAudioDevicePropertyStreams;
    stPropertyAddress.mScope    = kAudioObjectPropertyScopeInput;

    AudioObjectGetPropertyDataSize ( audioInputDevice[iDriverIdx],
                                     &stPropertyAddress,
                                     0,
                                     NULL,
                                     &iPropertySize );

    CVector<AudioStreamID> vInputStreamIDList ( iPropertySize );

    AudioObjectGetPropertyData ( audioInputDevice[iDriverIdx],
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &vInputStreamIDList[0] );

    const AudioStreamID inputStreamID = vInputStreamIDList[0];

    // get the stream ID of the output device (at least one stream must always exist)
    iPropertySize               = 0;
    stPropertyAddress.mSelector = kAudioDevicePropertyStreams;
    stPropertyAddress.mScope    = kAudioObjectPropertyScopeOutput;

    AudioObjectGetPropertyDataSize ( audioOutputDevice[iDriverIdx],
                                     &stPropertyAddress,
                                     0,
                                     NULL,
                                     &iPropertySize );

    CVector<AudioStreamID> vOutputStreamIDList ( iPropertySize );

    AudioObjectGetPropertyData ( audioOutputDevice[iDriverIdx],
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &vOutputStreamIDList[0] );

    const AudioStreamID outputStreamID = vOutputStreamIDList[0];

    // According to the AudioHardware documentation: "If the format is a linear PCM
    // format, the data will always be presented as 32 bit, native endian floating
    // point. All conversions to and from the true physical format of the hardware
    // is handled by the devices driver.".
    // check the input
    iPropertySize               = sizeof ( AudioStreamBasicDescription );
    stPropertyAddress.mSelector = kAudioStreamPropertyVirtualFormat;
    stPropertyAddress.mScope    = kAudioObjectPropertyScopeGlobal;

    AudioObjectGetPropertyData ( inputStreamID,
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &CurDevStreamFormat );

    if ( ( CurDevStreamFormat.mFormatID        != kAudioFormatLinearPCM ) ||
         ( CurDevStreamFormat.mFramesPerPacket != 1 ) ||
         ( CurDevStreamFormat.mBitsPerChannel  != 32 ) ||
         ( !( CurDevStreamFormat.mFormatFlags & kAudioFormatFlagIsFloat ) ) ||
         ( !( CurDevStreamFormat.mFormatFlags & kAudioFormatFlagIsPacked ) ) )
    {
        return tr ( "The audio input stream format for this audio device is "
                    "not compatible with this software." );
    }

    // check the output
    AudioObjectGetPropertyData ( outputStreamID,
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &CurDevStreamFormat );

    if ( ( CurDevStreamFormat.mFormatID        != kAudioFormatLinearPCM ) ||
         ( CurDevStreamFormat.mFramesPerPacket != 1 ) ||
         ( CurDevStreamFormat.mBitsPerChannel  != 32 ) ||
         ( !( CurDevStreamFormat.mFormatFlags & kAudioFormatFlagIsFloat ) ) ||
         ( !( CurDevStreamFormat.mFormatFlags & kAudioFormatFlagIsPacked ) ) )
    {
        return tr ( "The audio output stream format for this audio device is "
                    "not compatible with this software." );
    }

    // store the input and out number of channels for this device
    iNumInChan  = CountChannels ( audioInputDevice[iDriverIdx], true );
    iNumOutChan = CountChannels ( audioOutputDevice[iDriverIdx], false );

    // clip the number of input/output channels to our allowed maximum
    if ( iNumInChan > MAX_NUM_IN_OUT_CHANNELS )
    {
        iNumInChan = MAX_NUM_IN_OUT_CHANNELS;
    }
    if ( iNumOutChan > MAX_NUM_IN_OUT_CHANNELS )
    {
        iNumOutChan = MAX_NUM_IN_OUT_CHANNELS;
    }

    // get the channel names of the input device
    for ( int iCurInCH = 0; iCurInCH < iNumInChan; iCurInCH++ )
    {
        CFStringRef sPropertyStringValue = NULL;

        stPropertyAddress.mSelector = kAudioObjectPropertyElementName;
        stPropertyAddress.mElement  = iCurInCH + 1;
        stPropertyAddress.mScope    = kAudioObjectPropertyScopeInput;
        iPropertySize               = sizeof ( CFStringRef );

        AudioObjectGetPropertyData ( audioInputDevice[iDriverIdx],
                                     &stPropertyAddress,
                                     0,
                                     NULL,
                                     &iPropertySize,
                                     &sPropertyStringValue );

        // convert string
        const bool bConvOK = ConvertCFStringToQString ( sPropertyStringValue,
                                                        sChannelNamesInput[iCurInCH] );

        // add the "[n]:" at the beginning as is in the Audio-Midi-Setup
        if ( !bConvOK || ( iPropertySize == 0 ) )
        {
            // use a default name in case there was an error or the name is empty
            sChannelNamesInput[iCurInCH] =
                QString ( "%1: Channel %1" ).arg ( iCurInCH + 1 );
        }
        else
        {
            sChannelNamesInput[iCurInCH].prepend ( QString ( "%1: " ).arg ( iCurInCH + 1 ) );
        }
    }

    // get the channel names of the output device
    for ( int iCurOutCH = 0; iCurOutCH < iNumOutChan; iCurOutCH++ )
    {
        CFStringRef sPropertyStringValue = NULL;

        stPropertyAddress.mSelector = kAudioObjectPropertyElementName;
        stPropertyAddress.mElement  = iCurOutCH + 1;
        stPropertyAddress.mScope    = kAudioObjectPropertyScopeOutput;
        iPropertySize               = sizeof ( CFStringRef );

        AudioObjectGetPropertyData ( audioOutputDevice[iDriverIdx],
                                     &stPropertyAddress,
                                     0,
                                     NULL,
                                     &iPropertySize,
                                     &sPropertyStringValue );

        // convert string
        const bool bConvOK = ConvertCFStringToQString ( sPropertyStringValue,
                                                        sChannelNamesOutput[iCurOutCH] );

        // add the "[n]:" at the beginning as is in the Audio-Midi-Setup
        if ( !bConvOK || ( iPropertySize == 0 ) )
        {
            // use a default name in case there was an error or the name is empty
            sChannelNamesOutput[iCurOutCH] =
                QString ( "%1: Channel %1" ).arg ( iCurOutCH + 1 );
        }
        else
        {
            sChannelNamesOutput[iCurOutCH].prepend ( QString ( "%1: " ).arg ( iCurOutCH + 1 ) );
        }
    }

    // special case with 4 input channels: support adding channels
    if ( iNumInChan == 4 )
    {
        // add four mixed channels (i.e. 4 normal, 4 mixed channels)
        iNumInChanPlusAddChan = 8;

        for ( int iCh = 0; iCh < iNumInChanPlusAddChan; iCh++ )
        {
            int iSelCH, iSelAddCH;

            GetSelCHAndAddCH ( iCh, iNumInChan, iSelCH, iSelAddCH );

            if ( iSelAddCH >= 0 )
            {
                // for mixed channels, show both audio channel names to be mixed
                sChannelNamesInput[iCh] =
                    sChannelNamesInput[iSelCH] + " + " + sChannelNamesInput[iSelAddCH];
            }
        }
    }
    else
    {
        // regular case: no mixing input channels used
        iNumInChanPlusAddChan = iNumInChan;
    }

    // everything is ok, return empty string for "no error" case
    return "";
}
2020-05-02 17:15:18 +02:00
void CSound::UpdateChSelection()
{
// calculate the selected input/output buffer and the selected interleaved
// channel index in the buffer, note that each buffer can have a different
// number of interleaved channels
int iChCnt;
int iSelCHLeft, iSelAddCHLeft;
int iSelCHRight, iSelAddCHRight;
2020-05-03 13:44:50 +02:00
// initialize all buffer indexes with an invalid value
iSelInBufferLeft = INVALID_INDEX;
iSelInBufferRight = INVALID_INDEX;
iSelAddInBufferLeft = INVALID_INDEX; // if no additional channel used, this will stay on the invalid value
iSelAddInBufferRight = INVALID_INDEX; // if no additional channel used, this will stay on the invalid value
iSelOutBufferLeft = INVALID_INDEX;
iSelOutBufferRight = INVALID_INDEX;
2020-05-02 17:15:18 +02:00
// input
GetSelCHAndAddCH ( iSelInputLeftChannel, iNumInChan, iSelCHLeft, iSelAddCHLeft );
GetSelCHAndAddCH ( iSelInputRightChannel, iNumInChan, iSelCHRight, iSelAddCHRight );
iChCnt = 0;
for ( int iBuf = 0; iBuf < vecNumInBufChan.Size(); iBuf++ )
{
iChCnt += vecNumInBufChan[iBuf];
2020-05-03 13:44:50 +02:00
if ( ( iSelInBufferLeft < 0 ) && ( iChCnt > iSelCHLeft ) )
2020-05-02 17:15:18 +02:00
{
iSelInBufferLeft = iBuf;
iSelInInterlChLeft = iSelCHLeft - iChCnt + vecNumInBufChan[iBuf];
}
2020-05-03 13:44:50 +02:00
if ( ( iSelInBufferRight < 0 ) && ( iChCnt > iSelCHRight ) )
2020-05-02 17:15:18 +02:00
{
iSelInBufferRight = iBuf;
iSelInInterlChRight = iSelCHRight - iChCnt + vecNumInBufChan[iBuf];
}
2020-05-03 13:44:50 +02:00
if ( ( iSelAddCHLeft >= 0 ) && ( iSelAddInBufferLeft < 0 ) && ( iChCnt > iSelAddCHLeft ) )
2020-05-02 17:15:18 +02:00
{
iSelAddInBufferLeft = iBuf;
iSelAddInInterlChLeft = iSelAddCHLeft - iChCnt + vecNumInBufChan[iBuf];
}
2020-05-03 13:44:50 +02:00
if ( ( iSelAddCHRight >= 0 ) && ( iSelAddInBufferRight < 0 ) && ( iChCnt > iSelAddCHRight ) )
2020-05-02 17:15:18 +02:00
{
iSelAddInBufferRight = iBuf;
iSelAddInInterlChRight = iSelAddCHRight - iChCnt + vecNumInBufChan[iBuf];
}
}
// output
GetSelCHAndAddCH ( iSelOutputLeftChannel, iNumOutChan, iSelCHLeft, iSelAddCHLeft );
GetSelCHAndAddCH ( iSelOutputRightChannel, iNumOutChan, iSelCHRight, iSelAddCHRight );
iChCnt = 0;
for ( int iBuf = 0; iBuf < vecNumOutBufChan.Size(); iBuf++ )
{
iChCnt += vecNumOutBufChan[iBuf];
2020-05-03 13:44:50 +02:00
if ( ( iSelOutBufferLeft < 0 ) && ( iChCnt > iSelCHLeft ) )
2020-05-02 17:15:18 +02:00
{
iSelOutBufferLeft = iBuf;
iSelOutInterlChLeft = iSelCHLeft - iChCnt + vecNumOutBufChan[iBuf];
}
2020-05-03 13:44:50 +02:00
if ( ( iSelOutBufferRight < 0 ) && ( iChCnt > iSelCHRight ) )
2020-05-02 17:15:18 +02:00
{
iSelOutBufferRight = iBuf;
iSelOutInterlChRight = iSelCHRight - iChCnt + vecNumOutBufChan[iBuf];
}
}
}
void CSound::SetLeftInputChannel ( const int iNewChan )
{
// apply parameter after input parameter check
2020-05-01 16:04:04 +02:00
if ( ( iNewChan >= 0 ) && ( iNewChan < iNumInChanPlusAddChan ) )
{
iSelInputLeftChannel = iNewChan;
2020-05-02 17:15:18 +02:00
UpdateChSelection();
}
}
void CSound::SetRightInputChannel ( const int iNewChan )
{
// apply parameter after input parameter check
2020-05-01 16:04:04 +02:00
if ( ( iNewChan >= 0 ) && ( iNewChan < iNumInChanPlusAddChan ) )
{
iSelInputRightChannel = iNewChan;
2020-05-02 17:15:18 +02:00
UpdateChSelection();
}
}
void CSound::SetLeftOutputChannel ( const int iNewChan )
{
// apply parameter after input parameter check
if ( ( iNewChan >= 0 ) && ( iNewChan < iNumOutChan ) )
{
iSelOutputLeftChannel = iNewChan;
2020-05-02 17:15:18 +02:00
UpdateChSelection();
}
}
void CSound::SetRightOutputChannel ( const int iNewChan )
{
// apply parameter after input parameter check
if ( ( iNewChan >= 0 ) && ( iNewChan < iNumOutChan ) )
{
iSelOutputRightChannel = iNewChan;
2020-05-02 17:15:18 +02:00
UpdateChSelection();
}
}
2012-01-28 12:51:14 +01:00
void CSound::Start()
{
AudioObjectPropertyAddress stPropertyAddress;
stPropertyAddress.mElement = kAudioObjectPropertyElementMaster;
stPropertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
2015-11-22 10:23:00 +01:00
// setup callback for xruns (only for input is enough)
stPropertyAddress.mSelector = kAudioDeviceProcessorOverload;
AudioObjectAddPropertyListener ( audioInputDevice[lCurDev],
&stPropertyAddress,
deviceNotification,
this );
2015-11-22 10:23:00 +01:00
// setup callbacks for device property changes
stPropertyAddress.mSelector = kAudioDevicePropertyDeviceHasChanged;
AudioObjectAddPropertyListener ( audioInputDevice[lCurDev],
&stPropertyAddress,
deviceNotification,
this );
AudioObjectAddPropertyListener ( audioOutputDevice[lCurDev],
&stPropertyAddress,
deviceNotification,
this );
// register the callback function for input and output
AudioDeviceCreateIOProcID ( audioInputDevice[lCurDev],
callbackIO,
this,
&audioInputProcID );
AudioDeviceCreateIOProcID ( audioOutputDevice[lCurDev],
callbackIO,
this,
&audioOutputProcID );
// start the audio stream
AudioDeviceStart ( audioInputDevice[lCurDev], audioInputProcID );
AudioDeviceStart ( audioOutputDevice[lCurDev], audioOutputProcID );
2012-01-28 12:51:14 +01:00
// call base class
CSoundBase::Start();
}
void CSound::Stop()
{
// stop the audio stream
AudioDeviceStop ( audioInputDevice[lCurDev], audioInputProcID );
AudioDeviceStop ( audioOutputDevice[lCurDev], audioOutputProcID );
// unregister the callback function for input and output
AudioDeviceDestroyIOProcID ( audioInputDevice[lCurDev], audioInputProcID );
AudioDeviceDestroyIOProcID ( audioOutputDevice[lCurDev], audioOutputProcID );
AudioObjectPropertyAddress stPropertyAddress;
stPropertyAddress.mElement = kAudioObjectPropertyElementMaster;
stPropertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
2015-11-22 10:23:00 +01:00
// unregister callback functions for device property changes
stPropertyAddress.mSelector = kAudioDevicePropertyDeviceHasChanged;
AudioObjectRemovePropertyListener( audioOutputDevice[lCurDev],
&stPropertyAddress,
deviceNotification,
this );
AudioObjectRemovePropertyListener( audioInputDevice[lCurDev],
&stPropertyAddress,
deviceNotification,
this );
2015-11-22 10:23:00 +01:00
// unregister the callback function for xruns
stPropertyAddress.mSelector = kAudioDeviceProcessorOverload;
AudioObjectRemovePropertyListener( audioInputDevice[lCurDev],
&stPropertyAddress,
deviceNotification,
this );
2012-01-28 12:51:14 +01:00
// call base class
CSoundBase::Stop();
}
int CSound::Init ( const int iNewPrefMonoBufferSize )
{
UInt32 iActualMonoBufferSize;
// Error message string: in case buffer sizes on input and output cannot be
// set to the same value
const QString strErrBufSize = tr ( "The buffer sizes of the current "
"input and output audio device cannot be set to a common value. Please "
"choose other input/output audio devices in your system settings." );
// try to set input buffer size
iActualMonoBufferSize =
SetBufferSize ( audioInputDevice[lCurDev], true, iNewPrefMonoBufferSize );
if ( iActualMonoBufferSize != static_cast<UInt32> ( iNewPrefMonoBufferSize ) )
{
// try to set the input buffer size to the output so that we
// have a matching pair
if ( SetBufferSize ( audioOutputDevice[lCurDev], false, iActualMonoBufferSize ) !=
iActualMonoBufferSize )
{
throw CGenErr ( strErrBufSize );
}
}
else
{
// try to set output buffer size
if ( SetBufferSize ( audioOutputDevice[lCurDev], false, iNewPrefMonoBufferSize ) !=
static_cast<UInt32> ( iNewPrefMonoBufferSize ) )
{
throw CGenErr ( strErrBufSize );
}
}
// store buffer size
iCoreAudioBufferSizeMono = iActualMonoBufferSize;
2012-01-28 12:51:14 +01:00
// init base class
CSoundBase::Init ( iCoreAudioBufferSizeMono );
// set internal buffer size value and calculate stereo buffer size
2013-12-29 15:45:14 +01:00
iCoreAudioBufferSizeStereo = 2 * iCoreAudioBufferSizeMono;
2012-01-28 12:51:14 +01:00
// create memory for intermediate audio buffer
2013-12-29 15:45:14 +01:00
vecsTmpAudioSndCrdStereo.Init ( iCoreAudioBufferSizeStereo );
2012-01-28 12:51:14 +01:00
return iCoreAudioBufferSizeMono;
}
// Requests a buffer size (in frames) on the given device for the given
// direction, then reads back and returns the size the device actually
// accepted (drivers may round to a supported value).
UInt32 CSound::SetBufferSize ( AudioDeviceID& audioDeviceID,
                               const bool     bIsInput,
                               UInt32         iPrefBufferSize )
{
    AudioObjectPropertyAddress stPropertyAddress;

    stPropertyAddress.mSelector = kAudioDevicePropertyBufferFrameSize;

    if ( bIsInput )
    {
        stPropertyAddress.mScope = kAudioDevicePropertyScopeInput;
    }
    else
    {
        stPropertyAddress.mScope = kAudioDevicePropertyScopeOutput;
    }

    stPropertyAddress.mElement = kAudioObjectPropertyElementMaster;

    // first set the value
    UInt32 iSizeBufValue = sizeof ( UInt32 );

    AudioObjectSetPropertyData ( audioDeviceID,
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 iSizeBufValue,
                                 &iPrefBufferSize );

    // read back which value is actually used
    UInt32 iActualMonoBufferSize = 0;

    AudioObjectGetPropertyData ( audioDeviceID,
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iSizeBufValue,
                                 &iActualMonoBufferSize );

    return iActualMonoBufferSize;
}
// CoreAudio property-listener callback (registered in Start()). On a
// device-change notification it requests a full driver reload via the
// reinit signal; xrun (processor overload) handling is currently disabled.
OSStatus CSound::deviceNotification ( AudioDeviceID,
                                      UInt32,
                                      const AudioObjectPropertyAddress* inAddresses,
                                      void*                             inRefCon )
{
    CSound* pSound = static_cast<CSound*> ( inRefCon );

    if ( inAddresses->mSelector == kAudioDevicePropertyDeviceHasChanged )
    {
        // if any property of the device has changed, do a full reload
        pSound->EmitReinitRequestSignal ( RS_RELOAD_RESTART_AND_INIT );
    }

/*
    if ( inAddresses->mSelector == kAudioDeviceProcessorOverload )
    {
        // xrun handling (it is important to act on xruns under CoreAudio
        // since it seems that the xrun situation stays stable for a
        // while and would give you a long time bad audio)
        pSound->EmitReinitRequestSignal ( RS_ONLY_RESTART );
    }
*/

    return noErr;
}
OSStatus CSound::callbackIO ( AudioDeviceID inDevice,
const AudioTimeStamp*,
const AudioBufferList* inInputData,
const AudioTimeStamp*,
AudioBufferList* outOutputData,
const AudioTimeStamp*,
void* inRefCon )
{
CSound* pSound = static_cast<CSound*> ( inRefCon );
// both, the input and output device use the same callback function
QMutexLocker locker ( &pSound->Mutex );
2020-05-03 13:44:50 +02:00
const int iCoreAudioBufferSizeMono = pSound->iCoreAudioBufferSizeMono;
const int iSelInBufferLeft = pSound->iSelInBufferLeft;
const int iSelInBufferRight = pSound->iSelInBufferRight;
const int iSelInInterlChLeft = pSound->iSelInInterlChLeft;
const int iSelInInterlChRight = pSound->iSelInInterlChRight;
const int iSelAddInBufferLeft = pSound->iSelAddInBufferLeft;
const int iSelAddInBufferRight = pSound->iSelAddInBufferRight;
const int iSelAddInInterlChLeft = pSound->iSelAddInInterlChLeft;
const int iSelAddInInterlChRight = pSound->iSelAddInInterlChRight;
const int iSelOutBufferLeft = pSound->iSelOutBufferLeft;
const int iSelOutBufferRight = pSound->iSelOutBufferRight;
const int iSelOutInterlChLeft = pSound->iSelOutInterlChLeft;
const int iSelOutInterlChRight = pSound->iSelOutInterlChRight;
const CVector<int>& vecNumInBufChan = pSound->vecNumInBufChan;
const CVector<int>& vecNumOutBufChan = pSound->vecNumOutBufChan;
if ( ( inDevice == pSound->CurrentAudioInputDeviceID ) && inInputData )
{
2020-05-03 13:44:50 +02:00
// check sizes (note that float32 has four bytes)
if ( ( iSelInBufferLeft >= 0 ) &&
2020-05-03 14:18:22 +02:00
( iSelInBufferLeft < static_cast<int> ( inInputData->mNumberBuffers ) ) &&
2020-05-03 13:44:50 +02:00
( iSelInBufferRight >= 0 ) &&
2020-05-03 14:18:22 +02:00
( iSelInBufferRight < static_cast<int> ( inInputData->mNumberBuffers ) ) &&
( iSelAddInBufferLeft < static_cast<int> ( inInputData->mNumberBuffers ) ) &&
( iSelAddInBufferRight < static_cast<int> ( inInputData->mNumberBuffers ) ) &&
( inInputData->mBuffers[iSelInBufferLeft].mDataByteSize == static_cast<UInt32> ( vecNumInBufChan[iSelInBufferLeft] * iCoreAudioBufferSizeMono * 4 ) ) &&
( inInputData->mBuffers[iSelInBufferRight].mDataByteSize == static_cast<UInt32> ( vecNumInBufChan[iSelInBufferRight] * iCoreAudioBufferSizeMono * 4 ) ) )
{
2020-05-02 17:15:18 +02:00
Float32* pLeftData = static_cast<Float32*> ( inInputData->mBuffers[iSelInBufferLeft].mData );
Float32* pRightData = static_cast<Float32*> ( inInputData->mBuffers[iSelInBufferRight].mData );
2020-05-03 13:44:50 +02:00
int iNumChanPerFrameLeft = vecNumInBufChan[iSelInBufferLeft];
int iNumChanPerFrameRight = vecNumInBufChan[iSelInBufferRight];
// copy input data
for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ )
{
// copy left and right channels separately
2020-05-02 17:15:18 +02:00
pSound->vecsTmpAudioSndCrdStereo[2 * i] = (short) ( pLeftData[iNumChanPerFrameLeft * i + iSelInInterlChLeft] * _MAXSHORT );
pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] = (short) ( pRightData[iNumChanPerFrameRight * i + iSelInInterlChRight] * _MAXSHORT );
}
// add an additional optional channel
2020-05-02 17:15:18 +02:00
if ( iSelAddInBufferLeft >= 0 )
{
2020-05-02 17:15:18 +02:00
pLeftData = static_cast<Float32*> ( inInputData->mBuffers[iSelAddInBufferLeft].mData );
2020-05-03 13:44:50 +02:00
iNumChanPerFrameLeft = vecNumInBufChan[iSelAddInBufferLeft];
for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ )
{
pSound->vecsTmpAudioSndCrdStereo[2 * i] = Double2Short (
2020-05-02 17:15:18 +02:00
pSound->vecsTmpAudioSndCrdStereo[2 * i] + pLeftData[iNumChanPerFrameLeft * i + iSelAddInInterlChLeft] * _MAXSHORT );
}
}
2020-05-02 17:15:18 +02:00
if ( iSelAddInBufferRight >= 0 )
{
2020-05-02 17:15:18 +02:00
pRightData = static_cast<Float32*> ( inInputData->mBuffers[iSelAddInBufferRight].mData );
2020-05-03 13:44:50 +02:00
iNumChanPerFrameRight = vecNumInBufChan[iSelAddInBufferRight];
for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ )
{
pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] = Double2Short (
2020-05-02 17:15:18 +02:00
pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] + pRightData[iNumChanPerFrameRight * i + iSelAddInInterlChRight] * _MAXSHORT );
}
}
}
else
{
// incompatible sizes, clear work buffer
pSound->vecsTmpAudioSndCrdStereo.Reset ( 0 );
}
// call processing callback function
pSound->ProcessCallback ( pSound->vecsTmpAudioSndCrdStereo );
}
if ( ( inDevice == pSound->CurrentAudioOutputDeviceID ) && outOutputData )
{
2020-05-03 13:44:50 +02:00
// check sizes (note that float32 has four bytes)
if ( ( iSelOutBufferLeft >= 0 ) &&
2020-05-03 14:18:22 +02:00
( iSelOutBufferLeft < static_cast<int> ( outOutputData->mNumberBuffers ) ) &&
2020-05-03 13:44:50 +02:00
( iSelOutBufferRight >= 0 ) &&
2020-05-03 14:18:22 +02:00
( iSelOutBufferRight < static_cast<int> ( outOutputData->mNumberBuffers ) ) &&
( outOutputData->mBuffers[iSelOutBufferLeft].mDataByteSize == static_cast<UInt32> ( vecNumOutBufChan[iSelOutBufferLeft] * iCoreAudioBufferSizeMono * 4 ) ) &&
( outOutputData->mBuffers[iSelOutBufferRight].mDataByteSize == static_cast<UInt32> ( vecNumOutBufChan[iSelOutBufferRight] * iCoreAudioBufferSizeMono * 4 ) ) )
2020-05-03 13:44:50 +02:00
{
Float32* pLeftData = static_cast<Float32*> ( outOutputData->mBuffers[iSelOutBufferLeft].mData );
Float32* pRightData = static_cast<Float32*> ( outOutputData->mBuffers[iSelOutBufferRight].mData );
int iNumChanPerFrameLeft = vecNumOutBufChan[iSelOutBufferLeft];
int iNumChanPerFrameRight = vecNumOutBufChan[iSelOutBufferRight];
// copy output data
for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ )
{
// copy left and right channels separately
pLeftData[iNumChanPerFrameLeft * i + iSelOutInterlChLeft] = (Float32) pSound->vecsTmpAudioSndCrdStereo[2 * i] / _MAXSHORT;
pRightData[iNumChanPerFrameRight * i + iSelOutInterlChRight] = (Float32) pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] / _MAXSHORT;
}
}
}
return kAudioHardwareNoError;
}
void CSound::callbackMIDI ( const MIDIPacketList* pktlist,
void* refCon,
void* )
{
CSound* pSound = static_cast<CSound*> ( refCon );
if ( pSound->midiInPortRef != static_cast<MIDIPortRef> ( NULL ) )
{
MIDIPacket* midiPacket = const_cast<MIDIPacket*> ( pktlist->packet );
for ( unsigned int j = 0; j < pktlist->numPackets; j++ )
{
// copy packet and send it to the MIDI parser
2019-01-12 15:34:35 +01:00
CVector<uint8_t> vMIDIPaketBytes ( midiPacket->length );
for ( int i = 0; i < midiPacket->length; i++ )
{
vMIDIPaketBytes[i] = static_cast<uint8_t> ( midiPacket->data[i] );
}
pSound->ParseMIDIMessage ( vMIDIPaketBytes );
midiPacket = MIDIPacketNext ( midiPacket );
}
}
}
// Converts a CoreFoundation string reference to a QString (UTF-8).
// Returns true and sets sOut on success, false otherwise.
// NOTE(review): ownership is asymmetric — stringRef is CFRelease'd only on
// the failure paths (NULL check aside); on success the caller keeps the
// reference. Callers appear to rely on this behavior, so it is kept as-is.
bool CSound::ConvertCFStringToQString ( const CFStringRef stringRef,
                                        QString&          sOut )
{
    // check if the string reference is a valid pointer
    if ( stringRef != NULL )
    {
        // first check if the string is not empty
        if ( CFStringGetLength ( stringRef ) > 0 )
        {
            // convert CFString in c-string (quick hack!) and then in QString;
            // a UTF-16 code unit expands to at most three UTF-8 bytes, plus
            // one byte for the terminating NUL character
            char* sC_strPropValue =
                (char*) malloc ( CFStringGetLength ( stringRef ) * 3 + 1 );

            if ( CFStringGetCString ( stringRef,
                                      sC_strPropValue,
                                      CFStringGetLength ( stringRef ) * 3 + 1,
                                      kCFStringEncodingUTF8 ) )
            {
                sOut = sC_strPropValue;
                free ( sC_strPropValue );

                return true; // OK
            }

            // conversion failed: release the buffer to avoid a memory leak
            // (previously it was leaked on this path)
            free ( sC_strPropValue );
        }

        // release the string reference because it is not needed anymore
        CFRelease ( stringRef );
    }

    return false; // not OK
}