/******************************************************************************\
 * Copyright (c) 2004-2020
 *
 * Author(s):
 *  Volker Fischer
 *
 ******************************************************************************
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
\******************************************************************************/

#include "sound.h"


/* Implementation *************************************************************/
CSound::CSound ( void (*fpNewProcessCallback) ( CVector<short>& psData, void* arg ),
                 void*       arg,
                 const int   iCtrlMIDIChannel,
                 const bool  bNoAutoJackConnect,
                 QString&    strJackClientName ) :
    CSoundBase ( "CoreAudio", true, fpNewProcessCallback, arg, iCtrlMIDIChannel, bNoAutoJackConnect, strJackClientName ),
    midiInPortRef ( static_cast<MIDIPortRef> ( NULL ) )
{
    // Apple Mailing Lists: Subject: GUI Apps should set kAudioHardwarePropertyRunLoop
    // in the HAL, From: Jeff Moore, Date: Fri, 6 Dec 2002
    // Most GUI applications have several threads on which they receive
    // notifications already, so having the HAL's thread around is wasteful.
    // Here is what you should do: On the thread you want the HAL to use for
    // notifications (for most apps, this will be the main thread), add the
    // following lines of code:

    // tell the HAL to use the current thread as its run loop
    CFRunLoopRef theRunLoop = CFRunLoopGetCurrent();

    AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                            kAudioObjectPropertyScopeGlobal,
                                            kAudioObjectPropertyElementMaster };

    AudioObjectSetPropertyData ( kAudioObjectSystemObject,
                                 &property,
                                 0,
                                 NULL,
                                 sizeof ( CFRunLoopRef ),
                                 &theRunLoop );


    // Get available input/output devices --------------------------------------
    UInt32                     iPropertySize = 0;
    AudioObjectPropertyAddress stPropertyAddress;

    stPropertyAddress.mScope   = kAudioObjectPropertyScopeGlobal;
    stPropertyAddress.mElement = kAudioObjectPropertyElementMaster;

    // first get property size of devices array and allocate memory
    stPropertyAddress.mSelector = kAudioHardwarePropertyDevices;

    AudioObjectGetPropertyDataSize ( kAudioObjectSystemObject,
                                     &stPropertyAddress,
                                     0,
                                     NULL,
                                     &iPropertySize );

    CVector<AudioDeviceID> vAudioDevices ( iPropertySize );

    // now actually query all devices present in the system
    AudioObjectGetPropertyData ( kAudioObjectSystemObject,
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &vAudioDevices[0] );

    // calculate device count based on size of returned data array
    const UInt32 iDeviceCount = iPropertySize / sizeof ( AudioDeviceID );

    // always add system default devices for input and output as first entry
    lNumDevs                 = 0;
    strDriverNames[lNumDevs] = "System Default In/Out Devices";

    iPropertySize               = sizeof ( AudioDeviceID );
    stPropertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice;

    if ( AudioObjectGetPropertyData ( kAudioObjectSystemObject,
                                      &stPropertyAddress,
                                      0,
                                      NULL,
                                      &iPropertySize,
                                      &audioInputDevice[lNumDevs] ) )
    {
        throw CGenErr ( tr ( "CoreAudio input AudioHardwareGetProperty call failed. "
                             "It seems that no sound card is available in the system." ) );
    }

    iPropertySize               = sizeof ( AudioDeviceID );
    stPropertyAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice;

    if ( AudioObjectGetPropertyData ( kAudioObjectSystemObject,
                                      &stPropertyAddress,
                                      0,
                                      NULL,
                                      &iPropertySize,
                                      &audioOutputDevice[lNumDevs] ) )
    {
        throw CGenErr ( tr ( "CoreAudio output AudioHardwareGetProperty call failed. "
                             "It seems that no sound card is available in the system." ) );
    }

    lNumDevs++; // next device

    // add detected devices
    //
    // we add combined entries for input and output for each device so that we
    // do not need two combo boxes in the GUI for input and output (therefore
    // all possible combinations are required, which can be a large number)
    for ( UInt32 i = 0; i < iDeviceCount; i++ )
    {
        for ( UInt32 j = 0; j < iDeviceCount; j++ )
        {
            // get device infos for both current devices
            QString strDeviceName_i;
            QString strDeviceName_j;
            bool    bIsInput_i;
            bool    bIsInput_j;
            bool    bIsOutput_i;
            bool    bIsOutput_j;

            GetAudioDeviceInfos ( vAudioDevices[i],
                                  strDeviceName_i,
                                  bIsInput_i,
                                  bIsOutput_i );

            GetAudioDeviceInfos ( vAudioDevices[j],
                                  strDeviceName_j,
                                  bIsInput_j,
                                  bIsOutput_j );

            // check that device i is an input, device j is an output and that
            // we are still in range
            if ( bIsInput_i && bIsOutput_j && ( lNumDevs < MAX_NUMBER_SOUND_CARDS ) )
            {
                strDriverNames[lNumDevs] = "in: " +
                    strDeviceName_i + "/out: " +
                    strDeviceName_j;

                // store audio device IDs
                audioInputDevice[lNumDevs]  = vAudioDevices[i];
                audioOutputDevice[lNumDevs] = vAudioDevices[j];

                lNumDevs++; // next device
            }
        }
    }

    // init device index as not initialized (invalid)
    lCurDev                    = INVALID_SNC_CARD_DEVICE;
    CurrentAudioInputDeviceID  = 0;
    CurrentAudioOutputDeviceID = 0;
    iNumInChan                 = 0;
    iNumOutChan                = 0;
    iSelInputLeftChannel       = 0;
    iSelInputRightChannel      = 0;
    iSelOutputLeftChannel      = 0;
    iSelOutputRightChannel     = 0;


    // Optional MIDI initialization --------------------------------------------
    if ( iCtrlMIDIChannel != INVALID_MIDI_CH )
    {
        // create client and ports
        MIDIClientRef midiClient = static_cast<MIDIClientRef> ( NULL );
        MIDIClientCreate ( CFSTR ( APP_NAME ), NULL, NULL, &midiClient );
        MIDIInputPortCreate ( midiClient, CFSTR ( "Input port" ), callbackMIDI, this, &midiInPortRef );

        // open connections from all sources
        const int iNMIDISources = MIDIGetNumberOfSources();

        for ( int i = 0; i < iNMIDISources; i++ )
        {
            MIDIEndpointRef src = MIDIGetSource ( i );
            MIDIPortConnectSource ( midiInPortRef, src, NULL );
        }
    }
}

void CSound::GetAudioDeviceInfos ( const AudioDeviceID DeviceID,
                                   QString&            strDeviceName,
                                   bool&               bIsInput,
                                   bool&               bIsOutput )
{
    UInt32                     iPropertySize;
    AudioObjectPropertyAddress stPropertyAddress;

    // init return values
    bIsInput  = false;
    bIsOutput = false;

    // check if device is input or output or both (is that possible?)
    stPropertyAddress.mSelector = kAudioDevicePropertyStreams;
    stPropertyAddress.mElement  = kAudioObjectPropertyElementMaster;

    // input check
    iPropertySize            = 0;
    stPropertyAddress.mScope = kAudioDevicePropertyScopeInput;

    AudioObjectGetPropertyDataSize ( DeviceID,
                                     &stPropertyAddress,
                                     0,
                                     NULL,
                                     &iPropertySize );

    bIsInput = ( iPropertySize > 0 ); // check if any input streams are available

    // output check
    iPropertySize            = 0;
    stPropertyAddress.mScope = kAudioDevicePropertyScopeOutput;

    AudioObjectGetPropertyDataSize ( DeviceID,
                                     &stPropertyAddress,
                                     0,
                                     NULL,
                                     &iPropertySize );

    bIsOutput = ( iPropertySize > 0 ); // check if any output streams are available

    // get property name
    CFStringRef sPropertyStringValue = NULL;

    stPropertyAddress.mSelector = kAudioObjectPropertyName;
    stPropertyAddress.mScope    = kAudioObjectPropertyScopeGlobal;
    iPropertySize               = sizeof ( CFStringRef );

    AudioObjectGetPropertyData ( DeviceID,
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &sPropertyStringValue );

    // convert string
    if ( !ConvertCFStringToQString ( sPropertyStringValue, strDeviceName ) )
    {
        // use a default name in case the conversion did not succeed
        strDeviceName = "UNKNOWN";
    }
}

int CSound::CountChannels ( AudioDeviceID devID,
                            const int     iNumChanPerFrame,
                            bool          isInput )
{
    OSStatus err;
    UInt32   propSize;
    int      result = 0;

    // check for the case that we have an interleaved format, in that case we
    // assume that only the very first buffer contains all our channels
    if ( iNumChanPerFrame > 1 )
    {
        result = iNumChanPerFrame;
    }
    else
    {
        // it seems we have multiple buffers where each buffer has only one channel,
        // in that case we assume that each input channel has its own buffer
        AudioObjectPropertyScope theScope = isInput ? kAudioDevicePropertyScopeInput : kAudioDevicePropertyScopeOutput;

        AudioObjectPropertyAddress theAddress = { kAudioDevicePropertyStreamConfiguration,
                                                  theScope,
                                                  0 };

        AudioObjectGetPropertyDataSize ( devID, &theAddress, 0, NULL, &propSize );

        AudioBufferList* buflist = (AudioBufferList*) malloc ( propSize );

        err = AudioObjectGetPropertyData ( devID, &theAddress, 0, NULL, &propSize, buflist );

        if ( !err )
        {
            for ( UInt32 i = 0; i < buflist->mNumberBuffers; ++i )
            {
                // The correct value of mNumberChannels for an AudioBuffer can be derived
                // from mChannelsPerFrame and the interleaved flag. For non-interleaved
                // formats, mNumberChannels is always 1. For interleaved formats,
                // mNumberChannels is equal to mChannelsPerFrame.
                result += buflist->mBuffers[i].mNumberChannels;
            }
        }
        free ( buflist );
    }

    return result;
}

QString CSound::LoadAndInitializeDriver ( int iDriverIdx, bool )
{
    // check whether the device capabilities fulfill our requirements
    const QString strStat = CheckDeviceCapabilities ( iDriverIdx );

    // check if device is capable
    if ( strStat.isEmpty() )
    {
        // store ID of selected driver if initialization was successful
        lCurDev                    = iDriverIdx;
        CurrentAudioInputDeviceID  = audioInputDevice[iDriverIdx];
        CurrentAudioOutputDeviceID = audioOutputDevice[iDriverIdx];

        // the device has changed, per definition we reset the channel
        // mapping to the defaults (first two available channels)
        iSelInputLeftChannel   = 0;
        iSelInputRightChannel  = min ( iNumInChan - 1, 1 );
        iSelOutputLeftChannel  = 0;
        iSelOutputRightChannel = min ( iNumOutChan - 1, 1 );
    }

    return strStat;
}

QString CSound::CheckDeviceCapabilities ( const int iDriverIdx )
{
    UInt32                      iPropertySize;
    AudioStreamBasicDescription CurDevStreamFormat;
    Float64                     inputSampleRate   = 0;
    Float64                     outputSampleRate  = 0;
    const Float64               fSystemSampleRate = static_cast<Float64> ( SYSTEM_SAMPLE_RATE_HZ );
    AudioObjectPropertyAddress  stPropertyAddress;

    stPropertyAddress.mScope   = kAudioObjectPropertyScopeGlobal;
    stPropertyAddress.mElement = kAudioObjectPropertyElementMaster;

    // check input device sample rate
    stPropertyAddress.mSelector = kAudioDevicePropertyNominalSampleRate;
    iPropertySize               = sizeof ( Float64 );

    AudioObjectGetPropertyData ( audioInputDevice[iDriverIdx],
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &inputSampleRate );

    if ( inputSampleRate != fSystemSampleRate )
    {
        // try to change the sample rate
        if ( AudioObjectSetPropertyData ( audioInputDevice[iDriverIdx],
                                          &stPropertyAddress,
                                          0,
                                          NULL,
                                          sizeof ( Float64 ),
                                          &fSystemSampleRate ) != noErr )
        {
            return QString ( tr ( "Current system audio input device sample "
                "rate of %1 Hz is not supported. Please open the Audio-MIDI-Setup in "
                "Applications->Utilities and try to set a sample rate of %2 Hz." ) ).arg (
                static_cast<int> ( inputSampleRate ) ).arg ( SYSTEM_SAMPLE_RATE_HZ );
        }
    }

    // check output device sample rate
    iPropertySize = sizeof ( Float64 );

    AudioObjectGetPropertyData ( audioOutputDevice[iDriverIdx],
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &outputSampleRate );

    if ( outputSampleRate != fSystemSampleRate )
    {
        // try to change the sample rate
        if ( AudioObjectSetPropertyData ( audioOutputDevice[iDriverIdx],
                                          &stPropertyAddress,
                                          0,
                                          NULL,
                                          sizeof ( Float64 ),
                                          &fSystemSampleRate ) != noErr )
        {
            return QString ( tr ( "Current system audio output device sample "
                "rate of %1 Hz is not supported. Please open the Audio-MIDI-Setup in "
                "Applications->Utilities and try to set a sample rate of %2 Hz." ) ).arg (
                static_cast<int> ( outputSampleRate ) ).arg ( SYSTEM_SAMPLE_RATE_HZ );
        }
    }

    // get the stream ID of the input device (at least one stream must always exist)
    iPropertySize               = 0;
    stPropertyAddress.mSelector = kAudioDevicePropertyStreams;
    stPropertyAddress.mScope    = kAudioObjectPropertyScopeInput;

    AudioObjectGetPropertyDataSize ( audioInputDevice[iDriverIdx],
                                     &stPropertyAddress,
                                     0,
                                     NULL,
                                     &iPropertySize );

    CVector<AudioStreamID> vInputStreamIDList ( iPropertySize );

    AudioObjectGetPropertyData ( audioInputDevice[iDriverIdx],
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &vInputStreamIDList[0] );

    const AudioStreamID inputStreamID = vInputStreamIDList[0];

    // get the stream ID of the output device (at least one stream must always exist)
    iPropertySize               = 0;
    stPropertyAddress.mSelector = kAudioDevicePropertyStreams;
    stPropertyAddress.mScope    = kAudioObjectPropertyScopeOutput;

    AudioObjectGetPropertyDataSize ( audioOutputDevice[iDriverIdx],
                                     &stPropertyAddress,
                                     0,
                                     NULL,
                                     &iPropertySize );

    CVector<AudioStreamID> vOutputStreamIDList ( iPropertySize );

    AudioObjectGetPropertyData ( audioOutputDevice[iDriverIdx],
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &vOutputStreamIDList[0] );

    const AudioStreamID outputStreamID = vOutputStreamIDList[0];

    // According to the AudioHardware documentation: "If the format is a linear PCM
    // format, the data will always be presented as 32 bit, native endian floating
    // point. All conversions to and from the true physical format of the hardware
    // is handled by the devices driver.".

    // check the input
    iPropertySize               = sizeof ( AudioStreamBasicDescription );
    stPropertyAddress.mSelector = kAudioStreamPropertyVirtualFormat;
    stPropertyAddress.mScope    = kAudioObjectPropertyScopeGlobal;

    AudioObjectGetPropertyData ( inputStreamID,
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &CurDevStreamFormat );

    if ( ( CurDevStreamFormat.mFormatID        != kAudioFormatLinearPCM ) ||
         ( CurDevStreamFormat.mFramesPerPacket != 1 ) ||
         ( CurDevStreamFormat.mBitsPerChannel  != 32 ) ||
         ( !( CurDevStreamFormat.mFormatFlags & kAudioFormatFlagIsFloat ) ) ||
         ( !( CurDevStreamFormat.mFormatFlags & kAudioFormatFlagIsPacked ) ) )
    {
        return tr ( "The audio input stream format for this audio device is "
                    "not compatible with this software." );
    }

    // store the input number of channels per frame for this stream
    const int iNumInChanPerFrame = CurDevStreamFormat.mChannelsPerFrame;

    // check the output
    AudioObjectGetPropertyData ( outputStreamID,
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iPropertySize,
                                 &CurDevStreamFormat );

    if ( ( CurDevStreamFormat.mFormatID        != kAudioFormatLinearPCM ) ||
         ( CurDevStreamFormat.mFramesPerPacket != 1 ) ||
         ( CurDevStreamFormat.mBitsPerChannel  != 32 ) ||
         ( !( CurDevStreamFormat.mFormatFlags & kAudioFormatFlagIsFloat ) ) ||
         ( !( CurDevStreamFormat.mFormatFlags & kAudioFormatFlagIsPacked ) ) )
    {
        return tr ( "The audio output stream format for this audio device is "
                    "not compatible with this software." );
    }

    // store the output number of channels per frame for this stream
    const int iNumOutChanPerFrame = CurDevStreamFormat.mChannelsPerFrame;

    // store the input and output number of channels for this device
    iNumInChan  = CountChannels ( audioInputDevice[iDriverIdx], iNumInChanPerFrame, true );
    iNumOutChan = CountChannels ( audioOutputDevice[iDriverIdx], iNumOutChanPerFrame, false );

    // clip the number of input/output channels to our allowed maximum
    if ( iNumInChan > MAX_NUM_IN_OUT_CHANNELS )
    {
        iNumInChan = MAX_NUM_IN_OUT_CHANNELS;
    }
    if ( iNumOutChan > MAX_NUM_IN_OUT_CHANNELS )
    {
        iNumOutChan = MAX_NUM_IN_OUT_CHANNELS;
    }

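    // NOTE: per channel properties are addressed via the element field: element 0
    // is the master element, the individual channels are numbered starting at 1,
    // hence the "+ 1" on the zero based channel counters in the loops below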
    // get the channel names of the input device
    for ( int iCurInCH = 0; iCurInCH < iNumInChan; iCurInCH++ )
    {
        CFStringRef sPropertyStringValue = NULL;

        stPropertyAddress.mSelector = kAudioObjectPropertyElementName;
        stPropertyAddress.mElement  = iCurInCH + 1;
        stPropertyAddress.mScope    = kAudioObjectPropertyScopeInput;
        iPropertySize               = sizeof ( CFStringRef );

        AudioObjectGetPropertyData ( audioInputDevice[iDriverIdx],
                                     &stPropertyAddress,
                                     0,
                                     NULL,
                                     &iPropertySize,
                                     &sPropertyStringValue );

        // convert string
        const bool bConvOK = ConvertCFStringToQString ( sPropertyStringValue,
                                                        sChannelNamesInput[iCurInCH] );

        // add the "[n]:" at the beginning as is done in the Audio-MIDI-Setup
        if ( !bConvOK || ( iPropertySize == 0 ) )
        {
            // use a default name in case there was an error or the name is empty
            sChannelNamesInput[iCurInCH] =
                QString ( "%1: Channel %1" ).arg ( iCurInCH + 1 );
        }
        else
        {
            sChannelNamesInput[iCurInCH].prepend ( QString ( "%1: " ).arg ( iCurInCH + 1 ) );
        }
    }

    // get the channel names of the output device
    for ( int iCurOutCH = 0; iCurOutCH < iNumOutChan; iCurOutCH++ )
    {
        CFStringRef sPropertyStringValue = NULL;

        stPropertyAddress.mSelector = kAudioObjectPropertyElementName;
        stPropertyAddress.mElement  = iCurOutCH + 1;
        stPropertyAddress.mScope    = kAudioObjectPropertyScopeOutput;
        iPropertySize               = sizeof ( CFStringRef );

        AudioObjectGetPropertyData ( audioOutputDevice[iDriverIdx],
                                     &stPropertyAddress,
                                     0,
                                     NULL,
                                     &iPropertySize,
                                     &sPropertyStringValue );

        // convert string
        const bool bConvOK = ConvertCFStringToQString ( sPropertyStringValue,
                                                        sChannelNamesOutput[iCurOutCH] );

        // add the "[n]:" at the beginning as is done in the Audio-MIDI-Setup
        if ( !bConvOK || ( iPropertySize == 0 ) )
        {
            // use a default name in case there was an error or the name is empty
            sChannelNamesOutput[iCurOutCH] =
                QString ( "%1: Channel %1" ).arg ( iCurOutCH + 1 );
        }
        else
        {
            sChannelNamesOutput[iCurOutCH].prepend ( QString ( "%1: " ).arg ( iCurOutCH + 1 ) );
        }
    }

    // everything is ok, return empty string for "no error" case
    return "";
}

void CSound::SetLeftInputChannel ( const int iNewChan )
{
    // apply parameter after input parameter check
    if ( ( iNewChan >= 0 ) && ( iNewChan < iNumInChan ) )
    {
        iSelInputLeftChannel = iNewChan;
    }
}

void CSound::SetRightInputChannel ( const int iNewChan )
{
    // apply parameter after input parameter check
    if ( ( iNewChan >= 0 ) && ( iNewChan < iNumInChan ) )
    {
        iSelInputRightChannel = iNewChan;
    }
}

void CSound::SetLeftOutputChannel ( const int iNewChan )
{
    // apply parameter after input parameter check
    if ( ( iNewChan >= 0 ) && ( iNewChan < iNumOutChan ) )
    {
        iSelOutputLeftChannel = iNewChan;
    }
}

void CSound::SetRightOutputChannel ( const int iNewChan )
{
    // apply parameter after input parameter check
    if ( ( iNewChan >= 0 ) && ( iNewChan < iNumOutChan ) )
    {
        iSelOutputRightChannel = iNewChan;
    }
}

void CSound::Start()
{
    AudioObjectPropertyAddress stPropertyAddress;

    stPropertyAddress.mElement = kAudioObjectPropertyElementMaster;
    stPropertyAddress.mScope   = kAudioObjectPropertyScopeGlobal;

    // set up the callback for xruns (registering it on the input device only is sufficient)
    stPropertyAddress.mSelector = kAudioDeviceProcessorOverload;

    AudioObjectAddPropertyListener ( audioInputDevice[lCurDev],
                                     &stPropertyAddress,
                                     deviceNotification,
                                     this );

    // set up the callbacks for device property changes
    stPropertyAddress.mSelector = kAudioDevicePropertyDeviceHasChanged;

    AudioObjectAddPropertyListener ( audioInputDevice[lCurDev],
                                     &stPropertyAddress,
                                     deviceNotification,
                                     this );

    AudioObjectAddPropertyListener ( audioOutputDevice[lCurDev],
                                     &stPropertyAddress,
                                     deviceNotification,
                                     this );

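    // NOTE: the same callbackIO function serves both devices; at run time it
    // distinguishes input from output by comparing its inDevice argument with
    // CurrentAudioInputDeviceID and CurrentAudioOutputDeviceID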
    // register the callback function for input and output
    AudioDeviceCreateIOProcID ( audioInputDevice[lCurDev],
                                callbackIO,
                                this,
                                &audioInputProcID );

    AudioDeviceCreateIOProcID ( audioOutputDevice[lCurDev],
                                callbackIO,
                                this,
                                &audioOutputProcID );

    // start the audio stream
    AudioDeviceStart ( audioInputDevice[lCurDev], audioInputProcID );
    AudioDeviceStart ( audioOutputDevice[lCurDev], audioOutputProcID );

    // call base class
    CSoundBase::Start();
}

void CSound::Stop()
{
    // stop the audio stream
    AudioDeviceStop ( audioInputDevice[lCurDev], audioInputProcID );
    AudioDeviceStop ( audioOutputDevice[lCurDev], audioOutputProcID );

    // unregister the callback function for input and output
    AudioDeviceDestroyIOProcID ( audioInputDevice[lCurDev], audioInputProcID );
    AudioDeviceDestroyIOProcID ( audioOutputDevice[lCurDev], audioOutputProcID );

    AudioObjectPropertyAddress stPropertyAddress;

    stPropertyAddress.mElement = kAudioObjectPropertyElementMaster;
    stPropertyAddress.mScope   = kAudioObjectPropertyScopeGlobal;

    // unregister callback functions for device property changes
    stPropertyAddress.mSelector = kAudioDevicePropertyDeviceHasChanged;

    AudioObjectRemovePropertyListener ( audioOutputDevice[lCurDev],
                                        &stPropertyAddress,
                                        deviceNotification,
                                        this );

    AudioObjectRemovePropertyListener ( audioInputDevice[lCurDev],
                                        &stPropertyAddress,
                                        deviceNotification,
                                        this );

    // unregister the callback function for xruns
    stPropertyAddress.mSelector = kAudioDeviceProcessorOverload;

    AudioObjectRemovePropertyListener ( audioInputDevice[lCurDev],
                                        &stPropertyAddress,
                                        deviceNotification,
                                        this );

    // call base class
    CSoundBase::Stop();
}

int CSound::Init ( const int iNewPrefMonoBufferSize )
{
    UInt32 iActualMonoBufferSize;

    // Error message string: in case buffer sizes on input and output cannot be
    // set to the same value
    const QString strErrBufSize = tr ( "The buffer sizes of the current "
        "input and output audio device cannot be set to a common value. Please "
        "choose other input/output audio devices in your system settings." );

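    // NOTE: the device may not grant the preferred frame size, therefore
    // SetBufferSize() returns the value which was actually applied; the input
    // and the output device have to end up with the same buffer size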
    // try to set input buffer size
    iActualMonoBufferSize =
        SetBufferSize ( audioInputDevice[lCurDev], true, iNewPrefMonoBufferSize );

    if ( iActualMonoBufferSize != static_cast<UInt32> ( iNewPrefMonoBufferSize ) )
    {
        // try to set the input buffer size to the output so that we
        // have a matching pair
        if ( SetBufferSize ( audioOutputDevice[lCurDev], false, iActualMonoBufferSize ) !=
             iActualMonoBufferSize )
        {
            throw CGenErr ( strErrBufSize );
        }
    }
    else
    {
        // try to set output buffer size
        if ( SetBufferSize ( audioOutputDevice[lCurDev], false, iNewPrefMonoBufferSize ) !=
             static_cast<UInt32> ( iNewPrefMonoBufferSize ) )
        {
            throw CGenErr ( strErrBufSize );
        }
    }

    // store buffer size
    iCoreAudioBufferSizeMono = iActualMonoBufferSize;

    // init base class
    CSoundBase::Init ( iCoreAudioBufferSizeMono );

    // set internal buffer size value and calculate stereo buffer size
    iCoreAudioBufferSizeStereo = 2 * iCoreAudioBufferSizeMono;

    // create memory for intermediate audio buffer
    vecsTmpAudioSndCrdStereo.Init ( iCoreAudioBufferSizeStereo );

    return iCoreAudioBufferSizeMono;
}

UInt32 CSound::SetBufferSize ( AudioDeviceID& audioDeviceID,
                               const bool     bIsInput,
                               UInt32         iPrefBufferSize )
{
    AudioObjectPropertyAddress stPropertyAddress;
    stPropertyAddress.mSelector = kAudioDevicePropertyBufferFrameSize;

    if ( bIsInput )
    {
        stPropertyAddress.mScope = kAudioDevicePropertyScopeInput;
    }
    else
    {
        stPropertyAddress.mScope = kAudioDevicePropertyScopeOutput;
    }

    stPropertyAddress.mElement = kAudioObjectPropertyElementMaster;

    // first set the value
    UInt32 iSizeBufValue = sizeof ( UInt32 );

    AudioObjectSetPropertyData ( audioDeviceID,
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 iSizeBufValue,
                                 &iPrefBufferSize );

    // read back which value is actually used
    UInt32 iActualMonoBufferSize = 0;

    AudioObjectGetPropertyData ( audioDeviceID,
                                 &stPropertyAddress,
                                 0,
                                 NULL,
                                 &iSizeBufValue,
                                 &iActualMonoBufferSize );

    return iActualMonoBufferSize;
}

OSStatus CSound::deviceNotification ( AudioDeviceID,
                                      UInt32,
                                      const AudioObjectPropertyAddress* inAddresses,
                                      void*                             inRefCon )
{
    CSound* pSound = static_cast<CSound*> ( inRefCon );

    if ( inAddresses->mSelector == kAudioDevicePropertyDeviceHasChanged )
    {
        // if any property of the device has changed, do a full reload
        pSound->EmitReinitRequestSignal ( RS_RELOAD_RESTART_AND_INIT );
    }

/*
    if ( inAddresses->mSelector == kAudioDeviceProcessorOverload )
    {
        // xrun handling (it is important to act on xruns under CoreAudio
        // since it seems that the xrun situation stays stable for a while
        // and would otherwise give you bad audio for a long time)
        pSound->EmitReinitRequestSignal ( RS_ONLY_RESTART );
    }
*/

    return noErr;
}

OSStatus CSound::callbackIO ( AudioDeviceID          inDevice,
                              const AudioTimeStamp*,
                              const AudioBufferList* inInputData,
                              const AudioTimeStamp*,
                              AudioBufferList*       outOutputData,
                              const AudioTimeStamp*,
                              void*                  inRefCon )
{
    CSound* pSound = static_cast<CSound*> ( inRefCon );

    // both the input and the output device use the same callback function
    QMutexLocker locker ( &pSound->Mutex );

    const int iCoreAudioBufferSizeMono = pSound->iCoreAudioBufferSizeMono;
    const int iNumInChan               = pSound->iNumInChan;
    const int iNumOutChan              = pSound->iNumOutChan;
    const int iSelInputLeftChannel     = pSound->iSelInputLeftChannel;
    const int iSelInputRightChannel    = pSound->iSelInputRightChannel;
    const int iSelOutputLeftChannel    = pSound->iSelOutputLeftChannel;
    const int iSelOutputRightChannel   = pSound->iSelOutputRightChannel;

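    // NOTE: the HAL delivers and expects linear PCM as normalized Float32 samples
    // (this is what CheckDeviceCapabilities() verified). The branches below handle
    // the two possible buffer layouts -- a single interleaved buffer holding all
    // channels, or one buffer per channel -- and convert between Float32 values in
    // [-1.0, 1.0] and the 16 bit integer samples used internally by scaling with
    // _MAXSHORT.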
    if ( ( inDevice == pSound->CurrentAudioInputDeviceID ) && inInputData )
    {
        // check size (a Float32 has four bytes)
        if ( inInputData->mBuffers[0].mDataByteSize ==
             static_cast<UInt32> ( iCoreAudioBufferSizeMono * iNumInChan * 4 ) )
        {
            // one buffer with all the channels in interleaved format:
            // get a pointer to the input data of the correct type
            Float32* pInData = static_cast<Float32*> ( inInputData->mBuffers[0].mData );

            // copy input data
            for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ )
            {
                // left
                pSound->vecsTmpAudioSndCrdStereo[2 * i] =
                    (short) ( pInData[iNumInChan * i + iSelInputLeftChannel] * _MAXSHORT );

                // right
                pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] =
                    (short) ( pInData[iNumInChan * i + iSelInputRightChannel] * _MAXSHORT );

/*
                // TEST mix channel with micro to the stereo output
                if ( iNumInChan == 4 )
                {
                    // add mic input on input channel 4 to both stereo channels
                    pSound->vecsTmpAudioSndCrdStereo[2 * i] =
                        Double2Short ( (double) ( pInData[iNumInChan * i + 3] * _MAXSHORT ) +
                                       (double) pSound->vecsTmpAudioSndCrdStereo[2 * i] );

                    pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] =
                        Double2Short ( (double) ( pInData[iNumInChan * i + 3] * _MAXSHORT ) +
                                       (double) pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] );
                }
*/
            }
        }
        else if ( inInputData->mNumberBuffers == (UInt32) iNumInChan && // we should have as many buffers as channels
                  inInputData->mBuffers[0].mDataByteSize == static_cast<UInt32> ( iCoreAudioBufferSizeMono * 4 ) )
        {
            // one buffer per channel mode:
            AudioBuffer left       = inInputData->mBuffers[iSelInputLeftChannel];
            Float32*    pLeftData  = static_cast<Float32*> ( left.mData );
            AudioBuffer right      = inInputData->mBuffers[iSelInputRightChannel];
            Float32*    pRightData = static_cast<Float32*> ( right.mData );

            // copy input data
            for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ )
            {
                // left
                pSound->vecsTmpAudioSndCrdStereo[2 * i] = (short) ( pLeftData[i] * _MAXSHORT );

                // right
                pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] = (short) ( pRightData[i] * _MAXSHORT );
            }
        }
        else
        {
            // incompatible sizes, clear work buffer
            pSound->vecsTmpAudioSndCrdStereo.Reset ( 0 );
        }

        // call processing callback function
        pSound->ProcessCallback ( pSound->vecsTmpAudioSndCrdStereo );
    }

    if ( ( inDevice == pSound->CurrentAudioOutputDeviceID ) && outOutputData )
    {
        // check size (a Float32 has four bytes)
        if ( outOutputData->mBuffers[0].mDataByteSize ==
             static_cast<UInt32> ( iCoreAudioBufferSizeMono * iNumOutChan * 4 ) )
        {
            // one buffer with all the channels in interleaved format:
            // get a pointer to the output data of the correct type
            Float32* pOutData = static_cast<Float32*> ( outOutputData->mBuffers[0].mData );

            // copy output data
            for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ )
            {
                // left
                pOutData[iNumOutChan * i + iSelOutputLeftChannel] =
                    (Float32) pSound->vecsTmpAudioSndCrdStereo[2 * i] / _MAXSHORT;

                // right
                pOutData[iNumOutChan * i + iSelOutputRightChannel] =
                    (Float32) pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] / _MAXSHORT;
            }
        }
        else if ( outOutputData->mNumberBuffers == (UInt32) iNumOutChan && // we should have as many buffers as channels
                  outOutputData->mBuffers[0].mDataByteSize == static_cast<UInt32> ( iCoreAudioBufferSizeMono * 4 ) )
        {
            // the outputs use individual buffers, too, rather than interleaved channels
            Float32* pLeftOutData  = static_cast<Float32*> ( outOutputData->mBuffers[iSelOutputLeftChannel].mData );
            Float32* pRightOutData = static_cast<Float32*> ( outOutputData->mBuffers[iSelOutputRightChannel].mData );

            // copy output data
            for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ )
            {
                // left
                pLeftOutData[i] = (Float32) pSound->vecsTmpAudioSndCrdStereo[2 * i] / _MAXSHORT;

                // right
                pRightOutData[i] = (Float32) pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] / _MAXSHORT;
            }
        }
    }

    return kAudioHardwareNoError;
}

void CSound::callbackMIDI ( const MIDIPacketList* pktlist,
                            void*                 refCon,
                            void* )
{
    CSound* pSound = static_cast<CSound*> ( refCon );

    if ( pSound->midiInPortRef != static_cast<MIDIPortRef> ( NULL ) )
    {
        MIDIPacket* midiPacket = const_cast<MIDIPacket*> ( pktlist->packet );

        for ( unsigned int j = 0; j < pktlist->numPackets; j++ )
        {
            // copy packet and send it to the MIDI parser
            CVector<uint8_t> vMIDIPaketBytes ( midiPacket->length );

            for ( int i = 0; i < midiPacket->length; i++ )
            {
                vMIDIPaketBytes[i] = static_cast<uint8_t> ( midiPacket->data[i] );
            }
            pSound->ParseMIDIMessage ( vMIDIPaketBytes );

            midiPacket = MIDIPacketNext ( midiPacket );
        }
    }
}

bool CSound::ConvertCFStringToQString ( const CFStringRef stringRef,
                                        QString&          sOut )
{
    // check if the string reference is a valid pointer
    if ( stringRef != NULL )
    {
        // first check if the string is not empty
        if ( CFStringGetLength ( stringRef ) > 0 )
        {
            // convert the CFString to a C string (quick hack!) and then to a QString
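            // NOTE: the allocation presumably allows for up to three UTF-8 bytes per
            // UTF-16 code unit of the CFString plus the terminating zero;
            // CFStringGetMaximumSizeForEncoding() would give the exact upper bound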
            char* sC_strPropValue =
                (char*) malloc ( CFStringGetLength ( stringRef ) * 3 + 1 );

            if ( CFStringGetCString ( stringRef,
                                      sC_strPropValue,
                                      CFStringGetLength ( stringRef ) * 3 + 1,
                                      kCFStringEncodingUTF8 ) )
            {
                sOut = sC_strPropValue;
                free ( sC_strPropValue );

                return true; // OK
            }
        }

        // release the string reference because it is not needed anymore
        CFRelease ( stringRef );
    }

    return false; // not OK
}