diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 00000000..3f550c73
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "libs/oboe"]
+ path = libs/oboe
+ url = https://github.com/google/oboe.git
diff --git a/Jamulus.pro b/Jamulus.pro
index 97dc3c9d..c06f7e92 100755
--- a/Jamulus.pro
+++ b/Jamulus.pro
@@ -128,11 +128,136 @@ win32 {
LIBS += /usr/local/lib/libjack.dylib
}
} else:android {
+ # we want to compile with C++14
+ CONFIG += c++14
+
+ QT += androidextras
+
+ # enabled only for debugging on android devices
+ DEFINES += ANDROIDDEBUG
+
+ target.path = /tmp/your_executable # path on device
+ INSTALLS += target
+
HEADERS += android/sound.h
SOURCES += android/sound.cpp
LIBS += -lOpenSLES
ANDROID_PACKAGE_SOURCE_DIR = $$PWD/android
OTHER_FILES += android/AndroidManifest.xml
+
+# if compiling for android you need to use Oboe library which is included as a git submodule
+# make sure you git pull with submodules to pull the latest Oboe library
+OBOE_SOURCES = libs/oboe/src/aaudio/AAudioLoader.cpp \
+ libs/oboe/src/aaudio/AudioStreamAAudio.cpp \
+ libs/oboe/src/common/AudioSourceCaller.cpp \
+ libs/oboe/src/common/AudioStream.cpp \
+ libs/oboe/src/common/AudioStreamBuilder.cpp \
+ libs/oboe/src/common/DataConversionFlowGraph.cpp \
+ libs/oboe/src/common/FilterAudioStream.cpp \
+ libs/oboe/src/common/FixedBlockAdapter.cpp \
+ libs/oboe/src/common/FixedBlockReader.cpp \
+ libs/oboe/src/common/FixedBlockWriter.cpp \
+ libs/oboe/src/common/LatencyTuner.cpp \
+ libs/oboe/src/common/QuirksManager.cpp \
+ libs/oboe/src/common/SourceFloatCaller.cpp \
+ libs/oboe/src/common/SourceI16Caller.cpp \
+ libs/oboe/src/common/StabilizedCallback.cpp \
+ libs/oboe/src/common/Trace.cpp \
+ libs/oboe/src/common/Utilities.cpp \
+ libs/oboe/src/common/Version.cpp \
+ libs/oboe/src/fifo/FifoBuffer.cpp \
+ libs/oboe/src/fifo/FifoController.cpp \
+ libs/oboe/src/fifo/FifoControllerBase.cpp \
+ libs/oboe/src/fifo/FifoControllerIndirect.cpp \
+ libs/oboe/src/flowgraph/ClipToRange.cpp \
+ libs/oboe/src/flowgraph/FlowGraphNode.cpp \
+ libs/oboe/src/flowgraph/ManyToMultiConverter.cpp \
+ libs/oboe/src/flowgraph/MonoToMultiConverter.cpp \
+ libs/oboe/src/flowgraph/RampLinear.cpp \
+ libs/oboe/src/flowgraph/SampleRateConverter.cpp \
+ libs/oboe/src/flowgraph/SinkFloat.cpp \
+ libs/oboe/src/flowgraph/SinkI16.cpp \
+ libs/oboe/src/flowgraph/SinkI24.cpp \
+ libs/oboe/src/flowgraph/SourceFloat.cpp \
+ libs/oboe/src/flowgraph/SourceI16.cpp \
+ libs/oboe/src/flowgraph/SourceI24.cpp \
+ libs/oboe/src/flowgraph/resampler/IntegerRatio.cpp \
+ libs/oboe/src/flowgraph/resampler/LinearResampler.cpp \
+ libs/oboe/src/flowgraph/resampler/MultiChannelResampler.cpp \
+ libs/oboe/src/flowgraph/resampler/PolyphaseResampler.cpp \
+ libs/oboe/src/flowgraph/resampler/PolyphaseResamplerMono.cpp \
+ libs/oboe/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp \
+ libs/oboe/src/flowgraph/resampler/SincResampler.cpp \
+ libs/oboe/src/flowgraph/resampler/SincResamplerStereo.cpp \
+ libs/oboe/src/opensles/AudioInputStreamOpenSLES.cpp \
+ libs/oboe/src/opensles/AudioOutputStreamOpenSLES.cpp \
+ libs/oboe/src/opensles/AudioStreamBuffered.cpp \
+ libs/oboe/src/opensles/AudioStreamOpenSLES.cpp \
+ libs/oboe/src/opensles/EngineOpenSLES.cpp \
+ libs/oboe/src/opensles/OpenSLESUtilities.cpp \
+ libs/oboe/src/opensles/OutputMixerOpenSLES.cpp
+
+OBOE_HEADERS = libs/oboe/src/aaudio/AAudioLoader.h \
+ libs/oboe/src/aaudio/AudioStreamAAudio.h \
+ libs/oboe/src/common/AudioClock.h \
+ libs/oboe/src/common/AudioSourceCaller.h \
+ libs/oboe/src/common/DataConversionFlowGraph.h \
+ libs/oboe/src/common/FilterAudioStream.h \
+ libs/oboe/src/common/FixedBlockAdapter.h \
+ libs/oboe/src/common/FixedBlockReader.h \
+ libs/oboe/src/common/FixedBlockWriter.h \
+ libs/oboe/src/common/MonotonicCounter.h \
+ libs/oboe/src/common/OboeDebug.h \
+ libs/oboe/src/common/QuirksManager.h \
+ libs/oboe/src/common/SourceFloatCaller.h \
+ libs/oboe/src/common/SourceI16Caller.h \
+ libs/oboe/src/common/Trace.h \
+ libs/oboe/src/fifo/FifoBuffer.h \
+ libs/oboe/src/fifo/FifoController.h \
+ libs/oboe/src/fifo/FifoControllerBase.h \
+ libs/oboe/src/fifo/FifoControllerIndirect.h \
+ libs/oboe/src/flowgraph/ClipToRange.h \
+ libs/oboe/src/flowgraph/FlowGraphNode.h \
+ libs/oboe/src/flowgraph/ManyToMultiConverter.h \
+ libs/oboe/src/flowgraph/MonoToMultiConverter.h \
+ libs/oboe/src/flowgraph/RampLinear.h \
+ libs/oboe/src/flowgraph/SampleRateConverter.h \
+ libs/oboe/src/flowgraph/SinkFloat.h \
+ libs/oboe/src/flowgraph/SinkI16.h \
+ libs/oboe/src/flowgraph/SinkI24.h \
+ libs/oboe/src/flowgraph/SourceFloat.h \
+ libs/oboe/src/flowgraph/SourceI16.h \
+ libs/oboe/src/flowgraph/SourceI24.h \
+ libs/oboe/src/flowgraph/resampler/HyperbolicCosineWindow.h \
+ libs/oboe/src/flowgraph/resampler/IntegerRatio.h \
+ libs/oboe/src/flowgraph/resampler/LinearResampler.h \
+ libs/oboe/src/flowgraph/resampler/MultiChannelResampler.h \
+ libs/oboe/src/flowgraph/resampler/PolyphaseResampler.h \
+ libs/oboe/src/flowgraph/resampler/PolyphaseResamplerMono.h \
+ libs/oboe/src/flowgraph/resampler/PolyphaseResamplerStereo.h \
+ libs/oboe/src/flowgraph/resampler/SincResampler.h \
+ libs/oboe/src/flowgraph/resampler/SincResamplerStereo.h \
+ libs/oboe/src/opensles/AudioInputStreamOpenSLES.h \
+ libs/oboe/src/opensles/AudioOutputStreamOpenSLES.h \
+ libs/oboe/src/opensles/AudioStreamBuffered.h \
+ libs/oboe/src/opensles/AudioStreamOpenSLES.h \
+ libs/oboe/src/opensles/EngineOpenSLES.h \
+ libs/oboe/src/opensles/OpenSLESUtilities.h \
+ libs/oboe/src/opensles/OutputMixerOpenSLES.h
+
+INCLUDEPATH_OBOE = libs/oboe/include/ \
+ libs/oboe/src/
+
+DISTFILES_OBOE += libs/oboe/AUTHORS \
+ libs/oboe/CONTRIBUTING \
+ libs/oboe/LICENSE \
+ libs/oboe/README
+
+ INCLUDEPATH += $$INCLUDEPATH_OBOE
+ HEADERS += $$OBOE_HEADERS
+ SOURCES += $$OBOE_SOURCES
+ DISTFILES += $$DISTFILES_OBOE
+
} else:unix {
# we want to compile with C++11
QMAKE_CXXFLAGS += -std=c++11
@@ -308,6 +433,7 @@ android {
}
SOURCES += src/audiomixerboard.cpp \
+ android/androiddebug.cpp \
src/buffer.cpp \
src/channel.cpp \
src/chatdlg.cpp \
@@ -492,6 +618,12 @@ DISTFILES += ChangeLog \
COPYING \
INSTALL.md \
README.md \
+ android/build.gradle \
+ android/gradle/wrapper/gradle-wrapper.jar \
+ android/gradle/wrapper/gradle-wrapper.properties \
+ android/gradlew \
+ android/gradlew.bat \
+ android/res/values/libs.xml \
src/res/CLEDBlack.png \
src/res/CLEDBlackSmall.png \
src/res/CLEDDisabledSmall.png \
diff --git a/android/AndroidManifest.xml b/android/AndroidManifest.xml
index addcb052..cba6d9a2 100644
--- a/android/AndroidManifest.xml
+++ b/android/AndroidManifest.xml
@@ -1,41 +1,94 @@
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
-
-
-
+
+
-
-
-
-
-
+
+
+
+
+
+
+
+
+
-
+
+
+
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
+
diff --git a/android/androiddebug.cpp b/android/androiddebug.cpp
new file mode 100644
index 00000000..93656a47
--- /dev/null
+++ b/android/androiddebug.cpp
@@ -0,0 +1,45 @@
+const char*const applicationName="Jamulus";
+
+#ifdef ANDROIDDEBUG // Set in the Jamulus.pro file for android builds
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+void myMessageHandler(QtMsgType type, const QMessageLogContext& context, const QString& msg)
+{
+ QString report=msg;
+ if (context.file && !QString(context.file).isEmpty()) {
+ report+=" in file ";
+ report+=QString(context.file);
+ report+=" line ";
+ report+=QString::number(context.line);
+ }
+ if (context.function && !QString(context.function).isEmpty()) {
+ report+=+" function ";
+ report+=QString(context.function);
+ }
+ const char*const local=report.toLocal8Bit().constData();
+ switch (type) {
+ case QtDebugMsg:
+ __android_log_write(ANDROID_LOG_DEBUG,applicationName,local);
+ break;
+ case QtInfoMsg:
+ __android_log_write(ANDROID_LOG_INFO,applicationName,local);
+ break;
+ case QtWarningMsg:
+ __android_log_write(ANDROID_LOG_WARN,applicationName,local);
+ break;
+ case QtCriticalMsg:
+ __android_log_write(ANDROID_LOG_ERROR,applicationName,local);
+ break;
+ case QtFatalMsg:
+ default:
+ __android_log_write(ANDROID_LOG_FATAL,applicationName,local);
+ abort();
+ }
+}
+#endif
diff --git a/android/sound.cpp b/android/sound.cpp
index 0b2ae651..1ed5dfc3 100644
--- a/android/sound.cpp
+++ b/android/sound.cpp
@@ -23,256 +23,157 @@
\******************************************************************************/
#include "sound.h"
-
+#include "androiddebug.cpp"
/* Implementation *************************************************************/
+
CSound::CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ),
void* arg,
const int iCtrlMIDIChannel,
const bool ,
const QString& ) :
CSoundBase ( "OpenSL", true, fpNewProcessCallback, arg, iCtrlMIDIChannel )
+
{
+ pSound = this;
+#ifdef ANDROIDDEBUG
+ qInstallMessageHandler(myMessageHandler);
+#endif
+}
+
+void CSound::setupCommonStreamParams(oboe::AudioStreamBuilder *builder)
+{
+ // We request EXCLUSIVE mode since this will give us the lowest possible
+ // latency. If EXCLUSIVE mode isn't available the builder will fall back to SHARED mode
+ builder->setCallback(this)
+ ->setFormat(oboe::AudioFormat::Float)
+ ->setSharingMode(oboe::SharingMode::Shared)
+ ->setChannelCount(oboe::ChannelCount::Mono)
+ // ->setSampleRate(48000)
+ // ->setSampleRateConversionQuality(oboe::SampleRateConversionQuality::Medium)
+ ->setPerformanceMode(oboe::PerformanceMode::None);
+ return;
+}
+
+void CSound::openStreams()
+{
+ // Create callback
+ mCallback = this;
+
+ //Setup output stream
+ oboe::AudioStreamBuilder inBuilder, outBuilder;
+ outBuilder.setDirection(oboe::Direction::Output);
+ setupCommonStreamParams(&outBuilder);
+ oboe::Result result = outBuilder.openManagedStream(mPlayStream);
+ if (result != oboe::Result::OK) {
+ return;
+ }
+ mPlayStream->setBufferSizeInFrames(pSound->iOpenSLBufferSizeStereo);
+
+ warnIfNotLowLatency(mPlayStream, "PlayStream");
+ printStreamDetails(mPlayStream);
+
+ //Setup input stream
+ inBuilder.setDirection(oboe::Direction::Input);
+ setupCommonStreamParams(&inBuilder);
+ result = inBuilder.openManagedStream(mRecordingStream);
+ if (result != oboe::Result::OK) {
+ closeStream(mPlayStream);
+ return;
+ }
+ mRecordingStream->setBufferSizeInFrames(pSound->iOpenSLBufferSizeStereo);
+
+ warnIfNotLowLatency(mRecordingStream, "RecordStream");
+ printStreamDetails(mRecordingStream);
+}
+
+void CSound::printStreamDetails(oboe::ManagedStream &stream)
+{
+
+ QString sDirection = (stream->getDirection()==oboe::Direction::Input?"Input":"Output");
+ QString sFramesPerBurst = QString::number(stream->getFramesPerBurst());
+ QString sBufferSizeInFrames = QString::number(stream->getBufferSizeInFrames());
+ QString sBytesPerFrame = QString::number(stream->getBytesPerFrame());
+ QString sBytesPerSample = QString::number(stream->getBytesPerSample());
+ QString sBufferCapacityInFrames = QString::number(stream->getBufferCapacityInFrames());
+ QString sPerformanceMode = (stream->getPerformanceMode()==oboe::PerformanceMode::LowLatency?"LowLatency":"NotLowLatency");
+ QString sSharingMode = (stream->getSharingMode() == oboe::SharingMode::Exclusive?"Exclusive":"Shared");
+ QString sDeviceID = QString::number(stream->getDeviceId());
+ QString sSampleRate = QString::number(stream->getSampleRate());
+ QString sAudioFormat = (stream->getFormat()==oboe::AudioFormat::I16?"I16":"Float");
+
+ QString sFramesPerCallback = QString::number(stream->getFramesPerCallback());
+ //QString sSampleRateConversionQuality = (stream.getSampleRateConversionQuality()==oboe::SampleRateConversionQuality::
+
+ qInfo() << "Stream details: [sDirection: " << sDirection <<
+ ", FramesPerBurst: " << sFramesPerBurst <<
+ ", BufferSizeInFrames: " << sBufferSizeInFrames <<
+ ", BytesPerFrame: " << sBytesPerFrame <<
+ ", BytesPerSample: " << sBytesPerSample <<
+ ", BufferCapacityInFrames: " << sBufferCapacityInFrames <<
+ ", PerformanceMode: " << sPerformanceMode <<
+ ", SharingMode: " << sSharingMode <<
+ ", DeviceID: " << sDeviceID <<
+ ", SampleRate: " << sSampleRate <<
+ ", AudioFormat: " << sAudioFormat <<
+ ", FramesPerCallback: " << sFramesPerCallback << "]";
}
-void CSound::InitializeOpenSL()
-{
- // set up stream formats for input and output
- SLDataFormat_PCM inStreamFormat;
- inStreamFormat.formatType = SL_DATAFORMAT_PCM;
- inStreamFormat.numChannels = 1;
- inStreamFormat.samplesPerSec = SL_SAMPLINGRATE_16;
- inStreamFormat.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
- inStreamFormat.containerSize = 16;
- inStreamFormat.channelMask = SL_SPEAKER_FRONT_CENTER;
- inStreamFormat.endianness = SL_BYTEORDER_LITTLEENDIAN;
-
- SLDataFormat_PCM outStreamFormat;
- outStreamFormat.formatType = SL_DATAFORMAT_PCM;
- outStreamFormat.numChannels = 2;
- outStreamFormat.samplesPerSec = SYSTEM_SAMPLE_RATE_HZ * 1000; // unit is mHz
- outStreamFormat.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
- outStreamFormat.containerSize = 16;
- outStreamFormat.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
- outStreamFormat.endianness = SL_BYTEORDER_LITTLEENDIAN;
-
- // create the OpenSL root engine object
- slCreateEngine ( &engineObject,
- 0,
- nullptr,
- 0,
- nullptr,
- nullptr );
-
- // realize the engine
- (*engineObject)->Realize ( engineObject,
- SL_BOOLEAN_FALSE );
-
- // get the engine interface (required to create other objects)
- (*engineObject)->GetInterface ( engineObject,
- SL_IID_ENGINE,
- &engine );
-
- // create the main output mix
- (*engine)->CreateOutputMix ( engine,
- &outputMixObject,
- 0,
- nullptr,
- nullptr );
-
- // realize the output mix
- (*outputMixObject)->Realize ( outputMixObject,
- SL_BOOLEAN_FALSE );
-
- // configure the audio (data) source for input
- SLDataLocator_IODevice micLocator;
- micLocator.locatorType = SL_DATALOCATOR_IODEVICE;
- micLocator.deviceType = SL_IODEVICE_AUDIOINPUT;
- micLocator.deviceID = SL_DEFAULTDEVICEID_AUDIOINPUT;
- micLocator.device = nullptr;
-
- SLDataSource inDataSource;
- inDataSource.pLocator = &micLocator;
- inDataSource.pFormat = nullptr;
-
- // configure the input buffer queue
- SLDataLocator_AndroidSimpleBufferQueue inBufferQueue;
- inBufferQueue.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
- inBufferQueue.numBuffers = 2; // max number of buffers in queue
-
- // configure the audio (data) sink for input
- SLDataSink inDataSink;
- inDataSink.pLocator = &inBufferQueue;
- inDataSink.pFormat = &inStreamFormat;
-
- // create the audio recorder
- const SLInterfaceID recorderIds[] = { SL_IID_ANDROIDSIMPLEBUFFERQUEUE };
- const SLboolean recorderReq[] = { SL_BOOLEAN_TRUE };
-
- (*engine)->CreateAudioRecorder ( engine,
- &recorderObject,
- &inDataSource,
- &inDataSink,
- 1,
- recorderIds,
- recorderReq );
-
- // realize the audio recorder
- (*recorderObject)->Realize ( recorderObject,
- SL_BOOLEAN_FALSE );
-
- // get the audio recorder interface
- (*recorderObject)->GetInterface ( recorderObject,
- SL_IID_RECORD,
- &recorder );
-
- // get the audio recorder simple buffer queue interface
- (*recorderObject)->GetInterface ( recorderObject,
- SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
- &recorderSimpleBufQueue );
-
- // register the audio input callback
- (*recorderSimpleBufQueue)->RegisterCallback ( recorderSimpleBufQueue,
- processInput,
- this );
-
- // configure the output buffer queue
- SLDataLocator_AndroidSimpleBufferQueue outBufferQueue;
- outBufferQueue.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
- outBufferQueue.numBuffers = 2; // max number of buffers in queue
-
- // configure the audio (data) source for output
- SLDataSource outDataSource;
- outDataSource.pLocator = &outBufferQueue;
- outDataSource.pFormat = &outStreamFormat;
-
- // configure the output mix
- SLDataLocator_OutputMix outputMix;
- outputMix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
- outputMix.outputMix = outputMixObject;
-
- // configure the audio (data) sink for output
- SLDataSink outDataSink;
- outDataSink.pLocator = &outputMix;
- outDataSink.pFormat = nullptr;
-
- // create the audio player
- const SLInterfaceID playerIds[] = { SL_IID_ANDROIDSIMPLEBUFFERQUEUE };
- const SLboolean playerReq[] = { SL_BOOLEAN_TRUE };
-
- (*engine)->CreateAudioPlayer ( engine,
- &playerObject,
- &outDataSource,
- &outDataSink,
- 1,
- playerIds,
- playerReq );
-
- // realize the audio player
- (*playerObject)->Realize ( playerObject,
- SL_BOOLEAN_FALSE );
-
- // get the audio player interface
- (*playerObject)->GetInterface ( playerObject,
- SL_IID_PLAY,
- &player );
-
- // get the audio player simple buffer queue interface
- (*playerObject)->GetInterface ( playerObject,
- SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
- &playerSimpleBufQueue );
-
- // register the audio output callback
- (*playerSimpleBufQueue)->RegisterCallback ( playerSimpleBufQueue,
- processOutput,
- this );
+void CSound::warnIfNotLowLatency(oboe::ManagedStream &stream, QString streamName) {
+ if (stream->getPerformanceMode() != oboe::PerformanceMode::LowLatency) {
+ QString latencyMode = (stream->getPerformanceMode()==oboe::PerformanceMode::None ? "None" : "Power Saving");
+ // throw CGenErr ( tr ( "Stream is NOT low latency."
+ // "Check your requested format, sample rate and channel count." ) );
+ }
}
-void CSound::CloseOpenSL()
+void CSound::closeStream(oboe::ManagedStream &stream)
+{
+ if (stream) {
+ oboe::Result requestStopRes = stream->requestStop();
+ oboe::Result result = stream->close();
+ if (result != oboe::Result::OK) {
+ throw CGenErr ( tr ( "Error closing stream: $s",
+ oboe::convertToText(result) ) );
+ }
+ stream.reset();
+ }
+}
+
+void CSound::closeStreams()
{
// clean up
- (*recorderObject)->Destroy ( recorderObject );
- (*playerObject)->Destroy ( playerObject );
- (*outputMixObject)->Destroy ( outputMixObject );
- (*engineObject)->Destroy ( engineObject );
+ closeStream(mRecordingStream);
+ closeStream(mPlayStream);
}
void CSound::Start()
{
- InitializeOpenSL();
-
-// TEST We have to supply the interface with initial buffers, otherwise
-// the rendering will not start.
-// Note that the number of buffers enqueued here must match the maximum
-// numbers of buffers configured in the constructor of this class.
-vecsTmpAudioSndCrdStereo.Reset ( 0 );
-
- // enqueue initial buffers for record
- (*recorderSimpleBufQueue)->Enqueue ( recorderSimpleBufQueue,
- &vecsTmpAudioSndCrdStereo[0],
- iOpenSLBufferSizeStereo * 2 /* 2 bytes */ );
-
- (*recorderSimpleBufQueue)->Enqueue ( recorderSimpleBufQueue,
- &vecsTmpAudioSndCrdStereo[0],
- iOpenSLBufferSizeStereo * 2 /* 2 bytes */ );
-
- // enqueue initial buffers for playback
- (*playerSimpleBufQueue)->Enqueue ( playerSimpleBufQueue,
- &vecsTmpAudioSndCrdStereo[0],
- iOpenSLBufferSizeStereo * 2 /* 2 bytes */ );
-
- (*playerSimpleBufQueue)->Enqueue ( playerSimpleBufQueue,
- &vecsTmpAudioSndCrdStereo[0],
- iOpenSLBufferSizeStereo * 2 /* 2 bytes */ );
-
- // start the rendering
- (*recorder)->SetRecordState ( recorder, SL_RECORDSTATE_RECORDING );
- (*player)->SetPlayState ( player, SL_PLAYSTATE_PLAYING );
+ openStreams();
// call base class
CSoundBase::Start();
+
+ // finally start the streams so the callback begins, start with inputstream first.
+ mRecordingStream->requestStart();
+ mPlayStream->requestStart();
+
}
void CSound::Stop()
{
- // stop the audio stream
- (*recorder)->SetRecordState ( recorder, SL_RECORDSTATE_STOPPED );
- (*player)->SetPlayState ( player, SL_PLAYSTATE_STOPPED );
-
- // clear the buffers
- (*recorderSimpleBufQueue)->Clear ( recorderSimpleBufQueue );
- (*playerSimpleBufQueue)->Clear ( playerSimpleBufQueue );
+ closeStreams();
// call base class
CSoundBase::Stop();
-
- CloseOpenSL();
}
int CSound::Init ( const int iNewPrefMonoBufferSize )
{
-
-
-// TODO make use of the following:
-// String sampleRate = am.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE));
-// String framesPerBuffer = am.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER));
-/*
-// get the Audio IO DEVICE CAPABILITIES interface
-SLAudioIODeviceCapabilitiesItf audioCapabilities;
-
-(*engineObject)->GetInterface ( engineObject,
- SL_IID_AUDIOIODEVICECAPABILITIES,
- &audioCapabilities );
-
-(*audioCapabilities)->QueryAudioInputCapabilities ( audioCapabilities,
- inputDeviceIDs[i],
- &audioInputDescriptor );
-*/
-
-
// store buffer size
- iOpenSLBufferSizeMono = iNewPrefMonoBufferSize;
+ iOpenSLBufferSizeMono = 512 ;
+ //iNewPrefMonoBufferSize;
// init base class
CSoundBase::Init ( iOpenSLBufferSizeMono );
@@ -283,7 +184,6 @@ SLAudioIODeviceCapabilitiesItf audioCapabilities;
// create memory for intermediate audio buffer
vecsTmpAudioSndCrdStereo.Init ( iOpenSLBufferSizeStereo );
-
// TEST
#if ( SYSTEM_SAMPLE_RATE_HZ != 48000 )
# error "Only a system sample rate of 48 kHz is supported by this module"
@@ -296,57 +196,105 @@ SLAudioIODeviceCapabilitiesItf audioCapabilities;
iModifiedInBufSize = iOpenSLBufferSizeMono / 3;
vecsTmpAudioInSndCrd.Init ( iModifiedInBufSize );
-
return iOpenSLBufferSizeMono;
}
-void CSound::processInput ( SLAndroidSimpleBufferQueueItf bufferQueue,
- void* instance )
+// This is the main callback method for when an audio stream is ready to publish data to an output stream
+// or has received data on an input stream. As per the manual, we must be very careful not to do anything in this callback
+// that can cause delays, such as sleeping, file processing, allocating memory, etc.
+oboe::DataCallbackResult CSound::onAudioReady(oboe::AudioStream *oboeStream, void *audioData, int32_t numFrames)
{
- CSound* pSound = static_cast ( instance );
+ // only process if we are running
+ if ( ! pSound->bRun )
+ {
+ return oboe::DataCallbackResult::Continue;
+ }
- // only process if we are running
- if ( !pSound->bRun )
+ // Need to modify the size of the buffer based on the numFrames requested in this callback.
+ // The buffer size can be changed by Android devices at any time.
+ int& iBufferSizeMono = pSound->iOpenSLBufferSizeMono;
+
+ // perform the processing for input and output
+// QMutexLocker locker ( &pSound->Mutex );
+ // locker.mutex();
+
+ //This can be called from both input and output at different times
+ if (oboeStream == pSound->mPlayStream.get() && audioData)
{
- return;
+ float *floatData = static_cast(audioData);
+
+ // Zero out the incoming container array
+ memset(audioData, 0, sizeof(float) * numFrames * oboeStream->getChannelCount());
+
+ // Only copy data if we have data to copy, otherwise fill with silence
+ if (!pSound->vecsTmpAudioSndCrdStereo.empty())
+ {
+ for (int frmNum = 0; frmNum < numFrames; ++frmNum)
+ {
+ for (int channelNum = 0; channelNum < oboeStream->getChannelCount(); channelNum++)
+ {
+ // copy sample received from server into output buffer
+
+
+ // convert to 32 bit
+ const int32_t iCurSam = static_cast (
+ pSound->vecsTmpAudioSndCrdStereo [frmNum * oboeStream->getChannelCount() + channelNum] );
+ floatData[frmNum * oboeStream->getChannelCount() + channelNum] = (float) iCurSam/ _MAXSHORT;
+ }
+ }
+ }
+ else
+ {
+ // prime output stream buffer with silence
+ memset(static_cast(audioData) + numFrames * oboeStream->getChannelCount(), 0,
+ (numFrames) * oboeStream->getBytesPerFrame());
+ }
}
-
- QMutexLocker locker ( &pSound->Mutex );
-
- // enqueue the buffer for record
- (*bufferQueue)->Enqueue ( bufferQueue,
- &pSound->vecsTmpAudioInSndCrd[0],
- pSound->iModifiedInBufSize * 2 /* 2 bytes */ );
-
-// upsampling (without filtering) and channel management
-pSound->vecsTmpAudioSndCrdStereo.Reset ( 0 );
-for ( int i = 0; i < pSound->iModifiedInBufSize; i++ )
-{
- pSound->vecsTmpAudioSndCrdStereo[6 * i] =
- pSound->vecsTmpAudioSndCrdStereo[6 * i + 1] =
- pSound->vecsTmpAudioInSndCrd[i];
-}
-
-}
-
-void CSound::processOutput ( SLAndroidSimpleBufferQueueItf bufferQueue,
- void* instance )
-{
- CSound* pSound = static_cast ( instance );
-
- // only process if we are running
- if ( !pSound->bRun )
+ else if (oboeStream == pSound->mRecordingStream.get() && audioData)
{
- return;
+ // First things first, we need to discard the input queue a little for 500ms or so
+ if (pSound->mCountCallbacksToDrain > 0)
+ {
+ // discard the input buffer
+ int32_t numBytes = numFrames * oboeStream->getBytesPerFrame();
+ memset(audioData, 0 /* value */, numBytes);
+ pSound->mCountCallbacksToDrain--;
+ }
+
+ // We're good to start recording now
+ // Take the data from the recording device output buffer and move
+ // it to the vector ready to send up to the server
+
+ float *floatData = static_cast(audioData);
+
+ // Copy recording data to internal vector
+ for (int frmNum = 0; frmNum < numFrames; ++frmNum)
+ {
+ for (int channelNum = 0; channelNum < oboeStream->getChannelCount(); channelNum++)
+ {
+ pSound->vecsTmpAudioSndCrdStereo [frmNum * oboeStream->getChannelCount() + channelNum] =
+ (short) floatData[frmNum * oboeStream->getChannelCount() + channelNum] * _MAXSHORT;
+ }
+ }
+
+ // Tell parent class that we've put some data ready to send to the server
+ pSound->ProcessCallback ( pSound->vecsTmpAudioSndCrdStereo );
}
-
- QMutexLocker locker ( &pSound->Mutex );
-
- // call processing callback function
- pSound->ProcessCallback ( pSound->vecsTmpAudioSndCrdStereo );
-
- // enqueue the buffer for playback
- (*bufferQueue)->Enqueue ( bufferQueue,
- &pSound->vecsTmpAudioSndCrdStereo[0],
- pSound->iOpenSLBufferSizeStereo * 2 /* 2 bytes */ );
+ // locker.unlock();
+ return oboe::DataCallbackResult::Continue;
}
+
+//TODO better handling of stream closing errors
+void CSound::onErrorAfterClose(oboe::AudioStream *oboeStream, oboe::Result result)
+{
+ qDebug() << "CSound::onErrorAfterClose";
+}
+
+//TODO better handling of stream closing errors
+void CSound::onErrorBeforeClose(oboe::AudioStream *oboeStream, oboe::Result result)
+{
+ qDebug() << "CSound::onErrorBeforeClose";
+}
+
+
+
diff --git a/android/sound.h b/android/sound.h
index 6ab4c34b..fdd9caf7 100644
--- a/android/sound.h
+++ b/android/sound.h
@@ -24,15 +24,18 @@
#pragma once
-#include
-#include
+/* Deprecated, moving to OBOE
+ * #include
+ * #include */
+#include
#include
#include "soundbase.h"
#include "global.h"
-
+#include
+#include
/* Classes ********************************************************************/
-class CSound : public CSoundBase
+class CSound : public CSoundBase, public oboe::AudioStreamCallback//, public IRenderableAudio, public IRestartable
{
public:
CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ),
@@ -46,10 +49,30 @@ public:
virtual void Start();
virtual void Stop();
+ // Call backs for Oboe
+ virtual oboe::DataCallbackResult onAudioReady(oboe::AudioStream *oboeStream, void *audioData, int32_t numFrames);
+ virtual void onErrorAfterClose(oboe::AudioStream *oboeStream, oboe::Result result);
+ virtual void onErrorBeforeClose(oboe::AudioStream *oboeStream, oboe::Result result);
+
// these variables should be protected but cannot since we want
// to access them from the callback function
CVector vecsTmpAudioSndCrdStereo;
+ static void android_message_handler(QtMsgType type,
+ const QMessageLogContext &context,
+ const QString &message)
+ {
+ android_LogPriority priority = ANDROID_LOG_DEBUG;
+ switch (type) {
+ case QtDebugMsg: priority = ANDROID_LOG_DEBUG; break;
+ case QtWarningMsg: priority = ANDROID_LOG_WARN; break;
+ case QtCriticalMsg: priority = ANDROID_LOG_ERROR; break;
+ case QtFatalMsg: priority = ANDROID_LOG_FATAL; break;
+ };
+
+ __android_log_print(priority, "Qt", "%s", qPrintable(message));
+ };
+
// TEST
CVector vecsTmpAudioInSndCrd;
int iModifiedInBufSize;
@@ -57,27 +80,25 @@ int iModifiedInBufSize;
int iOpenSLBufferSizeMono;
int iOpenSLBufferSizeStereo;
-protected:
+private:
+ void setupCommonStreamParams(oboe::AudioStreamBuilder *builder);
+ void printStreamDetails(oboe::ManagedStream &stream);
+ void openStreams();
+ void closeStreams();
+ void warnIfNotLowLatency(oboe::ManagedStream &stream, QString streamName);
+ void closeStream(oboe::ManagedStream &stream);
- void InitializeOpenSL();
- void CloseOpenSL();
+ oboe::ManagedStream mRecordingStream;
+ oboe::ManagedStream mPlayStream;
+ AudioStreamCallback *mCallback;
- // callbacks
- static void processInput ( SLAndroidSimpleBufferQueueItf bufferQueue,
- void* instance );
+ // used to reach a state where the input buffer is
+ // empty and the garbage in the first 500ms or so is discarded
+ static constexpr int32_t kNumCallbacksToDrain = 10;
+ int32_t mCountCallbacksToDrain = kNumCallbacksToDrain;
- static void processOutput ( SLAndroidSimpleBufferQueueItf bufferQueue,
- void* instance );
-
- SLObjectItf engineObject;
- SLEngineItf engine;
- SLObjectItf recorderObject;
- SLRecordItf recorder;
- SLAndroidSimpleBufferQueueItf recorderSimpleBufQueue;
- SLObjectItf outputMixObject;
- SLObjectItf playerObject;
- SLPlayItf player;
- SLAndroidSimpleBufferQueueItf playerSimpleBufQueue;
+ // Used to reference this instance of class from within the static callback
+ CSound *pSound;
QMutex Mutex;
diff --git a/libs/oboe b/libs/oboe
new file mode 160000
index 00000000..55d878a4
--- /dev/null
+++ b/libs/oboe
@@ -0,0 +1 @@
+Subproject commit 55d878a4e85e1994f2b5883366079b991500a25f
diff --git a/src/global.h b/src/global.h
index 24195fa9..a68380bb 100755
--- a/src/global.h
+++ b/src/global.h
@@ -258,6 +258,8 @@ typedef unsigned __int64 uint64_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int8 uint8_t;
+#elif defined ( __ANDROID__ )
+/* don't redefine types for android as these ones below don't work. */
#else
typedef long long int64_t;
typedef int int32_t;
diff --git a/src/main.cpp b/src/main.cpp
index c955e3b3..3df5b3d4 100755
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -33,12 +33,15 @@
#include "settings.h"
#include "testbench.h"
#include "util.h"
-
+#ifdef ANDROID
+ #include
+#endif
// Implementation **************************************************************
int main ( int argc, char** argv )
{
+
QTextStream& tsConsole = *( ( new ConsoleWriterFactory() )->get() );
QString strArgument;
double rDbleArgument;
@@ -521,6 +524,14 @@ int main ( int argc, char** argv )
QCoreApplication* pApp = bUseGUI
? new QApplication ( argc, argv )
: new QCoreApplication ( argc, argv );
+#ifdef ANDROID
+ auto result = QtAndroid::checkPermission(QString("android.permission.RECORD_AUDIO"));
+ if(result == QtAndroid::PermissionResult::Denied){
+ QtAndroid::PermissionResultMap resultHash = QtAndroid::requestPermissionsSync(QStringList({"android.permission.RECORD_AUDIO"}));
+ if(resultHash["android.permission.RECORD_AUDIO"] == QtAndroid::PermissionResult::Denied)
+ return 0;
+ }
+#endif
#ifdef _WIN32
// set application priority class -> high priority