/**
 * OpenAL cross platform audio library
 * Copyright (C) 1999-2007 by authors.
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 * Or go to http://www.gnu.org/copyleft/lgpl.html
 */

#include "config.h"

#include "coreaudio.h"

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <string>
#include <vector>

#include "alnumeric.h"
#include "core/converter.h"
#include "core/device.h"
#include "core/logging.h"
#include "ringbuffer.h"

#include <AudioUnit/AudioUnit.h>
#include <AudioToolbox/AudioToolbox.h>

namespace {

#if TARGET_OS_IOS || TARGET_OS_TV
#define CAN_ENUMERATE 0
#else
#define CAN_ENUMERATE 1
#endif

constexpr auto OutputElement = 0;
constexpr auto InputElement = 1;

#if CAN_ENUMERATE
struct DeviceEntry {
    AudioDeviceID mId;
    std::string mName;
};

std::vector<DeviceEntry> PlaybackList;
std::vector<DeviceEntry> CaptureList;

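/* Small wrappers around the AudioObject property APIs, used below for device
 * enumeration and name lookups.
 */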
OSStatus GetHwProperty(AudioHardwarePropertyID propId, UInt32 dataSize, void *propData)
{
    const AudioObjectPropertyAddress addr{propId, kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster};
    return AudioObjectGetPropertyData(kAudioObjectSystemObject, &addr, 0, nullptr, &dataSize,
        propData);
}

OSStatus GetHwPropertySize(AudioHardwarePropertyID propId, UInt32 *outSize)
{
    const AudioObjectPropertyAddress addr{propId, kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster};
    return AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &addr, 0, nullptr, outSize);
}

OSStatus GetDevProperty(AudioDeviceID devId, AudioDevicePropertyID propId, bool isCapture,
    UInt32 elem, UInt32 dataSize, void *propData)
{
    static const AudioObjectPropertyScope scopes[2]{kAudioDevicePropertyScopeOutput,
        kAudioDevicePropertyScopeInput};
    const AudioObjectPropertyAddress addr{propId, scopes[isCapture], elem};
    return AudioObjectGetPropertyData(devId, &addr, 0, nullptr, &dataSize, propData);
}

OSStatus GetDevPropertySize(AudioDeviceID devId, AudioDevicePropertyID inPropertyID,
    bool isCapture, UInt32 elem, UInt32 *outSize)
{
    static const AudioObjectPropertyScope scopes[2]{kAudioDevicePropertyScopeOutput,
        kAudioDevicePropertyScopeInput};
    const AudioObjectPropertyAddress addr{inPropertyID, scopes[isCapture], elem};
    return AudioObjectGetPropertyDataSize(devId, &addr, 0, nullptr, outSize);
}

std::string GetDeviceName(AudioDeviceID devId)
{
    std::string devname;
    CFStringRef nameRef;

    /* Try to get the device name as a CFString, for Unicode name support. */
    OSStatus err{GetDevProperty(devId, kAudioDevicePropertyDeviceNameCFString, false, 0,
        sizeof(nameRef), &nameRef)};
    if(err == noErr)
    {
        const CFIndex propSize{CFStringGetMaximumSizeForEncoding(CFStringGetLength(nameRef),
            kCFStringEncodingUTF8)};
        devname.resize(static_cast<size_t>(propSize)+1, '\0');

        CFStringGetCString(nameRef, &devname[0], propSize+1, kCFStringEncodingUTF8);
        CFRelease(nameRef);
    }
    else
    {
        /* If that failed, just get the C string. Hopefully there's nothing bad
         * with this.
         */
        UInt32 propSize{};
        if(GetDevPropertySize(devId, kAudioDevicePropertyDeviceName, false, 0, &propSize))
            return devname;

        devname.resize(propSize+1, '\0');
        if(GetDevProperty(devId, kAudioDevicePropertyDeviceName, false, 0, propSize, &devname[0]))
        {
            devname.clear();
            return devname;
        }
    }

    /* Clear extraneous nul chars that may have been written with the name
     * string, and return it.
     */
    while(!devname.back())
        devname.pop_back();
    return devname;
}

UInt32 GetDeviceChannelCount(AudioDeviceID devId, bool isCapture)
{
    UInt32 propSize{};
    auto err = GetDevPropertySize(devId, kAudioDevicePropertyStreamConfiguration, isCapture, 0,
        &propSize);
    if(err)
    {
        ERR("kAudioDevicePropertyStreamConfiguration size query failed: %u\n", err);
        return 0;
    }

    auto buflist_data = std::make_unique<char[]>(propSize);
    auto *buflist = reinterpret_cast<AudioBufferList*>(buflist_data.get());

    err = GetDevProperty(devId, kAudioDevicePropertyStreamConfiguration, isCapture, 0, propSize,
        buflist);
    if(err)
    {
        ERR("kAudioDevicePropertyStreamConfiguration query failed: %u\n", err);
        return 0;
    }

    UInt32 numChannels{0};
    for(size_t i{0};i < buflist->mNumberBuffers;++i)
        numChannels += buflist->mBuffers[i].mNumberChannels;

    return numChannels;
}

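/* Builds the device list for the given direction. The default device is
 * listed first, devices with no channels in that direction are skipped, and
 * entries with duplicate names get a " #2", " #3", etc suffix so every name
 * is unique.
 */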
void EnumerateDevices(std::vector<DeviceEntry> &list, bool isCapture)
{
    UInt32 propSize{};
    if(auto err = GetHwPropertySize(kAudioHardwarePropertyDevices, &propSize))
    {
        ERR("Failed to get device list size: %u\n", err);
        return;
    }

    auto devIds = std::vector<AudioDeviceID>(propSize/sizeof(AudioDeviceID), kAudioDeviceUnknown);
    if(auto err = GetHwProperty(kAudioHardwarePropertyDevices, propSize, devIds.data()))
    {
        ERR("Failed to get device list: %u\n", err);
        return;
    }

    std::vector<DeviceEntry> newdevs;
    newdevs.reserve(devIds.size());

    AudioDeviceID defaultId{kAudioDeviceUnknown};
    GetHwProperty(isCapture ? kAudioHardwarePropertyDefaultInputDevice :
        kAudioHardwarePropertyDefaultOutputDevice, sizeof(defaultId), &defaultId);

    if(defaultId != kAudioDeviceUnknown)
    {
        newdevs.emplace_back(DeviceEntry{defaultId, GetDeviceName(defaultId)});
        const auto &entry = newdevs.back();
        TRACE("Got device: %s = ID %u\n", entry.mName.c_str(), entry.mId);
    }
    for(const AudioDeviceID devId : devIds)
    {
        if(devId == kAudioDeviceUnknown)
            continue;

        auto match_devid = [devId](const DeviceEntry &entry) noexcept -> bool
        { return entry.mId == devId; };
        auto match = std::find_if(newdevs.cbegin(), newdevs.cend(), match_devid);
        if(match != newdevs.cend()) continue;

        auto numChannels = GetDeviceChannelCount(devId, isCapture);
        if(numChannels > 0)
        {
            newdevs.emplace_back(DeviceEntry{devId, GetDeviceName(devId)});
            const auto &entry = newdevs.back();
            TRACE("Got device: %s = ID %u\n", entry.mName.c_str(), entry.mId);
        }
    }

    if(newdevs.size() > 1)
    {
        /* Rename entries that have matching names, by appending '#2', '#3',
         * etc, as needed.
         */
        for(auto curitem = newdevs.begin()+1;curitem != newdevs.end();++curitem)
        {
            auto check_match = [curitem](const DeviceEntry &entry) -> bool
            { return entry.mName == curitem->mName; };
            if(std::find_if(newdevs.begin(), curitem, check_match) != curitem)
            {
                std::string name{curitem->mName};
                size_t count{1};
                auto check_name = [&name](const DeviceEntry &entry) -> bool
                { return entry.mName == name; };
                do {
                    name = curitem->mName;
                    name += " #";
                    name += std::to_string(++count);
                } while(std::find_if(newdevs.begin(), curitem, check_name) != curitem);
                curitem->mName = std::move(name);
            }
        }
    }

    newdevs.shrink_to_fit();
    newdevs.swap(list);
}

#else

static constexpr char ca_device[] = "CoreAudio Default";
#endif

struct CoreAudioPlayback final : public BackendBase {
    CoreAudioPlayback(DeviceBase *device) noexcept : BackendBase{device} { }
    ~CoreAudioPlayback() override;

    OSStatus MixerProc(AudioUnitRenderActionFlags *ioActionFlags,
        const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames,
        AudioBufferList *ioData) noexcept;
    static OSStatus MixerProcC(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
        const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames,
        AudioBufferList *ioData) noexcept
    {
        return static_cast<CoreAudioPlayback*>(inRefCon)->MixerProc(ioActionFlags, inTimeStamp,
            inBusNumber, inNumberFrames, ioData);
    }

    void open(const char *name) override;
    bool reset() override;
    void start() override;
    void stop() override;

    AudioUnit mAudioUnit{};

    uint mFrameSize{0u};
    AudioStreamBasicDescription mFormat{}; // This is the OpenAL format as a CoreAudio ASBD

    DEF_NEWDEL(CoreAudioPlayback)
};

CoreAudioPlayback::~CoreAudioPlayback()
{
    AudioUnitUninitialize(mAudioUnit);
    AudioComponentInstanceDispose(mAudioUnit);
}

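/* Playback render callback: the mixer writes samples directly into each
 * buffer the output unit asks to be filled.
 */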
OSStatus CoreAudioPlayback::MixerProc(AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32,
    UInt32, AudioBufferList *ioData) noexcept
{
    for(size_t i{0};i < ioData->mNumberBuffers;++i)
    {
        auto &buffer = ioData->mBuffers[i];
        mDevice->renderSamples(buffer.mData, buffer.mDataByteSize/mFrameSize,
            buffer.mNumberChannels);
    }
    return noErr;
}

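/* Opens the named playback device. With enumeration available, the name is
 * looked up in the cached playback list; otherwise only the default device
 * name is accepted.
 */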
void CoreAudioPlayback::open(const char *name)
{
#if CAN_ENUMERATE
    AudioDeviceID audioDevice{kAudioDeviceUnknown};
    if(!name)
        GetHwProperty(kAudioHardwarePropertyDefaultOutputDevice, sizeof(audioDevice),
            &audioDevice);
    else
    {
        if(PlaybackList.empty())
            EnumerateDevices(PlaybackList, false);

        auto find_name = [name](const DeviceEntry &entry) -> bool
        { return entry.mName == name; };
        auto devmatch = std::find_if(PlaybackList.cbegin(), PlaybackList.cend(), find_name);
        if(devmatch == PlaybackList.cend())
            throw al::backend_exception{al::backend_error::NoDevice,
                "Device name \"%s\" not found", name};

        audioDevice = devmatch->mId;
    }
#else
    if(!name)
        name = ca_device;
    else if(strcmp(name, ca_device) != 0)
        throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
            name};
#endif

    /* open the default output unit */
    AudioComponentDescription desc{};
    desc.componentType = kAudioUnitType_Output;
#if CAN_ENUMERATE
    desc.componentSubType = (audioDevice == kAudioDeviceUnknown) ?
        kAudioUnitSubType_DefaultOutput : kAudioUnitSubType_HALOutput;
#else
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    AudioComponent comp{AudioComponentFindNext(NULL, &desc)};
    if(comp == nullptr)
        throw al::backend_exception{al::backend_error::NoDevice, "Could not find audio component"};

    AudioUnit audioUnit{};
    OSStatus err{AudioComponentInstanceNew(comp, &audioUnit)};
    if(err != noErr)
        throw al::backend_exception{al::backend_error::NoDevice,
            "Could not create component instance: %u", err};

#if CAN_ENUMERATE
    if(audioDevice != kAudioDeviceUnknown)
        AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_CurrentDevice,
            kAudioUnitScope_Global, OutputElement, &audioDevice, sizeof(AudioDeviceID));
#endif

    err = AudioUnitInitialize(audioUnit);
    if(err != noErr)
        throw al::backend_exception{al::backend_error::DeviceError,
            "Could not initialize audio unit: %u", err};

    /* WARNING: I don't know if "valid" audio unit values are guaranteed to be
     * non-0. If not, this logic is broken.
     */
    if(mAudioUnit)
    {
        AudioUnitUninitialize(mAudioUnit);
        AudioComponentInstanceDispose(mAudioUnit);
    }
    mAudioUnit = audioUnit;

#if CAN_ENUMERATE
    if(name)
        mDevice->DeviceName = name;
    else
    {
        UInt32 propSize{sizeof(audioDevice)};
        audioDevice = kAudioDeviceUnknown;
        AudioUnitGetProperty(audioUnit, kAudioOutputUnitProperty_CurrentDevice,
            kAudioUnitScope_Global, OutputElement, &audioDevice, &propSize);

        std::string devname{GetDeviceName(audioDevice)};
        if(!devname.empty()) mDevice->DeviceName = std::move(devname);
        else mDevice->DeviceName = "Unknown Device Name";
    }
#else
    mDevice->DeviceName = name;
#endif
}

bool CoreAudioPlayback::reset()
{
    OSStatus err{AudioUnitUninitialize(mAudioUnit)};
    if(err != noErr)
        ERR("-- AudioUnitUninitialize failed.\n");

    /* retrieve default output unit's properties (output side) */
    AudioStreamBasicDescription streamFormat{};
    UInt32 size{sizeof(streamFormat)};
    err = AudioUnitGetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
        OutputElement, &streamFormat, &size);
    if(err != noErr || size != sizeof(streamFormat))
    {
        ERR("AudioUnitGetProperty failed\n");
        return false;
    }

#if 0
    TRACE("Output streamFormat of default output unit -\n");
    TRACE("  streamFormat.mFramesPerPacket = %d\n", streamFormat.mFramesPerPacket);
    TRACE("  streamFormat.mChannelsPerFrame = %d\n", streamFormat.mChannelsPerFrame);
    TRACE("  streamFormat.mBitsPerChannel = %d\n", streamFormat.mBitsPerChannel);
    TRACE("  streamFormat.mBytesPerPacket = %d\n", streamFormat.mBytesPerPacket);
    TRACE("  streamFormat.mBytesPerFrame = %d\n", streamFormat.mBytesPerFrame);
    TRACE("  streamFormat.mSampleRate = %5.0f\n", streamFormat.mSampleRate);
#endif

    /* Use the sample rate from the output unit's current parameters, but reset
     * everything else.
     */
    if(mDevice->Frequency != streamFormat.mSampleRate)
    {
        mDevice->BufferSize = static_cast<uint>(mDevice->BufferSize*streamFormat.mSampleRate/
            mDevice->Frequency + 0.5);
        mDevice->Frequency = static_cast<uint>(streamFormat.mSampleRate);
    }

    /* FIXME: How to tell what channels are what in the output device, and how
     * to specify what we're giving? e.g. 6.0 vs 5.1
     */
    streamFormat.mChannelsPerFrame = mDevice->channelsFromFmt();

    streamFormat.mFramesPerPacket = 1;
    streamFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kLinearPCMFormatFlagIsPacked;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    switch(mDevice->FmtType)
    {
        case DevFmtUByte:
            mDevice->FmtType = DevFmtByte;
            /* fall-through */
        case DevFmtByte:
            streamFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
            streamFormat.mBitsPerChannel = 8;
            break;
        case DevFmtUShort:
            mDevice->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            streamFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
            streamFormat.mBitsPerChannel = 16;
            break;
        case DevFmtUInt:
            mDevice->FmtType = DevFmtInt;
            /* fall-through */
        case DevFmtInt:
            streamFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
            streamFormat.mBitsPerChannel = 32;
            break;
        case DevFmtFloat:
            streamFormat.mFormatFlags |= kLinearPCMFormatFlagIsFloat;
            streamFormat.mBitsPerChannel = 32;
            break;
    }
    streamFormat.mBytesPerFrame = streamFormat.mChannelsPerFrame*streamFormat.mBitsPerChannel/8;
    streamFormat.mBytesPerPacket = streamFormat.mBytesPerFrame*streamFormat.mFramesPerPacket;

    err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
        OutputElement, &streamFormat, sizeof(streamFormat));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        return false;
    }

    setDefaultWFXChannelOrder();

    /* setup callback */
    mFrameSize = mDevice->frameSizeFromFmt();
    AURenderCallbackStruct input{};
    input.inputProc = CoreAudioPlayback::MixerProcC;
    input.inputProcRefCon = this;

    err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_SetRenderCallback,
        kAudioUnitScope_Input, OutputElement, &input, sizeof(AURenderCallbackStruct));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        return false;
    }

    /* init the default audio unit... */
    err = AudioUnitInitialize(mAudioUnit);
    if(err != noErr)
    {
        ERR("AudioUnitInitialize failed\n");
        return false;
    }

    return true;
}

void CoreAudioPlayback::start()
{
    const OSStatus err{AudioOutputUnitStart(mAudioUnit)};
    if(err != noErr)
        throw al::backend_exception{al::backend_error::DeviceError,
            "AudioOutputUnitStart failed: %d", err};
}

void CoreAudioPlayback::stop()
{
    OSStatus err{AudioOutputUnitStop(mAudioUnit)};
    if(err != noErr)
        ERR("AudioOutputUnitStop failed\n");
}

struct CoreAudioCapture final : public BackendBase {
    CoreAudioCapture(DeviceBase *device) noexcept : BackendBase{device} { }
    ~CoreAudioCapture() override;

    OSStatus RecordProc(AudioUnitRenderActionFlags *ioActionFlags,
        const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
        UInt32 inNumberFrames, AudioBufferList *ioData) noexcept;
    static OSStatus RecordProcC(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
        const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames,
        AudioBufferList *ioData) noexcept
    {
        return static_cast<CoreAudioCapture*>(inRefCon)->RecordProc(ioActionFlags, inTimeStamp,
            inBusNumber, inNumberFrames, ioData);
    }

    void open(const char *name) override;
    void start() override;
    void stop() override;
    void captureSamples(al::byte *buffer, uint samples) override;
    uint availableSamples() override;

    AudioUnit mAudioUnit{0};

    uint mFrameSize{0u};
    AudioStreamBasicDescription mFormat{}; // This is the OpenAL format as a CoreAudio ASBD

    SampleConverterPtr mConverter;

    RingBufferPtr mRing{nullptr};

    DEF_NEWDEL(CoreAudioCapture)
};

CoreAudioCapture::~CoreAudioCapture()
{
    if(mAudioUnit)
        AudioComponentInstanceDispose(mAudioUnit);
    mAudioUnit = 0;
}

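/* Capture render callback: render the unit's input samples straight into the
 * ring buffer's write vector. A stack-allocated AudioBufferList (with room
 * for a second AudioBuffer) handles the case where the writable space wraps
 * around the end of the ring buffer.
 */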
OSStatus CoreAudioCapture::RecordProc(AudioUnitRenderActionFlags*,
    const AudioTimeStamp *inTimeStamp, UInt32, UInt32 inNumberFrames,
    AudioBufferList*) noexcept
{
    AudioUnitRenderActionFlags flags = 0;
    union {
        al::byte _[sizeof(AudioBufferList) + sizeof(AudioBuffer)*2];
        AudioBufferList list;
    } audiobuf{};

    auto rec_vec = mRing->getWriteVector();
    inNumberFrames = static_cast<UInt32>(minz(inNumberFrames,
        rec_vec.first.len+rec_vec.second.len));

    // Fill the ringbuffer's two segments with data from the input device
    if(rec_vec.first.len >= inNumberFrames)
    {
        audiobuf.list.mNumberBuffers = 1;
        audiobuf.list.mBuffers[0].mNumberChannels = mFormat.mChannelsPerFrame;
        audiobuf.list.mBuffers[0].mData = rec_vec.first.buf;
        audiobuf.list.mBuffers[0].mDataByteSize = inNumberFrames * mFormat.mBytesPerFrame;
    }
    else
    {
        const auto remaining = static_cast<uint>(inNumberFrames - rec_vec.first.len);
        audiobuf.list.mNumberBuffers = 2;
        audiobuf.list.mBuffers[0].mNumberChannels = mFormat.mChannelsPerFrame;
        audiobuf.list.mBuffers[0].mData = rec_vec.first.buf;
        audiobuf.list.mBuffers[0].mDataByteSize = static_cast<UInt32>(rec_vec.first.len) *
            mFormat.mBytesPerFrame;
        audiobuf.list.mBuffers[1].mNumberChannels = mFormat.mChannelsPerFrame;
        audiobuf.list.mBuffers[1].mData = rec_vec.second.buf;
        audiobuf.list.mBuffers[1].mDataByteSize = remaining * mFormat.mBytesPerFrame;
    }
    OSStatus err{AudioUnitRender(mAudioUnit, &flags, inTimeStamp, audiobuf.list.mNumberBuffers,
        inNumberFrames, &audiobuf.list)};
    if(err != noErr)
    {
        ERR("AudioUnitRender error: %d\n", err);
        return err;
    }

    mRing->writeAdvance(inNumberFrames);
    return noErr;
}

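/* Opens the named capture device and configures the output-type unit for
 * input: output on the output element is disabled, input on the input element
 * is enabled, a capture callback is installed, and a ring buffer (plus a
 * sample rate converter when the hardware rate differs from the requested
 * one) is prepared for captureSamples().
 */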
void CoreAudioCapture::open(const char *name)
{
#if CAN_ENUMERATE
    AudioDeviceID audioDevice{kAudioDeviceUnknown};
    if(!name)
        GetHwProperty(kAudioHardwarePropertyDefaultInputDevice, sizeof(audioDevice),
            &audioDevice);
    else
    {
        if(CaptureList.empty())
            EnumerateDevices(CaptureList, true);

        auto find_name = [name](const DeviceEntry &entry) -> bool
        { return entry.mName == name; };
        auto devmatch = std::find_if(CaptureList.cbegin(), CaptureList.cend(), find_name);
        if(devmatch == CaptureList.cend())
            throw al::backend_exception{al::backend_error::NoDevice,
                "Device name \"%s\" not found", name};

        audioDevice = devmatch->mId;
    }
#else
    if(!name)
        name = ca_device;
    else if(strcmp(name, ca_device) != 0)
        throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
            name};
#endif

    AudioComponentDescription desc{};
    desc.componentType = kAudioUnitType_Output;
#if CAN_ENUMERATE
    desc.componentSubType = (audioDevice == kAudioDeviceUnknown) ?
        kAudioUnitSubType_DefaultOutput : kAudioUnitSubType_HALOutput;
#else
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    // Search for component with given description
    AudioComponent comp{AudioComponentFindNext(NULL, &desc)};
    if(comp == NULL)
        throw al::backend_exception{al::backend_error::NoDevice, "Could not find audio component"};

    // Open the component
    OSStatus err{AudioComponentInstanceNew(comp, &mAudioUnit)};
    if(err != noErr)
        throw al::backend_exception{al::backend_error::NoDevice,
            "Could not create component instance: %u", err};

    // Turn off AudioUnit output
    UInt32 enableIO{0};
    err = AudioUnitSetProperty(mAudioUnit, kAudioOutputUnitProperty_EnableIO,
        kAudioUnitScope_Output, OutputElement, &enableIO, sizeof(enableIO));
    if(err != noErr)
        throw al::backend_exception{al::backend_error::DeviceError,
            "Could not disable audio unit output property: %u", err};

    // Turn on AudioUnit input
    enableIO = 1;
    err = AudioUnitSetProperty(mAudioUnit, kAudioOutputUnitProperty_EnableIO,
        kAudioUnitScope_Input, InputElement, &enableIO, sizeof(enableIO));
    if(err != noErr)
        throw al::backend_exception{al::backend_error::DeviceError,
            "Could not enable audio unit input property: %u", err};

#if CAN_ENUMERATE
    if(audioDevice != kAudioDeviceUnknown)
        AudioUnitSetProperty(mAudioUnit, kAudioOutputUnitProperty_CurrentDevice,
            kAudioUnitScope_Global, InputElement, &audioDevice, sizeof(AudioDeviceID));
#endif

    // set capture callback
    AURenderCallbackStruct input{};
    input.inputProc = CoreAudioCapture::RecordProcC;
    input.inputProcRefCon = this;

    err = AudioUnitSetProperty(mAudioUnit, kAudioOutputUnitProperty_SetInputCallback,
        kAudioUnitScope_Global, InputElement, &input, sizeof(AURenderCallbackStruct));
    if(err != noErr)
        throw al::backend_exception{al::backend_error::DeviceError,
            "Could not set capture callback: %u", err};

    // Disable buffer allocation for capture
    UInt32 flag{0};
    err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_ShouldAllocateBuffer,
        kAudioUnitScope_Output, InputElement, &flag, sizeof(flag));
    if(err != noErr)
        throw al::backend_exception{al::backend_error::DeviceError,
            "Could not disable buffer allocation property: %u", err};

    // Initialize the device
    err = AudioUnitInitialize(mAudioUnit);
    if(err != noErr)
        throw al::backend_exception{al::backend_error::DeviceError,
            "Could not initialize audio unit: %u", err};

    // Get the hardware format
    AudioStreamBasicDescription hardwareFormat{};
    UInt32 propertySize{sizeof(hardwareFormat)};
    err = AudioUnitGetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
        InputElement, &hardwareFormat, &propertySize);
    if(err != noErr || propertySize != sizeof(hardwareFormat))
        throw al::backend_exception{al::backend_error::DeviceError,
            "Could not get input format: %u", err};

    // Set up the requested format description
    AudioStreamBasicDescription requestedFormat{};
    switch(mDevice->FmtType)
    {
    case DevFmtByte:
        requestedFormat.mBitsPerChannel = 8;
        requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        break;
    case DevFmtUByte:
        requestedFormat.mBitsPerChannel = 8;
        requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
        break;
    case DevFmtShort:
        requestedFormat.mBitsPerChannel = 16;
        requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger
            | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
        break;
    case DevFmtUShort:
        requestedFormat.mBitsPerChannel = 16;
        requestedFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
        break;
    case DevFmtInt:
        requestedFormat.mBitsPerChannel = 32;
        requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger
            | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
        break;
    case DevFmtUInt:
        requestedFormat.mBitsPerChannel = 32;
        requestedFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
        break;
    case DevFmtFloat:
        requestedFormat.mBitsPerChannel = 32;
        requestedFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat | kAudioFormatFlagsNativeEndian
            | kAudioFormatFlagIsPacked;
        break;
    }

    switch(mDevice->FmtChans)
    {
    case DevFmtMono:
        requestedFormat.mChannelsPerFrame = 1;
        break;
    case DevFmtStereo:
        requestedFormat.mChannelsPerFrame = 2;
        break;

    case DevFmtQuad:
    case DevFmtX51:
    case DevFmtX61:
    case DevFmtX71:
    case DevFmtX3D71:
    case DevFmtAmbi3D:
        throw al::backend_exception{al::backend_error::DeviceError, "%s not supported",
            DevFmtChannelsString(mDevice->FmtChans)};
    }

    requestedFormat.mBytesPerFrame = requestedFormat.mChannelsPerFrame * requestedFormat.mBitsPerChannel / 8;
    requestedFormat.mBytesPerPacket = requestedFormat.mBytesPerFrame;
    requestedFormat.mSampleRate = mDevice->Frequency;
    requestedFormat.mFormatID = kAudioFormatLinearPCM;
    requestedFormat.mReserved = 0;
    requestedFormat.mFramesPerPacket = 1;

    // save requested format description for later use
    mFormat = requestedFormat;
    mFrameSize = mDevice->frameSizeFromFmt();

    // Use intermediate format for sample rate conversion (outputFormat)
    // Set sample rate to the same as hardware for resampling later
    AudioStreamBasicDescription outputFormat{requestedFormat};
    outputFormat.mSampleRate = hardwareFormat.mSampleRate;

    // The output format should be the requested format, but using the hardware sample rate.
    // This is because the AudioUnit will automatically scale other properties, except for
    // the sample rate.
    err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
        InputElement, &outputFormat, sizeof(outputFormat));
    if(err != noErr)
        throw al::backend_exception{al::backend_error::DeviceError,
            "Could not set input format: %u", err};

    /* Calculate the minimum AudioUnit output format frame count for the pre-
     * conversion ring buffer. Ensure at least 100ms for the total buffer.
     */
    double srateScale{outputFormat.mSampleRate / mDevice->Frequency};
    auto FrameCount64 = maxu64(static_cast<uint64_t>(std::ceil(mDevice->BufferSize*srateScale)),
        static_cast<UInt32>(outputFormat.mSampleRate)/10);
    FrameCount64 += MaxResamplerPadding;
    if(FrameCount64 > std::numeric_limits<int32_t>::max())
        throw al::backend_exception{al::backend_error::DeviceError,
            "Calculated frame count is too large: %" PRIu64, FrameCount64};

    UInt32 outputFrameCount{};
    propertySize = sizeof(outputFrameCount);
    err = AudioUnitGetProperty(mAudioUnit, kAudioUnitProperty_MaximumFramesPerSlice,
        kAudioUnitScope_Global, OutputElement, &outputFrameCount, &propertySize);
    if(err != noErr || propertySize != sizeof(outputFrameCount))
        throw al::backend_exception{al::backend_error::DeviceError,
            "Could not get input frame count: %u", err};

    outputFrameCount = static_cast<UInt32>(maxu64(outputFrameCount, FrameCount64));
    mRing = RingBuffer::Create(outputFrameCount, mFrameSize, false);

    /* Set up sample converter if needed */
    if(outputFormat.mSampleRate != mDevice->Frequency)
        mConverter = CreateSampleConverter(mDevice->FmtType, mDevice->FmtType,
            mFormat.mChannelsPerFrame, static_cast<uint>(hardwareFormat.mSampleRate),
            mDevice->Frequency, Resampler::FastBSinc24);

#if CAN_ENUMERATE
    if(name)
        mDevice->DeviceName = name;
    else
    {
        UInt32 propSize{sizeof(audioDevice)};
        audioDevice = kAudioDeviceUnknown;
        AudioUnitGetProperty(mAudioUnit, kAudioOutputUnitProperty_CurrentDevice,
            kAudioUnitScope_Global, InputElement, &audioDevice, &propSize);

        std::string devname{GetDeviceName(audioDevice)};
        if(!devname.empty()) mDevice->DeviceName = std::move(devname);
        else mDevice->DeviceName = "Unknown Device Name";
    }
#else
    mDevice->DeviceName = name;
#endif
}

void CoreAudioCapture::start()
{
    OSStatus err{AudioOutputUnitStart(mAudioUnit)};
    if(err != noErr)
        throw al::backend_exception{al::backend_error::DeviceError,
            "AudioOutputUnitStart failed: %d", err};
}

void CoreAudioCapture::stop()
{
    OSStatus err{AudioOutputUnitStop(mAudioUnit)};
    if(err != noErr)
        ERR("AudioOutputUnitStop failed\n");
}

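/* Without a sample rate converter the captured samples can be read from the
 * ring buffer as-is. Otherwise, feed the converter from the ring buffer's one
 * or two readable segments until the request is satisfied.
 */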
void CoreAudioCapture::captureSamples(al::byte *buffer, uint samples)
{
    if(!mConverter)
    {
        mRing->read(buffer, samples);
        return;
    }

    auto rec_vec = mRing->getReadVector();
    const void *src0{rec_vec.first.buf};
    auto src0len = static_cast<uint>(rec_vec.first.len);
    uint got{mConverter->convert(&src0, &src0len, buffer, samples)};
    size_t total_read{rec_vec.first.len - src0len};
    if(got < samples && !src0len && rec_vec.second.len > 0)
    {
        const void *src1{rec_vec.second.buf};
        auto src1len = static_cast<uint>(rec_vec.second.len);
        got += mConverter->convert(&src1, &src1len, buffer + got*mFrameSize, samples-got);
        total_read += rec_vec.second.len - src1len;
    }

    mRing->readAdvance(total_read);
}

uint CoreAudioCapture::availableSamples()
{
    if(!mConverter) return static_cast<uint>(mRing->readSpace());
    return mConverter->availableOut(static_cast<uint>(mRing->readSpace()));
}

} // namespace

BackendFactory &CoreAudioBackendFactory::getFactory()
{
    static CoreAudioBackendFactory factory{};
    return factory;
}

bool CoreAudioBackendFactory::init() { return true; }

bool CoreAudioBackendFactory::querySupport(BackendType type)
{ return type == BackendType::Playback || type == BackendType::Capture; }

std::string CoreAudioBackendFactory::probe(BackendType type)
{
    std::string outnames;
#if CAN_ENUMERATE
    auto append_name = [&outnames](const DeviceEntry &entry) -> void
    {
        /* Includes null char. */
        outnames.append(entry.mName.c_str(), entry.mName.length()+1);
    };
    switch(type)
    {
    case BackendType::Playback:
        EnumerateDevices(PlaybackList, false);
        std::for_each(PlaybackList.cbegin(), PlaybackList.cend(), append_name);
        break;
    case BackendType::Capture:
        EnumerateDevices(CaptureList, true);
        std::for_each(CaptureList.cbegin(), CaptureList.cend(), append_name);
        break;
    }

#else
    switch(type)
    {
    case BackendType::Playback:
    case BackendType::Capture:
        /* Includes null char. */
        outnames.append(ca_device, sizeof(ca_device));
        break;
    }
#endif
    return outnames;
}

BackendPtr CoreAudioBackendFactory::createBackend(DeviceBase *device, BackendType type)
{
    if(type == BackendType::Playback)
        return BackendPtr{new CoreAudioPlayback{device}};
    if(type == BackendType::Capture)
        return BackendPtr{new CoreAudioCapture{device}};
    return nullptr;
}