Tim Allen e1223366a7 Update to v103r22 release.
byuu says:

Changelog:

  - ruby: ported all remaining drivers to new API¹
  - ruby/wasapi: fix for dropping one sample per period [SuperMikeMan]
  - gb: emulated most of the TAMA RTC, but RTC state is still volatile²

¹: the new ports are:

  - audio/{directsound, alsa, pulseaudio, pulseaudiosimple, ao}
  - input/{udev, quartz, carbon}

It's pretty much guaranteed many of them will have compilation errors.
Please paste the error logs and I'll try to fix them up. It may take a
WIP or two to get there.

It's also possible things broke from the updates. If so, I could use
help comparing the old file to the new file, looking for mistakes, since
I can't test on these platforms apart from audio/directsound.

Please report working drivers here so we can mark them off the list. I'll
need both macOS and Linux testers.

audio/directsound.cpp:112:

    if(DirectSoundCreate(0, &_interface, 0) != DS_OK) return terminate(), false;

²: once I get this working, I'll add load/save support for the RTC
values. For now, the RTC data will be lost when you close the emulator.

Right now, you can set the date/time in real-time mode, and when you
start the game, the time will be correct and will tick forward. Note that
it runs off emulated time instead of actual real time, so if you
fast-forward to 300%, one emulated minute will pass in 20 real seconds.
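
As an illustration (this is only a sketch of the idea, not the actual
emulator code; the type and its layout are made up for the example), an
RTC clocked from emulated CPU cycles advances exactly as fast as the core
runs:

    //one tick per emulated CPU cycle; at 300% speed the clock also runs 3x faster
    struct EmulatedRTC {
      static constexpr uint64_t frequency = 4'194'304; //Game Boy clock rate in Hz
      uint64_t cycles = 0;   //cycles accumulated toward the next second
      uint64_t seconds = 0;  //seconds elapsed since the user-set date/time

      auto tick() -> void {
        if(++cycles == frequency) {
          cycles = 0;
          seconds++; //one emulated second has elapsed
        }
      }
    };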

The really big limitation right now is that when you exit the game,
restart it, and resume your save, the hour value gets corrupted, and this
seems to instantly kill your pet. Fun. This is crazy, because the
commands the game sends to the TAMA interface are identical whether you
start a new game and get in-game, or load an existing game.

It's likely going to require disassembling the game's code and seeing
what in the hell it's doing, but I am extremely bad at LR35902 assembly.
Hopefully endrift can help here :|
2017-07-28 21:42:24 +10:00


#include <avrt.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <audiopolicy.h>
#include <devicetopology.h>
#include <endpointvolume.h>
#include <functiondiscoverykeys_devpkey.h>

struct AudioWASAPI : Audio {
  AudioWASAPI() { initialize(); }
  ~AudioWASAPI() { terminate(); }

  auto ready() -> bool { return _ready; }

  auto information() -> Information {
    Information information;
    for(auto& device : _devices) information.devices.append(device);
    information.channels = {2};
    information.frequencies = {(double)_frequency};
    information.latencies = {0, 20, 40, 60, 80, 100};
    return information;
  }

  auto exclusive() -> bool { return _exclusive; }
  auto device() -> string { return _device; }
  auto blocking() -> bool { return _blocking; }
  auto channels() -> uint { return _channels; }
  auto frequency() -> double { return (double)_frequency; }
  auto latency() -> uint { return _latency; }

  auto setExclusive(bool exclusive) -> bool {
    if(_exclusive == exclusive) return true;
    _exclusive = exclusive;
    return initialize();
  }

  auto setDevice(string device) -> bool {
    if(_device == device) return true;
    _device = device;
    return initialize();
  }

  auto setBlocking(bool blocking) -> bool {
    if(_blocking == blocking) return true;
    _blocking = blocking;
    return true;
  }

  auto setFrequency(double frequency) -> bool {
    if(_frequency == frequency) return true;
    _frequency = frequency;
    return initialize();
  }

  auto setLatency(uint latency) -> bool {
    if(_latency == latency) return true;
    _latency = latency;
    return initialize();
  }

  auto clear() -> void {
    if(!ready()) return;
    _queue.read = 0;
    _queue.write = 0;
    _queue.count = 0;
    _audioClient->Stop();
    _audioClient->Reset();
    _audioClient->Start();
  }

  auto output(const double samples[]) -> void {
    if(!ready()) return;
    for(uint n : range(_channels)) {
      _queue.samples[_queue.write][n] = samples[n];
    }
    _queue.write++;
    _queue.count++;

    //once a full device buffer's worth of frames has been queued, wait on the
    //event (only in blocking mode) and flush the queue to the render client
    if(_queue.count >= _bufferSize) {
      if(WaitForSingleObject(_eventHandle, _blocking ? INFINITE : 0) == WAIT_OBJECT_0) {
        write();
      }
    }
  }

private:
  auto initialize() -> bool {
    terminate();

    if(CoCreateInstance(CLSID_MMDeviceEnumerator, nullptr, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&_enumerator) != S_OK) return false;

    //enumerate all audio endpoint devices, and select the first to match the device() name
    IMMDeviceCollection* deviceCollection = nullptr;
    if(_enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &deviceCollection) != S_OK) return false;
    uint deviceCount = 0;
    if(deviceCollection->GetCount(&deviceCount) != S_OK) return false;
    for(uint deviceIndex : range(deviceCount)) {
      IMMDevice* device = nullptr;
      if(deviceCollection->Item(deviceIndex, &device) != S_OK) return false;
      IPropertyStore* propertyStore = nullptr;
      device->OpenPropertyStore(STGM_READ, &propertyStore);
      PROPVARIANT propVariant;
      propertyStore->GetValue(PKEY_Device_FriendlyName, &propVariant);
      _devices.append((const char*)utf8_t(propVariant.pwszVal));
      propertyStore->Release();
      if(!_audioDevice && _devices.right() == _device) {
        _audioDevice = device;
      } else {
        device->Release();
      }
    }
    deviceCollection->Release();

    //if no match is found, choose the default audio endpoint for the device()
    if(!_audioDevice) {
      if(_enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &_audioDevice) != S_OK) return false;
    }

    if(_audioDevice->Activate(IID_IAudioClient, CLSCTX_ALL, nullptr, (void**)&_audioClient) != S_OK) return false;

    WAVEFORMATEXTENSIBLE waveFormat = {};
    if(_exclusive) {
      IPropertyStore* propertyStore = nullptr;
      if(_audioDevice->OpenPropertyStore(STGM_READ, &propertyStore) != S_OK) return false;
      PROPVARIANT propVariant;
      if(propertyStore->GetValue(PKEY_AudioEngine_DeviceFormat, &propVariant) != S_OK) return false;
      waveFormat = *(WAVEFORMATEXTENSIBLE*)propVariant.blob.pBlobData;
      propertyStore->Release();
      if(_audioClient->GetDevicePeriod(nullptr, &_devicePeriod) != S_OK) return false;
      auto latency = max(_devicePeriod, (REFERENCE_TIME)_latency * 10'000); //1ms to 100ns units
      auto result = _audioClient->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, AUDCLNT_STREAMFLAGS_EVENTCALLBACK, latency, latency, &waveFormat.Format, nullptr);
      if(result == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
        //the requested latency is not aligned to the device period:
        //derive an aligned latency from the returned buffer size and initialize again
        if(_audioClient->GetBufferSize(&_bufferSize) != S_OK) return false;
        _audioClient->Release();
        latency = (REFERENCE_TIME)(10'000 * 1'000 * _bufferSize / waveFormat.Format.nSamplesPerSec);
        if(_audioDevice->Activate(IID_IAudioClient, CLSCTX_ALL, nullptr, (void**)&_audioClient) != S_OK) return false;
        result = _audioClient->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, AUDCLNT_STREAMFLAGS_EVENTCALLBACK, latency, latency, &waveFormat.Format, nullptr);
      }
      if(result != S_OK) return false;
      DWORD taskIndex = 0;
      _taskHandle = AvSetMmThreadCharacteristics(L"Pro Audio", &taskIndex);
    } else {
      WAVEFORMATEX* waveFormatEx = nullptr;
      if(_audioClient->GetMixFormat(&waveFormatEx) != S_OK) return false;
      waveFormat = *(WAVEFORMATEXTENSIBLE*)waveFormatEx;
      CoTaskMemFree(waveFormatEx);
      if(_audioClient->GetDevicePeriod(&_devicePeriod, nullptr) != S_OK) return false;
      auto latency = max(_devicePeriod, (REFERENCE_TIME)_latency * 10'000); //1ms to 100ns units
      if(_audioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK, latency, 0, &waveFormat.Format, nullptr) != S_OK) return false;
    }

    _eventHandle = CreateEvent(nullptr, false, false, nullptr);
    if(_audioClient->SetEventHandle(_eventHandle) != S_OK) return false;
    if(_audioClient->GetService(IID_IAudioRenderClient, (void**)&_renderClient) != S_OK) return false;
    if(_audioClient->GetBufferSize(&_bufferSize) != S_OK) return false;

    _channels = waveFormat.Format.nChannels;
    _frequency = waveFormat.Format.nSamplesPerSec;
    _mode = waveFormat.SubFormat.Data1; //1 = PCM integer, 3 = IEEE float
    _precision = waveFormat.Format.wBitsPerSample;

    _ready = true;
    clear();
    return true;
  }

  auto terminate() -> void {
    _ready = false;
    _devices.reset();
    if(_audioClient) _audioClient->Stop();
    if(_renderClient) _renderClient->Release(), _renderClient = nullptr;
    if(_audioClient) _audioClient->Release(), _audioClient = nullptr;
    if(_audioDevice) _audioDevice->Release(), _audioDevice = nullptr;
    if(_eventHandle) CloseHandle(_eventHandle), _eventHandle = nullptr;
    if(_taskHandle) AvRevertMmThreadCharacteristics(_taskHandle), _taskHandle = nullptr;
  }

  auto write() -> void {
    //in shared mode, only the unused portion of the buffer may be written
    uint32_t available = _bufferSize;
    if(!_exclusive) {
      uint32_t padding = 0;
      _audioClient->GetCurrentPadding(&padding);
      available = _bufferSize - padding;
    }
    uint32_t length = min(available, _queue.count);

    uint8_t* buffer = nullptr;
    if(_renderClient->GetBuffer(length, &buffer) == S_OK) {
      uint bufferFlags = 0;
      for(uint _ : range(length)) {
        double samples[8] = {};
        if(_queue.count) {
          for(uint n : range(_channels)) {
            samples[n] = _queue.samples[_queue.read][n];
          }
          _queue.read++;
          _queue.count--;
        }

        if(_mode == 1 && _precision == 16) {
          auto output = (int16_t*)buffer;
          for(uint n : range(_channels)) *output++ = int16_t(samples[n] * 32768.0);
          buffer = (uint8_t*)output;
        } else if(_mode == 1 && _precision == 32) {
          auto output = (int32_t*)buffer;
          for(uint n : range(_channels)) *output++ = int32_t(samples[n] * 65536.0 * 32768.0);
          buffer = (uint8_t*)output;
        } else if(_mode == 3 && _precision == 32) {
          auto output = (float*)buffer;
          for(uint n : range(_channels)) *output++ = float(samples[n]);
          buffer = (uint8_t*)output;
        } else {
          //output silence for unsupported sample formats
          bufferFlags = AUDCLNT_BUFFERFLAGS_SILENT;
          break;
        }
      }
      _renderClient->ReleaseBuffer(length, bufferFlags);
    }
  }

  bool _ready = false;
  bool _exclusive = false;
  string _device;
  bool _blocking = true;
  uint _channels = 2;
  uint _frequency = 48000;
  uint _latency = 20;

  uint _mode = 0;      //WAVEFORMATEXTENSIBLE SubFormat.Data1: 1 = PCM integer, 3 = IEEE float
  uint _precision = 0; //bits per sample

  //ring buffer of sample frames; the 16-bit indices wrap naturally across 65536 entries
  struct Queue {
    double samples[65536][8];
    uint16_t read;
    uint16_t write;
    uint16_t count;
  } _queue;

  IMMDeviceEnumerator* _enumerator = nullptr;
  vector<string> _devices;
  IMMDevice* _audioDevice = nullptr;
  IAudioClient* _audioClient = nullptr;
  IAudioRenderClient* _renderClient = nullptr;
  HANDLE _eventHandle = nullptr;
  HANDLE _taskHandle = nullptr;
  REFERENCE_TIME _devicePeriod = 0;
  uint32_t _bufferSize = 0; //in frames
};
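
For reference, here is a minimal usage sketch based only on the public
interface above. It is illustrative, not part of the driver: the ruby
frontend normally constructs the driver and feeds it one frame of samples
at a time, and it assumes COM has already been initialized on the calling
thread; the values passed here are arbitrary.

    AudioWASAPI audio;              //constructor runs initialize()
    if(audio.ready()) {
      audio.setExclusive(false);    //shared mode: format comes from the OS mixer
      audio.setBlocking(true);      //block on the event once a full buffer is queued
      audio.setLatency(40);         //milliseconds; converted to 100ns units internally
      double frame[8] = {0.0, 0.0}; //one frame of channels() samples in -1.0..+1.0
      audio.output(frame);          //queued; flushed to WASAPI when the buffer fills
    }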