Update to v093 release.

byuu says:

Changelog:
- added Cocoa target: higan can now be compiled for OS X Lion
  [Cydrak, byuu]
- SNES/accuracy profile hires color blending improvements - fixes
  Marvelous text [AWJ]
- fixed a slight bug in SNES/SA-1 VBR support caused by a typo
- added support for multi-pass shaders that can load external textures
  (requires OpenGL 3.2+; a rough sketch of the idea follows this list)
- added game library path (used by ananke->Import Game) to
  Settings->Advanced
- system profiles, shaders, and the cheats database can now be stored in
  shared "all users" folders (e.g. /usr/share on Linux)
- all configuration files are in BML format now, instead of XML (much
  easier to read and edit this way)
- main window supports drag-and-drop of game folders (but not game files
  / ZIP archives)
- audio buffer clears when entering a modal loop on Windows (prevents
  audio repetition with the DirectSound driver; also sketched after this
  list)
- a substantial amount of code clean-up (probably the biggest
  refactoring to date)
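
On the shader side, the external-texture support boils down to binding a
user-supplied image alongside the previous pass's output. The sketch below
only illustrates that idea in plain OpenGL 3.2 calls; the function, texture,
and uniform names are made up here, higan's actual shader manifest format is
not shown, and a loader such as GLEW is assumed to provide the entry points:

#include <GL/glew.h>

//hypothetical helper: draw one shader pass that samples both the previous
//pass's output and an externally loaded texture (e.g. a color LUT)
void drawPassWithExternalTexture(GLuint program, GLuint previousPass, GLuint externalTexture) {
  glUseProgram(program);

  glActiveTexture(GL_TEXTURE0);
  glBindTexture(GL_TEXTURE_2D, previousPass);      //result of the previous pass
  glActiveTexture(GL_TEXTURE1);
  glBindTexture(GL_TEXTURE_2D, externalTexture);   //user-supplied external image

  glUniform1i(glGetUniformLocation(program, "source"), 0);  //sampler on unit 0
  glUniform1i(glGetUniformLocation(program, "lut"), 1);     //sampler on unit 1

  glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);           //full-screen quad
}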
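
The modal-loop fix is also simple in concept: flush whatever audio is queued
before Windows starts spinning its own message loop, so a driver like
DirectSound does not keep repeating the last buffer. A minimal sketch of that
idea, with an illustrative stand-in for ruby's active audio driver (not
higan's exact code):

#include <windows.h>

struct AudioDriver { void clear(); };  //stand-in for the active ruby audio driver
extern AudioDriver audio;

LRESULT CALLBACK windowProc(HWND hwnd, UINT msg, WPARAM wparam, LPARAM lparam) {
  switch(msg) {
  case WM_ENTERMENULOOP:  //a menu was opened
  case WM_ENTERSIZEMOVE:  //the window is being moved or resized
    audio.clear();        //discard queued samples instead of letting them repeat
    break;
  }
  return DefWindowProc(hwnd, msg, wparam, lparam);
}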

One highly desired target for this release was to default to the optimal
drivers instead of the safest drivers, but because AMD drivers don't
seem to like my OpenGL 3.2 driver, I've decided to postpone that. AMD
has too big a market share. Hopefully with v093 officially released, we
can get some public input on what AMD doesn't like.
Author: Tim Allen
Date:   2013-08-18 13:21:14 +10:00
Commit: 4e2eb23835 (parent c74865e171)

1928 changed files with 4834 additions and 84223 deletions

ruby/audio/xaudio2.cpp (new file, 198 lines)
@@ -0,0 +1,198 @@
/*
  audio.xaudio2 (2010-08-14)
  author: OV2
*/

#include "xaudio2.hpp"
#include <windows.h>

namespace ruby {

class pAudioXAudio2: public IXAudio2VoiceCallback {
public:
  IXAudio2* pXAudio2;
  IXAudio2MasteringVoice* pMasterVoice;
  IXAudio2SourceVoice* pSourceVoice;

  //inherited from IXAudio2VoiceCallback
  STDMETHODIMP_(void) OnBufferStart(void* pBufferContext) {}
  STDMETHODIMP_(void) OnLoopEnd(void* pBufferContext) {}
  STDMETHODIMP_(void) OnStreamEnd() {}
  STDMETHODIMP_(void) OnVoiceError(void* pBufferContext, HRESULT Error) {}
  STDMETHODIMP_(void) OnVoiceProcessingPassEnd() {}
  STDMETHODIMP_(void) OnVoiceProcessingPassStart(UINT32 BytesRequired) {}
  struct {
    unsigned buffers;             //number of ring-buffer segments
    unsigned latency;             //frames per segment
    uint32_t* buffer;             //one uint32_t per stereo frame (left | right << 16)
    unsigned bufferoffset;        //write position inside the current segment
    volatile long submitbuffers;  //segments currently queued on the source voice
    unsigned writebuffer;         //segment currently being filled
  } device;

  struct {
    bool synchronize;
    unsigned frequency;
    unsigned latency;
  } settings;
  bool cap(const string& name) {
    if(name == Audio::Synchronize) return true;
    if(name == Audio::Frequency) return true;
    if(name == Audio::Latency) return true;
    return false;
  }

  any get(const string& name) {
    if(name == Audio::Synchronize) return settings.synchronize;
    if(name == Audio::Frequency) return settings.frequency;
    if(name == Audio::Latency) return settings.latency;
    return false;
  }

  bool set(const string& name, const any& value) {
    if(name == Audio::Synchronize) {
      settings.synchronize = any_cast<bool>(value);
      if(pXAudio2) clear();
      return true;
    }

    if(name == Audio::Frequency) {
      settings.frequency = any_cast<unsigned>(value);
      if(pXAudio2) init();
      return true;
    }

    if(name == Audio::Latency) {
      settings.latency = any_cast<unsigned>(value);
      if(pXAudio2) init();
      return true;
    }

    return false;
  }
  void pushbuffer(unsigned bytes, uint32_t* pAudioData) {
    XAUDIO2_BUFFER xa2buffer = {0};
    xa2buffer.AudioBytes = bytes;
    xa2buffer.pAudioData = reinterpret_cast<BYTE*>(pAudioData);
    xa2buffer.pContext = 0;
    InterlockedIncrement(&device.submitbuffers);
    pSourceVoice->SubmitSourceBuffer(&xa2buffer);
  }

  void sample(uint16_t left, uint16_t right) {
    //pack one stereo frame: left in the low 16 bits, right in the high 16 bits
    device.buffer[device.writebuffer * device.latency + device.bufferoffset++] = left + (right << 16);
    if(device.bufferoffset < device.latency) return;
    device.bufferoffset = 0;

    if(device.submitbuffers == device.buffers - 1) {
      if(settings.synchronize == true) {
        //wait until there is at least one other free buffer for the next sample
        while(device.submitbuffers == device.buffers - 1) {
          //Sleep(0);
        }
      } else {  //we need one free buffer for the next sample, so ignore the current contents
        return;
      }
    }

    pushbuffer(device.latency * 4, device.buffer + device.writebuffer * device.latency);
    device.writebuffer = (device.writebuffer + 1) % device.buffers;
  }
  void clear() {
    if(!pSourceVoice) return;
    pSourceVoice->Stop(0);
    pSourceVoice->FlushSourceBuffers();  //calls OnBufferEnd for all currently submitted buffers

    device.writebuffer = 0;
    device.bufferoffset = 0;
    if(device.buffer) memset(device.buffer, 0, device.latency * device.buffers * 4);

    pSourceVoice->Start(0);
  }

  bool init() {
    term();

    device.buffers = 8;
    //frames per segment: requested latency (ms) spread evenly across all segments
    device.latency = settings.frequency * settings.latency / device.buffers / 1000.0 + 0.5;
    device.buffer = new uint32_t[device.latency * device.buffers];
    device.bufferoffset = 0;
    device.submitbuffers = 0;

    HRESULT hr;
    if(FAILED(hr = XAudio2Create(&pXAudio2, 0, XAUDIO2_DEFAULT_PROCESSOR))) {
      return false;
    }

    if(FAILED(hr = pXAudio2->CreateMasteringVoice(&pMasterVoice, 2, settings.frequency, 0, 0, NULL))) {
      return false;
    }

    WAVEFORMATEX wfx;
    wfx.wFormatTag = WAVE_FORMAT_PCM;
    wfx.nChannels = 2;
    wfx.nSamplesPerSec = settings.frequency;
    wfx.nBlockAlign = 4;
    wfx.wBitsPerSample = 16;
    wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
    wfx.cbSize = 0;

    if(FAILED(hr = pXAudio2->CreateSourceVoice(&pSourceVoice, (WAVEFORMATEX*)&wfx, XAUDIO2_VOICE_NOSRC, XAUDIO2_DEFAULT_FREQ_RATIO, this, NULL, NULL))) {
      return false;
    }

    clear();
    return true;
  }
  void term() {
    if(pSourceVoice) {
      pSourceVoice->Stop(0);
      pSourceVoice->DestroyVoice();
      pSourceVoice = nullptr;
    }

    if(pMasterVoice) {
      pMasterVoice->DestroyVoice();
      pMasterVoice = nullptr;
    }

    if(pXAudio2) {
      pXAudio2->Release();
      pXAudio2 = nullptr;
    }

    if(device.buffer) {
      delete[] device.buffer;
      device.buffer = nullptr;
    }
  }

  //called on XAudio2's worker thread when a queued segment finishes playing
  STDMETHODIMP_(void) OnBufferEnd(void* pBufferContext) {
    InterlockedDecrement(&device.submitbuffers);
  }

  pAudioXAudio2() {
    pXAudio2 = nullptr;
    pMasterVoice = nullptr;
    pSourceVoice = nullptr;
    device.buffer = nullptr;
    device.bufferoffset = 0;
    device.submitbuffers = 0;
    device.writebuffer = 0;
    settings.synchronize = false;
    settings.frequency = 22050;
    settings.latency = 120;
  }
};

DeclareAudio(XAudio2)

};
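
For context, this driver is normally reached through the ruby Audio wrapper
created by DeclareAudio(XAudio2); the direct calls below are only a sketch of
the call sequence a frontend ends up making, with illustrative rates and
sample values:

void example() {
  ruby::pAudioXAudio2 driver;

  driver.set(ruby::Audio::Frequency, 44100u);  //output rate in Hz
  driver.set(ruby::Audio::Latency, 80u);       //total buffering in milliseconds
  driver.set(ruby::Audio::Synchronize, true);  //busy-wait when the ring buffer is full
  if(driver.init() == false) return;

  for(unsigned n = 0; n < 44100; n++) driver.sample(0, 0);  //one second of silence

  driver.term();  //stop the voice and release the ring buffer
}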