mirror of
https://github.com/bsnes-emu/bsnes.git
synced 2025-08-31 11:51:49 +02:00
Include all the code from the bsnes v068 tarball.
byuu describes the changes since v067: This release officially introduces the accuracy and performance cores, alongside the previously-existing compatibility core. The accuracy core allows the most accurate SNES emulation ever seen, with every last processor running at the lowest possible clock synchronization level. The performance core allows slower computers the chance to finally use bsnes. It is capable of attaining 60fps in standard games even on an entry-level Intel Atom processor, commonly found in netbooks. The accuracy core is absolutely not meant for casual gaming at all. It is meant solely for getting as close to 100% perfection as possible, no matter the cost to speed. It should only be used for testing, development or debugging. The compatibility core is identical to bsnes v067 and earlier, but is now roughly 10% faster. This is the default and recommended core for casual gaming. The performance core contains an entirely new S-CPU core, with range-tested IRQs; and uses blargg's heavily-optimized S-DSP core directly. Although there are very minor accuracy tradeoffs to increase speed, I am confident that the performance core is still more accurate and compatible than any other SNES emulator. The S-CPU, S-SMP, S-DSP, SuperFX and SA-1 processors are all clock-based, just as in the accuracy and compatibility cores; and as always, there are zero game-specific hacks. Its compatibility is still well above 99%, running even the most challenging games flawlessly. If you have held off from using bsnes in the past due to its system requirements, please give the performance core a try. I think you will be impressed. I'm also not finished: I believe performance can be increased even further. I would also strongly suggest Windows Vista and Windows 7 users to take advantage of the new XAudio2 driver by OV2. Not only does it give you a performance boost, it also lowers latency and provides better sound by way of skipping an API emulation layer. 
Changelog: - Split core into three profiles: accuracy, compatibility and performance - Accuracy core now takes advantage of variable-bitlength integers (eg uint24_t) - Performance core uses a new S-CPU core, written from scratch for speed - Performance core uses blargg's snes_dsp library for S-DSP emulation - Binaries are now compiled using GCC 4.5 - Added a workaround in the SA-1 core for a bug in GCC 4.5+ - The clock-based S-PPU renderer has greatly improved OAM emulation; fixing Winter Gold and Megalomania rendering issues - Corrected pseudo-hires color math in the clock-based S-PPU renderer; fixing Super Buster Bros backgrounds - Fixed a clamping bug in the Cx4 16-bit triangle operation [Jonas Quinn]; fixing Mega Man X2 "gained weapon" star background effect - Updated video renderer to properly handle mixed-resolution screens with interlace enabled; fixing Air Strike Patrol level briefing screen - Added mightymo's 2010-08-19 cheat code pack - Windows port: added XAudio2 output support [OV2] - Source: major code restructuring; virtual base classes for processor - cores removed, build system heavily modified, etc.
This commit is contained in:
@@ -1,453 +0,0 @@
|
||||
#undef interface
|
||||
#define interface struct
|
||||
#include <d3d9.h>
|
||||
#include <d3dx9.h>
|
||||
#undef interface
|
||||
|
||||
#define D3DVERTEX (D3DFVF_XYZRHW | D3DFVF_TEX1)
|
||||
|
||||
typedef HRESULT (__stdcall *EffectProc)(LPDIRECT3DDEVICE9, LPCVOID, UINT, D3DXMACRO const*, LPD3DXINCLUDE, DWORD, LPD3DXEFFECTPOOL, LPD3DXEFFECT*, LPD3DXBUFFER*);
|
||||
typedef HRESULT (__stdcall *TextureProc)(LPDIRECT3DDEVICE9, LPCTSTR, LPDIRECT3DTEXTURE9*);
|
||||
|
||||
namespace ruby {
|
||||
|
||||
class pVideoD3D {
|
||||
public:
|
||||
LPDIRECT3D9 lpd3d;
|
||||
LPDIRECT3DDEVICE9 device;
|
||||
LPDIRECT3DVERTEXBUFFER9 vertex_buffer, *vertex_ptr;
|
||||
D3DPRESENT_PARAMETERS presentation;
|
||||
D3DSURFACE_DESC d3dsd;
|
||||
D3DLOCKED_RECT d3dlr;
|
||||
D3DRASTER_STATUS d3drs;
|
||||
D3DCAPS9 d3dcaps;
|
||||
LPDIRECT3DTEXTURE9 texture;
|
||||
LPDIRECT3DSURFACE9 surface;
|
||||
LPD3DXEFFECT effect;
|
||||
string shaderSource;
|
||||
|
||||
bool lost;
|
||||
unsigned iwidth, iheight;
|
||||
|
||||
struct d3dvertex {
|
||||
float x, y, z, rhw; //screen coords
|
||||
float u, v; //texture coords
|
||||
};
|
||||
|
||||
struct {
|
||||
uint32_t t_usage, v_usage;
|
||||
uint32_t t_pool, v_pool;
|
||||
uint32_t lock;
|
||||
uint32_t filter;
|
||||
} flags;
|
||||
|
||||
struct {
|
||||
bool dynamic; //device supports dynamic textures
|
||||
bool shader; //device supports pixel shaders
|
||||
} caps;
|
||||
|
||||
struct {
|
||||
HWND handle;
|
||||
bool synchronize;
|
||||
unsigned filter;
|
||||
|
||||
unsigned width;
|
||||
unsigned height;
|
||||
} settings;
|
||||
|
||||
struct {
|
||||
unsigned width;
|
||||
unsigned height;
|
||||
} state;
|
||||
|
||||
bool cap(const string& name) {
|
||||
if(name == Video::Handle) return true;
|
||||
if(name == Video::Synchronize) return true;
|
||||
if(name == Video::Filter) return true;
|
||||
if(name == Video::FragmentShader) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
any get(const string& name) {
|
||||
if(name == Video::Handle) return (uintptr_t)settings.handle;
|
||||
if(name == Video::Synchronize) return settings.synchronize;
|
||||
if(name == Video::Filter) return settings.filter;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool set(const string& name, const any& value) {
|
||||
if(name == Video::Handle) {
|
||||
settings.handle = (HWND)any_cast<uintptr_t>(value);
|
||||
return true;
|
||||
}
|
||||
|
||||
if(name == Video::Synchronize) {
|
||||
settings.synchronize = any_cast<bool>(value);
|
||||
return true;
|
||||
}
|
||||
|
||||
if(name == Video::Filter) {
|
||||
settings.filter = any_cast<unsigned>(value);
|
||||
if(lpd3d) update_filter();
|
||||
return true;
|
||||
}
|
||||
|
||||
if(name == Video::FragmentShader) {
|
||||
set_fragment_shader(any_cast<const char*>(value));
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool recover() {
|
||||
if(!device) return false;
|
||||
|
||||
if(lost) {
|
||||
release_resources();
|
||||
if(device->Reset(&presentation) != D3D_OK) return false;
|
||||
}
|
||||
|
||||
lost = false;
|
||||
|
||||
device->SetDialogBoxMode(false);
|
||||
|
||||
device->SetTextureStageState(0, D3DTSS_COLOROP, D3DTOP_SELECTARG1);
|
||||
device->SetTextureStageState(0, D3DTSS_COLORARG1, D3DTA_TEXTURE);
|
||||
device->SetTextureStageState(0, D3DTSS_COLORARG2, D3DTA_DIFFUSE);
|
||||
|
||||
device->SetTextureStageState(0, D3DTSS_ALPHAOP, D3DTOP_SELECTARG1);
|
||||
device->SetTextureStageState(0, D3DTSS_ALPHAARG1, D3DTA_TEXTURE);
|
||||
device->SetTextureStageState(0, D3DTSS_ALPHAARG2, D3DTA_DIFFUSE);
|
||||
|
||||
device->SetRenderState(D3DRS_LIGHTING, false);
|
||||
device->SetRenderState(D3DRS_ZENABLE, false);
|
||||
device->SetRenderState(D3DRS_CULLMODE, D3DCULL_NONE);
|
||||
|
||||
device->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_SRCALPHA);
|
||||
device->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA);
|
||||
device->SetRenderState(D3DRS_ALPHABLENDENABLE, false);
|
||||
|
||||
device->SetVertexShader(NULL);
|
||||
device->SetFVF(D3DVERTEX);
|
||||
|
||||
device->CreateVertexBuffer(sizeof(d3dvertex) * 4, flags.v_usage, D3DVERTEX, (D3DPOOL)flags.v_pool, &vertex_buffer, NULL);
|
||||
iwidth = 0;
|
||||
iheight = 0;
|
||||
resize(settings.width = 256, settings.height = 256);
|
||||
update_filter();
|
||||
clear();
|
||||
return true;
|
||||
}
|
||||
|
||||
//round n up to the nearest power of two; exact powers of two map to themselves.
//uses the classic bit-smearing trick: propagate the highest set bit of (n - 1)
//into every lower position, then add one.
unsigned rounded_power_of_two(unsigned n) {
  if(n == 0) return 1;  //bugfix: n-- underflowed for zero input and the function returned 0
  n--;
  n |= n >> 1;
  n |= n >> 2;
  n |= n >> 4;
  n |= n >> 8;
  n |= n >> 16;
  return n + 1;
}
|
||||
|
||||
void resize(unsigned width, unsigned height) {
|
||||
if(iwidth >= width && iheight >= height) return;
|
||||
|
||||
iwidth = rounded_power_of_two(max(width, iwidth ));
|
||||
iheight = rounded_power_of_two(max(height, iheight));
|
||||
|
||||
if(d3dcaps.MaxTextureWidth < iwidth || d3dcaps.MaxTextureWidth < iheight) {
|
||||
//TODO: attempt to handle this more gracefully
|
||||
return;
|
||||
}
|
||||
|
||||
if(texture) texture->Release();
|
||||
device->CreateTexture(iwidth, iheight, 1, flags.t_usage, D3DFMT_X8R8G8B8, (D3DPOOL)flags.t_pool, &texture, NULL);
|
||||
}
|
||||
|
||||
void update_filter() {
|
||||
if(!device) return;
|
||||
if(lost && !recover()) return;
|
||||
|
||||
switch(settings.filter) { default:
|
||||
case Video::FilterPoint: flags.filter = D3DTEXF_POINT; break;
|
||||
case Video::FilterLinear: flags.filter = D3DTEXF_LINEAR; break;
|
||||
}
|
||||
|
||||
device->SetSamplerState(0, D3DSAMP_MINFILTER, flags.filter);
|
||||
device->SetSamplerState(0, D3DSAMP_MAGFILTER, flags.filter);
|
||||
}
|
||||
|
||||
// Vertex format:
|
||||
//
|
||||
// 0----------1
|
||||
// | /|
|
||||
// | / |
|
||||
// | / |
|
||||
// | / |
|
||||
// | / |
|
||||
// 2----------3
|
||||
//
|
||||
// (x,y) screen coords, in pixels
|
||||
// (u,v) texture coords, betweeen 0.0 (top, left) to 1.0 (bottom, right)
|
||||
void set_vertex(
|
||||
uint32_t px, uint32_t py, uint32_t pw, uint32_t ph,
|
||||
uint32_t tw, uint32_t th,
|
||||
uint32_t x, uint32_t y, uint32_t w, uint32_t h
|
||||
) {
|
||||
d3dvertex vertex[4];
|
||||
vertex[0].x = vertex[2].x = (double)(x - 0.5);
|
||||
vertex[1].x = vertex[3].x = (double)(x + w - 0.5);
|
||||
vertex[0].y = vertex[1].y = (double)(y - 0.5);
|
||||
vertex[2].y = vertex[3].y = (double)(y + h - 0.5);
|
||||
|
||||
//Z-buffer and RHW are unused for 2D blit, set to normal values
|
||||
vertex[0].z = vertex[1].z = vertex[2].z = vertex[3].z = 0.0;
|
||||
vertex[0].rhw = vertex[1].rhw = vertex[2].rhw = vertex[3].rhw = 1.0;
|
||||
|
||||
double rw = (double)w / (double)pw * (double)tw;
|
||||
double rh = (double)h / (double)ph * (double)th;
|
||||
vertex[0].u = vertex[2].u = (double)(px ) / rw;
|
||||
vertex[1].u = vertex[3].u = (double)(px + w) / rw;
|
||||
vertex[0].v = vertex[1].v = (double)(py ) / rh;
|
||||
vertex[2].v = vertex[3].v = (double)(py + h) / rh;
|
||||
|
||||
vertex_buffer->Lock(0, sizeof(d3dvertex) * 4, (void**)&vertex_ptr, 0);
|
||||
memcpy(vertex_ptr, vertex, sizeof(d3dvertex) * 4);
|
||||
vertex_buffer->Unlock();
|
||||
|
||||
device->SetStreamSource(0, vertex_buffer, 0, sizeof(d3dvertex));
|
||||
}
|
||||
|
||||
void clear() {
|
||||
if(lost && !recover()) return;
|
||||
|
||||
texture->GetLevelDesc(0, &d3dsd);
|
||||
texture->GetSurfaceLevel(0, &surface);
|
||||
|
||||
if(surface) {
|
||||
device->ColorFill(surface, 0, D3DCOLOR_XRGB(0x00, 0x00, 0x00));
|
||||
surface->Release();
|
||||
}
|
||||
|
||||
//clear primary display and all backbuffers
|
||||
for(unsigned i = 0; i < 3; i++) {
|
||||
device->Clear(0, 0, D3DCLEAR_TARGET, D3DCOLOR_XRGB(0x00, 0x00, 0x00), 1.0f, 0);
|
||||
device->Present(0, 0, 0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
bool lock(uint32_t *&data, unsigned &pitch, unsigned width, unsigned height) {
|
||||
if(lost && !recover()) return false;
|
||||
|
||||
if(width != settings.width || height != settings.height) {
|
||||
resize(settings.width = width, settings.height = height);
|
||||
}
|
||||
|
||||
texture->GetLevelDesc(0, &d3dsd);
|
||||
texture->GetSurfaceLevel(0, &surface);
|
||||
|
||||
surface->LockRect(&d3dlr, 0, flags.lock);
|
||||
pitch = d3dlr.Pitch;
|
||||
return data = (uint32_t*)d3dlr.pBits;
|
||||
}
|
||||
|
||||
void unlock() {
|
||||
surface->UnlockRect();
|
||||
surface->Release();
|
||||
}
|
||||
|
||||
void refresh() {
|
||||
if(lost && !recover()) return;
|
||||
|
||||
RECT rd, rs; //dest, source rectangles
|
||||
GetClientRect(settings.handle, &rd);
|
||||
SetRect(&rs, 0, 0, settings.width, settings.height);
|
||||
|
||||
//if output size changed, driver must be re-initialized.
|
||||
//failure to do so causes scaling issues on some video drivers.
|
||||
if(state.width != rd.right || state.height != rd.bottom) {
|
||||
init();
|
||||
set_fragment_shader(shaderSource);
|
||||
return;
|
||||
}
|
||||
|
||||
if(caps.shader && effect) {
|
||||
device->BeginScene();
|
||||
set_vertex(0, 0, settings.width, settings.height, iwidth, iheight, 0, 0, rd.right, rd.bottom);
|
||||
|
||||
D3DXVECTOR4 rubyTextureSize;
|
||||
rubyTextureSize.x = iwidth;
|
||||
rubyTextureSize.y = iheight;
|
||||
rubyTextureSize.z = 1.0 / iheight;
|
||||
rubyTextureSize.w = 1.0 / iwidth;
|
||||
effect->SetVector("rubyTextureSize", &rubyTextureSize);
|
||||
|
||||
D3DXVECTOR4 rubyInputSize;
|
||||
rubyInputSize.x = settings.width;
|
||||
rubyInputSize.y = settings.height;
|
||||
rubyInputSize.z = 1.0 / settings.height;
|
||||
rubyInputSize.w = 1.0 / settings.width;
|
||||
effect->SetVector("rubyInputSize", &rubyInputSize);
|
||||
|
||||
D3DXVECTOR4 rubyOutputSize;
|
||||
rubyOutputSize.x = rd.right;
|
||||
rubyOutputSize.y = rd.bottom;
|
||||
rubyOutputSize.z = 1.0 / rd.bottom;
|
||||
rubyOutputSize.w = 1.0 / rd.right;
|
||||
effect->SetVector("rubyOutputSize", &rubyOutputSize);
|
||||
|
||||
UINT passes;
|
||||
effect->Begin(&passes, 0);
|
||||
effect->SetTexture("rubyTexture", texture);
|
||||
device->SetTexture(0, texture);
|
||||
for(unsigned pass = 0; pass < passes; pass++) {
|
||||
effect->BeginPass(pass);
|
||||
device->DrawPrimitive(D3DPT_TRIANGLESTRIP, 0, 2);
|
||||
effect->EndPass();
|
||||
}
|
||||
effect->End();
|
||||
device->EndScene();
|
||||
} else {
|
||||
device->BeginScene();
|
||||
set_vertex(0, 0, settings.width, settings.height, iwidth, iheight, 0, 0, rd.right, rd.bottom);
|
||||
device->SetTexture(0, texture);
|
||||
device->DrawPrimitive(D3DPT_TRIANGLESTRIP, 0, 2);
|
||||
device->EndScene();
|
||||
}
|
||||
|
||||
if(settings.synchronize) {
|
||||
while(true) {
|
||||
D3DRASTER_STATUS status;
|
||||
device->GetRasterStatus(0, &status);
|
||||
if(status.InVBlank == true) break;
|
||||
}
|
||||
}
|
||||
|
||||
if(device->Present(0, 0, 0, 0) == D3DERR_DEVICELOST) lost = true;
|
||||
}
|
||||
|
||||
void set_fragment_shader(const char *source) {
|
||||
if(!caps.shader) return;
|
||||
|
||||
if(effect) {
|
||||
effect->Release();
|
||||
effect = NULL;
|
||||
}
|
||||
|
||||
if(!source || !*source) {
|
||||
shaderSource = "";
|
||||
return;
|
||||
}
|
||||
|
||||
shaderSource = source;
|
||||
|
||||
HMODULE d3dx;
|
||||
for(unsigned i = 0; i < 256; i++) {
|
||||
char t[256];
|
||||
sprintf(t, "d3dx9_%u.dll", i);
|
||||
d3dx = LoadLibrary(t);
|
||||
if(d3dx) break;
|
||||
}
|
||||
if(!d3dx) d3dx = LoadLibrary("d3dx9.dll");
|
||||
if(!d3dx) return;
|
||||
|
||||
EffectProc effectProc = (EffectProc)GetProcAddress(d3dx, "D3DXCreateEffect");
|
||||
TextureProc textureProc = (TextureProc)GetProcAddress(d3dx, "D3DXCreateTextureFromFileA");
|
||||
|
||||
LPD3DXBUFFER pBufferErrors = NULL;
|
||||
effectProc(device, shaderSource, lstrlen(source), NULL, NULL, 0, NULL, &effect, &pBufferErrors);
|
||||
|
||||
D3DXHANDLE hTech;
|
||||
effect->FindNextValidTechnique(NULL, &hTech);
|
||||
effect->SetTechnique(hTech);
|
||||
}
|
||||
|
||||
bool init() {
|
||||
term();
|
||||
|
||||
RECT rd;
|
||||
GetClientRect(settings.handle, &rd);
|
||||
state.width = rd.right;
|
||||
state.height = rd.bottom;
|
||||
|
||||
lpd3d = Direct3DCreate9(D3D_SDK_VERSION);
|
||||
if(!lpd3d) return false;
|
||||
|
||||
memset(&presentation, 0, sizeof(presentation));
|
||||
presentation.Flags = D3DPRESENTFLAG_VIDEO;
|
||||
presentation.SwapEffect = D3DSWAPEFFECT_FLIP;
|
||||
presentation.hDeviceWindow = settings.handle;
|
||||
presentation.BackBufferCount = 1;
|
||||
presentation.MultiSampleType = D3DMULTISAMPLE_NONE;
|
||||
presentation.MultiSampleQuality = 0;
|
||||
presentation.EnableAutoDepthStencil = false;
|
||||
presentation.AutoDepthStencilFormat = D3DFMT_UNKNOWN;
|
||||
presentation.PresentationInterval = D3DPRESENT_INTERVAL_IMMEDIATE;
|
||||
presentation.Windowed = true;
|
||||
presentation.BackBufferFormat = D3DFMT_UNKNOWN;
|
||||
presentation.BackBufferWidth = 0;
|
||||
presentation.BackBufferHeight = 0;
|
||||
|
||||
if(lpd3d->CreateDevice(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, settings.handle,
|
||||
D3DCREATE_SOFTWARE_VERTEXPROCESSING, &presentation, &device) != D3D_OK) {
|
||||
return false;
|
||||
}
|
||||
|
||||
device->GetDeviceCaps(&d3dcaps);
|
||||
|
||||
caps.dynamic = bool(d3dcaps.Caps2 & D3DCAPS2_DYNAMICTEXTURES);
|
||||
caps.shader = d3dcaps.PixelShaderVersion > D3DPS_VERSION(1, 4);
|
||||
|
||||
if(caps.dynamic == true) {
|
||||
flags.t_usage = D3DUSAGE_DYNAMIC;
|
||||
flags.v_usage = D3DUSAGE_WRITEONLY | D3DUSAGE_DYNAMIC;
|
||||
flags.t_pool = D3DPOOL_DEFAULT;
|
||||
flags.v_pool = D3DPOOL_DEFAULT;
|
||||
flags.lock = D3DLOCK_NOSYSLOCK | D3DLOCK_DISCARD;
|
||||
} else {
|
||||
flags.t_usage = 0;
|
||||
flags.v_usage = D3DUSAGE_WRITEONLY;
|
||||
flags.t_pool = D3DPOOL_MANAGED;
|
||||
flags.v_pool = D3DPOOL_MANAGED;
|
||||
flags.lock = D3DLOCK_NOSYSLOCK | D3DLOCK_DISCARD;
|
||||
}
|
||||
|
||||
lost = false;
|
||||
recover();
|
||||
return true;
|
||||
}
|
||||
|
||||
void release_resources() {
|
||||
if(effect) { effect->Release(); effect = 0; }
|
||||
if(vertex_buffer) { vertex_buffer->Release(); vertex_buffer = 0; }
|
||||
if(surface) { surface->Release(); surface = 0; }
|
||||
if(texture) { texture->Release(); texture = 0; }
|
||||
}
|
||||
|
||||
void term() {
|
||||
release_resources();
|
||||
if(device) { device->Release(); device = 0; }
|
||||
if(lpd3d) { lpd3d->Release(); lpd3d = 0; }
|
||||
}
|
||||
|
||||
pVideoD3D() {
|
||||
effect = 0;
|
||||
vertex_buffer = 0;
|
||||
surface = 0;
|
||||
texture = 0;
|
||||
device = 0;
|
||||
lpd3d = 0;
|
||||
lost = true;
|
||||
|
||||
settings.handle = 0;
|
||||
settings.synchronize = false;
|
||||
settings.filter = Video::FilterLinear;
|
||||
}
|
||||
};
|
||||
|
||||
DeclareVideo(D3D)
|
||||
|
||||
};
|
||||
|
||||
#undef D3DVERTEX
|
@@ -1,186 +0,0 @@
|
||||
#include <ddraw.h>
|
||||
|
||||
namespace ruby {
|
||||
|
||||
class pVideoDD {
|
||||
public:
|
||||
LPDIRECTDRAW lpdd;
|
||||
LPDIRECTDRAW7 lpdd7;
|
||||
LPDIRECTDRAWSURFACE7 screen, raster;
|
||||
LPDIRECTDRAWCLIPPER clipper;
|
||||
DDSURFACEDESC2 ddsd;
|
||||
DDSCAPS2 ddscaps;
|
||||
unsigned iwidth, iheight;
|
||||
|
||||
struct {
|
||||
HWND handle;
|
||||
bool synchronize;
|
||||
|
||||
unsigned width;
|
||||
unsigned height;
|
||||
} settings;
|
||||
|
||||
bool cap(const string& name) {
|
||||
if(name == Video::Handle) return true;
|
||||
if(name == Video::Synchronize) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
any get(const string& name) {
|
||||
if(name == Video::Handle) return (uintptr_t)settings.handle;
|
||||
if(name == Video::Synchronize) return settings.synchronize;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool set(const string& name, const any& value) {
|
||||
if(name == Video::Handle) {
|
||||
settings.handle = (HWND)any_cast<uintptr_t>(value);
|
||||
return true;
|
||||
}
|
||||
|
||||
if(name == Video::Synchronize) {
|
||||
settings.synchronize = any_cast<bool>(value);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void resize(unsigned width, unsigned height) {
|
||||
if(iwidth >= width && iheight >= height) return;
|
||||
|
||||
iwidth = max(width, iwidth);
|
||||
iheight = max(height, iheight);
|
||||
|
||||
if(raster) raster->Release();
|
||||
|
||||
screen->GetSurfaceDesc(&ddsd);
|
||||
int depth = ddsd.ddpfPixelFormat.dwRGBBitCount;
|
||||
if(depth == 32) goto try_native_surface;
|
||||
|
||||
memset(&ddsd, 0, sizeof(DDSURFACEDESC2));
|
||||
ddsd.dwSize = sizeof(DDSURFACEDESC2);
|
||||
ddsd.dwFlags = DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT;
|
||||
ddsd.ddsCaps.dwCaps = DDSCAPS_OFFSCREENPLAIN | DDSCAPS_VIDEOMEMORY; //DDSCAPS_SYSTEMMEMORY
|
||||
ddsd.dwWidth = iwidth;
|
||||
ddsd.dwHeight = iheight;
|
||||
|
||||
ddsd.ddpfPixelFormat.dwSize = sizeof(DDPIXELFORMAT);
|
||||
ddsd.ddpfPixelFormat.dwFlags = DDPF_RGB;
|
||||
ddsd.ddpfPixelFormat.dwRGBBitCount = 32;
|
||||
ddsd.ddpfPixelFormat.dwRBitMask = 0xff0000;
|
||||
ddsd.ddpfPixelFormat.dwGBitMask = 0x00ff00;
|
||||
ddsd.ddpfPixelFormat.dwBBitMask = 0x0000ff;
|
||||
|
||||
if(lpdd7->CreateSurface(&ddsd, &raster, 0) == DD_OK) return clear();
|
||||
|
||||
try_native_surface:
|
||||
memset(&ddsd, 0, sizeof(DDSURFACEDESC2));
|
||||
ddsd.dwSize = sizeof(DDSURFACEDESC2);
|
||||
ddsd.dwFlags = DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT;
|
||||
ddsd.ddsCaps.dwCaps = DDSCAPS_OFFSCREENPLAIN | DDSCAPS_VIDEOMEMORY; //DDSCAPS_SYSTEMMEMORY
|
||||
ddsd.dwWidth = iwidth;
|
||||
ddsd.dwHeight = iheight;
|
||||
|
||||
if(lpdd7->CreateSurface(&ddsd, &raster, 0) == DD_OK) return clear();
|
||||
}
|
||||
|
||||
void clear() {
|
||||
DDBLTFX fx;
|
||||
fx.dwSize = sizeof(DDBLTFX);
|
||||
fx.dwFillColor = 0x00000000;
|
||||
screen->Blt(0, 0, 0, DDBLT_WAIT | DDBLT_COLORFILL, &fx);
|
||||
raster->Blt(0, 0, 0, DDBLT_WAIT | DDBLT_COLORFILL, &fx);
|
||||
}
|
||||
|
||||
bool lock(uint32_t *&data, unsigned &pitch, unsigned width, unsigned height) {
|
||||
if(width != settings.width || height != settings.height) {
|
||||
resize(settings.width = width, settings.height = height);
|
||||
}
|
||||
|
||||
if(raster->Lock(0, &ddsd, DDLOCK_WAIT, 0) != DD_OK) {
|
||||
raster->Restore();
|
||||
if(raster->Lock(0, &ddsd, DDLOCK_WAIT, 0) != DD_OK) return false;
|
||||
}
|
||||
pitch = ddsd.lPitch;
|
||||
return data = (uint32_t*)ddsd.lpSurface;
|
||||
}
|
||||
|
||||
void unlock() {
|
||||
raster->Unlock(0);
|
||||
}
|
||||
|
||||
void refresh() {
|
||||
if(settings.synchronize) {
|
||||
while(true) {
|
||||
BOOL in_vblank;
|
||||
lpdd7->GetVerticalBlankStatus(&in_vblank);
|
||||
if(in_vblank == true) break;
|
||||
}
|
||||
}
|
||||
|
||||
HRESULT hr;
|
||||
RECT rd, rs;
|
||||
SetRect(&rs, 0, 0, settings.width, settings.height);
|
||||
|
||||
POINT p = { 0, 0 };
|
||||
ClientToScreen(settings.handle, &p);
|
||||
GetClientRect(settings.handle, &rd);
|
||||
OffsetRect(&rd, p.x, p.y);
|
||||
|
||||
if(screen->Blt(&rd, raster, &rs, DDBLT_WAIT, 0) == DDERR_SURFACELOST) {
|
||||
screen->Restore();
|
||||
raster->Restore();
|
||||
}
|
||||
}
|
||||
|
||||
bool init() {
|
||||
term();
|
||||
|
||||
DirectDrawCreate(0, &lpdd, 0);
|
||||
lpdd->QueryInterface(IID_IDirectDraw7, (void**)&lpdd7);
|
||||
if(lpdd) { lpdd->Release(); lpdd = 0; }
|
||||
|
||||
lpdd7->SetCooperativeLevel(settings.handle, DDSCL_NORMAL);
|
||||
|
||||
memset(&ddsd, 0, sizeof(DDSURFACEDESC2));
|
||||
ddsd.dwSize = sizeof(DDSURFACEDESC2);
|
||||
|
||||
ddsd.dwFlags = DDSD_CAPS;
|
||||
ddsd.ddsCaps.dwCaps = DDSCAPS_PRIMARYSURFACE;
|
||||
lpdd7->CreateSurface(&ddsd, &screen, 0);
|
||||
|
||||
lpdd7->CreateClipper(0, &clipper, 0);
|
||||
clipper->SetHWnd(0, settings.handle);
|
||||
screen->SetClipper(clipper);
|
||||
|
||||
raster = 0;
|
||||
iwidth = 0;
|
||||
iheight = 0;
|
||||
resize(settings.width = 256, settings.height = 256);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void term() {
|
||||
if(clipper) { clipper->Release(); clipper = 0; }
|
||||
if(raster) { raster->Release(); raster = 0; }
|
||||
if(screen) { screen->Release(); screen = 0; }
|
||||
if(lpdd7) { lpdd7->Release(); lpdd7 = 0; }
|
||||
if(lpdd) { lpdd->Release(); lpdd = 0; }
|
||||
}
|
||||
|
||||
pVideoDD() {
|
||||
lpdd = 0;
|
||||
lpdd7 = 0;
|
||||
screen = 0;
|
||||
raster = 0;
|
||||
clipper = 0;
|
||||
|
||||
settings.handle = 0;
|
||||
}
|
||||
};
|
||||
|
||||
DeclareVideo(DD)
|
||||
|
||||
};
|
@@ -1,100 +0,0 @@
|
||||
#include <assert.h>
|
||||
|
||||
namespace ruby {
|
||||
|
||||
class pVideoGDI {
|
||||
public:
|
||||
uint32_t *buffer;
|
||||
HBITMAP bitmap;
|
||||
HDC bitmapdc;
|
||||
BITMAPINFO bmi;
|
||||
|
||||
struct {
|
||||
HWND handle;
|
||||
|
||||
unsigned width;
|
||||
unsigned height;
|
||||
} settings;
|
||||
|
||||
bool cap(const string& name) {
|
||||
if(name == Video::Handle) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
any get(const string& name) {
|
||||
if(name == Video::Handle) return (uintptr_t)settings.handle;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool set(const string& name, const any& value) {
|
||||
if(name == Video::Handle) {
|
||||
settings.handle = (HWND)any_cast<uintptr_t>(value);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool lock(uint32_t *&data, unsigned &pitch, unsigned width, unsigned height) {
|
||||
settings.width = width;
|
||||
settings.height = height;
|
||||
|
||||
pitch = 1024 * 4;
|
||||
return data = buffer;
|
||||
}
|
||||
|
||||
void unlock() {}
|
||||
|
||||
void clear() {}
|
||||
|
||||
void refresh() {
|
||||
RECT rc;
|
||||
GetClientRect(settings.handle, &rc);
|
||||
|
||||
SetDIBits(bitmapdc, bitmap, 0, settings.height, (void*)buffer, &bmi, DIB_RGB_COLORS);
|
||||
HDC hdc = GetDC(settings.handle);
|
||||
StretchBlt(hdc, rc.left, rc.top, rc.right, rc.bottom, bitmapdc, 0, 1024 - settings.height, settings.width, settings.height, SRCCOPY);
|
||||
ReleaseDC(settings.handle, hdc);
|
||||
}
|
||||
|
||||
bool init() {
|
||||
HDC hdc = GetDC(settings.handle);
|
||||
bitmapdc = CreateCompatibleDC(hdc);
|
||||
assert(bitmapdc);
|
||||
bitmap = CreateCompatibleBitmap(hdc, 1024, 1024);
|
||||
assert(bitmap);
|
||||
SelectObject(bitmapdc, bitmap);
|
||||
ReleaseDC(settings.handle, hdc);
|
||||
|
||||
memset(&bmi, 0, sizeof(BITMAPINFO));
|
||||
bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
|
||||
bmi.bmiHeader.biWidth = 1024;
|
||||
bmi.bmiHeader.biHeight = -1024;
|
||||
bmi.bmiHeader.biPlanes = 1;
|
||||
bmi.bmiHeader.biBitCount = 32; //biBitCount of 15 is invalid, biBitCount of 16 is really RGB555
|
||||
bmi.bmiHeader.biCompression = BI_RGB;
|
||||
bmi.bmiHeader.biSizeImage = 1024 * 1024 * sizeof(uint32_t);
|
||||
|
||||
settings.width = 256;
|
||||
settings.height = 256;
|
||||
return true;
|
||||
}
|
||||
|
||||
void term() {
|
||||
DeleteObject(bitmap);
|
||||
DeleteDC(bitmapdc);
|
||||
}
|
||||
|
||||
pVideoGDI() {
|
||||
buffer = (uint32_t*)malloc(1024 * 1024 * sizeof(uint32_t));
|
||||
settings.handle = 0;
|
||||
}
|
||||
|
||||
~pVideoGDI() {
|
||||
if(buffer) free(buffer);
|
||||
}
|
||||
};
|
||||
|
||||
DeclareVideo(GDI)
|
||||
|
||||
};
|
@@ -1,231 +0,0 @@
|
||||
/*
|
||||
video.glx
|
||||
author: byuu
|
||||
license: public domain
|
||||
last updated: 2010-01-05
|
||||
|
||||
Design notes:
|
||||
SGI's GLX is the X11/Xlib interface to OpenGL.
|
||||
At the time of this writing, there are three relevant versions of the API: versions 1.2, 1.3 and 1.4.
|
||||
|
||||
Version 1.2 was released on March 4th, 1997.
|
||||
Version 1.3 was released on October 19th, 1998.
|
||||
Version 1.4 was released on December 16th, 2005.
|
||||
|
||||
Despite version 1.3 being roughly ten years old at this time, there are still many modern X11 GLX drivers
|
||||
that lack full support for the specification. Most notable would be the official video drivers from ATI.
|
||||
Given this, 1.4 support is pretty much hopeless to target.
|
||||
|
||||
Luckily, each version has been designed to be backwards compatible with the previous version. As well,
|
||||
version 1.2 is wholly sufficient, albeit less convenient, to implement this video module.
|
||||
|
||||
Therefore, for the purpose of compatibility, this driver only uses GLX 1.2 or earlier API commands.
|
||||
As well, it only uses raw Xlib API commands, so that it is compatible with any toolkit.
|
||||
*/
|
||||
|
||||
#include "opengl.hpp"
|
||||
|
||||
namespace ruby {
|
||||
|
||||
//returns true once window is mapped (created and displayed onscreen)
|
||||
static Bool glx_wait_for_map_notify(Display *d, XEvent *e, char *arg) {
|
||||
return (e->type == MapNotify) && (e->xmap.window == (Window)arg);
|
||||
}
|
||||
|
||||
class pVideoGLX : public OpenGL {
|
||||
public:
|
||||
int (*glSwapInterval)(int);
|
||||
|
||||
Display *display;
|
||||
int screen;
|
||||
Window xwindow;
|
||||
Colormap colormap;
|
||||
GLXContext glxcontext;
|
||||
GLXWindow glxwindow;
|
||||
|
||||
struct {
|
||||
int version_major, version_minor;
|
||||
bool double_buffer;
|
||||
bool is_direct;
|
||||
} glx;
|
||||
|
||||
struct {
|
||||
Window handle;
|
||||
bool synchronize;
|
||||
unsigned filter;
|
||||
|
||||
unsigned width;
|
||||
unsigned height;
|
||||
} settings;
|
||||
|
||||
bool cap(const string& name) {
|
||||
if(name == Video::Handle) return true;
|
||||
if(name == Video::Synchronize) return true;
|
||||
if(name == Video::Filter) return true;
|
||||
if(name == Video::FragmentShader) return true;
|
||||
if(name == Video::VertexShader) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
any get(const string& name) {
|
||||
if(name == Video::Handle) return (uintptr_t)settings.handle;
|
||||
if(name == Video::Synchronize) return settings.synchronize;
|
||||
if(name == Video::Filter) return settings.filter;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool set(const string& name, const any& value) {
|
||||
if(name == Video::Handle) {
|
||||
settings.handle = any_cast<uintptr_t>(value);
|
||||
return true;
|
||||
}
|
||||
|
||||
if(name == Video::Synchronize) {
|
||||
if(settings.synchronize != any_cast<bool>(value)) {
|
||||
settings.synchronize = any_cast<bool>(value);
|
||||
if(glSwapInterval) glSwapInterval(settings.synchronize);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if(name == Video::Filter) {
|
||||
settings.filter = any_cast<unsigned>(value);
|
||||
return true;
|
||||
}
|
||||
|
||||
if(name == Video::FragmentShader) {
|
||||
OpenGL::set_fragment_shader(any_cast<const char*>(value));
|
||||
return true;
|
||||
}
|
||||
|
||||
if(name == Video::VertexShader) {
|
||||
OpenGL::set_vertex_shader(any_cast<const char*>(value));
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool lock(uint32_t *&data, unsigned &pitch, unsigned width, unsigned height) {
|
||||
resize(width, height);
|
||||
settings.width = width;
|
||||
settings.height = height;
|
||||
return OpenGL::lock(data, pitch);
|
||||
}
|
||||
|
||||
void unlock() {
|
||||
}
|
||||
|
||||
void clear() {
|
||||
OpenGL::clear();
|
||||
if(glx.double_buffer) glXSwapBuffers(display, glxwindow);
|
||||
}
|
||||
|
||||
void refresh() {
|
||||
//we must ensure that the child window is the same size as the parent window.
|
||||
//unfortunately, we cannot hook the parent window resize event notification,
|
||||
//as we did not create the parent window, nor have any knowledge of the toolkit used.
|
||||
//therefore, inelegant as it may be, we query each window size and resize as needed.
|
||||
XWindowAttributes parent, child;
|
||||
XGetWindowAttributes(display, settings.handle, &parent);
|
||||
XGetWindowAttributes(display, xwindow, &child);
|
||||
if(child.width != parent.width || child.height != parent.height) {
|
||||
XResizeWindow(display, xwindow, parent.width, parent.height);
|
||||
}
|
||||
|
||||
OpenGL::refresh(settings.filter == Video::FilterLinear,
|
||||
settings.width, settings.height, parent.width, parent.height);
|
||||
if(glx.double_buffer) glXSwapBuffers(display, glxwindow);
|
||||
}
|
||||
|
||||
bool init() {
|
||||
term();
|
||||
|
||||
display = XOpenDisplay(0);
|
||||
screen = DefaultScreen(display);
|
||||
glXQueryVersion(display, &glx.version_major, &glx.version_minor);
|
||||
//require GLX 1.2+ API
|
||||
if(glx.version_major < 1 || (glx.version_major == 1 && glx.version_minor < 2)) return false;
|
||||
|
||||
XWindowAttributes window_attributes;
|
||||
XGetWindowAttributes(display, settings.handle, &window_attributes);
|
||||
|
||||
//let GLX determine the best Visual to use for GL output; provide a few hints
|
||||
//note: some video drivers will override double buffering attribute
|
||||
int attributelist[] = { GLX_RGBA, GLX_DOUBLEBUFFER, None };
|
||||
XVisualInfo *vi = glXChooseVisual(display, screen, attributelist);
|
||||
|
||||
//Window settings.handle has already been realized, most likely with DefaultVisual.
|
||||
//GLX requires that the GL output window has the same Visual as the GLX context.
|
||||
//it is not possible to change the Visual of an already realized (created) window.
|
||||
//therefore a new child window, using the same GLX Visual, must be created and binded to settings.handle.
|
||||
colormap = XCreateColormap(display, RootWindow(display, vi->screen), vi->visual, AllocNone);
|
||||
XSetWindowAttributes attributes;
|
||||
attributes.colormap = colormap;
|
||||
attributes.border_pixel = 0;
|
||||
attributes.event_mask = StructureNotifyMask;
|
||||
xwindow = XCreateWindow(display, /* parent = */ settings.handle,
|
||||
/* x = */ 0, /* y = */ 0, window_attributes.width, window_attributes.height,
|
||||
/* border_width = */ 0, vi->depth, InputOutput, vi->visual,
|
||||
CWColormap | CWBorderPixel | CWEventMask, &attributes);
|
||||
XSetWindowBackground(display, xwindow, /* color = */ 0);
|
||||
XMapWindow(display, xwindow);
|
||||
XEvent event;
|
||||
//window must be realized (appear onscreen) before we make the context current
|
||||
XIfEvent(display, &event, glx_wait_for_map_notify, (char*)xwindow);
|
||||
|
||||
glxcontext = glXCreateContext(display, vi, /* sharelist = */ 0, /* direct = */ GL_TRUE);
|
||||
glXMakeCurrent(display, glxwindow = xwindow, glxcontext);
|
||||
|
||||
//read attributes of frame buffer for later use, as requested attributes from above are not always granted
|
||||
int value = 0;
|
||||
glXGetConfig(display, vi, GLX_DOUBLEBUFFER, &value);
|
||||
glx.double_buffer = value;
|
||||
glx.is_direct = glXIsDirect(display, glxcontext);
|
||||
|
||||
OpenGL::init();
|
||||
settings.width = 256;
|
||||
settings.height = 256;
|
||||
|
||||
//vertical synchronization
|
||||
if(!glSwapInterval) glSwapInterval = (int (*)(int))glGetProcAddress("glXSwapIntervalSGI");
|
||||
if(!glSwapInterval) glSwapInterval = (int (*)(int))glGetProcAddress("glXSwapIntervalMESA");
|
||||
if( glSwapInterval) glSwapInterval(settings.synchronize);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//release the GL context, child window, and colormap created by init().
//each handle is zeroed after release, so term() is safe to call repeatedly.
void term() {
  OpenGL::term();  //free texture + staging buffer first, while the context may still be current

  if(glxcontext) {
    glXDestroyContext(display, glxcontext);
    glxcontext = 0;
  }

  if(xwindow) {
    //the child window is unmapped (hidden), not destroyed, here
    XUnmapWindow(display, xwindow);
    xwindow = 0;
  }

  if(colormap) {
    XFreeColormap(display, colormap);
    colormap = 0;
  }
}
|
||||
|
||||
//zero every handle so init()/term() can safely test them before any resource exists
pVideoGLX() : glSwapInterval(0) {
  settings.handle = 0;
  settings.synchronize = false;
  xwindow = 0;
  colormap = 0;
  glxcontext = 0;
  glxwindow = 0;
}

//ensure all X/GLX resources are released on destruction
~pVideoGLX() { term(); }
|
||||
};
|
||||
|
||||
DeclareVideo(GLX)
|
||||
|
||||
};
|
@@ -1,225 +0,0 @@
|
||||
#include <GL/gl.h>
|
||||
|
||||
#if defined(PLATFORM_X)
|
||||
#include <GL/glx.h>
|
||||
#define glGetProcAddress(name) (*glXGetProcAddress)((const GLubyte*)(name))
|
||||
#elif defined(PLATFORM_WIN)
|
||||
#include <GL/glext.h>
|
||||
#define glGetProcAddress(name) wglGetProcAddress(name)
|
||||
#else
|
||||
#error "ruby::OpenGL: unsupported platform"
|
||||
#endif
|
||||
|
||||
PFNGLCREATEPROGRAMPROC glCreateProgram = 0;
|
||||
PFNGLUSEPROGRAMPROC glUseProgram = 0;
|
||||
PFNGLCREATESHADERPROC glCreateShader = 0;
|
||||
PFNGLDELETESHADERPROC glDeleteShader = 0;
|
||||
PFNGLSHADERSOURCEPROC glShaderSource = 0;
|
||||
PFNGLCOMPILESHADERPROC glCompileShader = 0;
|
||||
PFNGLATTACHSHADERPROC glAttachShader = 0;
|
||||
PFNGLDETACHSHADERPROC glDetachShader = 0;
|
||||
PFNGLLINKPROGRAMPROC glLinkProgram = 0;
|
||||
PFNGLGETUNIFORMLOCATIONPROC glGetUniformLocation = 0;
|
||||
PFNGLUNIFORM1IPROC glUniform1i = 0;
|
||||
PFNGLUNIFORM2FVPROC glUniform2fv = 0;
|
||||
PFNGLUNIFORM4FVPROC glUniform4fv = 0;
|
||||
|
||||
//shared OpenGL rasterizer used by the platform video drivers (GLX/WGL).
//maintains one grow-only 2D texture plus a client-side X8R8G8B8 staging buffer;
//each frame is uploaded with glTexSubImage2D and drawn as a screen-aligned quad.
//all methods assume the caller has already made a GL context current.
class OpenGL {
public:
  GLuint gltexture;          //texture that receives the emulated frame
  GLuint glprogram;          //shader program handle (0 when shaders unavailable/unused)
  GLuint fragmentshader;
  GLuint vertexshader;
  bool shader_support;       //true when every required GL shader entry point resolved

  uint32_t *buffer;          //staging pixels, iwidth * iheight entries
  unsigned iwidth, iheight;  //texture/buffer dimensions (never shrink)

  //grow the texture and staging buffer to at least width x height.
  //dimensions are grow-only so alternating frame sizes do not thrash reallocation.
  //note: the old buffer contents are discarded on growth.
  void resize(unsigned width, unsigned height) {
    if(iwidth >= width && iheight >= height) return;

    if(gltexture) glDeleteTextures(1, &gltexture);
    iwidth  = max(width,  iwidth );
    iheight = max(height, iheight);
    if(buffer) delete[] buffer;
    buffer = new uint32_t[iwidth * iheight];

    glGenTextures(1, &gltexture);
    glBindTexture(GL_TEXTURE_2D, gltexture);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, iwidth);  //rows in buffer are iwidth pixels apart
    glTexImage2D(GL_TEXTURE_2D,
      /* mip-map level = */ 0, /* internal format = */ GL_RGB,
      iwidth, iheight, /* border = */ 0, /* format = */ GL_BGRA,
      GL_UNSIGNED_INT_8_8_8_8_REV, buffer);
  }

  //hand the staging buffer to the caller; pitch is in bytes.
  //returns false only if the buffer was never allocated.
  bool lock(uint32_t *&data, unsigned &pitch) {
    pitch = iwidth * sizeof(uint32_t);
    return data = buffer;
  }

  //black out both the staging buffer and the current framebuffer
  void clear() {
    memset(buffer, 0, iwidth * iheight * sizeof(uint32_t));
    glClearColor(0.0, 0.0, 0.0, 1.0);
    glClear(GL_COLOR_BUFFER_BIT);
    glFlush();
  }

  //upload inwidth x inheight pixels from buffer and draw them scaled to
  //outwidth x outheight; smooth selects bilinear (vs nearest) filtering.
  void refresh(bool smooth, unsigned inwidth, unsigned inheight, unsigned outwidth, unsigned outheight) {
    if(shader_support) {
      glUseProgram(glprogram);
      GLint location;

      //publish the standard "ruby*" uniforms so pixel shaders can scale correctly
      float inputSize[2] = { (float)inwidth, (float)inheight };
      location = glGetUniformLocation(glprogram, "rubyInputSize");
      glUniform2fv(location, 1, inputSize);

      float outputSize[2] = { (float)outwidth, (float)outheight };
      location = glGetUniformLocation(glprogram, "rubyOutputSize");
      glUniform2fv(location, 1, outputSize);

      float textureSize[2] = { (float)iwidth, (float)iheight };
      location = glGetUniformLocation(glprogram, "rubyTextureSize");
      glUniform2fv(location, 1, textureSize);
    }

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, smooth ? GL_LINEAR : GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, smooth ? GL_LINEAR : GL_NEAREST);

    //orthographic projection: one GL unit == one output pixel
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, outwidth, 0, outheight, -1.0, 1.0);
    glViewport(0, 0, outwidth, outheight);

    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glPixelStorei(GL_UNPACK_ROW_LENGTH, iwidth);
    glTexSubImage2D(GL_TEXTURE_2D,
      /* mip-map level = */ 0, /* x = */ 0, /* y = */ 0,
      inwidth, inheight, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, buffer);

    //OpenGL projection sets 0,0 as *bottom-left* of screen.
    //therefore, below vertices flip image to support top-left source.
    //texture range = x1:0.0, y1:0.0, x2:1.0, y2:1.0
    //vertex range = x1:0, y1:0, x2:width, y2:height
    double w = double(inwidth) / double(iwidth);    //fraction of texture width in use
    double h = double(inheight) / double(iheight);  //fraction of texture height in use
    int u = outwidth;
    int v = outheight;
    glBegin(GL_TRIANGLE_STRIP);
    glTexCoord2f(0, 0); glVertex3i(0, v, 0);
    glTexCoord2f(w, 0); glVertex3i(u, v, 0);
    glTexCoord2f(0, h); glVertex3i(0, 0, 0);
    glTexCoord2f(w, h); glVertex3i(u, 0, 0);
    glEnd();

    glFlush();

    if(shader_support) {
      glUseProgram(0);  //restore fixed-function pipeline after drawing
    }
  }

  //compile and attach a fragment shader; pass NULL to remove the current one.
  //note: compile/link errors are not reported; the program simply links without the stage
  void set_fragment_shader(const char *source) {
    if(!shader_support) return;

    if(fragmentshader) {
      glDetachShader(glprogram, fragmentshader);
      glDeleteShader(fragmentshader);
      fragmentshader = 0;
    }

    if(source) {
      fragmentshader = glCreateShader(GL_FRAGMENT_SHADER);
      glShaderSource(fragmentshader, 1, &source, 0);
      glCompileShader(fragmentshader);
      glAttachShader(glprogram, fragmentshader);
    }

    glLinkProgram(glprogram);
  }

  //compile and attach a vertex shader; pass NULL to remove the current one.
  void set_vertex_shader(const char *source) {
    if(!shader_support) return;

    if(vertexshader) {
      glDetachShader(glprogram, vertexshader);
      glDeleteShader(vertexshader);
      vertexshader = 0;
    }

    if(source) {
      vertexshader = glCreateShader(GL_VERTEX_SHADER);
      glShaderSource(vertexshader, 1, &source, 0);
      glCompileShader(vertexshader);
      glAttachShader(glprogram, vertexshader);
    }

    glLinkProgram(glprogram);
  }

  //configure baseline GL state, resolve shader entry points, and allocate
  //the initial 256x256 surface. requires a current context.
  void init() {
    //disable unused features
    glDisable(GL_ALPHA_TEST);
    glDisable(GL_BLEND);
    glDisable(GL_DEPTH_TEST);
    glDisable(GL_POLYGON_SMOOTH);
    glDisable(GL_STENCIL_TEST);

    //enable useful and required features
    glEnable(GL_DITHER);
    glEnable(GL_TEXTURE_2D);

    //bind shader functions
    glCreateProgram = (PFNGLCREATEPROGRAMPROC)glGetProcAddress("glCreateProgram");
    glUseProgram = (PFNGLUSEPROGRAMPROC)glGetProcAddress("glUseProgram");
    glCreateShader = (PFNGLCREATESHADERPROC)glGetProcAddress("glCreateShader");
    glDeleteShader = (PFNGLDELETESHADERPROC)glGetProcAddress("glDeleteShader");
    glShaderSource = (PFNGLSHADERSOURCEPROC)glGetProcAddress("glShaderSource");
    glCompileShader = (PFNGLCOMPILESHADERPROC)glGetProcAddress("glCompileShader");
    glAttachShader = (PFNGLATTACHSHADERPROC)glGetProcAddress("glAttachShader");
    glDetachShader = (PFNGLDETACHSHADERPROC)glGetProcAddress("glDetachShader");
    glLinkProgram = (PFNGLLINKPROGRAMPROC)glGetProcAddress("glLinkProgram");
    glGetUniformLocation = (PFNGLGETUNIFORMLOCATIONPROC)glGetProcAddress("glGetUniformLocation");
    glUniform1i = (PFNGLUNIFORM1IPROC)glGetProcAddress("glUniform1i");
    glUniform2fv = (PFNGLUNIFORM2FVPROC)glGetProcAddress("glUniform2fv");
    glUniform4fv = (PFNGLUNIFORM4FVPROC)glGetProcAddress("glUniform4fv");

    //shaders are only usable if the driver exports every required entry point
    shader_support = glCreateProgram && glUseProgram && glCreateShader
    && glDeleteShader && glShaderSource && glCompileShader && glAttachShader
    && glDetachShader && glLinkProgram && glGetUniformLocation
    && glUniform1i && glUniform2fv && glUniform4fv;

    if(shader_support) glprogram = glCreateProgram();

    //create surface texture
    resize(256, 256);
  }

  //free texture and staging buffer; the GL context itself is owned by the caller
  void term() {
    if(gltexture) {
      glDeleteTextures(1, &gltexture);
      gltexture = 0;
    }

    if(buffer) {
      delete[] buffer;
      buffer = 0;
      iwidth = 0;
      iheight = 0;
    }
  }

  //zero-initialize all handles so init()/term() can test them safely
  OpenGL() {
    gltexture = 0;
    glprogram = 0;
    fragmentshader = 0;
    vertexshader = 0;

    buffer = 0;
    iwidth = 0;
    iheight = 0;
  }
};
|
@@ -1,174 +0,0 @@
|
||||
#ifdef __APPLE__
|
||||
#include <OpenGL/OpenGL.h>
|
||||
#endif
|
||||
|
||||
namespace ruby {
|
||||
|
||||
//Qt OpenGL video driver: renders through a QGLWidget embedded in the host QWidget.
class pVideoQtOpenGL {
public:
  QWidget *parent;      //host widget supplied via set("QWidget", ...); must be set before init()
  QVBoxLayout *layout;  //zero-margin layout that makes the GL widget fill the parent

  //the GL output surface; owns the texture and the staging pixel buffer
  class RubyGLWidget : public QGLWidget {
  public:
    GLuint texture;
    unsigned textureWidth, textureHeight;  //texture/buffer dimensions (grow-only)

    uint32_t *buffer;                      //staging pixels, textureWidth * textureHeight
    unsigned rasterWidth, rasterHeight;    //size of the frame currently in buffer

    bool synchronize;                      //vsync request (only applied on __APPLE__)
    unsigned filter;                       //Video::FilterPoint or Video::FilterLinear

    //grow texture + buffer to at least width x height; never shrinks.
    //previous pixel contents are discarded on growth.
    void resize(unsigned width, unsigned height) {
      if(width > textureWidth || height > textureHeight) {
        textureWidth = max(width, textureWidth);
        textureHeight = max(height, textureHeight);

        if(buffer) {
          delete[] buffer;
          glDeleteTextures(1, &texture);
        }

        buffer = new uint32_t[textureWidth * textureHeight];
        glGenTextures(1, &texture);
        glBindTexture(GL_TEXTURE_2D, texture);
        glPixelStorei(GL_UNPACK_ROW_LENGTH, textureWidth);  //buffer rows are textureWidth apart
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, textureWidth, textureHeight, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, buffer);
      }
    }

    //apply the vsync setting; only implemented for the Apple CGL backend
    void updateSynchronization() {
#ifdef __APPLE__
      makeCurrent();
      CGLContextObj context = CGLGetCurrentContext();
      GLint value = synchronize; //0 = draw immediately (no vsync), 1 = draw once per frame (vsync)
      CGLSetParameter(context, kCGLCPSwapInterval, &value);
#endif
    }

    //QGLWidget paint handler: upload the staged frame and draw it scaled to the widget
    void paintGL() {
      unsigned outputWidth = width();
      unsigned outputHeight = height();

      //orthographic projection: one GL unit per output pixel
      glMatrixMode(GL_PROJECTION);
      glLoadIdentity();
      glOrtho(0, outputWidth, 0, outputHeight, -1.0, 1.0);
      glViewport(0, 0, outputWidth, outputHeight);

      glMatrixMode(GL_MODELVIEW);
      glLoadIdentity();

      glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, (filter == Video::FilterPoint) ? GL_NEAREST : GL_LINEAR);
      glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, (filter == Video::FilterPoint) ? GL_NEAREST : GL_LINEAR);
      glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, rasterWidth, rasterHeight, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, buffer);

      //fraction of the (grow-only) texture that holds the current frame
      double w = (double)rasterWidth / (double)textureWidth;
      double h = (double)rasterHeight / (double)textureHeight;
      unsigned u = outputWidth;
      unsigned v = outputHeight;

      //vertices flip the image vertically: GL origin is bottom-left, source is top-left
      glBegin(GL_TRIANGLE_STRIP);
      glTexCoord2f(0, 0); glVertex3i(0, v, 0);
      glTexCoord2f(w, 0); glVertex3i(u, v, 0);
      glTexCoord2f(0, h); glVertex3i(0, 0, 0);
      glTexCoord2f(w, h); glVertex3i(u, 0, 0);
      glEnd();
    }

    //one-time GL setup, called by Qt once the context exists
    void initializeGL() {
      format().setDoubleBuffer(true);

      texture = 0;
      textureWidth = 0;
      textureHeight = 0;
      buffer = 0;
      resize(rasterWidth = 256, rasterHeight = 256);

      //disable unused features; enable required ones
      glDisable(GL_ALPHA_TEST);
      glDisable(GL_BLEND);
      glDisable(GL_DEPTH_TEST);
      glDisable(GL_POLYGON_SMOOTH);
      glDisable(GL_STENCIL_TEST);
      glEnable(GL_DITHER);
      glEnable(GL_TEXTURE_2D);
      glClearColor(0.0, 0.0, 0.0, 0.0);
    }
  } *widget;

  //capability query: which properties this driver supports
  bool cap(const string& name) {
    if(name == Video::Synchronize) return true;
    if(name == Video::Filter) return true;
    if(name == "QWidget") return true;
    return false;
  }

  //property getter; returns false for unknown properties
  any get(const string& name) {
    if(name == Video::Synchronize) return widget->synchronize;
    if(name == Video::Filter) return widget->filter;
    if(name == "QWidget") return parent;
    return false;
  }

  //property setter; returns true when the property was recognized and applied
  bool set(const string& name, const any& value) {
    if(name == Video::Synchronize) {
      widget->synchronize = any_cast<bool>(value);
      widget->updateSynchronization();
      return true;
    }

    if(name == Video::Filter) {
      widget->filter = any_cast<unsigned>(value);
      return true;
    }

    if(name == "QWidget") {
      parent = any_cast<QWidget*>(value);
      return true;
    }

    return false;
  }

  //expose the staging buffer for the next width x height frame; pitch is in bytes
  bool lock(uint32_t *&data, unsigned &pitch, unsigned width, unsigned height) {
    widget->resize(width, height);
    widget->rasterWidth = width;
    widget->rasterHeight = height;

    pitch = widget->textureWidth * sizeof(uint32_t);
    return data = widget->buffer;
  }

  void unlock() {
  }

  //blank the staging buffer and repaint
  void clear() {
    memset(widget->buffer, 0, widget->textureWidth * widget->textureHeight * sizeof(uint32_t));
    widget->updateGL();
  }

  //present the staged frame (triggers paintGL)
  void refresh() {
    widget->updateGL();
  }

  //create the GL widget inside the host widget; parent must already be set
  bool init() {
    layout = new QVBoxLayout;
    layout->setMargin(0);

    widget = new RubyGLWidget;
    widget->setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Expanding);
    layout->addWidget(widget);
    parent->setLayout(layout);

    return true;
  }

  //no explicit teardown: Qt parents own and delete the widget/layout
  void term() {
  }
};
|
||||
|
||||
DeclareVideo(QtOpenGL)
|
||||
|
||||
};
|
@@ -1,126 +0,0 @@
|
||||
namespace ruby {
|
||||
|
||||
//state shared between pVideoQtRaster and its paint widget.
//note: a single file-scope instance, shared by all driver instances in this TU
struct VideoQtRasterContext {
  QImage *image;           //current frame, QImage::Format_RGB32
  unsigned width, height;  //dimensions of image
  unsigned filter;         //Video::FilterPoint or Video::FilterLinear (scaling mode)
} context;
|
||||
|
||||
//software (non-GL) Qt video driver: draws a QImage through QPainter,
//scaling with Qt's transformation modes when the widget size differs.
class pVideoQtRaster {
public:
  QWidget *parent;      //host widget supplied via set("QWidget", ...); must be set before init()
  QVBoxLayout *layout;  //zero-margin layout that makes the image widget fill the parent

  //paint widget: blits (and scales if needed) the shared context image
  struct QtImage : public QWidget {
    VideoQtRasterContext &context;  //reference to the file-scope shared state

    void paintEvent(QPaintEvent*) {
      if(context.image == 0) return;  //nothing staged yet
      QPainter painter(this);

      if(size().width() == context.width && size().height() == context.height) {
        //1:1 blit, no scaling cost
        painter.drawImage(0, 0, *context.image);
      } else {
        //scale to the widget; smooth transformation only for the linear filter
        Qt::TransformationMode mode = Qt::FastTransformation;
        if(context.filter == Video::FilterLinear) mode = Qt::SmoothTransformation;
        painter.drawImage(0, 0, context.image->scaled(size(), Qt::IgnoreAspectRatio, mode));
      }
    }

    QtImage(QWidget *parent, VideoQtRasterContext &context_) : QWidget(parent), context(context_) {}
  } *widget;

  //capability query: which properties this driver supports
  bool cap(const string& name) {
    if(name == Video::Filter) return true;
    if(name == "QWidget") return true;
    return false;
  }

  //property getter; returns false for unknown properties
  any get(const string& name) {
    if(name == Video::Filter) return context.filter;
    if(name == "QWidget") return parent;
    return false;
  }

  //property setter; returns true when the property was recognized and applied
  bool set(const string& name, const any& value) {
    if(name == Video::Filter) {
      context.filter = any_cast<unsigned>(value);
      return true;
    }

    if(name == "QWidget") {
      parent = any_cast<QWidget*>(value);
      return true;
    }

    return false;
  }

  //reallocate the frame image when dimensions change (previous pixels discarded)
  void resize(unsigned width, unsigned height) {
    if(context.width != width || context.height != height) {
      if(context.image) delete context.image;
      context.image = new QImage(context.width = width, context.height = height, QImage::Format_RGB32);
    }
  }

  //expose the QImage pixel data for the next width x height frame; pitch is in bytes
  bool lock(uint32_t *&data, unsigned &pitch, unsigned width, unsigned height) {
    //if image size has changed since last lock(), re-allocate buffer to match new size
    if(width != context.width || height != context.height) resize(width, height);

    pitch = width * sizeof(uint32_t);
    return data = (uint32_t*)context.image->bits();
  }

  void unlock() {
  }

  //blank the frame and repaint
  void clear() {
    context.image->fill(0);
    widget->update();
  }

  //schedule a repaint of the staged frame
  void refresh() {
    widget->update();
  }

  //create the paint widget inside the host widget; parent must already be set
  bool init() {
    term();

    layout = new QVBoxLayout;
    layout->setMargin(0);

    context.image = 0;
    context.width = 0;
    context.height = 0;
    context.filter = Video::FilterPoint;
    resize(256, 256);

    widget = new QtImage(parent, context);
    widget->setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Expanding);
    layout->addWidget(widget);
    parent->setLayout(layout);
    clear();

    return true;
  }

  //free the image, widget and layout; all pointers zeroed so term() is re-entrant
  void term() {
    if(context.image) delete context.image;
    if(widget) delete widget;
    if(layout) delete layout;

    context.image = 0;
    widget = 0;
    layout = 0;
  }

  pVideoQtRaster() {
    context.image = 0;
    widget = 0;
    layout = 0;
  }
};
|
||||
|
||||
DeclareVideo(QtRaster)
|
||||
|
||||
};
|
@@ -1,140 +0,0 @@
|
||||
#include <sys/ipc.h>
|
||||
#include <sys/shm.h>
|
||||
#include <X11/extensions/Xv.h>
|
||||
#include <X11/extensions/Xvlib.h>
|
||||
#include <X11/extensions/XShm.h>
|
||||
#include <SDL/SDL.h>
|
||||
|
||||
namespace ruby {
|
||||
|
||||
class pVideoSDL {
|
||||
public:
|
||||
Display *display;
|
||||
SDL_Surface *screen, *buffer;
|
||||
unsigned iwidth, iheight;
|
||||
|
||||
struct {
|
||||
uintptr_t handle;
|
||||
|
||||
unsigned width;
|
||||
unsigned height;
|
||||
} settings;
|
||||
|
||||
bool cap(const string& name) {
|
||||
if(name == Video::Handle) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
any get(const string& name) {
|
||||
if(name == Video::Handle) return settings.handle;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool set(const string& name, const any& value) {
|
||||
if(name == Video::Handle) {
|
||||
settings.handle = any_cast<uintptr_t>(value);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void resize(unsigned width, unsigned height) {
|
||||
if(iwidth >= width && iheight >= height) return;
|
||||
|
||||
iwidth = max(width, iwidth);
|
||||
iheight = max(height, iheight);
|
||||
|
||||
if(buffer) SDL_FreeSurface(buffer);
|
||||
buffer = SDL_CreateRGBSurface(
|
||||
SDL_SWSURFACE, iwidth, iheight, 32,
|
||||
0x00ff0000, 0x0000ff00, 0x000000ff, 0xff000000
|
||||
);
|
||||
}
|
||||
|
||||
bool lock(uint32_t *&data, unsigned &pitch, unsigned width, unsigned height) {
|
||||
if(width != settings.width || height != settings.height) {
|
||||
resize(settings.width = width, settings.height = height);
|
||||
}
|
||||
|
||||
if(SDL_MUSTLOCK(buffer)) SDL_LockSurface(buffer);
|
||||
pitch = buffer->pitch;
|
||||
return data = (uint32_t*)buffer->pixels;
|
||||
}
|
||||
|
||||
void unlock() {
|
||||
if(SDL_MUSTLOCK(buffer)) SDL_UnlockSurface(buffer);
|
||||
}
|
||||
|
||||
void clear() {
|
||||
if(SDL_MUSTLOCK(buffer)) SDL_LockSurface(buffer);
|
||||
for(unsigned y = 0; y < iheight; y++) {
|
||||
uint32_t *data = (uint32_t*)buffer->pixels + y * (buffer->pitch >> 2);
|
||||
for(unsigned x = 0; x < iwidth; x++) *data++ = 0xff000000;
|
||||
}
|
||||
if(SDL_MUSTLOCK(buffer)) SDL_UnlockSurface(buffer);
|
||||
refresh();
|
||||
}
|
||||
|
||||
void refresh() {
|
||||
//ruby input is X8R8G8B8, top 8-bits are ignored.
|
||||
//as SDL forces us to use a 32-bit buffer, we must set alpha to 255 (full opacity)
|
||||
//to prevent blending against the window beneath when X window visual is 32-bits.
|
||||
if(SDL_MUSTLOCK(buffer)) SDL_LockSurface(buffer);
|
||||
for(unsigned y = 0; y < settings.height; y++) {
|
||||
uint32_t *data = (uint32_t*)buffer->pixels + y * (buffer->pitch >> 2);
|
||||
for(unsigned x = 0; x < settings.width; x++) *data++ |= 0xff000000;
|
||||
}
|
||||
if(SDL_MUSTLOCK(buffer)) SDL_UnlockSurface(buffer);
|
||||
|
||||
XWindowAttributes attributes;
|
||||
XGetWindowAttributes(display, settings.handle, &attributes);
|
||||
|
||||
SDL_Rect src, dest;
|
||||
|
||||
src.x = 0;
|
||||
src.y = 0;
|
||||
src.w = settings.width;
|
||||
src.h = settings.height;
|
||||
|
||||
dest.x = 0;
|
||||
dest.y = 0;
|
||||
dest.w = attributes.width;
|
||||
dest.h = attributes.height;
|
||||
|
||||
SDL_SoftStretch(buffer, &src, screen, &dest);
|
||||
SDL_UpdateRect(screen, dest.x, dest.y, dest.w, dest.h);
|
||||
}
|
||||
|
||||
bool init() {
|
||||
display = XOpenDisplay(0);
|
||||
|
||||
char env[512];
|
||||
sprintf(env, "SDL_WINDOWID=%ld", (long int)settings.handle);
|
||||
putenv(env);
|
||||
|
||||
SDL_InitSubSystem(SDL_INIT_VIDEO);
|
||||
screen = SDL_SetVideoMode(2560, 1600, 32, SDL_HWSURFACE);
|
||||
|
||||
buffer = 0;
|
||||
iwidth = 0;
|
||||
iheight = 0;
|
||||
resize(settings.width = 256, settings.height = 256);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void term() {
|
||||
XCloseDisplay(display);
|
||||
SDL_FreeSurface(buffer);
|
||||
SDL_QuitSubSystem(SDL_INIT_VIDEO);
|
||||
}
|
||||
|
||||
pVideoSDL() {
|
||||
settings.handle = 0;
|
||||
}
|
||||
};
|
||||
|
||||
DeclareVideo(SDL)
|
||||
|
||||
};
|
@@ -1,154 +0,0 @@
|
||||
/*
|
||||
video.wgl
|
||||
authors: byuu, krom
|
||||
*/
|
||||
|
||||
#include "opengl.hpp"
|
||||
|
||||
namespace ruby {
|
||||
|
||||
class pVideoWGL : public OpenGL {
|
||||
public:
|
||||
BOOL (APIENTRY *glSwapInterval)(int);
|
||||
|
||||
HDC display;
|
||||
HGLRC wglcontext;
|
||||
HWND window;
|
||||
HINSTANCE glwindow;
|
||||
|
||||
struct {
|
||||
HWND handle;
|
||||
bool synchronize;
|
||||
unsigned filter;
|
||||
|
||||
unsigned width;
|
||||
unsigned height;
|
||||
} settings;
|
||||
|
||||
bool cap(const string& name) {
|
||||
if(name == Video::Handle) return true;
|
||||
if(name == Video::Synchronize) return true;
|
||||
if(name == Video::Filter) return true;
|
||||
if(name == Video::FragmentShader) return true;
|
||||
if(name == Video::VertexShader) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
any get(const string& name) {
|
||||
if(name == Video::Handle) return (uintptr_t)settings.handle;
|
||||
if(name == Video::Synchronize) return settings.synchronize;
|
||||
if(name == Video::Filter) return settings.filter;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool set(const string& name, const any& value) {
|
||||
if(name == Video::Handle) {
|
||||
settings.handle = (HWND)any_cast<uintptr_t>(value);
|
||||
return true;
|
||||
}
|
||||
|
||||
if(name == Video::Synchronize) {
|
||||
if(settings.synchronize != any_cast<bool>(value)) {
|
||||
settings.synchronize = any_cast<bool>(value);
|
||||
if(wglcontext) init();
|
||||
}
|
||||
}
|
||||
|
||||
if(name == Video::Filter) {
|
||||
settings.filter = any_cast<unsigned>(value);
|
||||
return true;
|
||||
}
|
||||
|
||||
if(name == Video::FragmentShader) {
|
||||
OpenGL::set_fragment_shader(any_cast<const char*>(value));
|
||||
return true;
|
||||
}
|
||||
|
||||
if(name == Video::VertexShader) {
|
||||
OpenGL::set_vertex_shader(any_cast<const char*>(value));
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool lock(uint32_t *&data, unsigned &pitch, unsigned width, unsigned height) {
|
||||
resize(width, height);
|
||||
settings.width = width;
|
||||
settings.height = height;
|
||||
return OpenGL::lock(data, pitch);
|
||||
}
|
||||
|
||||
void unlock() {
|
||||
}
|
||||
|
||||
void clear() {
|
||||
OpenGL::clear();
|
||||
SwapBuffers(display);
|
||||
}
|
||||
|
||||
void refresh() {
|
||||
RECT rc;
|
||||
GetClientRect(settings.handle, &rc);
|
||||
|
||||
OpenGL::refresh(settings.filter == Video::FilterLinear,
|
||||
settings.width, settings.height,
|
||||
rc.right - rc.left, rc.bottom - rc.top);
|
||||
|
||||
SwapBuffers(display);
|
||||
}
|
||||
|
||||
bool init() {
|
||||
term();
|
||||
|
||||
GLuint pixel_format;
|
||||
PIXELFORMATDESCRIPTOR pfd;
|
||||
memset(&pfd, 0, sizeof(PIXELFORMATDESCRIPTOR));
|
||||
pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR);
|
||||
pfd.nVersion = 1;
|
||||
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
|
||||
pfd.iPixelType = PFD_TYPE_RGBA;
|
||||
|
||||
display = GetDC(settings.handle);
|
||||
pixel_format = ChoosePixelFormat(display, &pfd);
|
||||
SetPixelFormat(display, pixel_format, &pfd);
|
||||
|
||||
wglcontext = wglCreateContext(display);
|
||||
wglMakeCurrent(display, wglcontext);
|
||||
|
||||
OpenGL::init();
|
||||
settings.width = 256;
|
||||
settings.height = 256;
|
||||
|
||||
//vertical synchronization
|
||||
if(!glSwapInterval) glSwapInterval = (BOOL (APIENTRY*)(int))glGetProcAddress("wglSwapIntervalEXT");
|
||||
if( glSwapInterval) glSwapInterval(settings.synchronize);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void term() {
|
||||
OpenGL::term();
|
||||
|
||||
if(wglcontext) {
|
||||
wglDeleteContext(wglcontext);
|
||||
wglcontext = 0;
|
||||
}
|
||||
}
|
||||
|
||||
pVideoWGL() : glSwapInterval(0) {
|
||||
settings.handle = 0;
|
||||
settings.synchronize = false;
|
||||
settings.filter = 0;
|
||||
|
||||
window = 0;
|
||||
wglcontext = 0;
|
||||
glwindow = 0;
|
||||
}
|
||||
|
||||
~pVideoWGL() { term(); }
|
||||
};
|
||||
|
||||
DeclareVideo(WGL)
|
||||
|
||||
};
|
@@ -1,498 +0,0 @@
|
||||
#include <sys/ipc.h>
|
||||
#include <sys/shm.h>
|
||||
#include <X11/extensions/XShm.h>
|
||||
#include <X11/extensions/Xv.h>
|
||||
#include <X11/extensions/Xvlib.h>
|
||||
|
||||
extern "C" XvImage* XvShmCreateImage(Display*, XvPortID, int, char*, int, int, XShmSegmentInfo*);
|
||||
|
||||
namespace ruby {
|
||||
|
||||
class pVideoXv {
|
||||
public:
|
||||
uint32_t *buffer;
|
||||
uint8_t *ytable, *utable, *vtable;
|
||||
|
||||
enum XvFormat {
|
||||
XvFormatRGB32,
|
||||
XvFormatRGB24,
|
||||
XvFormatRGB16,
|
||||
XvFormatRGB15,
|
||||
XvFormatYUY2,
|
||||
XvFormatUYVY,
|
||||
XvFormatUnknown
|
||||
};
|
||||
|
||||
struct {
|
||||
Display *display;
|
||||
GC gc;
|
||||
Window window;
|
||||
Colormap colormap;
|
||||
XShmSegmentInfo shminfo;
|
||||
|
||||
int port;
|
||||
int depth;
|
||||
int visualid;
|
||||
|
||||
XvImage *image;
|
||||
XvFormat format;
|
||||
uint32_t fourcc;
|
||||
|
||||
unsigned width;
|
||||
unsigned height;
|
||||
} device;
|
||||
|
||||
struct {
|
||||
Window handle;
|
||||
bool synchronize;
|
||||
|
||||
unsigned width;
|
||||
unsigned height;
|
||||
} settings;
|
||||
|
||||
bool cap(const string& name) {
|
||||
if(name == Video::Handle) return true;
|
||||
if(name == Video::Synchronize) {
|
||||
return XInternAtom(XOpenDisplay(0), "XV_SYNC_TO_VBLANK", true) != None;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
any get(const string& name) {
|
||||
if(name == Video::Handle) return settings.handle;
|
||||
if(name == Video::Synchronize) return settings.synchronize;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool set(const string& name, const any& value) {
|
||||
if(name == Video::Handle) {
|
||||
settings.handle = any_cast<uintptr_t>(value);
|
||||
return true;
|
||||
}
|
||||
|
||||
if(name == Video::Synchronize) {
|
||||
Display *display = XOpenDisplay(0);
|
||||
Atom atom = XInternAtom(display, "XV_SYNC_TO_VBLANK", true);
|
||||
if(atom != None && device.port >= 0) {
|
||||
settings.synchronize = any_cast<bool>(value);
|
||||
XvSetPortAttribute(display, device.port, atom, settings.synchronize);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void resize(unsigned width, unsigned height) {
|
||||
if(device.width >= width && device.height >= height) return;
|
||||
device.width = max(width, device.width);
|
||||
device.height = max(height, device.height);
|
||||
|
||||
XShmDetach(device.display, &device.shminfo);
|
||||
shmdt(device.shminfo.shmaddr);
|
||||
shmctl(device.shminfo.shmid, IPC_RMID, NULL);
|
||||
XFree(device.image);
|
||||
delete[] buffer;
|
||||
|
||||
device.image = XvShmCreateImage(device.display, device.port, device.fourcc, 0, device.width, device.height, &device.shminfo);
|
||||
|
||||
device.shminfo.shmid = shmget(IPC_PRIVATE, device.image->data_size, IPC_CREAT | 0777);
|
||||
device.shminfo.shmaddr = device.image->data = (char*)shmat(device.shminfo.shmid, 0, 0);
|
||||
device.shminfo.readOnly = false;
|
||||
XShmAttach(device.display, &device.shminfo);
|
||||
|
||||
buffer = new uint32_t[device.width * device.height];
|
||||
}
|
||||
|
||||
bool lock(uint32_t *&data, unsigned &pitch, unsigned width, unsigned height) {
|
||||
if(width != settings.width || height != settings.height) {
|
||||
resize(settings.width = width, settings.height = height);
|
||||
}
|
||||
|
||||
pitch = device.width * 4;
|
||||
return data = buffer;
|
||||
}
|
||||
|
||||
void unlock() {
|
||||
}
|
||||
|
||||
void clear() {
|
||||
memset(buffer, 0, device.width * device.height * sizeof(uint32_t));
|
||||
//clear twice in case video is double buffered ...
|
||||
refresh();
|
||||
refresh();
|
||||
}
|
||||
|
||||
void refresh() {
|
||||
unsigned width = settings.width;
|
||||
unsigned height = settings.height;
|
||||
|
||||
XWindowAttributes target;
|
||||
XGetWindowAttributes(device.display, device.window, &target);
|
||||
|
||||
//we must ensure that the child window is the same size as the parent window.
|
||||
//unfortunately, we cannot hook the parent window resize event notification,
|
||||
//as we did not create the parent window, nor have any knowledge of the toolkit used.
|
||||
//therefore, query each window size and resize as needed.
|
||||
XWindowAttributes parent;
|
||||
XGetWindowAttributes(device.display, settings.handle, &parent);
|
||||
if(target.width != parent.width || target.height != parent.height) {
|
||||
XResizeWindow(device.display, device.window, parent.width, parent.height);
|
||||
}
|
||||
|
||||
//update target width and height attributes
|
||||
XGetWindowAttributes(device.display, device.window, &target);
|
||||
|
||||
switch(device.format) {
|
||||
case XvFormatRGB32: render_rgb32(width, height); break;
|
||||
case XvFormatRGB24: render_rgb24(width, height); break;
|
||||
case XvFormatRGB16: render_rgb16(width, height); break;
|
||||
case XvFormatRGB15: render_rgb15(width, height); break;
|
||||
case XvFormatYUY2: render_yuy2 (width, height); break;
|
||||
case XvFormatUYVY: render_uyvy (width, height); break;
|
||||
}
|
||||
|
||||
XvShmPutImage(device.display, device.port, device.window, device.gc, device.image,
|
||||
0, 0, width, height,
|
||||
0, 0, target.width, target.height,
|
||||
true);
|
||||
}
|
||||
|
||||
bool init() {
|
||||
device.display = XOpenDisplay(0);
|
||||
|
||||
if(!XShmQueryExtension(device.display)) {
|
||||
fprintf(stderr, "VideoXv: XShm extension not found.\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
//find an appropriate Xv port
|
||||
device.port = -1;
|
||||
XvAdaptorInfo *adaptor_info;
|
||||
unsigned adaptor_count;
|
||||
XvQueryAdaptors(device.display, DefaultRootWindow(device.display), &adaptor_count, &adaptor_info);
|
||||
for(unsigned i = 0; i < adaptor_count; i++) {
|
||||
//find adaptor that supports both input (memory->drawable) and image (drawable->screen) masks
|
||||
if(adaptor_info[i].num_formats < 1) continue;
|
||||
if(!(adaptor_info[i].type & XvInputMask)) continue;
|
||||
if(!(adaptor_info[i].type & XvImageMask)) continue;
|
||||
|
||||
device.port = adaptor_info[i].base_id;
|
||||
device.depth = adaptor_info[i].formats->depth;
|
||||
device.visualid = adaptor_info[i].formats->visual_id;
|
||||
break;
|
||||
}
|
||||
XvFreeAdaptorInfo(adaptor_info);
|
||||
if(device.port < 0) {
|
||||
fprintf(stderr, "VideoXv: failed to find valid XvPort.\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
//create child window to attach to parent window.
|
||||
//this is so that even if parent window visual depth doesn't match Xv visual
|
||||
//(common with composited windows), Xv can still render to child window.
|
||||
XWindowAttributes window_attributes;
|
||||
XGetWindowAttributes(device.display, settings.handle, &window_attributes);
|
||||
|
||||
XVisualInfo visualtemplate;
|
||||
visualtemplate.visualid = device.visualid;
|
||||
visualtemplate.screen = DefaultScreen(device.display);
|
||||
visualtemplate.depth = device.depth;
|
||||
visualtemplate.visual = 0;
|
||||
int visualmatches = 0;
|
||||
XVisualInfo *visualinfo = XGetVisualInfo(device.display, VisualIDMask | VisualScreenMask | VisualDepthMask, &visualtemplate, &visualmatches);
|
||||
if(visualmatches < 1 || !visualinfo->visual) {
|
||||
if(visualinfo) XFree(visualinfo);
|
||||
fprintf(stderr, "VideoXv: unable to find Xv-compatible visual.\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
device.colormap = XCreateColormap(device.display, settings.handle, visualinfo->visual, AllocNone);
|
||||
XSetWindowAttributes attributes;
|
||||
attributes.colormap = device.colormap;
|
||||
attributes.border_pixel = 0;
|
||||
attributes.event_mask = StructureNotifyMask;
|
||||
device.window = XCreateWindow(device.display, /* parent = */ settings.handle,
|
||||
/* x = */ 0, /* y = */ 0, window_attributes.width, window_attributes.height,
|
||||
/* border_width = */ 0, device.depth, InputOutput, visualinfo->visual,
|
||||
CWColormap | CWBorderPixel | CWEventMask, &attributes);
|
||||
XFree(visualinfo);
|
||||
XSetWindowBackground(device.display, device.window, /* color = */ 0);
|
||||
XMapWindow(device.display, device.window);
|
||||
|
||||
device.gc = XCreateGC(device.display, device.window, 0, 0);
|
||||
|
||||
//set colorkey to auto paint, so that Xv video output is always visible
|
||||
Atom atom = XInternAtom(device.display, "XV_AUTOPAINT_COLORKEY", true);
|
||||
if(atom != None) XvSetPortAttribute(device.display, device.port, atom, 1);
|
||||
|
||||
//find optimal rendering format
|
||||
device.format = XvFormatUnknown;
|
||||
signed format_count;
|
||||
XvImageFormatValues *format = XvListImageFormats(device.display, device.port, &format_count);
|
||||
|
||||
if(device.format == XvFormatUnknown) for(signed i = 0; i < format_count; i++) {
|
||||
if(format[i].type == XvRGB && format[i].bits_per_pixel == 32) {
|
||||
device.format = XvFormatRGB32;
|
||||
device.fourcc = format[i].id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(device.format == XvFormatUnknown) for(signed i = 0; i < format_count; i++) {
|
||||
if(format[i].type == XvRGB && format[i].bits_per_pixel == 24) {
|
||||
device.format = XvFormatRGB24;
|
||||
device.fourcc = format[i].id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(device.format == XvFormatUnknown) for(signed i = 0; i < format_count; i++) {
|
||||
if(format[i].type == XvRGB && format[i].bits_per_pixel <= 16 && format[i].red_mask == 0xf800) {
|
||||
device.format = XvFormatRGB16;
|
||||
device.fourcc = format[i].id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(device.format == XvFormatUnknown) for(signed i = 0; i < format_count; i++) {
|
||||
if(format[i].type == XvRGB && format[i].bits_per_pixel <= 16 && format[i].red_mask == 0x7c00) {
|
||||
device.format = XvFormatRGB15;
|
||||
device.fourcc = format[i].id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(device.format == XvFormatUnknown) for(signed i = 0; i < format_count; i++) {
|
||||
if(format[i].type == XvYUV && format[i].bits_per_pixel == 16 && format[i].format == XvPacked) {
|
||||
if(format[i].component_order[0] == 'Y' && format[i].component_order[1] == 'U'
|
||||
&& format[i].component_order[2] == 'Y' && format[i].component_order[3] == 'V'
|
||||
) {
|
||||
device.format = XvFormatYUY2;
|
||||
device.fourcc = format[i].id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if(device.format == XvFormatUnknown) for(signed i = 0; i < format_count; i++) {
|
||||
if(format[i].type == XvYUV && format[i].bits_per_pixel == 16 && format[i].format == XvPacked) {
|
||||
if(format[i].component_order[0] == 'U' && format[i].component_order[1] == 'Y'
|
||||
&& format[i].component_order[2] == 'V' && format[i].component_order[3] == 'Y'
|
||||
) {
|
||||
device.format = XvFormatUYVY;
|
||||
device.fourcc = format[i].id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
free(format);
|
||||
if(device.format == XvFormatUnknown) {
|
||||
fprintf(stderr, "VideoXv: unable to find a supported image format.\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
device.width = 256;
|
||||
device.height = 256;
|
||||
|
||||
device.image = XvShmCreateImage(device.display, device.port, device.fourcc, 0, device.width, device.height, &device.shminfo);
|
||||
if(!device.image) {
|
||||
fprintf(stderr, "VideoXv: XShmCreateImage failed.\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
device.shminfo.shmid = shmget(IPC_PRIVATE, device.image->data_size, IPC_CREAT | 0777);
|
||||
device.shminfo.shmaddr = device.image->data = (char*)shmat(device.shminfo.shmid, 0, 0);
|
||||
device.shminfo.readOnly = false;
|
||||
if(!XShmAttach(device.display, &device.shminfo)) {
|
||||
fprintf(stderr, "VideoXv: XShmAttach failed.\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
buffer = new uint32_t[device.width * device.height];
|
||||
settings.width = 256;
|
||||
settings.height = 256;
|
||||
init_yuv_tables();
|
||||
clear();
|
||||
return true;
|
||||
}
|
||||
|
||||
void term() {
|
||||
XShmDetach(device.display, &device.shminfo);
|
||||
shmdt(device.shminfo.shmaddr);
|
||||
shmctl(device.shminfo.shmid, IPC_RMID, NULL);
|
||||
XFree(device.image);
|
||||
|
||||
if(device.window) {
|
||||
XUnmapWindow(device.display, device.window);
|
||||
device.window = 0;
|
||||
}
|
||||
|
||||
if(device.colormap) {
|
||||
XFreeColormap(device.display, device.colormap);
|
||||
device.colormap = 0;
|
||||
}
|
||||
|
||||
if(buffer) { delete[] buffer; buffer = 0; }
|
||||
if(ytable) { delete[] ytable; ytable = 0; }
|
||||
if(utable) { delete[] utable; utable = 0; }
|
||||
if(vtable) { delete[] vtable; vtable = 0; }
|
||||
}
|
||||
|
||||
//copy the 32-bit backing store into the XvImage row by row, unconverted.
void render_rgb32(unsigned width, unsigned height) {
  const uint32_t *src = (const uint32_t*)buffer;
  uint32_t *dst = (uint32_t*)device.image->data;

  for(unsigned row = 0; row < height; row++) {
    memcpy(dst, src, width * sizeof(uint32_t));
    //both surfaces share the same row stride of device.width pixels
    src += device.width;
    dst += device.width;
  }
}
|
||||
|
||||
//convert each 32-bit pixel to three packed bytes (low, middle, high byte
//of the source word, in that order) in the XvImage.
void render_rgb24(unsigned width, unsigned height) {
  const uint32_t *src = (const uint32_t*)buffer;
  uint8_t *dst = (uint8_t*)device.image->data;

  for(unsigned row = 0; row < height; row++) {
    for(unsigned col = 0; col < width; col++) {
      uint32_t pixel = *src++;
      dst[0] = pixel >>  0;
      dst[1] = pixel >>  8;
      dst[2] = pixel >> 16;
      dst += 3;
    }

    //skip the unused tail of each row (stride is device.width pixels)
    src += device.width - width;
    dst += (device.width - width) * 3;
  }
}
|
||||
|
||||
//downconvert each 32-bit pixel to RGB565 by keeping the top 5/6/5 bits
//of the red, green and blue channels.
void render_rgb16(unsigned width, unsigned height) {
  const uint32_t *src = (const uint32_t*)buffer;
  uint16_t *dst = (uint16_t*)device.image->data;

  for(unsigned row = 0; row < height; row++) {
    for(unsigned col = 0; col < width; col++) {
      uint32_t pixel = *src++;
      uint16_t r = (pixel >> 8) & 0xf800;
      uint16_t g = (pixel >> 5) & 0x07e0;
      uint16_t b = (pixel >> 3) & 0x001f;
      *dst++ = r | g | b;  //RGB32->RGB16
    }

    //skip the unused tail of each row (stride is device.width pixels)
    src += device.width - width;
    dst += device.width - width;
  }
}
|
||||
|
||||
//downconvert each 32-bit pixel to RGB555 by keeping the top 5 bits
//of each channel.
void render_rgb15(unsigned width, unsigned height) {
  const uint32_t *src = (const uint32_t*)buffer;
  uint16_t *dst = (uint16_t*)device.image->data;

  for(unsigned row = 0; row < height; row++) {
    for(unsigned col = 0; col < width; col++) {
      uint32_t pixel = *src++;
      uint16_t r = (pixel >> 9) & 0x7c00;
      uint16_t g = (pixel >> 6) & 0x03e0;
      uint16_t b = (pixel >> 3) & 0x001f;
      *dst++ = r | g | b;  //RGB32->RGB15
    }

    //skip the unused tail of each row (stride is device.width pixels)
    src += device.width - width;
    dst += device.width - width;
  }
}
|
||||
|
||||
void render_yuy2(unsigned width, unsigned height) {
|
||||
uint32_t *input = (uint32_t*)buffer;
|
||||
uint16_t *output = (uint16_t*)device.image->data;
|
||||
|
||||
for(unsigned y = 0; y < height; y++) {
|
||||
for(unsigned x = 0; x < width >> 1; x++) {
|
||||
uint32_t p0 = *input++;
|
||||
uint32_t p1 = *input++;
|
||||
p0 = ((p0 >> 8) & 0xf800) + ((p0 >> 5) & 0x07e0) + ((p0 >> 3) & 0x001f); //RGB32->RGB16
|
||||
p1 = ((p1 >> 8) & 0xf800) + ((p1 >> 5) & 0x07e0) + ((p1 >> 3) & 0x001f); //RGB32->RGB16
|
||||
|
||||
uint8_t u = (utable[p0] + utable[p1]) >> 1;
|
||||
uint8_t v = (vtable[p0] + vtable[p1]) >> 1;
|
||||
|
||||
*output++ = (u << 8) | ytable[p0];
|
||||
*output++ = (v << 8) | ytable[p1];
|
||||
}
|
||||
|
||||
input += device.width - width;
|
||||
output += device.width - width;
|
||||
}
|
||||
}
|
||||
|
||||
void render_uyvy(unsigned width, unsigned height) {
|
||||
uint32_t *input = (uint32_t*)buffer;
|
||||
uint16_t *output = (uint16_t*)device.image->data;
|
||||
|
||||
for(unsigned y = 0; y < height; y++) {
|
||||
for(unsigned x = 0; x < width >> 1; x++) {
|
||||
uint32_t p0 = *input++;
|
||||
uint32_t p1 = *input++;
|
||||
p0 = ((p0 >> 8) & 0xf800) + ((p0 >> 5) & 0x07e0) + ((p0 >> 3) & 0x001f);
|
||||
p1 = ((p1 >> 8) & 0xf800) + ((p1 >> 5) & 0x07e0) + ((p1 >> 3) & 0x001f);
|
||||
|
||||
uint8_t u = (utable[p0] + utable[p1]) >> 1;
|
||||
uint8_t v = (vtable[p0] + vtable[p1]) >> 1;
|
||||
|
||||
*output++ = (ytable[p0] << 8) | u;
|
||||
*output++ = (ytable[p1] << 8) | v;
|
||||
}
|
||||
|
||||
input += device.width - width;
|
||||
output += device.width - width;
|
||||
}
|
||||
}
|
||||
|
||||
//build RGB565 -> Y'/U/V lookup tables (one byte per component, indexed by
//the full 16-bit RGB565 value) used by render_yuy2/render_uyvy.
//allocation is guarded so that a repeated init() cycle cannot leak the
//previous tables (the original unconditionally re-allocated them).
void init_yuv_tables() {
  if(!ytable) ytable = new uint8_t[65536];
  if(!utable) utable = new uint8_t[65536];
  if(!vtable) vtable = new uint8_t[65536];

  for(unsigned i = 0; i < 65536; i++) {
    //extract RGB565 color data from i
    uint8_t r = (i >> 11) & 31, g = (i >> 5) & 63, b = (i) & 31;
    //expand to 8 bits per channel by replicating the high bits
    r = (r << 3) | (r >> 2);  //R5->R8
    g = (g << 2) | (g >> 4);  //G6->G8
    b = (b << 3) | (b >> 2);  //B5->B8

    //ITU-R Recommendation BT.601
    //double lr = 0.299, lg = 0.587, lb = 0.114;
    int y = int( +(double(r) * 0.257) + (double(g) * 0.504) + (double(b) * 0.098) +  16.0 );
    int u = int( -(double(r) * 0.148) - (double(g) * 0.291) + (double(b) * 0.439) + 128.0 );
    int v = int( +(double(r) * 0.439) - (double(g) * 0.368) - (double(b) * 0.071) + 128.0 );

    //ITU-R Recommendation BT.709
    //double lr = 0.2126, lg = 0.7152, lb = 0.0722;
    //int y = int( double(r) * lr + double(g) * lg + double(b) * lb );
    //int u = int( (double(b) - y) / (2.0 - 2.0 * lb) + 128.0 );
    //int v = int( (double(r) - y) / (2.0 - 2.0 * lr) + 128.0 );

    //clamp each component to the representable byte range
    ytable[i] = y < 0 ? 0 : y > 255 ? 255 : y;
    utable[i] = u < 0 ? 0 : u > 255 ? 255 : u;
    vtable[i] = v < 0 ? 0 : v > 255 ? 255 : v;
  }
}
|
||||
|
||||
pVideoXv() {
|
||||
device.window = 0;
|
||||
device.colormap = 0;
|
||||
device.port = -1;
|
||||
|
||||
ytable = 0;
|
||||
utable = 0;
|
||||
vtable = 0;
|
||||
|
||||
settings.handle = 0;
|
||||
settings.synchronize = false;
|
||||
}
|
||||
|
||||
//release all X11/XShm resources and heap allocations on destruction.
~pVideoXv() {
  term();
}
|
||||
};
|
||||
|
||||
DeclareVideo(Xv)
|
||||
|
||||
};
|
Reference in New Issue
Block a user