Update to v093 release.

byuu says:

Changelog:
- added Cocoa target: higan can now be compiled for OS X Lion
  [Cydrak, byuu]
- SNES/accuracy profile hires color blending improvements - fixes
  Marvelous text [AWJ]
- fixed a slight bug in SNES/SA-1 VBR support caused by a typo
- added support for multi-pass shaders that can load external textures
  (requires OpenGL 3.2+); see the example manifest after this list
- added game library path (used by ananke->Import Game) to
  Settings->Advanced
- system profiles, shaders, and the cheats database can now be stored in
  "all users" shared folders (e.g. /usr/share on Linux)
- all configuration files are in BML format now, instead of XML (much
  easier to read and edit this way)
- main window supports drag-and-drop of game folders (but not game files
  / ZIP archives)
- audio buffer clears when entering a modal loop on Windows (prevents
  audio repetition with DirectSound driver)
- a substantial amount of code clean-up (probably the biggest
  refactoring to date)
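
For reference, a rough sketch of what a multi-pass shader manifest can
look like under the new OpenGL loader (an illustrative example only, not
one of the bundled shaders: the node names match what ruby/video/opengl
parses, but phase1.fs, phase2.fs and lut.png are placeholder file names):

  program
    filter: nearest
    wrap: edge
    width: 200%
    height: 200%
    fragment: phase1.fs
    pixmap: lut.png
      filter: linear

  program
    fragment: phase2.fs

  output
    filter: linear

Each "program" node becomes one render pass, "pixmap" nodes are the
external textures, and the "output" node controls the final blit to the
window.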

One highly desired target for this release was to default to the optimal
drivers instead of the safest drivers, but because AMD drivers don't
seem to like my OpenGL 3.2 driver, I've decided to postpone that. AMD
has too big a market share. Hopefully with v093 officially released, we
can get some public input on what AMD doesn't like.
Tim Allen
2013-08-18 13:21:14 +10:00
parent c74865e171
commit 4e2eb23835
1928 changed files with 4834 additions and 84223 deletions

190
ruby/video/cgl.cpp Normal file

@@ -0,0 +1,190 @@
#include "opengl/opengl.hpp"
namespace ruby {
class pVideoCGL;
}
@interface RubyVideoCGL : NSOpenGLView {
@public
ruby::pVideoCGL* video;
}
-(id) initWith:(ruby::pVideoCGL*)video pixelFormat:(NSOpenGLPixelFormat*)pixelFormat;
-(void) reshape;
@end
namespace ruby {
struct pVideoCGL : OpenGL {
RubyVideoCGL* view;
struct {
NSView* handle;
bool synchronize;
unsigned filter;
string shader;
} settings;
bool cap(const string& name) {
if(name == Video::Handle) return true;
if(name == Video::Synchronize) return true;
if(name == Video::Filter) return true;
if(name == Video::Shader) return true;
return false;
}
any get(const string& name) {
if(name == Video::Handle) return (uintptr_t)settings.handle;
if(name == Video::Synchronize) return settings.synchronize;
if(name == Video::Filter) return settings.filter;
return false;
}
bool set(const string& name, const any& value) {
if(name == Video::Handle) {
settings.handle = (NSView*)any_cast<uintptr_t>(value);
return true;
}
if(name == Video::Synchronize) {
if(settings.synchronize != any_cast<bool>(value)) {
settings.synchronize = any_cast<bool>(value);
if(view) {
@autoreleasepool {
[[view openGLContext] makeCurrentContext];
int synchronize = settings.synchronize;
[[view openGLContext] setValues:&synchronize forParameter:NSOpenGLCPSwapInterval];
}
}
}
return true;
}
if(name == Video::Filter) {
settings.filter = any_cast<unsigned>(value);
if(settings.shader.empty()) OpenGL::filter = settings.filter ? GL_LINEAR : GL_NEAREST;
return true;
}
if(name == Video::Shader) {
settings.shader = any_cast<const char*>(value);
@autoreleasepool {
[[view openGLContext] makeCurrentContext];
}
OpenGL::shader(settings.shader);
if(settings.shader.empty()) OpenGL::filter = settings.filter ? GL_LINEAR : GL_NEAREST;
return true;
}
return false;
}
bool lock(uint32_t*& data, unsigned& pitch, unsigned width, unsigned height) {
OpenGL::size(width, height);
return OpenGL::lock(data, pitch);
}
void unlock() {
}
void clear() {
@autoreleasepool {
[view lockFocus];
OpenGL::clear();
[[view openGLContext] flushBuffer];
[view unlockFocus];
}
}
void refresh() {
@autoreleasepool {
if([view lockFocusIfCanDraw]) {
auto area = [view frame];
outputWidth = area.size.width, outputHeight = area.size.height;
OpenGL::refresh();
[[view openGLContext] flushBuffer];
[view unlockFocus];
}
}
}
bool init() {
term();
@autoreleasepool {
NSOpenGLPixelFormatAttribute attributes[] = {
NSOpenGLPFAOpenGLProfile, NSOpenGLProfileVersion3_2Core,
NSOpenGLPFAColorSize, 24,
NSOpenGLPFAAlphaSize, 8,
NSOpenGLPFADoubleBuffer,
0
};
auto size = [settings.handle frame].size;
auto format = [[[NSOpenGLPixelFormat alloc] initWithAttributes:attributes] autorelease];
auto context = [[[NSOpenGLContext alloc] initWithFormat:format shareContext:nil] autorelease];
view = [[RubyVideoCGL alloc] initWith:this pixelFormat:format];
[view setOpenGLContext:context];
[view setFrame:NSMakeRect(0, 0, size.width, size.height)];
[view setAutoresizingMask:NSViewWidthSizable | NSViewHeightSizable];
[settings.handle addSubview:view];
[context setView:view];
[view lockFocus];
OpenGL::init();
//print((const char*)glGetString(GL_VERSION), "\n");
int synchronize = settings.synchronize;
[[view openGLContext] setValues:&synchronize forParameter:NSOpenGLCPSwapInterval];
[view unlockFocus];
}
clear();
return true;
}
void term() {
OpenGL::term();
@autoreleasepool {
[view removeFromSuperview];
[view release];
view = nil;
}
}
pVideoCGL() {
view = nil;
settings.handle = nil;
settings.synchronize = false;
settings.filter = 0;
}
~pVideoCGL() {
term();
}
};
DeclareVideo(CGL)
}
@implementation RubyVideoCGL : NSOpenGLView
-(id) initWith:(ruby::pVideoCGL*)videoPointer pixelFormat:(NSOpenGLPixelFormat*)pixelFormat {
if(self = [super initWithFrame:NSMakeRect(0, 0, 0, 0) pixelFormat:pixelFormat]) {
video = videoPointer;
}
return self;
}
-(void) reshape {
video->refresh();
}
@end
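
Note: every driver in this commit implements the same small interface
(cap/get/set, lock/unlock, clear/refresh, init/term). A minimal caller
sketch is shown below; "handle" and "scanline" are placeholders supplied
by the frontend, and any of the drivers can stand in for pVideoCGL.

  //minimal usage sketch of the shared ruby video interface
  ruby::pVideoCGL video;
  video.set(Video::Handle, (uintptr_t)handle);    //native view/window to render into
  video.set(Video::Synchronize, true);            //request vsync where supported
  video.set(Video::Filter, Video::FilterLinear);  //bilinear scaling when no shader is active
  video.init();

  uint32_t* output = nullptr;
  unsigned pitch = 0;
  if(video.lock(output, pitch, 256, 240)) {       //request a 256x240 XRGB8888 buffer
    for(unsigned y = 0; y < 240; y++) {
      memcpy((uint8_t*)output + y * pitch, scanline(y), 256 * sizeof(uint32_t));
    }
    video.unlock();
    video.refresh();                              //scale and present the frame
  }

  video.term();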

463
ruby/video/direct3d.cpp Normal file

@@ -0,0 +1,463 @@
#undef interface
#define interface struct
#include <d3d9.h>
#include <d3dx9.h>
#undef interface
#define D3DVERTEX (D3DFVF_XYZRHW | D3DFVF_TEX1)
typedef HRESULT (__stdcall* EffectProc)(LPDIRECT3DDEVICE9, LPCVOID, UINT, D3DXMACRO const*, LPD3DXINCLUDE, DWORD, LPD3DXEFFECTPOOL, LPD3DXEFFECT*, LPD3DXBUFFER*);
typedef HRESULT (__stdcall* TextureProc)(LPDIRECT3DDEVICE9, LPCTSTR, LPDIRECT3DTEXTURE9*);
namespace ruby {
class pVideoD3D {
public:
LPDIRECT3D9 lpd3d;
LPDIRECT3DDEVICE9 device;
LPDIRECT3DVERTEXBUFFER9 vertex_buffer;
LPDIRECT3DVERTEXBUFFER9* vertex_ptr;
D3DPRESENT_PARAMETERS presentation;
D3DSURFACE_DESC d3dsd;
D3DLOCKED_RECT d3dlr;
D3DRASTER_STATUS d3drs;
D3DCAPS9 d3dcaps;
LPDIRECT3DTEXTURE9 texture;
LPDIRECT3DSURFACE9 surface;
LPD3DXEFFECT effect;
string shader_source_markup;
bool lost;
unsigned iwidth, iheight;
struct d3dvertex {
float x, y, z, rhw; //screen coords
float u, v; //texture coords
};
struct {
uint32_t t_usage, v_usage;
uint32_t t_pool, v_pool;
uint32_t lock;
uint32_t filter;
} flags;
struct {
bool dynamic; //device supports dynamic textures
bool shader; //device supports pixel shaders
} caps;
struct {
HWND handle;
bool synchronize;
unsigned filter;
unsigned width;
unsigned height;
} settings;
struct {
unsigned width;
unsigned height;
} state;
bool cap(const string& name) {
if(name == Video::Handle) return true;
if(name == Video::Synchronize) return true;
if(name == Video::Filter) return true;
if(name == Video::Shader) return false;
return false;
}
any get(const string& name) {
if(name == Video::Handle) return (uintptr_t)settings.handle;
if(name == Video::Synchronize) return settings.synchronize;
if(name == Video::Filter) return settings.filter;
return false;
}
bool set(const string& name, const any& value) {
if(name == Video::Handle) {
settings.handle = (HWND)any_cast<uintptr_t>(value);
return true;
}
if(name == Video::Synchronize) {
settings.synchronize = any_cast<bool>(value);
return true;
}
if(name == Video::Filter) {
settings.filter = any_cast<unsigned>(value);
if(lpd3d) update_filter();
return true;
}
if(name == Video::Shader) {
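//HLSL shader support is disabled in this release: cap() reports Video::Shader
//as false, and the early return below keeps set_shader() from being invoked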
return false;
set_shader(any_cast<const char*>(value));
return true;
}
return false;
}
bool recover() {
if(!device) return false;
if(lost) {
release_resources();
if(device->Reset(&presentation) != D3D_OK) return false;
}
lost = false;
device->SetDialogBoxMode(false);
device->SetTextureStageState(0, D3DTSS_COLOROP, D3DTOP_SELECTARG1);
device->SetTextureStageState(0, D3DTSS_COLORARG1, D3DTA_TEXTURE);
device->SetTextureStageState(0, D3DTSS_COLORARG2, D3DTA_DIFFUSE);
device->SetTextureStageState(0, D3DTSS_ALPHAOP, D3DTOP_SELECTARG1);
device->SetTextureStageState(0, D3DTSS_ALPHAARG1, D3DTA_TEXTURE);
device->SetTextureStageState(0, D3DTSS_ALPHAARG2, D3DTA_DIFFUSE);
device->SetRenderState(D3DRS_LIGHTING, false);
device->SetRenderState(D3DRS_ZENABLE, false);
device->SetRenderState(D3DRS_CULLMODE, D3DCULL_NONE);
device->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_SRCALPHA);
device->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA);
device->SetRenderState(D3DRS_ALPHABLENDENABLE, false);
device->SetVertexShader(NULL);
device->SetFVF(D3DVERTEX);
device->CreateVertexBuffer(sizeof(d3dvertex) * 4, flags.v_usage, D3DVERTEX, (D3DPOOL)flags.v_pool, &vertex_buffer, NULL);
iwidth = 0;
iheight = 0;
resize(settings.width = 256, settings.height = 256);
update_filter();
clear();
return true;
}
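//round a texture dimension up to the next power of two (e.g. 300 -> 512)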
unsigned rounded_power_of_two(unsigned n) {
n--;
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
return n + 1;
}
void resize(unsigned width, unsigned height) {
if(iwidth >= width && iheight >= height) return;
iwidth = rounded_power_of_two(max(width, iwidth ));
iheight = rounded_power_of_two(max(height, iheight));
if(d3dcaps.MaxTextureWidth < iwidth || d3dcaps.MaxTextureHeight < iheight) {
//TODO: attempt to handle this more gracefully
return;
}
if(texture) texture->Release();
device->CreateTexture(iwidth, iheight, 1, flags.t_usage, D3DFMT_X8R8G8B8, (D3DPOOL)flags.t_pool, &texture, NULL);
}
void update_filter() {
if(!device) return;
if(lost && !recover()) return;
flags.filter = (settings.filter == Video::FilterNearest ? D3DTEXF_POINT : D3DTEXF_LINEAR);
device->SetSamplerState(0, D3DSAMP_MINFILTER, flags.filter);
device->SetSamplerState(0, D3DSAMP_MAGFILTER, flags.filter);
}
// Vertex format:
//
//  0----------1
//  |         /|
//  |        / |
//  |       /  |
//  |      /   |
//  |     /    |
//  2----------3
//
// (x,y) screen coords, in pixels
// (u,v) texture coords, between 0.0 (top, left) and 1.0 (bottom, right)
void set_vertex(
uint32_t px, uint32_t py, uint32_t pw, uint32_t ph,
uint32_t tw, uint32_t th,
uint32_t x, uint32_t y, uint32_t w, uint32_t h
) {
d3dvertex vertex[4];
vertex[0].x = vertex[2].x = (double)(x - 0.5);
vertex[1].x = vertex[3].x = (double)(x + w - 0.5);
vertex[0].y = vertex[1].y = (double)(y - 0.5);
vertex[2].y = vertex[3].y = (double)(y + h - 0.5);
//Z-buffer and RHW are unused for 2D blit, set to normal values
vertex[0].z = vertex[1].z = vertex[2].z = vertex[3].z = 0.0;
vertex[0].rhw = vertex[1].rhw = vertex[2].rhw = vertex[3].rhw = 1.0;
double rw = (double)w / (double)pw * (double)tw;
double rh = (double)h / (double)ph * (double)th;
vertex[0].u = vertex[2].u = (double)(px ) / rw;
vertex[1].u = vertex[3].u = (double)(px + w) / rw;
vertex[0].v = vertex[1].v = (double)(py ) / rh;
vertex[2].v = vertex[3].v = (double)(py + h) / rh;
vertex_buffer->Lock(0, sizeof(d3dvertex) * 4, (void**)&vertex_ptr, 0);
memcpy(vertex_ptr, vertex, sizeof(d3dvertex) * 4);
vertex_buffer->Unlock();
device->SetStreamSource(0, vertex_buffer, 0, sizeof(d3dvertex));
}
void clear() {
if(lost && !recover()) return;
texture->GetLevelDesc(0, &d3dsd);
texture->GetSurfaceLevel(0, &surface);
if(surface) {
device->ColorFill(surface, 0, D3DCOLOR_XRGB(0x00, 0x00, 0x00));
surface->Release();
surface = nullptr;
}
//clear primary display and all backbuffers
for(unsigned i = 0; i < 3; i++) {
device->Clear(0, 0, D3DCLEAR_TARGET, D3DCOLOR_XRGB(0x00, 0x00, 0x00), 1.0f, 0);
device->Present(0, 0, 0, 0);
}
}
bool lock(uint32_t*& data, unsigned& pitch, unsigned width, unsigned height) {
if(lost && !recover()) return false;
if(width != settings.width || height != settings.height) {
resize(settings.width = width, settings.height = height);
}
texture->GetLevelDesc(0, &d3dsd);
texture->GetSurfaceLevel(0, &surface);
surface->LockRect(&d3dlr, 0, flags.lock);
pitch = d3dlr.Pitch;
return data = (uint32_t*)d3dlr.pBits;
}
void unlock() {
surface->UnlockRect();
surface->Release();
surface = nullptr;
}
void refresh() {
if(lost && !recover()) return;
RECT rd, rs; //dest, source rectangles
GetClientRect(settings.handle, &rd);
SetRect(&rs, 0, 0, settings.width, settings.height);
//if output size changed, driver must be re-initialized.
//failure to do so causes scaling issues on some video drivers.
if(state.width != rd.right || state.height != rd.bottom) {
init();
set_shader(shader_source_markup);
return;
}
if(caps.shader && effect) {
device->BeginScene();
set_vertex(0, 0, settings.width, settings.height, iwidth, iheight, 0, 0, rd.right, rd.bottom);
D3DXVECTOR4 rubyTextureSize;
rubyTextureSize.x = iwidth;
rubyTextureSize.y = iheight;
rubyTextureSize.z = 1.0 / iheight;
rubyTextureSize.w = 1.0 / iwidth;
effect->SetVector("rubyTextureSize", &rubyTextureSize);
D3DXVECTOR4 rubyInputSize;
rubyInputSize.x = settings.width;
rubyInputSize.y = settings.height;
rubyInputSize.z = 1.0 / settings.height;
rubyInputSize.w = 1.0 / settings.width;
effect->SetVector("rubyInputSize", &rubyInputSize);
D3DXVECTOR4 rubyOutputSize;
rubyOutputSize.x = rd.right;
rubyOutputSize.y = rd.bottom;
rubyOutputSize.z = 1.0 / rd.bottom;
rubyOutputSize.w = 1.0 / rd.right;
effect->SetVector("rubyOutputSize", &rubyOutputSize);
UINT passes;
effect->Begin(&passes, 0);
effect->SetTexture("rubyTexture", texture);
device->SetTexture(0, texture);
for(unsigned pass = 0; pass < passes; pass++) {
effect->BeginPass(pass);
device->DrawPrimitive(D3DPT_TRIANGLESTRIP, 0, 2);
effect->EndPass();
}
effect->End();
device->EndScene();
} else {
device->BeginScene();
set_vertex(0, 0, settings.width, settings.height, iwidth, iheight, 0, 0, rd.right, rd.bottom);
device->SetTexture(0, texture);
device->DrawPrimitive(D3DPT_TRIANGLESTRIP, 0, 2);
device->EndScene();
}
if(settings.synchronize) {
D3DRASTER_STATUS status;
//wait for a previous vblank to finish, if necessary
while(true) {
device->GetRasterStatus(0, &status);
if(status.InVBlank == false) break;
}
//wait for next vblank to begin
while(true) {
device->GetRasterStatus(0, &status);
if(status.InVBlank == true) break;
}
}
if(device->Present(0, 0, 0, 0) == D3DERR_DEVICELOST) lost = true;
}
void set_shader(const char* source) {
if(!caps.shader) return;
if(effect) {
effect->Release();
effect = NULL;
}
if(!source || !*source) {
shader_source_markup = "";
return;
}
shader_source_markup = source;
XML::Document document(shader_source_markup);
bool is_hlsl = document["shader"]["language"].data == "HLSL";
string shader_source = document["shader"]["source"].data;
if(shader_source == "") return;
HMODULE d3dx;
for(unsigned i = 0; i < 256; i++) {
char t[256];
sprintf(t, "d3dx9_%u.dll", i);
d3dx = LoadLibraryW(utf16_t(t));
if(d3dx) break;
}
if(!d3dx) d3dx = LoadLibraryW(L"d3dx9.dll");
if(!d3dx) return;
EffectProc effectProc = (EffectProc)GetProcAddress(d3dx, "D3DXCreateEffect");
TextureProc textureProc = (TextureProc)GetProcAddress(d3dx, "D3DXCreateTextureFromFileA");
LPD3DXBUFFER pBufferErrors = NULL;
effectProc(device, shader_source, lstrlenA(shader_source), NULL, NULL, 0, NULL, &effect, &pBufferErrors);
D3DXHANDLE hTech;
effect->FindNextValidTechnique(NULL, &hTech);
effect->SetTechnique(hTech);
}
bool init() {
term();
RECT rd;
GetClientRect(settings.handle, &rd);
state.width = rd.right;
state.height = rd.bottom;
lpd3d = Direct3DCreate9(D3D_SDK_VERSION);
if(!lpd3d) return false;
memset(&presentation, 0, sizeof(presentation));
presentation.Flags = D3DPRESENTFLAG_VIDEO;
presentation.SwapEffect = D3DSWAPEFFECT_FLIP;
presentation.hDeviceWindow = settings.handle;
presentation.BackBufferCount = 1;
presentation.MultiSampleType = D3DMULTISAMPLE_NONE;
presentation.MultiSampleQuality = 0;
presentation.EnableAutoDepthStencil = false;
presentation.AutoDepthStencilFormat = D3DFMT_UNKNOWN;
presentation.PresentationInterval = D3DPRESENT_INTERVAL_IMMEDIATE;
presentation.Windowed = true;
presentation.BackBufferFormat = D3DFMT_UNKNOWN;
presentation.BackBufferWidth = 0;
presentation.BackBufferHeight = 0;
if(lpd3d->CreateDevice(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, settings.handle,
D3DCREATE_FPU_PRESERVE | D3DCREATE_SOFTWARE_VERTEXPROCESSING, &presentation, &device) != D3D_OK) {
return false;
}
device->GetDeviceCaps(&d3dcaps);
caps.dynamic = bool(d3dcaps.Caps2 & D3DCAPS2_DYNAMICTEXTURES);
caps.shader = d3dcaps.PixelShaderVersion > D3DPS_VERSION(1, 4);
if(caps.dynamic == true) {
flags.t_usage = D3DUSAGE_DYNAMIC;
flags.v_usage = D3DUSAGE_WRITEONLY | D3DUSAGE_DYNAMIC;
flags.t_pool = D3DPOOL_DEFAULT;
flags.v_pool = D3DPOOL_DEFAULT;
flags.lock = D3DLOCK_NOSYSLOCK | D3DLOCK_DISCARD;
} else {
flags.t_usage = 0;
flags.v_usage = D3DUSAGE_WRITEONLY;
flags.t_pool = D3DPOOL_MANAGED;
flags.v_pool = D3DPOOL_MANAGED;
flags.lock = D3DLOCK_NOSYSLOCK | D3DLOCK_DISCARD;
}
lost = false;
recover();
return true;
}
void release_resources() {
if(effect) { effect->Release(); effect = 0; }
if(vertex_buffer) { vertex_buffer->Release(); vertex_buffer = 0; }
if(surface) { surface->Release(); surface = 0; }
if(texture) { texture->Release(); texture = 0; }
}
void term() {
release_resources();
if(device) { device->Release(); device = 0; }
if(lpd3d) { lpd3d->Release(); lpd3d = 0; }
}
pVideoD3D() {
effect = 0;
vertex_buffer = 0;
surface = 0;
texture = 0;
device = 0;
lpd3d = 0;
lost = true;
settings.handle = 0;
settings.synchronize = false;
settings.filter = Video::FilterLinear;
}
};
DeclareVideo(D3D)
};
#undef D3DVERTEX

186
ruby/video/directdraw.cpp Normal file

@@ -0,0 +1,186 @@
#include <ddraw.h>
namespace ruby {
class pVideoDD {
public:
LPDIRECTDRAW lpdd;
LPDIRECTDRAW7 lpdd7;
LPDIRECTDRAWSURFACE7 screen, raster;
LPDIRECTDRAWCLIPPER clipper;
DDSURFACEDESC2 ddsd;
DDSCAPS2 ddscaps;
unsigned iwidth, iheight;
struct {
HWND handle;
bool synchronize;
unsigned width;
unsigned height;
} settings;
bool cap(const string& name) {
if(name == Video::Handle) return true;
if(name == Video::Synchronize) return true;
return false;
}
any get(const string& name) {
if(name == Video::Handle) return (uintptr_t)settings.handle;
if(name == Video::Synchronize) return settings.synchronize;
return false;
}
bool set(const string& name, const any& value) {
if(name == Video::Handle) {
settings.handle = (HWND)any_cast<uintptr_t>(value);
return true;
}
if(name == Video::Synchronize) {
settings.synchronize = any_cast<bool>(value);
return true;
}
return false;
}
void resize(unsigned width, unsigned height) {
if(iwidth >= width && iheight >= height) return;
iwidth = max(width, iwidth);
iheight = max(height, iheight);
if(raster) raster->Release();
screen->GetSurfaceDesc(&ddsd);
int depth = ddsd.ddpfPixelFormat.dwRGBBitCount;
if(depth == 32) goto try_native_surface;
memset(&ddsd, 0, sizeof(DDSURFACEDESC2));
ddsd.dwSize = sizeof(DDSURFACEDESC2);
ddsd.dwFlags = DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT;
ddsd.ddsCaps.dwCaps = DDSCAPS_OFFSCREENPLAIN | DDSCAPS_VIDEOMEMORY; //DDSCAPS_SYSTEMMEMORY
ddsd.dwWidth = iwidth;
ddsd.dwHeight = iheight;
ddsd.ddpfPixelFormat.dwSize = sizeof(DDPIXELFORMAT);
ddsd.ddpfPixelFormat.dwFlags = DDPF_RGB;
ddsd.ddpfPixelFormat.dwRGBBitCount = 32;
ddsd.ddpfPixelFormat.dwRBitMask = 0xff0000;
ddsd.ddpfPixelFormat.dwGBitMask = 0x00ff00;
ddsd.ddpfPixelFormat.dwBBitMask = 0x0000ff;
if(lpdd7->CreateSurface(&ddsd, &raster, 0) == DD_OK) return clear();
try_native_surface:
memset(&ddsd, 0, sizeof(DDSURFACEDESC2));
ddsd.dwSize = sizeof(DDSURFACEDESC2);
ddsd.dwFlags = DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT;
ddsd.ddsCaps.dwCaps = DDSCAPS_OFFSCREENPLAIN | DDSCAPS_VIDEOMEMORY; //DDSCAPS_SYSTEMMEMORY
ddsd.dwWidth = iwidth;
ddsd.dwHeight = iheight;
if(lpdd7->CreateSurface(&ddsd, &raster, 0) == DD_OK) return clear();
}
void clear() {
DDBLTFX fx;
fx.dwSize = sizeof(DDBLTFX);
fx.dwFillColor = 0x00000000;
screen->Blt(0, 0, 0, DDBLT_WAIT | DDBLT_COLORFILL, &fx);
raster->Blt(0, 0, 0, DDBLT_WAIT | DDBLT_COLORFILL, &fx);
}
bool lock(uint32_t*& data, unsigned& pitch, unsigned width, unsigned height) {
if(width != settings.width || height != settings.height) {
resize(settings.width = width, settings.height = height);
}
if(raster->Lock(0, &ddsd, DDLOCK_WAIT, 0) != DD_OK) {
raster->Restore();
if(raster->Lock(0, &ddsd, DDLOCK_WAIT, 0) != DD_OK) return false;
}
pitch = ddsd.lPitch;
return data = (uint32_t*)ddsd.lpSurface;
}
void unlock() {
raster->Unlock(0);
}
void refresh() {
if(settings.synchronize) {
while(true) {
BOOL in_vblank;
lpdd7->GetVerticalBlankStatus(&in_vblank);
if(in_vblank == true) break;
}
}
HRESULT hr;
RECT rd, rs;
SetRect(&rs, 0, 0, settings.width, settings.height);
POINT p = {0, 0};
ClientToScreen(settings.handle, &p);
GetClientRect(settings.handle, &rd);
OffsetRect(&rd, p.x, p.y);
if(screen->Blt(&rd, raster, &rs, DDBLT_WAIT, 0) == DDERR_SURFACELOST) {
screen->Restore();
raster->Restore();
}
}
bool init() {
term();
DirectDrawCreate(0, &lpdd, 0);
lpdd->QueryInterface(IID_IDirectDraw7, (void**)&lpdd7);
if(lpdd) { lpdd->Release(); lpdd = 0; }
lpdd7->SetCooperativeLevel(settings.handle, DDSCL_NORMAL);
memset(&ddsd, 0, sizeof(DDSURFACEDESC2));
ddsd.dwSize = sizeof(DDSURFACEDESC2);
ddsd.dwFlags = DDSD_CAPS;
ddsd.ddsCaps.dwCaps = DDSCAPS_PRIMARYSURFACE;
lpdd7->CreateSurface(&ddsd, &screen, 0);
lpdd7->CreateClipper(0, &clipper, 0);
clipper->SetHWnd(0, settings.handle);
screen->SetClipper(clipper);
raster = 0;
iwidth = 0;
iheight = 0;
resize(settings.width = 256, settings.height = 256);
return true;
}
void term() {
if(clipper) { clipper->Release(); clipper = 0; }
if(raster) { raster->Release(); raster = 0; }
if(screen) { screen->Release(); screen = 0; }
if(lpdd7) { lpdd7->Release(); lpdd7 = 0; }
if(lpdd) { lpdd->Release(); lpdd = 0; }
}
pVideoDD() {
lpdd = 0;
lpdd7 = 0;
screen = 0;
raster = 0;
clipper = 0;
settings.handle = 0;
}
};
DeclareVideo(DD)
};

100
ruby/video/gdi.cpp Normal file

@@ -0,0 +1,100 @@
#include <assert.h>
namespace ruby {
class pVideoGDI {
public:
uint32_t* buffer;
HBITMAP bitmap;
HDC bitmapdc;
BITMAPINFO bmi;
struct {
HWND handle;
unsigned width;
unsigned height;
} settings;
bool cap(const string& name) {
if(name == Video::Handle) return true;
return false;
}
any get(const string& name) {
if(name == Video::Handle) return (uintptr_t)settings.handle;
return false;
}
bool set(const string& name, const any& value) {
if(name == Video::Handle) {
settings.handle = (HWND)any_cast<uintptr_t>(value);
return true;
}
return false;
}
bool lock(uint32_t*& data, unsigned& pitch, unsigned width, unsigned height) {
settings.width = width;
settings.height = height;
pitch = 1024 * 4;
return data = buffer;
}
void unlock() {}
void clear() {}
void refresh() {
RECT rc;
GetClientRect(settings.handle, &rc);
SetDIBits(bitmapdc, bitmap, 0, settings.height, (void*)buffer, &bmi, DIB_RGB_COLORS);
HDC hdc = GetDC(settings.handle);
StretchBlt(hdc, rc.left, rc.top, rc.right, rc.bottom, bitmapdc, 0, 1024 - settings.height, settings.width, settings.height, SRCCOPY);
ReleaseDC(settings.handle, hdc);
}
bool init() {
HDC hdc = GetDC(settings.handle);
bitmapdc = CreateCompatibleDC(hdc);
assert(bitmapdc);
bitmap = CreateCompatibleBitmap(hdc, 1024, 1024);
assert(bitmap);
SelectObject(bitmapdc, bitmap);
ReleaseDC(settings.handle, hdc);
memset(&bmi, 0, sizeof(BITMAPINFO));
bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmi.bmiHeader.biWidth = 1024;
bmi.bmiHeader.biHeight = -1024;
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biBitCount = 32; //biBitCount of 15 is invalid, biBitCount of 16 is really RGB555
bmi.bmiHeader.biCompression = BI_RGB;
bmi.bmiHeader.biSizeImage = 1024 * 1024 * sizeof(uint32_t);
settings.width = 256;
settings.height = 256;
return true;
}
void term() {
DeleteObject(bitmap);
DeleteDC(bitmapdc);
}
pVideoGDI() {
buffer = (uint32_t*)malloc(1024 * 1024 * sizeof(uint32_t));
settings.handle = 0;
}
~pVideoGDI() {
if(buffer) free(buffer);
}
};
DeclareVideo(GDI)
};

251
ruby/video/glx.cpp Normal file

@@ -0,0 +1,251 @@
#include "opengl/opengl.hpp"
#define GLX_CONTEXT_MAJOR_VERSION_ARB 0x2091
#define GLX_CONTEXT_MINOR_VERSION_ARB 0x2092
namespace ruby {
struct pVideoGLX : OpenGL {
GLXContext (*glXCreateContextAttribs)(Display*, GLXFBConfig, GLXContext, int, const int*) = nullptr;
int (*glXSwapInterval)(int) = nullptr;
Display* display;
int screen;
Window xwindow;
Colormap colormap;
GLXContext glxcontext;
GLXWindow glxwindow;
struct {
int version_major, version_minor;
bool double_buffer;
bool is_direct;
} glx;
struct {
Window handle;
bool synchronize;
unsigned depth;
unsigned filter;
string shader;
} settings;
bool cap(const string& name) {
if(name == Video::Handle) return true;
if(name == Video::Synchronize) return true;
if(name == Video::Depth) return true;
if(name == Video::Filter) return true;
if(name == Video::Shader) return true;
return false;
}
any get(const string& name) {
if(name == Video::Handle) return (uintptr_t)settings.handle;
if(name == Video::Synchronize) return settings.synchronize;
if(name == Video::Depth) return settings.depth;
if(name == Video::Filter) return settings.filter;
return false;
}
bool set(const string& name, const any& value) {
if(name == Video::Handle) {
settings.handle = any_cast<uintptr_t>(value);
return true;
}
if(name == Video::Synchronize) {
if(settings.synchronize != any_cast<bool>(value)) {
settings.synchronize = any_cast<bool>(value);
if(glXSwapInterval) glXSwapInterval(settings.synchronize);
return true;
}
}
if(name == Video::Depth) {
unsigned depth = any_cast<unsigned>(value);
if(depth > DefaultDepth(display, screen)) return false;
switch(depth) {
case 24: format = GL_RGBA8; inputFormat = GL_UNSIGNED_INT_8_8_8_8_REV; break;
case 30: format = GL_RGB10_A2; inputFormat = GL_UNSIGNED_INT_2_10_10_10_REV; break;
default: return false;
}
settings.depth = depth;
return true;
}
if(name == Video::Filter) {
settings.filter = any_cast<unsigned>(value);
if(settings.shader.empty()) OpenGL::filter = settings.filter ? GL_LINEAR : GL_NEAREST;
return true;
}
if(name == Video::Shader) {
settings.shader = any_cast<const char*>(value);
OpenGL::shader(settings.shader);
if(settings.shader.empty()) OpenGL::filter = settings.filter ? GL_LINEAR : GL_NEAREST;
return true;
}
return false;
}
bool lock(uint32_t*& data, unsigned& pitch, unsigned width, unsigned height) {
OpenGL::size(width, height);
return OpenGL::lock(data, pitch);
}
void unlock() {
}
void clear() {
OpenGL::clear();
if(glx.double_buffer) glXSwapBuffers(display, glxwindow);
}
void refresh() {
//we must ensure that the child window is the same size as the parent window.
//unfortunately, we cannot hook the parent window resize event notification,
//as we did not create the parent window, nor have any knowledge of the toolkit used.
//therefore, inelegant as it may be, we query each window size and resize as needed.
XWindowAttributes parent, child;
XGetWindowAttributes(display, settings.handle, &parent);
XGetWindowAttributes(display, xwindow, &child);
if(child.width != parent.width || child.height != parent.height) {
XResizeWindow(display, xwindow, parent.width, parent.height);
}
outputWidth = parent.width, outputHeight = parent.height;
OpenGL::refresh();
if(glx.double_buffer) glXSwapBuffers(display, glxwindow);
}
bool init() {
term();
glXQueryVersion(display, &glx.version_major, &glx.version_minor);
//require GLX 1.2+ API
if(glx.version_major < 1 || (glx.version_major == 1 && glx.version_minor < 2)) return false;
XWindowAttributes window_attributes;
XGetWindowAttributes(display, settings.handle, &window_attributes);
//let GLX determine the best Visual to use for GL output; provide a few hints
//note: some video drivers will override double buffering attribute
int attributeList[] = {
GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT,
GLX_RENDER_TYPE, GLX_RGBA_BIT,
GLX_DOUBLEBUFFER, True,
GLX_RED_SIZE, (signed)(settings.depth / 3),
GLX_GREEN_SIZE, (signed)(settings.depth / 3) + (signed)(settings.depth % 3),
GLX_BLUE_SIZE, (signed)(settings.depth / 3),
None
};
int fbCount;
GLXFBConfig* fbConfig = glXChooseFBConfig(display, screen, attributeList, &fbCount);
if(fbCount == 0) return false;
XVisualInfo* vi = glXGetVisualFromFBConfig(display, fbConfig[0]);
//Window settings.handle has already been realized, most likely with DefaultVisual.
//GLX requires that the GL output window has the same Visual as the GLX context.
//it is not possible to change the Visual of an already realized (created) window.
//therefore a new child window, using the same GLX Visual, must be created and bound to settings.handle.
colormap = XCreateColormap(display, RootWindow(display, vi->screen), vi->visual, AllocNone);
XSetWindowAttributes attributes;
attributes.colormap = colormap;
attributes.border_pixel = 0;
xwindow = XCreateWindow(display, /* parent = */ settings.handle,
/* x = */ 0, /* y = */ 0, window_attributes.width, window_attributes.height,
/* border_width = */ 0, vi->depth, InputOutput, vi->visual,
CWColormap | CWBorderPixel, &attributes);
XSetWindowBackground(display, xwindow, /* color = */ 0);
XMapWindow(display, xwindow);
XFlush(display);
//window must be realized (appear onscreen) before we make the context current
while(XPending(display)) {
XEvent event;
XNextEvent(display, &event);
}
glxcontext = glXCreateContext(display, vi, /* sharelist = */ 0, /* direct = */ GL_TRUE);
glXMakeCurrent(display, glxwindow = xwindow, glxcontext);
glXCreateContextAttribs = (GLXContext (*)(Display*, GLXFBConfig, GLXContext, int, const int*))glGetProcAddress("glXCreateContextAttribsARB");
glXSwapInterval = (int (*)(int))glGetProcAddress("glXSwapIntervalSGI");
if(!glXSwapInterval) glXSwapInterval = (int (*)(int))glGetProcAddress("glXSwapIntervalMESA");
if(glXCreateContextAttribs) {
int attributes[] = {
GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
GLX_CONTEXT_MINOR_VERSION_ARB, 2,
None
};
GLXContext context = glXCreateContextAttribs(display, fbConfig[0], nullptr, true, attributes);
if(context) {
glXMakeCurrent(display, 0, nullptr);
glXDestroyContext(display, glxcontext);
glXMakeCurrent(display, glxwindow, glxcontext = context);
}
}
if(glXSwapInterval) {
glXSwapInterval(settings.synchronize);
}
//read attributes of frame buffer for later use, as requested attributes from above are not always granted
int value = 0;
glXGetConfig(display, vi, GLX_DOUBLEBUFFER, &value);
glx.double_buffer = value;
glx.is_direct = glXIsDirect(display, glxcontext);
OpenGL::init();
return true;
}
void term() {
OpenGL::term();
if(glxcontext) {
glXDestroyContext(display, glxcontext);
glxcontext = nullptr;
}
if(xwindow) {
XUnmapWindow(display, xwindow);
xwindow = 0;
}
if(colormap) {
XFreeColormap(display, colormap);
colormap = 0;
}
}
pVideoGLX() {
display = XOpenDisplay(0);
screen = DefaultScreen(display);
settings.handle = 0;
settings.synchronize = false;
settings.depth = 24;
settings.filter = 1; //linear
xwindow = 0;
colormap = 0;
glxcontext = nullptr;
glxwindow = 0;
}
~pVideoGLX() {
term();
XCloseDisplay(display);
}
};
DeclareVideo(GLX)
};

103
ruby/video/opengl/bind.hpp Normal file

@@ -0,0 +1,103 @@
#if !defined(PLATFORM_OSX)
PFNGLCREATEPROGRAMPROC glCreateProgram = nullptr;
PFNGLDELETEPROGRAMPROC glDeleteProgram = nullptr;
PFNGLUSEPROGRAMPROC glUseProgram = nullptr;
PFNGLCREATESHADERPROC glCreateShader = nullptr;
PFNGLDELETESHADERPROC glDeleteShader = nullptr;
PFNGLSHADERSOURCEPROC glShaderSource = nullptr;
PFNGLCOMPILESHADERPROC glCompileShader = nullptr;
PFNGLGETSHADERIVPROC glGetShaderiv = nullptr;
PFNGLGETSHADERINFOLOGPROC glGetShaderInfoLog = nullptr;
PFNGLATTACHSHADERPROC glAttachShader = nullptr;
PFNGLDETACHSHADERPROC glDetachShader = nullptr;
PFNGLLINKPROGRAMPROC glLinkProgram = nullptr;
PFNGLVALIDATEPROGRAMPROC glValidateProgram = nullptr;
PFNGLGETPROGRAMIVPROC glGetProgramiv = nullptr;
PFNGLGETPROGRAMINFOLOGPROC glGetProgramInfoLog = nullptr;
PFNGLGENVERTEXARRAYSPROC glGenVertexArrays = nullptr;
PFNGLDELETEVERTEXARRAYSPROC glDeleteVertexArrays = nullptr;
PFNGLBINDVERTEXARRAYPROC glBindVertexArray = nullptr;
PFNGLGENBUFFERSPROC glGenBuffers = nullptr;
PFNGLDELETEBUFFERSPROC glDeleteBuffers = nullptr;
PFNGLBINDBUFFERPROC glBindBuffer = nullptr;
PFNGLBUFFERDATAPROC glBufferData = nullptr;
PFNGLGETATTRIBLOCATIONPROC glGetAttribLocation = nullptr;
PFNGLVERTEXATTRIBPOINTERPROC glVertexAttribPointer = nullptr;
PFNGLENABLEVERTEXATTRIBARRAYPROC glEnableVertexAttribArray = nullptr;
PFNGLDISABLEVERTEXATTRIBARRAYPROC glDisableVertexAttribArray = nullptr;
PFNGLBINDFRAGDATALOCATIONPROC glBindFragDataLocation = nullptr;
PFNGLGETUNIFORMLOCATIONPROC glGetUniformLocation = nullptr;
PFNGLGETUNIFORMIVPROC glGetUniformiv = nullptr;
PFNGLUNIFORM1IPROC glUniform1i = nullptr;
PFNGLUNIFORM1FPROC glUniform1f = nullptr;
PFNGLUNIFORM2FPROC glUniform2f = nullptr;
PFNGLUNIFORM2FVPROC glUniform2fv = nullptr;
PFNGLUNIFORM4FPROC glUniform4f = nullptr;
PFNGLUNIFORM4FVPROC glUniform4fv = nullptr;
PFNGLUNIFORMMATRIX4FVPROC glUniformMatrix4fv = nullptr;
PFNGLGENFRAMEBUFFERSPROC glGenFramebuffers = nullptr;
PFNGLDELETEFRAMEBUFFERSPROC glDeleteFramebuffers = nullptr;
PFNGLBINDFRAMEBUFFERPROC glBindFramebuffer = nullptr;
PFNGLFRAMEBUFFERTEXTURE2DPROC glFramebufferTexture2D = nullptr;
PFNGLACTIVETEXTUREPROC glActiveTexture = nullptr;
static bool OpenGLBind() {
#define bind(prototype, function) \
function = (prototype)glGetProcAddress(#function); \
if(function == nullptr) return false
bind(PFNGLCREATEPROGRAMPROC, glCreateProgram);
bind(PFNGLDELETEPROGRAMPROC, glDeleteProgram);
bind(PFNGLUSEPROGRAMPROC, glUseProgram);
bind(PFNGLCREATESHADERPROC, glCreateShader);
bind(PFNGLDELETESHADERPROC, glDeleteShader);
bind(PFNGLSHADERSOURCEPROC, glShaderSource);
bind(PFNGLCOMPILESHADERPROC, glCompileShader);
bind(PFNGLGETSHADERIVPROC, glGetShaderiv);
bind(PFNGLGETSHADERINFOLOGPROC, glGetShaderInfoLog);
bind(PFNGLATTACHSHADERPROC, glAttachShader);
bind(PFNGLDETACHSHADERPROC, glDetachShader);
bind(PFNGLLINKPROGRAMPROC, glLinkProgram);
bind(PFNGLVALIDATEPROGRAMPROC, glValidateProgram);
bind(PFNGLGETPROGRAMIVPROC, glGetProgramiv);
bind(PFNGLGETPROGRAMINFOLOGPROC, glGetProgramInfoLog);
bind(PFNGLGENVERTEXARRAYSPROC, glGenVertexArrays);
bind(PFNGLDELETEVERTEXARRAYSPROC, glDeleteVertexArrays);
bind(PFNGLBINDVERTEXARRAYPROC, glBindVertexArray);
bind(PFNGLGENBUFFERSPROC, glGenBuffers);
bind(PFNGLDELETEBUFFERSPROC, glDeleteBuffers);
bind(PFNGLBINDBUFFERPROC, glBindBuffer);
bind(PFNGLBUFFERDATAPROC, glBufferData);
bind(PFNGLGETATTRIBLOCATIONPROC, glGetAttribLocation);
bind(PFNGLVERTEXATTRIBPOINTERPROC, glVertexAttribPointer);
bind(PFNGLENABLEVERTEXATTRIBARRAYPROC, glEnableVertexAttribArray);
bind(PFNGLDISABLEVERTEXATTRIBARRAYPROC, glDisableVertexAttribArray);
bind(PFNGLBINDFRAGDATALOCATIONPROC, glBindFragDataLocation);
bind(PFNGLGETUNIFORMLOCATIONPROC, glGetUniformLocation);
bind(PFNGLGETUNIFORMIVPROC, glGetUniformiv);
bind(PFNGLUNIFORM1IPROC, glUniform1i);
bind(PFNGLUNIFORM1FPROC, glUniform1f);
bind(PFNGLUNIFORM2FPROC, glUniform2f);
bind(PFNGLUNIFORM2FVPROC, glUniform2fv);
bind(PFNGLUNIFORM4FPROC, glUniform4f);
bind(PFNGLUNIFORM4FVPROC, glUniform4fv);
bind(PFNGLUNIFORMMATRIX4FVPROC, glUniformMatrix4fv);
bind(PFNGLGENFRAMEBUFFERSPROC, glGenFramebuffers);
bind(PFNGLDELETEFRAMEBUFFERSPROC, glDeleteFramebuffers);
bind(PFNGLBINDFRAMEBUFFERPROC, glBindFramebuffer);
bind(PFNGLFRAMEBUFFERTEXTURE2DPROC, glFramebufferTexture2D);
bind(PFNGLACTIVETEXTUREPROC, glActiveTexture);
#undef bind
return true;
}
#else
static bool OpenGLBind() {
return true;
}
#endif

147
ruby/video/opengl/main.hpp Normal file

@@ -0,0 +1,147 @@
void OpenGL::shader(const char* pathname) {
for(auto& program : programs) program.release();
programs.reset();
format = GL_RGBA8;
filter = GL_LINEAR;
wrap = GL_CLAMP_TO_BORDER;
absoluteWidth = 0, absoluteHeight = 0;
relativeWidth = 0, relativeHeight = 0;
if(pathname) {
auto document = Markup::Document(file::read({pathname, "manifest.bml"}));
for(auto& node : document.find("program")) {
unsigned n = programs.size();
programs(n).bind(this, node, pathname);
}
bind(this, document["output"], pathname);
}
}
bool OpenGL::lock(uint32_t*& data, unsigned& pitch) {
pitch = width * sizeof(uint32_t);
return data = buffer;
}
void OpenGL::clear() {
for(auto& p : programs) {
glUseProgram(p.program);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, p.framebuffer);
glClearColor(0, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT);
}
glUseProgram(0);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glClearColor(0, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT);
}
void OpenGL::refresh() {
clear();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_BGRA, inputFormat, buffer);
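//history holds the input frame plus the output of every completed pass, newest
//first; each pass receives these textures through its source[] sampler array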
struct History {
GLuint texture;
unsigned width, height;
GLuint filter, wrap;
};
vector<History> history;
unsigned sourceWidth = width, sourceHeight = height;
history.prepend({texture, sourceWidth, sourceHeight, filter, wrap});
for(auto& p : programs) {
unsigned targetWidth = p.absoluteWidth ? p.absoluteWidth : outputWidth;
unsigned targetHeight = p.absoluteHeight ? p.absoluteHeight : outputHeight;
if(p.relativeWidth) targetWidth = sourceWidth * p.relativeWidth;
if(p.relativeHeight) targetHeight = sourceHeight * p.relativeHeight;
p.size(targetWidth, targetHeight);
glUseProgram(p.program);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, p.framebuffer);
glrUniform1i("phase", p.phase);
glrUniform1i("sourceLength", history.size());
glrUniform1i("pixmapLength", p.pixmaps.size());
glrUniform4f("targetSize", targetWidth, targetHeight, 1.0 / targetWidth, 1.0 / targetHeight);
glrUniform4f("outputSize", outputWidth, outputHeight, 1.0 / outputWidth, 1.0 / outputHeight);
//glrUniform4f("targetActualSize", glrSize(targetWidth), glrSize(targetHeight), 1.0 / glrSize(targetWidth), 1.0 / glrSize(targetHeight));
//glrUniform4f("outputActualSize", glrSize(outputWidth), glrSize(outputHeight), 1.0 / glrSize(outputWidth), 1.0 / glrSize(outputHeight));
unsigned aid = 0;
for(auto& pixmap : history) {
glrUniform1i({"source[", aid, "]"}, aid);
glrUniform4f({"sourceSize[", aid, "]"}, pixmap.width, pixmap.height, 1.0 / pixmap.width, 1.0 / pixmap.height);
//glrUniform4f({"sourceActualSize[", aid, "]"}, glrSize(pixmap.width), glrSize(pixmap.height), 1.0 / glrSize(pixmap.width), 1.0 / glrSize(pixmap.height));
glActiveTexture(GL_TEXTURE0 + (aid++));
glBindTexture(GL_TEXTURE_2D, pixmap.texture);
glrParameters(pixmap.filter, pixmap.wrap);
}
unsigned bid = 0;
for(auto& pixmap : p.pixmaps) {
glrUniform1i({"pixmap[", bid, "]"}, aid + bid);
glrUniform4f({"pixmapSize[", bid, "]"}, pixmap.width, pixmap.height, 1.0 / pixmap.width, 1.0 / pixmap.height);
//glrUniform4f({"pixmapActualSize[", bid, "]"}, glrSize(pixmap.width), glrSize(pixmap.height), 1.0 / glrSize(pixmap.width), 1.0 / glrSize(pixmap.height));
glActiveTexture(GL_TEXTURE0 + aid + (bid++));
glBindTexture(GL_TEXTURE_2D, pixmap.texture);
glrParameters(pixmap.filter, pixmap.wrap);
}
glActiveTexture(GL_TEXTURE0);
glrParameters(p.filter, p.wrap);
p.render(sourceWidth, sourceHeight, targetWidth, targetHeight);
glBindTexture(GL_TEXTURE_2D, p.texture);
p.phase = (p.phase + 1) % p.modulo;
sourceWidth = p.width, sourceHeight = p.height;
history.prepend({p.texture, sourceWidth, sourceHeight, p.filter, p.wrap});
}
unsigned targetWidth = absoluteWidth ? absoluteWidth : outputWidth;
unsigned targetHeight = absoluteHeight ? absoluteHeight : outputHeight;
if(relativeWidth) targetWidth = sourceWidth * relativeWidth;
if(relativeHeight) targetHeight = sourceHeight * relativeHeight;
glUseProgram(program);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glrUniform1i("source[0]", 0);
glrUniform4f("targetSize", targetWidth, targetHeight, 1.0 / targetWidth, 1.0 / targetHeight);
glrUniform4f("outputSize", outputWidth, outputHeight, 1.0 / outputWidth, 1.0 / outputHeight);
glrParameters(filter, wrap);
render(sourceWidth, sourceHeight, outputWidth, outputHeight);
}
bool OpenGL::init() {
if(!OpenGLBind()) return false;
glDisable(GL_ALPHA_TEST);
glDisable(GL_BLEND);
glDisable(GL_DEPTH_TEST);
glDisable(GL_POLYGON_SMOOTH);
glDisable(GL_STENCIL_TEST);
glEnable(GL_DITHER);
glEnable(GL_TEXTURE_2D);
program = glCreateProgram();
vertex = glrCreateShader(program, GL_VERTEX_SHADER, OpenGLOutputVertexShader);
//geometry = glrCreateShader(program, GL_GEOMETRY_SHADER, OpenGLGeometryShader);
fragment = glrCreateShader(program, GL_FRAGMENT_SHADER, OpenGLFragmentShader);
OpenGLSurface::allocate();
glrLinkProgram(program);
shader(nullptr);
return true;
}
void OpenGL::term() {
OpenGLSurface::release();
if(buffer) { delete[] buffer; buffer = nullptr; }
}

82
ruby/video/opengl/opengl.hpp Normal file

@@ -0,0 +1,82 @@
#if defined(PLATFORM_X)
#include <GL/gl.h>
#include <GL/glx.h>
#define glGetProcAddress(name) (*glXGetProcAddress)((const GLubyte*)(name))
#elif defined(PLATFORM_OSX)
#include <OpenGL/gl.h>
#include <OpenGL/gl3.h>
#elif defined(PLATFORM_WIN)
#include <GL/gl.h>
#include <GL/glext.h>
#define glGetProcAddress(name) wglGetProcAddress(name)
#else
#error "ruby::OpenGL: unsupported platform"
#endif
namespace ruby {
#include "bind.hpp"
#include "shaders.hpp"
#include "utility.hpp"
struct OpenGL;
struct OpenGLTexture {
GLuint texture = 0;
unsigned width = 0;
unsigned height = 0;
GLuint format = GL_RGBA8;
GLuint filter = GL_LINEAR;
GLuint wrap = GL_CLAMP_TO_BORDER;
};
struct OpenGLSurface : OpenGLTexture {
GLuint program = 0;
GLuint framebuffer = 0;
GLuint vao = 0;
GLuint vbo[3] = {0, 0, 0};
GLuint vertex = 0;
GLuint geometry = 0;
GLuint fragment = 0;
uint32_t* buffer = nullptr;
void allocate();
void size(unsigned width, unsigned height);
void release();
void render(unsigned sourceWidth, unsigned sourceHeight, unsigned targetWidth, unsigned targetHeight);
};
struct OpenGLProgram : OpenGLSurface {
//configuration
unsigned phase = 0; //frame counter
unsigned modulo = 0; //frame counter modulus
unsigned absoluteWidth = 0;
unsigned absoluteHeight = 0;
double relativeWidth = 0;
double relativeHeight = 0;
vector<OpenGLTexture> pixmaps;
void bind(OpenGL* instance, const Markup::Node& node, const string& pathname);
void release();
};
struct OpenGL : OpenGLProgram {
vector<OpenGLProgram> programs;
GLuint inputFormat = GL_UNSIGNED_INT_8_8_8_8_REV;
unsigned outputWidth = 0;
unsigned outputHeight = 0;
void shader(const char* pathname);
bool lock(uint32_t*& data, unsigned& pitch);
void clear();
void refresh();
bool init();
void term();
};
#include "surface.hpp"
#include "program.hpp"
#include "main.hpp"
}

87
ruby/video/opengl/program.hpp Normal file

@@ -0,0 +1,87 @@
void OpenGLProgram::bind(OpenGL* instance, const Markup::Node& node, const string& pathname) {
filter = glrFilter(node["filter"].text());
wrap = glrWrap(node["wrap"].text());
modulo = glrModulo(node["modulo"].integer());
string w = node["width"].text(), h = node["height"].text();
if(w.endswith("%")) relativeWidth = real(w.rtrim<1>("%")) / 100.0;
else absoluteWidth = decimal(w);
if(h.endswith("%")) relativeHeight = real(h.rtrim<1>("%")) / 100.0;
else absoluteHeight = decimal(h);
if(node.name != "program") return;
format = glrFormat(node["format"].text());
program = glCreateProgram();
glGenFramebuffers(1, &framebuffer);
if(file::exists({pathname, node["vertex"].text()})) {
string source = file::read({pathname, node["vertex"].text()});
vertex = glrCreateShader(program, GL_VERTEX_SHADER, source);
} else {
vertex = glrCreateShader(program, GL_VERTEX_SHADER, OpenGLVertexShader);
}
if(file::exists({pathname, node["geometry"].text()})) {
string source = file::read({pathname, node["geometry"].text()});
geometry = glrCreateShader(program, GL_GEOMETRY_SHADER, source);
} else {
//geometry shaders, when attached, must pass all vertex output through to the fragment shaders
//geometry = glrCreateShader(program, GL_GEOMETRY_SHADER, OpenGLGeometryShader);
}
if(file::exists({pathname, node["fragment"].text()})) {
string source = file::read({pathname, node["fragment"].text()});
fragment = glrCreateShader(program, GL_FRAGMENT_SHADER, source);
} else {
fragment = glrCreateShader(program, GL_FRAGMENT_SHADER, OpenGLFragmentShader);
}
for(auto& leaf : node.find("pixmap")) {
nall::image image({pathname, leaf.text()});
image.transform(0, 32, 255u << 24, 255u << 16, 255u << 8, 255u << 0);
if(image.empty()) continue;
GLuint texture;
glGenTextures(1, &texture);
unsigned n = pixmaps.size();
pixmaps(n).texture = texture;
pixmaps(n).width = image.width;
pixmaps(n).height = image.height;
pixmaps(n).format = format;
pixmaps(n).filter = filter;
pixmaps(n).wrap = wrap;
if(leaf["format"].exists()) pixmaps(n).format = glrFormat(leaf["format"].text());
if(leaf["filter"].exists()) pixmaps(n).filter = glrFilter(leaf["filter"].text());
if(leaf["wrap"].exists()) pixmaps(n).wrap = glrWrap(leaf["wrap"].text());
unsigned w = glrSize(image.width), h = glrSize(image.height);
uint32_t* buffer = new uint32_t[w * h]();
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, pixmaps(n).format, w, h, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, buffer);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, image.width, image.height, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, image.data);
delete[] buffer;
}
OpenGLSurface::allocate();
glrLinkProgram(program);
}
void OpenGLProgram::release() {
OpenGLSurface::release();
for(auto& pixmap : pixmaps) glDeleteTextures(1, &pixmap.texture);
pixmaps.reset();
width = 0;
height = 0;
format = GL_RGBA8;
filter = GL_LINEAR;
wrap = GL_CLAMP_TO_BORDER;
phase = 0;
modulo = 0;
absoluteWidth = 0;
absoluteHeight = 0;
relativeWidth = 0;
relativeHeight = 0;
}

93
ruby/video/opengl/shaders.hpp Normal file

@@ -0,0 +1,93 @@
static string OpenGLOutputVertexShader = R"(
#version 150
uniform vec4 targetSize;
uniform vec4 outputSize;
in vec2 texCoord;
out Vertex {
vec4 position;
vec2 texCoord;
} vertexOut;
void main() {
//center image within output window
if(gl_VertexID == 0 || gl_VertexID == 2) {
gl_Position.x = -(targetSize.x / outputSize.x);
} else {
gl_Position.x = +(targetSize.x / outputSize.x);
}
//center and flip vertically (buffer[0, 0] = top-left; OpenGL[0, 0] = bottom-left)
if(gl_VertexID == 0 || gl_VertexID == 1) {
gl_Position.y = +(targetSize.y / outputSize.y);
} else {
gl_Position.y = -(targetSize.y / outputSize.y);
}
//align image to even pixel boundary to prevent aliasing
vec2 align = fract((outputSize.xy + targetSize.xy) / 2.0) * 2.0;
gl_Position.xy -= align / outputSize.xy;
gl_Position.zw = vec2(0.0, 1.0);
vertexOut.texCoord = texCoord;
}
)";
static string OpenGLVertexShader = R"(
#version 150
in vec4 position;
in vec2 texCoord;
out Vertex {
vec4 position;
vec2 texCoord;
} vertexOut;
void main() {
gl_Position = position;
vertexOut.texCoord = texCoord;
}
)";
static string OpenGLGeometryShader = R"(
#version 150
layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;
in Vertex {
vec2 texCoord;
} vertexIn[];
out Vertex {
vec2 texCoord;
};
void main() {
for(int i = 0; i < gl_in.length(); i++) {
gl_Position = gl_in[i].gl_Position;
texCoord = vertexIn[i].texCoord;
EmitVertex();
}
EndPrimitive();
}
)";
static string OpenGLFragmentShader = R"(
#version 150
uniform sampler2D source[];
in Vertex {
vec2 texCoord;
};
out vec4 fragColor;
void main() {
fragColor = texture(source[0], texCoord);
}
)";

114
ruby/video/opengl/surface.hpp Normal file

@@ -0,0 +1,114 @@
void OpenGLSurface::allocate() {
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(3, &vbo[0]);
}
void OpenGLSurface::size(unsigned w, unsigned h) {
if(width == w && height == h) return;
width = w, height = h;
w = glrSize(w), h = glrSize(h);
if(texture) { glDeleteTextures(1, &texture); texture = 0; }
if(buffer) { delete[] buffer; buffer = nullptr; }
buffer = new uint32_t[w * h]();
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, format, w, h, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, buffer);
if(framebuffer) {
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
delete[] buffer;
buffer = nullptr;
}
}
void OpenGLSurface::release() {
if(vbo[0]) { glDeleteBuffers(3, &vbo[0]); for(auto &o : vbo) o = 0; }
if(vao) { glDeleteVertexArrays(1, &vao); vao = 0; }
if(vertex) { glDetachShader(program, vertex); glDeleteShader(vertex); vertex = 0; }
if(geometry) { glDetachShader(program, geometry); glDeleteShader(geometry); geometry = 0; }
if(fragment) { glDetachShader(program, fragment); glDeleteShader(fragment); fragment = 0; }
if(texture) { glDeleteTextures(1, &texture); texture = 0; }
if(framebuffer) { glDeleteFramebuffers(1, &framebuffer); framebuffer = 0; }
if(program) { glDeleteProgram(program); program = 0; }
width = 0, height = 0;
}
void OpenGLSurface::render(unsigned sourceWidth, unsigned sourceHeight, unsigned targetWidth, unsigned targetHeight) {
glViewport(0, 0, targetWidth, targetHeight);
float w = (float)sourceWidth / (float)glrSize(sourceWidth);
float h = (float)sourceHeight / (float)glrSize(sourceHeight);
float u = (float)targetWidth, v = (float)targetHeight;
GLint location;
GLfloat modelView[] = {
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
};
GLfloat projection[] = {
2.0f/u, 0.0f, 0.0f, 0.0f,
0.0f, 2.0f/v, 0.0f, 0.0f,
0.0f, 0.0f, -1.0f, 0.0f,
-1.0f, -1.0f, 0.0f, 1.0f,
};
GLfloat modelViewProjection[4 * 4];
Matrix::Multiply(modelViewProjection, modelView, 4, 4, projection, 4, 4);
GLfloat vertices[] = {
0, 0, 0, 1,
u, 0, 0, 1,
0, v, 0, 1,
u, v, 0, 1,
};
GLfloat positions[4 * 4];
for(unsigned n = 0; n < 16; n += 4) {
Matrix::Multiply(&positions[n], &vertices[n], 1, 4, modelViewProjection, 4, 4);
}
GLfloat texCoords[] = {
0, 0,
w, 0,
0, h,
w, h,
};
glrUniformMatrix4fv("modelView", modelView);
glrUniformMatrix4fv("projection", projection);
glrUniformMatrix4fv("modelViewProjection", modelViewProjection);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo[0]);
glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(GLfloat), vertices, GL_STATIC_DRAW);
GLuint locationVertex = glGetAttribLocation(program, "vertex");
glEnableVertexAttribArray(locationVertex);
glVertexAttribPointer(locationVertex, 4, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, vbo[1]);
glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(GLfloat), positions, GL_STATIC_DRAW);
GLuint locationPosition = glGetAttribLocation(program, "position");
glEnableVertexAttribArray(locationPosition);
glVertexAttribPointer(locationPosition, 4, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, vbo[2]);
glBufferData(GL_ARRAY_BUFFER, 8 * sizeof(GLfloat), texCoords, GL_STATIC_DRAW);
GLuint locationTexCoord = glGetAttribLocation(program, "texCoord");
glEnableVertexAttribArray(locationTexCoord);
glVertexAttribPointer(locationTexCoord, 2, GL_FLOAT, GL_FALSE, 0, 0);
glBindFragDataLocation(program, 0, "fragColor");
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glDisableVertexAttribArray(locationVertex);
glDisableVertexAttribArray(locationPosition);
glDisableVertexAttribArray(locationTexCoord);
}

104
ruby/video/opengl/utility.hpp Normal file

@@ -0,0 +1,104 @@
static unsigned glrSize(unsigned size) {
return size;
//return bit::round(size); //return nearest power of two
}
static GLuint glrFormat(const string& format) {
if(format == "rgba8" ) return GL_RGBA8;
if(format == "rgb10a2") return GL_RGB10_A2;
if(format == "rgba12" ) return GL_RGBA12;
if(format == "rgba16" ) return GL_RGBA16;
if(format == "rgba16f") return GL_RGBA16F;
if(format == "rgba32f") return GL_RGBA32F;
return GL_RGBA8;
}
static GLuint glrFilter(const string& filter) {
if(filter == "nearest") return GL_NEAREST;
if(filter == "linear" ) return GL_LINEAR;
return GL_LINEAR;
}
static GLuint glrWrap(const string& wrap) {
if(wrap == "border") return GL_CLAMP_TO_BORDER;
if(wrap == "edge" ) return GL_CLAMP_TO_EDGE;
if(wrap == "repeat") return GL_REPEAT;
return GL_CLAMP_TO_BORDER;
}
static unsigned glrModulo(unsigned modulo) {
if(modulo) return modulo;
return 300; //divisible by 2, 3, 4, 5, 6, 10, 12, 15, 20, 25, 30, 50, 60, 100, 150
}
static GLuint glrProgram() {
GLuint program = 0;
glGetIntegerv(GL_CURRENT_PROGRAM, (GLint*)&program);
return program;
}
static void glrUniform1i(const string& name, GLint value) {
GLint location = glGetUniformLocation(glrProgram(), name);
glUniform1i(location, value);
}
static void glrUniform4f(const string& name, GLfloat value0, GLfloat value1, GLfloat value2, GLfloat value3) {
GLint location = glGetUniformLocation(glrProgram(), name);
glUniform4f(location, value0, value1, value2, value3);
}
static void glrUniformMatrix4fv(const string& name, GLfloat *values) {
GLint location = glGetUniformLocation(glrProgram(), name);
glUniformMatrix4fv(location, 1, GL_FALSE, values);
}
static void glrParameters(GLuint filter, GLuint wrap) {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, filter);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, filter);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, wrap);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, wrap);
}
static GLuint glrCreateShader(GLuint program, GLuint type, const char* source) {
GLuint shader = glCreateShader(type);
glShaderSource(shader, 1, &source, 0);
glCompileShader(shader);
GLint result = GL_FALSE;
glGetShaderiv(shader, GL_COMPILE_STATUS, &result);
if(result == GL_FALSE) {
GLint length = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &length);
char text[length + 1];
glGetShaderInfoLog(shader, length, &length, text);
text[length] = 0;
print("[ruby::OpenGL: shader compiler error]\n", (const char*)text, "\n\n");
return 0;
}
glAttachShader(program, shader);
return shader;
}
static void glrLinkProgram(GLuint program) {
glLinkProgram(program);
GLint result = GL_FALSE;
glGetProgramiv(program, GL_LINK_STATUS, &result);
if(result == GL_FALSE) {
GLint length = 0;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &length);
char text[length + 1];
glGetProgramInfoLog(program, length, &length, text);
text[length] = 0;
print("[ruby::OpenGL: shader linker error]\n", (const char*)text, "\n\n");
}
glValidateProgram(program);
result = GL_FALSE;
glGetProgramiv(program, GL_VALIDATE_STATUS, &result);
if(result == GL_FALSE) {
GLint length = 0;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &length);
char text[length + 1];
glGetProgramInfoLog(program, length, &length, text);
text[length] = 0;
print("[ruby::OpenGL: shader validation error]\n", (const char*)text, "\n\n");
}
}

141
ruby/video/sdl.cpp Normal file

@@ -0,0 +1,141 @@
#include <sys/ipc.h>
#include <sys/shm.h>
#include <X11/extensions/Xv.h>
#include <X11/extensions/Xvlib.h>
#include <X11/extensions/XShm.h>
#include <SDL/SDL.h>
namespace ruby {
struct pVideoSDL {
Display* display;
SDL_Surface* screen;
SDL_Surface* buffer;
unsigned iwidth, iheight;
struct {
uintptr_t handle;
unsigned width;
unsigned height;
} settings;
bool cap(const string& name) {
if(name == Video::Handle) return true;
return false;
}
any get(const string& name) {
if(name == Video::Handle) return settings.handle;
return false;
}
bool set(const string& name, const any& value) {
if(name == Video::Handle) {
settings.handle = any_cast<uintptr_t>(value);
return true;
}
return false;
}
void resize(unsigned width, unsigned height) {
if(iwidth >= width && iheight >= height) return;
iwidth = max(width, iwidth);
iheight = max(height, iheight);
if(buffer) SDL_FreeSurface(buffer);
buffer = SDL_CreateRGBSurface(
SDL_SWSURFACE, iwidth, iheight, 32,
0x00ff0000, 0x0000ff00, 0x000000ff, 0xff000000
);
}
bool lock(uint32_t*& data, unsigned& pitch, unsigned width, unsigned height) {
if(width != settings.width || height != settings.height) {
resize(settings.width = width, settings.height = height);
}
if(SDL_MUSTLOCK(buffer)) SDL_LockSurface(buffer);
pitch = buffer->pitch;
return data = (uint32_t*)buffer->pixels;
}
void unlock() {
if(SDL_MUSTLOCK(buffer)) SDL_UnlockSurface(buffer);
}
void clear() {
if(SDL_MUSTLOCK(buffer)) SDL_LockSurface(buffer);
for(unsigned y = 0; y < iheight; y++) {
uint32_t* data = (uint32_t*)buffer->pixels + y * (buffer->pitch >> 2);
for(unsigned x = 0; x < iwidth; x++) *data++ = 0xff000000;
}
if(SDL_MUSTLOCK(buffer)) SDL_UnlockSurface(buffer);
refresh();
}
void refresh() {
//ruby input is X8R8G8B8, top 8-bits are ignored.
//as SDL forces us to use a 32-bit buffer, we must set alpha to 255 (full opacity)
//to prevent blending against the window beneath when X window visual is 32-bits.
if(SDL_MUSTLOCK(buffer)) SDL_LockSurface(buffer);
for(unsigned y = 0; y < settings.height; y++) {
uint32_t *data = (uint32_t*)buffer->pixels + y * (buffer->pitch >> 2);
for(unsigned x = 0; x < settings.width; x++) *data++ |= 0xff000000;
}
if(SDL_MUSTLOCK(buffer)) SDL_UnlockSurface(buffer);
XWindowAttributes attributes;
XGetWindowAttributes(display, settings.handle, &attributes);
SDL_Rect src, dest;
src.x = 0;
src.y = 0;
src.w = settings.width;
src.h = settings.height;
dest.x = 0;
dest.y = 0;
dest.w = attributes.width;
dest.h = attributes.height;
SDL_SoftStretch(buffer, &src, screen, &dest);
SDL_UpdateRect(screen, dest.x, dest.y, dest.w, dest.h);
}
bool init() {
display = XOpenDisplay(0);
char env[512];
sprintf(env, "SDL_WINDOWID=%ld", (long int)settings.handle);
putenv(env);
SDL_InitSubSystem(SDL_INIT_VIDEO);
screen = SDL_SetVideoMode(2560, 1600, 32, SDL_HWSURFACE);
XUndefineCursor(display, settings.handle);
buffer = 0;
iwidth = 0;
iheight = 0;
resize(settings.width = 256, settings.height = 256);
return true;
}
void term() {
XCloseDisplay(display);
SDL_FreeSurface(buffer);
SDL_QuitSubSystem(SDL_INIT_VIDEO);
}
pVideoSDL() {
settings.handle = 0;
}
};
DeclareVideo(SDL)
};

160
ruby/video/wgl.cpp Normal file

@@ -0,0 +1,160 @@
#include "opengl/opengl.hpp"
#define WGL_CONTEXT_MAJOR_VERSION_ARB 0x2091
#define WGL_CONTEXT_MINOR_VERSION_ARB 0x2092
namespace ruby {
struct pVideoWGL : OpenGL {
HGLRC (APIENTRY* wglCreateContextAttribs)(HDC, HGLRC, const int*) = nullptr;
BOOL (APIENTRY* wglSwapInterval)(int) = nullptr;
HDC display;
HGLRC wglcontext;
HWND window;
HINSTANCE glwindow;
struct {
HWND handle;
bool synchronize;
unsigned filter;
string shader;
} settings;
bool cap(const string& name) {
if(name == Video::Handle) return true;
if(name == Video::Synchronize) return true;
if(name == Video::Filter) return true;
if(name == Video::Shader) return true;
return false;
}
any get(const string& name) {
if(name == Video::Handle) return (uintptr_t)settings.handle;
if(name == Video::Synchronize) return settings.synchronize;
if(name == Video::Filter) return settings.filter;
return false;
}
bool set(const string& name, const any& value) {
if(name == Video::Handle) {
settings.handle = (HWND)any_cast<uintptr_t>(value);
return true;
}
if(name == Video::Synchronize) {
if(settings.synchronize != any_cast<bool>(value)) {
settings.synchronize = any_cast<bool>(value);
if(wglcontext) {
init();
OpenGL::shader(settings.shader);
if(settings.shader.empty()) OpenGL::filter = settings.filter ? GL_LINEAR : GL_NEAREST;
}
}
return true;
}
if(name == Video::Filter) {
settings.filter = any_cast<unsigned>(value);
if(settings.shader.empty()) OpenGL::filter = settings.filter ? GL_LINEAR : GL_NEAREST;
return true;
}
if(name == Video::Shader) {
settings.shader = any_cast<const char*>(value);
OpenGL::shader(settings.shader);
if(settings.shader.empty()) OpenGL::filter = settings.filter ? GL_LINEAR : GL_NEAREST;
return true;
}
return false;
}
bool lock(uint32_t*& data, unsigned& pitch, unsigned width, unsigned height) {
OpenGL::size(width, height);
return OpenGL::lock(data, pitch);
}
void unlock() {
}
void clear() {
OpenGL::clear();
SwapBuffers(display);
}
void refresh() {
RECT rc;
GetClientRect(settings.handle, &rc);
outputWidth = rc.right - rc.left, outputHeight = rc.bottom - rc.top;
OpenGL::refresh();
SwapBuffers(display);
}
bool init() {
term();
GLuint pixel_format;
PIXELFORMATDESCRIPTOR pfd;
memset(&pfd, 0, sizeof(PIXELFORMATDESCRIPTOR));
pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR);
pfd.nVersion = 1;
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
display = GetDC(settings.handle);
pixel_format = ChoosePixelFormat(display, &pfd);
SetPixelFormat(display, pixel_format, &pfd);
wglcontext = wglCreateContext(display);
wglMakeCurrent(display, wglcontext);
wglCreateContextAttribs = (HGLRC (APIENTRY*)(HDC, HGLRC, const int*))glGetProcAddress("wglCreateContextAttribsARB");
wglSwapInterval = (BOOL (APIENTRY*)(int))glGetProcAddress("wglSwapIntervalEXT");
if(wglCreateContextAttribs) {
int attributes[] = {
WGL_CONTEXT_MAJOR_VERSION_ARB, 3,
WGL_CONTEXT_MINOR_VERSION_ARB, 2,
0
};
HGLRC context = wglCreateContextAttribs(display, 0, attributes);
if(context) {
wglMakeCurrent(NULL, NULL);
wglDeleteContext(wglcontext);
wglMakeCurrent(display, wglcontext = context);
}
}
if(wglSwapInterval) {
wglSwapInterval(settings.synchronize);
}
OpenGL::init();
return true;
}
void term() {
OpenGL::term();
if(wglcontext) {
wglDeleteContext(wglcontext);
wglcontext = 0;
}
}
pVideoWGL() {
settings.handle = 0;
settings.synchronize = false;
settings.filter = 0;
window = 0;
wglcontext = 0;
glwindow = 0;
}
~pVideoWGL() { term(); }
};
DeclareVideo(WGL)
};
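The init() above requests a 3.2 context through wglCreateContextAttribsARB but does not pass a profile mask, relying on the extension's default. As a sketch (not part of this commit), the profile can be spelled out explicitly; the two constants are taken from the WGL_ARB_create_context_profile extension specification and are not defined in wgl.cpp:

//constants from WGL_ARB_create_context_profile (assumption: extension is available)
#define WGL_CONTEXT_PROFILE_MASK_ARB     0x9126
#define WGL_CONTEXT_CORE_PROFILE_BIT_ARB 0x00000001

//hypothetical variant of the attribute list in init(), requesting a core profile explicitly
int coreAttributes[] = {
  WGL_CONTEXT_MAJOR_VERSION_ARB, 3,
  WGL_CONTEXT_MINOR_VERSION_ARB, 2,
  WGL_CONTEXT_PROFILE_MASK_ARB, WGL_CONTEXT_CORE_PROFILE_BIT_ARB,
  0
};
HGLRC coreContext = wglCreateContextAttribs(display, 0, coreAttributes);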

170
ruby/video/xshm.cpp Normal file

@@ -0,0 +1,170 @@
#include <sys/shm.h>
#include <X11/extensions/XShm.h>
namespace ruby {
struct pVideoXShm {
struct {
Display* display;
int screen;
Visual *visual;
int depth;
Window window;
XShmSegmentInfo shmInfo;
XImage* image;
uint32_t* buffer;
unsigned width, height;
} device;
struct {
uintptr_t handle;
uint32_t* buffer;
unsigned width, height;
} settings;
bool cap(const string& name) {
if(name == Video::Handle) return true;
return false;
}
any get(const string& name) {
if(name == Video::Handle) return settings.handle;
return false;
}
bool set(const string& name, const any& value) {
if(name == Video::Handle) {
settings.handle = any_cast<uintptr_t>(value);
return true;
}
return false;
}
bool lock(uint32_t*& data, unsigned& pitch, unsigned width, unsigned height) {
if(settings.buffer == nullptr || settings.width != width || settings.height != height) {
if(settings.buffer) delete[] settings.buffer;
settings.width = width, settings.height = height;
settings.buffer = new uint32_t[width * height]();
}
data = settings.buffer;
pitch = settings.width * sizeof(uint32_t);
return true;
}
void unlock() {
}
void clear() {
if(settings.buffer == nullptr) return;
memset(settings.buffer, 0, settings.width * settings.height * sizeof(uint32_t));
refresh();
}
void refresh() {
if(settings.buffer == nullptr) return;
size();
float xRatio = (float)settings.width / (float)device.width;
float yRatio = (float)settings.height / (float)device.height;
float yStep = 0;
for(unsigned y = 0; y < device.height; y++) {
uint32_t* sp = settings.buffer + (unsigned)yStep * settings.width;
uint32_t* dp = device.buffer + y * device.width;
yStep += yRatio;
float xStep = 0;
for(unsigned x = 0; x < device.width; x++) {
uint32_t color = sp[(unsigned)xStep];
xStep += xRatio;
*dp++ = ((color >> 20) & 0x000003ff) | ((color) & 0x000ffc00) | ((color << 20) & 0x3ff00000);
}
}
GC gc = XCreateGC(device.display, device.window, 0, 0);
XShmPutImage(
device.display, device.window, gc, device.image,
0, 0, 0, 0, device.width, device.height, False
);
XFreeGC(device.display, gc);
XFlush(device.display);
}
bool init() {
device.display = XOpenDisplay(0);
device.screen = DefaultScreen(device.display);
device.visual = DefaultVisual(device.display, device.screen);
device.depth = DefaultDepth(device.display, device.screen);
XSetWindowAttributes attributes;
attributes.border_pixel = 0;
device.window = XCreateWindow(device.display, (Window)settings.handle,
0, 0, 256, 256,
0, device.depth, InputOutput, device.visual,
CWBorderPixel, &attributes
);
XSetWindowBackground(device.display, device.window, 0);
XMapWindow(device.display, device.window);
XFlush(device.display);
while(XPending(device.display)) {
XEvent event;
XNextEvent(device.display, &event);
}
if(size() == false) return false;
return true;
}
void term() {
free();
}
pVideoXShm() {
device.buffer = nullptr;
settings.buffer = nullptr;
}
~pVideoXShm() {
term();
}
//internal:
bool size() {
XWindowAttributes windowAttributes;
XGetWindowAttributes(device.display, settings.handle, &windowAttributes);
if(device.buffer && device.width == windowAttributes.width && device.height == windowAttributes.height) return true;
device.width = windowAttributes.width, device.height = windowAttributes.height;
XResizeWindow(device.display, device.window, device.width, device.height);
free();
//create
device.shmInfo.shmid = shmget(IPC_PRIVATE, device.width * device.height * sizeof(uint32_t), IPC_CREAT | 0777);
if(device.shmInfo.shmid < 0) return false;
device.shmInfo.shmaddr = (char*)shmat(device.shmInfo.shmid, 0, 0);
device.shmInfo.readOnly = False;
XShmAttach(device.display, &device.shmInfo);
device.buffer = (uint32_t*)device.shmInfo.shmaddr;
device.image = XShmCreateImage(device.display, device.visual, device.depth,
ZPixmap, device.shmInfo.shmaddr, &device.shmInfo, device.width, device.height
);
return true;
}
void free() {
if(device.buffer == nullptr) return;
device.buffer = nullptr;
XShmDetach(device.display, &device.shmInfo);
XDestroyImage(device.image);
shmdt(device.shmInfo.shmaddr);
shmctl(device.shmInfo.shmid, IPC_RMID, 0);
}
};
DeclareVideo(XShm)
}
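The refresh() above stretches the source buffer to the window size with a nearest-neighbour loop and, per pixel, rearranges what it treats as three packed 10-bit fields. A worked example of that expression (my reading of the masks; the source does not name the fields):

//worked example of the per-pixel expression in pVideoXShm::refresh()
uint32_t color = (1u << 20) | (2u << 10) | 3u;   //packed fields: hi = 1, mid = 2, lo = 3
uint32_t pixel = ((color >> 20) & 0x000003ff)    //old hi field becomes the new lo field
               | ( color        & 0x000ffc00)    //mid field is unchanged
               | ((color << 20) & 0x3ff00000);   //old lo field becomes the new hi field
//pixel == (3u << 20) | (2u << 10) | 1u: the outer two fields are exchanged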

499
ruby/video/xv.cpp Normal file

@@ -0,0 +1,499 @@
#include <sys/ipc.h>
#include <sys/shm.h>
#include <X11/extensions/XShm.h>
#include <X11/extensions/Xv.h>
#include <X11/extensions/Xvlib.h>
extern "C" XvImage* XvShmCreateImage(Display*, XvPortID, int, char*, int, int, XShmSegmentInfo*);
namespace ruby {
struct pVideoXv {
uint32_t* buffer;
uint8_t* ytable;
uint8_t* utable;
uint8_t* vtable;
enum XvFormat {
XvFormatRGB32,
XvFormatRGB24,
XvFormatRGB16,
XvFormatRGB15,
XvFormatYUY2,
XvFormatUYVY,
XvFormatUnknown
};
struct {
Display* display;
GC gc;
Window window;
Colormap colormap;
XShmSegmentInfo shminfo;
int port;
int depth;
int visualid;
XvImage* image;
XvFormat format;
uint32_t fourcc;
unsigned width;
unsigned height;
} device;
struct {
Window handle;
bool synchronize;
unsigned width;
unsigned height;
} settings;
bool cap(const string& name) {
if(name == Video::Handle) return true;
if(name == Video::Synchronize) {
return XInternAtom(XOpenDisplay(0), "XV_SYNC_TO_VBLANK", true) != None;
}
return false;
}
any get(const string& name) {
if(name == Video::Handle) return settings.handle;
if(name == Video::Synchronize) return settings.synchronize;
return false;
}
bool set(const string& name, const any& value) {
if(name == Video::Handle) {
settings.handle = any_cast<uintptr_t>(value);
return true;
}
if(name == Video::Synchronize) {
Display* display = XOpenDisplay(0);
Atom atom = XInternAtom(display, "XV_SYNC_TO_VBLANK", true);
if(atom != None && device.port >= 0) {
settings.synchronize = any_cast<bool>(value);
XvSetPortAttribute(display, device.port, atom, settings.synchronize);
return true;
}
return false;
}
return false;
}
void resize(unsigned width, unsigned height) {
if(device.width >= width && device.height >= height) return;
device.width = max(width, device.width);
device.height = max(height, device.height);
XShmDetach(device.display, &device.shminfo);
shmdt(device.shminfo.shmaddr);
shmctl(device.shminfo.shmid, IPC_RMID, NULL);
XFree(device.image);
delete[] buffer;
device.image = XvShmCreateImage(device.display, device.port, device.fourcc, 0, device.width, device.height, &device.shminfo);
device.shminfo.shmid = shmget(IPC_PRIVATE, device.image->data_size, IPC_CREAT | 0777);
device.shminfo.shmaddr = device.image->data = (char*)shmat(device.shminfo.shmid, 0, 0);
device.shminfo.readOnly = false;
XShmAttach(device.display, &device.shminfo);
buffer = new uint32_t[device.width * device.height];
}
bool lock(uint32_t*& data, unsigned& pitch, unsigned width, unsigned height) {
if(width != settings.width || height != settings.height) {
resize(settings.width = width, settings.height = height);
}
pitch = device.width * 4;
return data = buffer;
}
void unlock() {
}
void clear() {
memset(buffer, 0, device.width * device.height * sizeof(uint32_t));
//clear twice in case video is double buffered ...
refresh();
refresh();
}
void refresh() {
unsigned width = settings.width;
unsigned height = settings.height;
XWindowAttributes target;
XGetWindowAttributes(device.display, device.window, &target);
//we must ensure that the child window is the same size as the parent window.
//unfortunately, we cannot hook the parent window resize event notification,
//as we did not create the parent window, nor have any knowledge of the toolkit used.
//therefore, query each window size and resize as needed.
XWindowAttributes parent;
XGetWindowAttributes(device.display, settings.handle, &parent);
if(target.width != parent.width || target.height != parent.height) {
XResizeWindow(device.display, device.window, parent.width, parent.height);
}
//update target width and height attributes
XGetWindowAttributes(device.display, device.window, &target);
switch(device.format) {
case XvFormatRGB32: render_rgb32(width, height); break;
case XvFormatRGB24: render_rgb24(width, height); break;
case XvFormatRGB16: render_rgb16(width, height); break;
case XvFormatRGB15: render_rgb15(width, height); break;
case XvFormatYUY2: render_yuy2 (width, height); break;
case XvFormatUYVY: render_uyvy (width, height); break;
}
XvShmPutImage(device.display, device.port, device.window, device.gc, device.image,
0, 0, width, height,
0, 0, target.width, target.height,
true);
}
bool init() {
device.display = XOpenDisplay(0);
if(!XShmQueryExtension(device.display)) {
fprintf(stderr, "VideoXv: XShm extension not found.\n");
return false;
}
//find an appropriate Xv port
device.port = -1;
XvAdaptorInfo* adaptor_info;
unsigned adaptor_count;
XvQueryAdaptors(device.display, DefaultRootWindow(device.display), &adaptor_count, &adaptor_info);
for(unsigned i = 0; i < adaptor_count; i++) {
//find adaptor that supports both input (memory->drawable) and image (drawable->screen) masks
if(adaptor_info[i].num_formats < 1) continue;
if(!(adaptor_info[i].type & XvInputMask)) continue;
if(!(adaptor_info[i].type & XvImageMask)) continue;
device.port = adaptor_info[i].base_id;
device.depth = adaptor_info[i].formats->depth;
device.visualid = adaptor_info[i].formats->visual_id;
break;
}
XvFreeAdaptorInfo(adaptor_info);
if(device.port < 0) {
fprintf(stderr, "VideoXv: failed to find valid XvPort.\n");
return false;
}
//create child window to attach to parent window.
//this is so that even if parent window visual depth doesn't match Xv visual
//(common with composited windows), Xv can still render to child window.
XWindowAttributes window_attributes;
XGetWindowAttributes(device.display, settings.handle, &window_attributes);
XVisualInfo visualtemplate;
visualtemplate.visualid = device.visualid;
visualtemplate.screen = DefaultScreen(device.display);
visualtemplate.depth = device.depth;
visualtemplate.visual = 0;
int visualmatches = 0;
XVisualInfo *visualinfo = XGetVisualInfo(device.display, VisualIDMask | VisualScreenMask | VisualDepthMask, &visualtemplate, &visualmatches);
if(visualmatches < 1 || !visualinfo->visual) {
if(visualinfo) XFree(visualinfo);
fprintf(stderr, "VideoXv: unable to find Xv-compatible visual.\n");
return false;
}
device.colormap = XCreateColormap(device.display, settings.handle, visualinfo->visual, AllocNone);
XSetWindowAttributes attributes;
attributes.colormap = device.colormap;
attributes.border_pixel = 0;
attributes.event_mask = StructureNotifyMask;
device.window = XCreateWindow(device.display, /* parent = */ settings.handle,
/* x = */ 0, /* y = */ 0, window_attributes.width, window_attributes.height,
/* border_width = */ 0, device.depth, InputOutput, visualinfo->visual,
CWColormap | CWBorderPixel | CWEventMask, &attributes);
XFree(visualinfo);
XSetWindowBackground(device.display, device.window, /* color = */ 0);
XMapWindow(device.display, device.window);
device.gc = XCreateGC(device.display, device.window, 0, 0);
//set colorkey to auto paint, so that Xv video output is always visible
Atom atom = XInternAtom(device.display, "XV_AUTOPAINT_COLORKEY", true);
if(atom != None) XvSetPortAttribute(device.display, device.port, atom, 1);
//find optimal rendering format
device.format = XvFormatUnknown;
signed format_count;
XvImageFormatValues* format = XvListImageFormats(device.display, device.port, &format_count);
if(device.format == XvFormatUnknown) for(signed i = 0; i < format_count; i++) {
if(format[i].type == XvRGB && format[i].bits_per_pixel == 32) {
device.format = XvFormatRGB32;
device.fourcc = format[i].id;
break;
}
}
if(device.format == XvFormatUnknown) for(signed i = 0; i < format_count; i++) {
if(format[i].type == XvRGB && format[i].bits_per_pixel == 24) {
device.format = XvFormatRGB24;
device.fourcc = format[i].id;
break;
}
}
if(device.format == XvFormatUnknown) for(signed i = 0; i < format_count; i++) {
if(format[i].type == XvRGB && format[i].bits_per_pixel <= 16 && format[i].red_mask == 0xf800) {
device.format = XvFormatRGB16;
device.fourcc = format[i].id;
break;
}
}
if(device.format == XvFormatUnknown) for(signed i = 0; i < format_count; i++) {
if(format[i].type == XvRGB && format[i].bits_per_pixel <= 16 && format[i].red_mask == 0x7c00) {
device.format = XvFormatRGB15;
device.fourcc = format[i].id;
break;
}
}
if(device.format == XvFormatUnknown) for(signed i = 0; i < format_count; i++) {
if(format[i].type == XvYUV && format[i].bits_per_pixel == 16 && format[i].format == XvPacked) {
if(format[i].component_order[0] == 'Y' && format[i].component_order[1] == 'U'
&& format[i].component_order[2] == 'Y' && format[i].component_order[3] == 'V'
) {
device.format = XvFormatYUY2;
device.fourcc = format[i].id;
break;
}
}
}
if(device.format == XvFormatUnknown) for(signed i = 0; i < format_count; i++) {
if(format[i].type == XvYUV && format[i].bits_per_pixel == 16 && format[i].format == XvPacked) {
if(format[i].component_order[0] == 'U' && format[i].component_order[1] == 'Y'
&& format[i].component_order[2] == 'V' && format[i].component_order[3] == 'Y'
) {
device.format = XvFormatUYVY;
device.fourcc = format[i].id;
break;
}
}
}
free(format);
if(device.format == XvFormatUnknown) {
fprintf(stderr, "VideoXv: unable to find a supported image format.\n");
return false;
}
device.width = 256;
device.height = 256;
device.image = XvShmCreateImage(device.display, device.port, device.fourcc, 0, device.width, device.height, &device.shminfo);
if(!device.image) {
fprintf(stderr, "VideoXv: XShmCreateImage failed.\n");
return false;
}
device.shminfo.shmid = shmget(IPC_PRIVATE, device.image->data_size, IPC_CREAT | 0777);
device.shminfo.shmaddr = device.image->data = (char*)shmat(device.shminfo.shmid, 0, 0);
device.shminfo.readOnly = false;
if(!XShmAttach(device.display, &device.shminfo)) {
fprintf(stderr, "VideoXv: XShmAttach failed.\n");
return false;
}
buffer = new uint32_t[device.width * device.height];
settings.width = 256;
settings.height = 256;
init_yuv_tables();
clear();
return true;
}
void term() {
XShmDetach(device.display, &device.shminfo);
shmdt(device.shminfo.shmaddr);
shmctl(device.shminfo.shmid, IPC_RMID, NULL);
XFree(device.image);
if(device.window) {
XUnmapWindow(device.display, device.window);
device.window = 0;
}
if(device.colormap) {
XFreeColormap(device.display, device.colormap);
device.colormap = 0;
}
if(buffer) { delete[] buffer; buffer = 0; }
if(ytable) { delete[] ytable; ytable = 0; }
if(utable) { delete[] utable; utable = 0; }
if(vtable) { delete[] vtable; vtable = 0; }
}
void render_rgb32(unsigned width, unsigned height) {
uint32_t* input = (uint32_t*)buffer;
uint32_t* output = (uint32_t*)device.image->data;
for(unsigned y = 0; y < height; y++) {
memcpy(output, input, width * 4);
input += device.width;
output += device.width;
}
}
void render_rgb24(unsigned width, unsigned height) {
uint32_t* input = (uint32_t*)buffer;
uint8_t* output = (uint8_t*)device.image->data;
for(unsigned y = 0; y < height; y++) {
for(unsigned x = 0; x < width; x++) {
uint32_t p = *input++;
*output++ = p;
*output++ = p >> 8;
*output++ = p >> 16;
}
input += (device.width - width);
output += (device.width - width) * 3;
}
}
void render_rgb16(unsigned width, unsigned height) {
uint32_t* input = (uint32_t*)buffer;
uint16_t* output = (uint16_t*)device.image->data;
for(unsigned y = 0; y < height; y++) {
for(unsigned x = 0; x < width; x++) {
uint32_t p = *input++;
*output++ = ((p >> 8) & 0xf800) | ((p >> 5) & 0x07e0) | ((p >> 3) & 0x001f); //RGB32->RGB16
}
input += device.width - width;
output += device.width - width;
}
}
void render_rgb15(unsigned width, unsigned height) {
uint32_t* input = (uint32_t*)buffer;
uint16_t* output = (uint16_t*)device.image->data;
for(unsigned y = 0; y < height; y++) {
for(unsigned x = 0; x < width; x++) {
uint32_t p = *input++;
*output++ = ((p >> 9) & 0x7c00) | ((p >> 6) & 0x03e0) | ((p >> 3) & 0x001f); //RGB32->RGB15
}
input += device.width - width;
output += device.width - width;
}
}
void render_yuy2(unsigned width, unsigned height) {
uint32_t* input = (uint32_t*)buffer;
uint16_t* output = (uint16_t*)device.image->data;
for(unsigned y = 0; y < height; y++) {
for(unsigned x = 0; x < width >> 1; x++) {
uint32_t p0 = *input++;
uint32_t p1 = *input++;
p0 = ((p0 >> 8) & 0xf800) + ((p0 >> 5) & 0x07e0) + ((p0 >> 3) & 0x001f); //RGB32->RGB16
p1 = ((p1 >> 8) & 0xf800) + ((p1 >> 5) & 0x07e0) + ((p1 >> 3) & 0x001f); //RGB32->RGB16
uint8_t u = (utable[p0] + utable[p1]) >> 1;
uint8_t v = (vtable[p0] + vtable[p1]) >> 1;
*output++ = (u << 8) | ytable[p0];
*output++ = (v << 8) | ytable[p1];
}
input += device.width - width;
output += device.width - width;
}
}
void render_uyvy(unsigned width, unsigned height) {
uint32_t* input = (uint32_t*)buffer;
uint16_t* output = (uint16_t*)device.image->data;
for(unsigned y = 0; y < height; y++) {
for(unsigned x = 0; x < width >> 1; x++) {
uint32_t p0 = *input++;
uint32_t p1 = *input++;
p0 = ((p0 >> 8) & 0xf800) + ((p0 >> 5) & 0x07e0) + ((p0 >> 3) & 0x001f);
p1 = ((p1 >> 8) & 0xf800) + ((p1 >> 5) & 0x07e0) + ((p1 >> 3) & 0x001f);
uint8_t u = (utable[p0] + utable[p1]) >> 1;
uint8_t v = (vtable[p0] + vtable[p1]) >> 1;
*output++ = (ytable[p0] << 8) | u;
*output++ = (ytable[p1] << 8) | v;
}
input += device.width - width;
output += device.width - width;
}
}
void init_yuv_tables() {
ytable = new uint8_t[65536];
utable = new uint8_t[65536];
vtable = new uint8_t[65536];
for(unsigned i = 0; i < 65536; i++) {
//extract RGB565 color data from i
uint8_t r = (i >> 11) & 31, g = (i >> 5) & 63, b = (i) & 31;
r = (r << 3) | (r >> 2); //R5->R8
g = (g << 2) | (g >> 4); //G6->G8
b = (b << 3) | (b >> 2); //B5->B8
//ITU-R Recommendation BT.601
//double lr = 0.299, lg = 0.587, lb = 0.114;
int y = int( +(double(r) * 0.257) + (double(g) * 0.504) + (double(b) * 0.098) + 16.0 );
int u = int( -(double(r) * 0.148) - (double(g) * 0.291) + (double(b) * 0.439) + 128.0 );
int v = int( +(double(r) * 0.439) - (double(g) * 0.368) - (double(b) * 0.071) + 128.0 );
//ITU-R Recommendation BT.709
//double lr = 0.2126, lg = 0.7152, lb = 0.0722;
//int y = int( double(r) * lr + double(g) * lg + double(b) * lb );
//int u = int( (double(b) - y) / (2.0 - 2.0 * lb) + 128.0 );
//int v = int( (double(r) - y) / (2.0 - 2.0 * lr) + 128.0 );
ytable[i] = y < 0 ? 0 : y > 255 ? 255 : y;
utable[i] = u < 0 ? 0 : u > 255 ? 255 : u;
vtable[i] = v < 0 ? 0 : v > 255 ? 255 : v;
}
}
pVideoXv() {
device.window = 0;
device.colormap = 0;
device.port = -1;
ytable = 0;
utable = 0;
vtable = 0;
settings.handle = 0;
settings.synchronize = false;
}
~pVideoXv() {
term();
}
};
DeclareVideo(Xv)
};
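For reference, a worked example of the studio-range BT.601 mapping that init_yuv_tables() above precomputes, evaluated for the RGB565 value 0xffff (pure white, which expands to r = g = b = 255):

y = 0.257*255 + 0.504*255 + 0.098*255 + 16   = 235.045  ->  ytable[0xffff] == 235
u = -0.148*255 - 0.291*255 + 0.439*255 + 128 = 128.0    ->  utable[0xffff] == 128
v = 0.439*255 - 0.368*255 - 0.071*255 + 128  = 128.0    ->  vtable[0xffff] == 128

Full white therefore lands at the top of the 16-235 luma range with neutral (128) chroma, matching the clamps at the end of the table-building loop.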