nanovega: some multithreading guards; also, it should be safe to finalize NVGImage anytime and in any thread now

This commit is contained in:
Ketmar Dark 2018-03-19 09:14:45 +02:00 committed by Adam D. Ruppe
parent a055c35e9a
commit 3a63dbebcc
1 changed file with 95 additions and 17 deletions

View File

@@ -149,6 +149,13 @@ The drawing context is created using platform specific constructor function.
NVGContext vg = nvgCreateContext(); NVGContext vg = nvgCreateContext();
--- ---
$(WARNING You must use created context ONLY in that thread where you created it.
There is no way to "transfer" context between threads. Trying to do so
will lead to UB.)
$(WARNING Never issue any commands outside of [beginFrame]/[endFrame]. Trying to
do so will lead to UB.)
Drawing shapes with NanoVega Drawing shapes with NanoVega
============================ ============================
@@ -1392,7 +1399,7 @@ struct NVGparams {
bool function (void* uptr) nothrow @trusted @nogc renderCreate; bool function (void* uptr) nothrow @trusted @nogc renderCreate;
int function (void* uptr, NVGtexture type, int w, int h, int imageFlags, const(ubyte)* data) nothrow @trusted @nogc renderCreateTexture; int function (void* uptr, NVGtexture type, int w, int h, int imageFlags, const(ubyte)* data) nothrow @trusted @nogc renderCreateTexture;
bool function (void* uptr, int image) nothrow @trusted @nogc renderTextureIncRef; bool function (void* uptr, int image) nothrow @trusted @nogc renderTextureIncRef;
bool function (void* uptr, int image) nothrow @trusted @nogc renderDeleteTexture; bool function (void* uptr, int image) nothrow @trusted @nogc renderDeleteTexture; // this basically does decref; also, it should be thread-safe, and postpone real deletion to next `renderViewport()` call
bool function (void* uptr, int image, int x, int y, int w, int h, const(ubyte)* data) nothrow @trusted @nogc renderUpdateTexture; bool function (void* uptr, int image, int x, int y, int w, int h, const(ubyte)* data) nothrow @trusted @nogc renderUpdateTexture;
bool function (void* uptr, int image, int* w, int* h) nothrow @trusted @nogc renderGetTextureSize; bool function (void* uptr, int image, int* w, int* h) nothrow @trusted @nogc renderGetTextureSize;
void function (void* uptr, int width, int height) nothrow @trusted @nogc renderViewport; // called in [beginFrame] void function (void* uptr, int width, int height) nothrow @trusted @nogc renderViewport; // called in [beginFrame]
@@ -1647,7 +1654,7 @@ private:
NVGMatrix gpuAffine; NVGMatrix gpuAffine;
int mWidth, mHeight; int mWidth, mHeight;
// image manager // image manager
int imageCount; // number of alive images in this context shared int imageCount; // number of alive images in this context
bool contextAlive; // context can be dead, but still contain some images bool contextAlive; // context can be dead, but still contain some images
@disable this (this); // no copies @disable this (this); // no copies
@@ -1708,7 +1715,8 @@ public bool renderPathComplex (NVGContext ctx, int pathidx) pure nothrow @truste
void nvg__imageIncRef (NVGContext ctx, int imgid, bool increfInGL=true) nothrow @trusted @nogc { void nvg__imageIncRef (NVGContext ctx, int imgid, bool increfInGL=true) nothrow @trusted @nogc {
if (ctx !is null && imgid > 0) { if (ctx !is null && imgid > 0) {
++ctx.imageCount; import core.atomic : atomicOp;
atomicOp!"+="(ctx.imageCount, 1);
version(nanovega_debug_image_manager_rc) { import core.stdc.stdio; printf("image[++]ref: context %p: %d image refs (%d)\n", ctx, ctx.imageCount, imgid); } version(nanovega_debug_image_manager_rc) { import core.stdc.stdio; printf("image[++]ref: context %p: %d image refs (%d)\n", ctx, ctx.imageCount, imgid); }
if (ctx.contextAlive && increfInGL) ctx.params.renderTextureIncRef(ctx.params.userPtr, imgid); if (ctx.contextAlive && increfInGL) ctx.params.renderTextureIncRef(ctx.params.userPtr, imgid);
} }
@@ -1716,12 +1724,13 @@ void nvg__imageIncRef (NVGContext ctx, int imgid, bool increfInGL=true) nothrow
void nvg__imageDecRef (NVGContext ctx, int imgid) nothrow @trusted @nogc { void nvg__imageDecRef (NVGContext ctx, int imgid) nothrow @trusted @nogc {
if (ctx !is null && imgid > 0) { if (ctx !is null && imgid > 0) {
assert(ctx.imageCount > 0); import core.atomic : atomicOp;
--ctx.imageCount; int icnt = atomicOp!"-="(ctx.imageCount, 1);
if (icnt < 0) assert(0, "NanoVega: internal image refcounting error");
version(nanovega_debug_image_manager_rc) { import core.stdc.stdio; printf("image[--]ref: context %p: %d image refs (%d)\n", ctx, ctx.imageCount, imgid); } version(nanovega_debug_image_manager_rc) { import core.stdc.stdio; printf("image[--]ref: context %p: %d image refs (%d)\n", ctx, ctx.imageCount, imgid); }
if (ctx.contextAlive) ctx.params.renderDeleteTexture(ctx.params.userPtr, imgid); if (ctx.contextAlive) ctx.params.renderDeleteTexture(ctx.params.userPtr, imgid);
version(nanovega_debug_image_manager) if (!ctx.contextAlive) { import core.stdc.stdio; printf("image[--]ref: zombie context %p: %d image refs (%d)\n", ctx, ctx.imageCount, imgid); } version(nanovega_debug_image_manager) if (!ctx.contextAlive) { import core.stdc.stdio; printf("image[--]ref: zombie context %p: %d image refs (%d)\n", ctx, ctx.imageCount, imgid); }
if (!ctx.contextAlive && ctx.imageCount == 0) { if (!ctx.contextAlive && icnt == 0) {
// it is finally safe to free context memory // it is finally safe to free context memory
import core.stdc.stdlib : free; import core.stdc.stdlib : free;
version(nanovega_debug_image_manager) { import core.stdc.stdio; printf("killed zombie context %p\n", ctx); } version(nanovega_debug_image_manager) { import core.stdc.stdio; printf("killed zombie context %p\n", ctx); }
@@ -1927,7 +1936,8 @@ void deleteInternal (ref NVGContext ctx) nothrow @trusted @nogc {
ctx.contextAlive = false; ctx.contextAlive = false;
if (ctx.imageCount == 0) { import core.atomic : atomicLoad;
if (atomicLoad(ctx.imageCount) == 0) {
version(nanovega_debug_image_manager) { import core.stdc.stdio; printf("destroyed context %p\n", ctx); } version(nanovega_debug_image_manager) { import core.stdc.stdio; printf("destroyed context %p\n", ctx); }
free(ctx); free(ctx);
} else { } else {
@@ -11848,7 +11858,7 @@ struct GLNVGtexture {
int width, height; int width, height;
NVGtexture type; NVGtexture type;
int flags; int flags;
int rc; shared int rc; // this can be 0 with tex != 0 -- postponed deletion
int nextfree; int nextfree;
} }
@@ -11934,7 +11944,11 @@ enum GLMaskState {
JustCleared = 2, JustCleared = 2,
} }
final class GLNVGTextureLocker {}
struct GLNVGcontext { struct GLNVGcontext {
private import core.thread : ThreadID;
GLNVGshader shader; GLNVGshader shader;
GLNVGtexture* textures; GLNVGtexture* textures;
float[2] view; float[2] view;
@@ -11956,6 +11970,10 @@ struct GLNVGcontext {
GLNVGshader shaderFillFBO; GLNVGshader shaderFillFBO;
GLNVGshader shaderCopyFBO; GLNVGshader shaderCopyFBO;
bool inFrame; // will be `true` if we can perform OpenGL operations (used in texture deletion)
shared bool mustCleanTextures; // will be `true` if we should delete some textures
ThreadID mainTID;
// Per frame buffers // Per frame buffers
GLNVGcall* calls; GLNVGcall* calls;
int ccalls; int ccalls;
@@ -12071,13 +12089,36 @@ bool glnvg__deleteTexture (GLNVGcontext* gl, ref int id) nothrow @trusted @nogc
assert(tx.id == id); assert(tx.id == id);
assert(tx.tex != 0); assert(tx.tex != 0);
version(nanovega_debug_textures) {{ import core.stdc.stdio; printf("decrefing texture with id %d (%d)\n", tx.id, id); }} version(nanovega_debug_textures) {{ import core.stdc.stdio; printf("decrefing texture with id %d (%d)\n", tx.id, id); }}
if (--tx.rc == 0) { import core.atomic : atomicOp;
if (atomicOp!"-="(tx.rc, 1) == 0) {
import core.thread : ThreadID;
ThreadID mytid;
try { import core.thread; mytid = Thread.getThis.id; } catch (Exception e) {}
if (gl.mainTID == mytid && gl.inFrame) {
// can delete it right now
if ((tx.flags&NVGImageFlagsGL.NoDelete) == 0) glDeleteTextures(1, &tx.tex); if ((tx.flags&NVGImageFlagsGL.NoDelete) == 0) glDeleteTextures(1, &tx.tex);
version(nanovega_debug_textures) {{ import core.stdc.stdio; printf("deleted texture with id %d (%d); glid=%u\n", tx.id, id, tx.tex); }} version(nanovega_debug_textures) {{ import core.stdc.stdio; printf("*** deleted texture with id %d (%d); glid=%u\n", tx.id, id, tx.tex); }}
memset(tx, 0, (*tx).sizeof); memset(tx, 0, (*tx).sizeof);
//{ import core.stdc.stdio; printf("deleting texture with id %d\n", id); } //{ import core.stdc.stdio; printf("deleting texture with id %d\n", id); }
tx.nextfree = gl.freetexid; tx.nextfree = gl.freetexid;
gl.freetexid = id-1; gl.freetexid = id-1;
} else {
// alas, we aren't doing frame business, so we should postpone deletion
version(nanovega_debug_textures) {{ import core.stdc.stdio; printf("*** POSTPONED texture deletion with id %d (%d); glid=%u\n", tx.id, id, tx.tex); }}
version(aliced) {
synchronized(GLNVGTextureLocker.classinfo) {
tx.id = 0; // mark it as dead
gl.mustCleanTextures = true; // set "need cleanup" flag
}
} else {
try {
synchronized(GLNVGTextureLocker.classinfo) {
tx.id = 0; // mark it as dead
gl.mustCleanTextures = true; // set "need cleanup" flag
}
} catch (Exception e) {}
}
}
} }
id = 0; id = 0;
return true; return true;
@@ -12742,7 +12783,8 @@ bool glnvg__renderTextureIncRef (void* uptr, int image) nothrow @trusted @nogc {
version(nanovega_debug_textures) {{ import core.stdc.stdio; printf("CANNOT incref texture with id %d\n", image); }} version(nanovega_debug_textures) {{ import core.stdc.stdio; printf("CANNOT incref texture with id %d\n", image); }}
return false; return false;
} }
++tex.rc; import core.atomic : atomicOp;
atomicOp!"+="(tex.rc, 1);
version(nanovega_debug_textures) {{ import core.stdc.stdio; printf("texture #%d: incref; newref=%d\n", image, tex.rc); }} version(nanovega_debug_textures) {{ import core.stdc.stdio; printf("texture #%d: incref; newref=%d\n", image, tex.rc); }}
return true; return true;
} }
@@ -13010,6 +13052,7 @@ void glnvg__setClipUniforms (GLNVGcontext* gl, int uniformOffset, NVGClipMode cl
void glnvg__renderViewport (void* uptr, int width, int height) nothrow @trusted @nogc { void glnvg__renderViewport (void* uptr, int width, int height) nothrow @trusted @nogc {
GLNVGcontext* gl = cast(GLNVGcontext*)uptr; GLNVGcontext* gl = cast(GLNVGcontext*)uptr;
gl.inFrame = true;
gl.view.ptr[0] = cast(float)width; gl.view.ptr[0] = cast(float)width;
gl.view.ptr[1] = cast(float)height; gl.view.ptr[1] = cast(float)height;
// kill FBOs if we need to create new ones (flushing will recreate 'em if necessary) // kill FBOs if we need to create new ones (flushing will recreate 'em if necessary)
@@ -13020,6 +13063,28 @@ void glnvg__renderViewport (void* uptr, int width, int height) nothrow @trusted
} }
gl.msp = 1; gl.msp = 1;
gl.maskStack.ptr[0] = GLMaskState.DontMask; gl.maskStack.ptr[0] = GLMaskState.DontMask;
// texture cleanup
import core.atomic : atomicLoad;
if (atomicLoad(gl.mustCleanTextures)) {
try {
import core.thread : Thread;
if (gl.mainTID != Thread.getThis.id) assert(0, "NanoVega: cannot use context in alien thread");
synchronized(GLNVGTextureLocker.classinfo) {
gl.mustCleanTextures = false;
foreach (immutable tidx, ref GLNVGtexture tex; gl.textures[0..gl.ntextures]) {
// no need to use atomic ops here, as we're locked
if (tex.rc == 0 && tex.tex != 0 && tex.id == 0) {
version(nanovega_debug_textures) {{ import core.stdc.stdio; printf("*** cleaned up texture with glid=%u\n", tex.tex); }}
import core.stdc.string : memset;
if ((tex.flags&NVGImageFlagsGL.NoDelete) == 0) glDeleteTextures(1, &tex.tex);
memset(&tex, 0, tex.sizeof);
tex.nextfree = gl.freetexid;
gl.freetexid = cast(int)tidx;
}
}
}
} catch (Exception e) {}
}
} }
void glnvg__fill (GLNVGcontext* gl, GLNVGcall* call) nothrow @trusted @nogc { void glnvg__fill (GLNVGcontext* gl, GLNVGcall* call) nothrow @trusted @nogc {
@@ -13182,7 +13247,12 @@ void glnvg__affine (GLNVGcontext* gl, GLNVGcall* call) nothrow @trusted @nogc {
} }
void glnvg__renderCancelInternal (GLNVGcontext* gl, bool clearTextures) nothrow @trusted @nogc { void glnvg__renderCancelInternal (GLNVGcontext* gl, bool clearTextures) nothrow @trusted @nogc {
if (clearTextures) { scope(exit) gl.inFrame = false;
if (clearTextures && gl.inFrame) {
try {
import core.thread : Thread;
if (gl.mainTID != Thread.getThis.id) assert(0, "NanoVega: cannot use context in alien thread");
} catch (Exception e) {}
foreach (ref GLNVGcall c; gl.calls[0..gl.ncalls]) if (c.image > 0) glnvg__deleteTexture(gl, c.image); foreach (ref GLNVGcall c; gl.calls[0..gl.ncalls]) if (c.image > 0) glnvg__deleteTexture(gl, c.image);
} }
gl.nverts = 0; gl.nverts = 0;
@@ -13277,6 +13347,12 @@ version(nanovega_debug_clipping) public __gshared bool nanovegaClipDebugDump = f
void glnvg__renderFlush (void* uptr) nothrow @trusted @nogc { void glnvg__renderFlush (void* uptr) nothrow @trusted @nogc {
GLNVGcontext* gl = cast(GLNVGcontext*)uptr; GLNVGcontext* gl = cast(GLNVGcontext*)uptr;
if (!gl.inFrame) assert(0, "NanoVega: internal driver error");
try {
import core.thread : Thread;
if (gl.mainTID != Thread.getThis.id) assert(0, "NanoVega: cannot use context in alien thread");
} catch (Exception e) {}
scope(exit) gl.inFrame = false;
enum ShaderType { None, Fill, Clip } enum ShaderType { None, Fill, Clip }
auto lastShader = ShaderType.None; auto lastShader = ShaderType.None;
if (gl.ncalls > 0) { if (gl.ncalls > 0) {
@@ -13798,6 +13874,8 @@ public NVGContext nvgCreateContext (const(NVGContextFlag)[] flagList...) nothrow
ctx = createInternal(&params); ctx = createInternal(&params);
if (ctx is null) goto error; if (ctx is null) goto error;
try { import core.thread; gl.mainTID = Thread.getThis.id; } catch (Exception e) {}
return ctx; return ctx;
error: error: