-updated tests:

*updated build scripts
 *removed tls variables from code (needed to support WebAssembly)
 *some mmutils tweaks
 *some fixes
 *pthread TLS thread ID implementation
-added Atomic file (reimplementation of atomics templates for emscripten)
-added emscripten support to ecs.std
This commit is contained in:
Mergul 2019-11-25 20:06:16 +00:00
parent 46de0f6adb
commit 946fbf2934
18 changed files with 443 additions and 229 deletions

View file

@ -2,20 +2,20 @@ module glad.gl.funcs;
private import glad.gl.types;
bool GL_VERSION_1_0;
bool GL_VERSION_1_1;
bool GL_VERSION_1_2;
bool GL_VERSION_1_3;
bool GL_VERSION_1_4;
bool GL_VERSION_1_5;
bool GL_VERSION_2_0;
bool GL_VERSION_2_1;
bool GL_VERSION_3_0;
bool GL_VERSION_3_1;
bool GL_VERSION_3_2;
bool GL_VERSION_3_3;
bool GL_ES_VERSION_2_0;
bool GL_ES_VERSION_3_0;
__gshared bool GL_VERSION_1_0;
__gshared bool GL_VERSION_1_1;
__gshared bool GL_VERSION_1_2;
__gshared bool GL_VERSION_1_3;
__gshared bool GL_VERSION_1_4;
__gshared bool GL_VERSION_1_5;
__gshared bool GL_VERSION_2_0;
__gshared bool GL_VERSION_2_1;
__gshared bool GL_VERSION_3_0;
__gshared bool GL_VERSION_3_1;
__gshared bool GL_VERSION_3_2;
__gshared bool GL_VERSION_3_3;
__gshared bool GL_ES_VERSION_2_0;
__gshared bool GL_ES_VERSION_3_0;
nothrow @nogc extern(System) {
alias fp_glCullFace = void function(GLenum);
alias fp_glFrontFace = void function(GLenum);

View file

@ -109,7 +109,7 @@ bool gladLoadGL() {
return status;
}
static struct GLVersion { static int major = 0; static int minor = 0; }
__gshared struct GLVersion { __gshared int major = 0; __gshared int minor = 0; }
private extern(C) char* strstr(const(char)*, const(char)*) @nogc;
private extern(C) int strcmp(const(char)*, const(char)*) @nogc;
private extern(C) int strncmp(const(char)*, const(char)*, size_t) @nogc;

View file

@ -1,12 +1,11 @@
module mmutils.thread_pool;
import core.atomic;
import ecs.atomic;
//import core.stdc.stdio;
//import core.stdc.stdlib : free, malloc, realloc;
//import core.stdc.string : memcpy;
//import std.stdio;
import std.algorithm : map;
@ -108,6 +107,9 @@ version (WebAssembly)
}
extern(C) int clock_gettime(clockid_t, timespec*) @nogc nothrow @system;
extern(C) double emscripten_get_now() @nogc nothrow @system;
}
/// High precision timer
@ -122,13 +124,14 @@ long useconds()
return t.tv_sec * 1_000_000 + t.tv_usec;*/
time_t time;
timespec spec;
//time_t time;
//timespec spec;
clock_gettime(CLOCK_REALTIME, &spec);
//lock_gettime(CLOCK_REALTIME, &spec);
return cast(long)(emscripten_get_now() * 1000.0);
//time = spec.tv_sec;
return spec.tv_sec * 1000_000 + spec.tv_nsec / 1000;
//return spec.tv_sec * 1000_000 + spec.tv_nsec / 1000;
}
else version (Posix)
{
@ -190,6 +193,22 @@ void instructionPause()
static assert(0);
}
}
else version(WebAssembly)
{
version(LDC)
{
import ldc.attributes;
@optStrategy("none")
static void nop()
{
int i;
i++;
}
nop();
}
else static assert(0);
}
else static assert(0);
}
//////////////////////////////////////////////
@ -198,18 +217,17 @@ void instructionPause()
version (MM_USE_POSIX_THREADS)
{
version (Posix)
{
import core.sys.posix.pthread;
import core.sys.posix.semaphore;
}
else version (WebAssembly)
version (WebAssembly)
{
extern(C):
//alias uint time_t;
struct pthread_attr_t
{
union
{
int[10] __i;
uint[10] __s;
}
}
struct pthread_t
@ -230,7 +248,11 @@ version (MM_USE_POSIX_THREADS)
void pthread_exit(void *retval);
// semaphore.h
alias sem_t = void*;
//alias sem_t = void*;
struct sem_t
{
shared int[4] __val;
}
int sem_init(sem_t*, int, uint);
int sem_wait(sem_t*);
int sem_trywait(sem_t*);
@ -240,6 +262,11 @@ version (MM_USE_POSIX_THREADS)
//import core.sys.posix.pthread;
//import core.sys.posix.semaphore;
}
else version (Posix)
{
import core.sys.posix.pthread;
import core.sys.posix.semaphore;
}
else version (Windows)
{
extern (C):
@ -317,7 +344,7 @@ version (MM_USE_POSIX_THREADS)
void post()
{
int ret = sem_post(&mutex);
assert(ret == 0);
assert(ret >= 0);
}
void destroy()
@ -362,6 +389,9 @@ else version(D_BetterC)
{
version(Posix)
{
import core.sys.posix.pthread;
import core.sys.posix.semaphore;
struct Semaphore
{
sem_t mutex;
@ -619,7 +649,7 @@ else
///////////////// ThreadPool /////////////////
//////////////////////////////////////////////
private enum gMaxThreadsNum = 128;
private enum gMaxThreadsNum = 32;
alias JobDelegate = void delegate(ThreadData*, JobData*);
@ -634,7 +664,7 @@ struct JobLog
/// First in first out queue with atomic lock
struct JobQueue
{
alias LockType = long;
alias LockType = int;
align(64) shared LockType lock; /// Lock for accesing list of Jobs
align(64) JobData* first; /// Fist element in list of Jobs
@ -849,7 +879,8 @@ public:
threadData.threadPool = &this;
threadData.semaphore.initialize();
threadData.externalThread = true;
threadData.acceptJobs = true;
atomicStore(threadData.acceptJobs, true);
//threadData.acceptJobs = true;
int threadNum = atomicOp!"+="(threadsNum, 1) - 1;
@ -866,18 +897,19 @@ public:
void unregistExternalThread(ThreadData* threadData)
{
lockThreadsData();
scope (exit)
unlockThreadsData();
//scope (exit)
// unlockThreadsData();
disposeThreadData(threadData);
unlockThreadsData();
}
/// Allows external threads to return from threadStartFunc
void releaseExternalThreads()
{
lockThreadsData();
scope (exit)
unlockThreadsData();
//scope (exit)
// unlockThreadsData();
// Release external threads (including main thread)
foreach (i, ref ThreadData* th; threadsData)
@ -891,14 +923,15 @@ public:
addJobsRange(rng, cast(int) i);
atomicStore(th.end, true);
}
unlockThreadsData();
}
/// Waits for all threads to finish and joins them (excluding external threads)
void waitThreads()
{
lockThreadsData();
scope (exit)
unlockThreadsData();
//scope (exit)
// unlockThreadsData();
foreach (i, ref ThreadData* th; threadsData)
{
if (th is null)
@ -915,6 +948,7 @@ public:
th.thread.join();
disposeThreadData(th);
}
unlockThreadsData();
}
/// Sets number of threads to accept new jobs
@ -927,8 +961,8 @@ public:
assert(num > 0);
lockThreadsData();
scope (exit)
unlockThreadsData();
//scope (exit)
// unlockThreadsData();
foreach (i, ref ThreadData* th; threadsData)
{
@ -947,14 +981,15 @@ public:
th = makeThreadData();
th.threadPool = &this;
th.threadId = cast(int) i;
th.acceptJobs = true;
atomicStore(th.acceptJobs, true);
//th.acceptJobs = true;
th.semaphore.initialize();
th.thread.start(&th.threadStartFunc);
}
atomicStore(threadsNum, num);
unlockThreadsData();
}
/// Adds job to be executed by thread pool, such a job won't be synchronized with any group or job
@ -1012,8 +1047,9 @@ public:
{
assert(rng[0].group == threadData.group);
}
atomicOp!"+="(rng[0].group.jobsToBeDoneCount, cast(int) rng.length);
int threadsNumLocal = threadsNum;
int threadsNumLocal = atomicLoad(threadsNum);
int part = cast(int) rng.length / threadsNumLocal;
if (part > 0)
{
@ -1040,13 +1076,13 @@ public:
void addGroupAsynchronous(JobsGroup* group)
{
group.thPool = &this;
if (group.jobs.length == 0)
{
// Immediately call group end
group.onGroupFinish();
return;
}
group.setUpJobs();
auto rng = group.jobs[].map!((ref a) => &a);
addJobsRange(rng, group.executeOnThreadNum);
@ -1066,8 +1102,8 @@ public:
void flushAllLogs()
{
lockThreadsData();
scope (exit)
unlockThreadsData();
//scope (exit)
// unlockThreadsData();
foreach (thNum; 0 .. atomicLoad(threadsNum))
{
ThreadData* th = threadsData[thNum];
@ -1081,6 +1117,7 @@ public:
onThreadFlushLogs(th);
}
unlockThreadsData();
}
/// Default implementation of flushing logs
@ -1161,7 +1198,7 @@ private:
foreach (i; 0 .. 1_000)
{
if (threadNum >= threadsNum)
if (threadNum >= atomicLoad(threadsNum))
{
threadNum = 0;
atomicStore(threadSelector, 0);
@ -1287,7 +1324,10 @@ public:
this.name = name;
this.jobs = jobs;
this.executeOnThreadNum = executeOnThreadNum;
jobsToBeDoneCount = 0;
//jobsToBeDoneCount = 0;
//dependenciesWaitCount = 0;
atomicStore(jobsToBeDoneCount,0);
atomicStore(dependenciesWaitCount,0);
}
~this() nothrow
@ -1330,6 +1370,7 @@ private:
if (spawnedByGroup)
{
auto num = atomicOp!"-="(spawnedByGroup.jobsToBeDoneCount, 1);
assert(num >= 0);
if (num == 0)
{
spawnedByGroup.onGroupFinish();
@ -1438,7 +1479,8 @@ private void threadFunc(ThreadData* threadData)
}
}
threadData.end = false;
//threadData.end = false;
atomicStore(threadData.end, false);
assert(threadData.jobsQueue.empty());
}