/* thread.cc: Locking and threading module functions

   Copyright 1998, 1999, 2000, 2001, 2002, 2003 Red Hat, Inc.

   Originally written by Marco Fuykschot <marco@ddi.nl>
   Substantially enhanced by Robert Collins <rbtcollins@hotmail.com>

   This file is part of Cygwin.

   This software is a copyrighted work licensed under the terms of the
   Cygwin license.  Please consult the file "CYGWIN_LICENSE" for
   details. */

/* Implementation overview and caveats:

   Win32 puts some constraints on what can and cannot be implemented.  Where
   possible we work around those constraints.  Where we cannot work around
   the constraints we either pretend to be conformant, or return an error
   code.

   Some caveats: PROCESS_SHARED objects, while they pretend to be process
   shared, may not actually work.  Some test cases are needed to determine
   win32's behaviour.  My suspicion is that the win32 handle needs to be
   opened with different flags for proper operation.

   R.Collins, April 2001. */
|
|
|
|
#ifdef HAVE_CONFIG_H
|
|
# include "config.h"
|
|
#endif
|
|
|
|
#ifdef _MT_SAFE
|
|
#include "winsup.h"
|
|
#include <limits.h>
|
|
#include "cygerrno.h"
|
|
#include <assert.h>
|
|
#include <stdlib.h>
|
|
#include <syslog.h>
|
|
#include "pinfo.h"
|
|
#include "perprocess.h"
|
|
#include "security.h"
|
|
#include <semaphore.h>
|
|
#include <stdio.h>
|
|
#include <sys/timeb.h>
|
|
|
|
extern int threadsafe;
|
|
|
|
struct _reent *
|
|
_reent_clib ()
|
|
{
|
|
struct __reent_t *_r =
|
|
(struct __reent_t *) MT_INTERFACE->reent_key.get ();
|
|
|
|
#ifdef _CYG_THREAD_FAILSAFE
|
|
if (_r == 0)
|
|
system_printf ("local thread storage not inited");
|
|
#endif
|
|
return _r->_clib;
|
|
}
|
|
|
|
struct _winsup_t *
|
|
_reent_winsup ()
|
|
{
|
|
struct __reent_t *_r =
|
|
(struct __reent_t *) MT_INTERFACE->reent_key.get ();
|
|
|
|
#ifdef _CYG_THREAD_FAILSAFE
|
|
if (_r == 0)
|
|
system_printf ("local thread storage not inited");
|
|
#endif
|
|
return _r->_winsup;
|
|
}
|
|
|
|
bool
|
|
native_mutex::init ()
|
|
{
|
|
theHandle = CreateMutex (&sec_none_nih, FALSE, NULL);
|
|
if (!theHandle)
|
|
{
|
|
debug_printf ("CreateMutex failed. %E");
|
|
return false;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
bool
|
|
native_mutex::lock ()
|
|
{
|
|
DWORD waitResult = WaitForSingleObject (theHandle, INFINITE);
|
|
if (waitResult != WAIT_OBJECT_0)
|
|
{
|
|
system_printf ("Received unexpected wait result %d on handle %p, %E", waitResult, theHandle);
|
|
return false;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
void
|
|
native_mutex::unlock ()
|
|
{
|
|
if (!ReleaseMutex (theHandle))
|
|
system_printf ("Received a unexpected result releasing mutex. %E");
|
|
}
|
|
|
|
inline LPCRITICAL_SECTION
|
|
ResourceLocks::Lock (int _resid)
|
|
{
|
|
#ifdef _CYG_THREAD_FAILSAFE
|
|
if (!inited)
|
|
system_printf ("lock called before initialization");
|
|
|
|
thread_printf
|
|
("Get Resource lock %d ==> %p for %p , real : %d , threadid %d ", _resid,
|
|
&lock, user_data, myself->pid, GetCurrentThreadId ());
|
|
#endif
|
|
return &lock;
|
|
}
|
|
|
|
void
|
|
SetResourceLock (int _res_id, int _mode, const char *_function)
|
|
{
|
|
#ifdef _CYG_THREAD_FAILSAFE
|
|
thread_printf ("Set resource lock %d mode %d for %s start",
|
|
_res_id, _mode, _function);
|
|
#endif
|
|
EnterCriticalSection (user_data->resourcelocks->Lock (_res_id));
|
|
|
|
#ifdef _CYG_THREAD_FAILSAFE
|
|
user_data->resourcelocks->owner = GetCurrentThreadId ();
|
|
user_data->resourcelocks->count++;
|
|
#endif
|
|
}
|
|
|
|
void
|
|
ReleaseResourceLock (int _res_id, int _mode, const char *_function)
|
|
{
|
|
#ifdef _CYG_THREAD_FAILSAFE
|
|
thread_printf ("Release resource lock %d mode %d for %s done", _res_id,
|
|
_mode, _function);
|
|
|
|
AssertResourceOwner (_res_id, _mode);
|
|
user_data->resourcelocks->count--;
|
|
if (user_data->resourcelocks->count == 0)
|
|
user_data->resourcelocks->owner = 0;
|
|
#endif
|
|
|
|
LeaveCriticalSection (user_data->resourcelocks->Lock (_res_id));
|
|
}
|
|
|
|
#ifdef _CYG_THREAD_FAILSAFE
|
|
void
|
|
AssertResourceOwner (int _res_id, int _mode)
|
|
{
|
|
|
|
thread_printf
|
|
("Assert Resource lock %d ==> for %p , real : %d , threadid %d count %d owner %d",
|
|
_res_id, user_data, myself->pid, GetCurrentThreadId (),
|
|
user_data->resourcelocks->count, user_data->resourcelocks->owner);
|
|
if (user_data && (user_data->resourcelocks->owner != GetCurrentThreadId ()))
|
|
system_printf ("assertion failed, not the resource owner");
|
|
}
|
|
|
|
#endif
|
|
|
|
void
|
|
ResourceLocks::Init ()
|
|
{
|
|
InitializeCriticalSection (&lock);
|
|
inited = true;
|
|
|
|
#ifdef _CYG_THREAD_FAILSAFE
|
|
owner = 0;
|
|
count = 0;
|
|
#endif
|
|
|
|
thread_printf ("lock %p inited by %p , %d", &lock, user_data, myself->pid);
|
|
}
|
|
|
|
void
|
|
ResourceLocks::Delete ()
|
|
{
|
|
if (inited)
|
|
{
|
|
thread_printf ("Close Resource Locks %p ", &lock);
|
|
DeleteCriticalSection (&lock);
|
|
inited = false;
|
|
}
|
|
}
|
|
|
|
void
|
|
MTinterface::Init ()
|
|
{
|
|
reents._clib = _impure_ptr;
|
|
reents._winsup = &winsup_reent;
|
|
winsup_reent._process_logmask = LOG_UPTO (LOG_DEBUG);
|
|
reent_key.set (&reents);
|
|
|
|
pthread_mutex::init_mutex ();
|
|
pthread_cond::init_mutex ();
|
|
pthread_rwlock::init_mutex ();
|
|
}
|
|
|
|
void
|
|
MTinterface::fixup_before_fork (void)
|
|
{
|
|
pthread_key::fixup_before_fork ();
|
|
}
|
|
|
|
/* This function is called from a single threaded process */
|
|
void
|
|
MTinterface::fixup_after_fork (void)
|
|
{
|
|
pthread_key::fixup_after_fork ();
|
|
|
|
threadcount = 1;
|
|
pthread::init_mainthread ();
|
|
|
|
pthread_mutex::fixup_after_fork ();
|
|
pthread_cond::fixup_after_fork ();
|
|
pthread_rwlock::fixup_after_fork ();
|
|
semaphore::fixup_after_fork ();
|
|
}
|
|
|
|
/* pthread calls */
|
|
|
|
/* static methods */
|
|
void
|
|
pthread::init_mainthread ()
|
|
{
|
|
pthread *thread = get_tls_self_pointer ();
|
|
if (!thread)
|
|
{
|
|
thread = new pthread ();
|
|
if (!thread)
|
|
api_fatal ("failed to create mainthread object");
|
|
}
|
|
|
|
thread->init_current_thread ();
|
|
}
|
|
|
|
pthread *
|
|
pthread::self ()
|
|
{
|
|
pthread *thread = get_tls_self_pointer ();
|
|
if (thread)
|
|
return thread;
|
|
return pthread_null::get_null_pthread ();
|
|
}
|
|
|
|
void
|
|
pthread::set_tls_self_pointer (pthread *thisThread)
|
|
{
|
|
MT_INTERFACE->thread_self_key.set (thisThread);
|
|
}
|
|
|
|
pthread *
|
|
pthread::get_tls_self_pointer ()
|
|
{
|
|
return (pthread *) MT_INTERFACE->thread_self_key.get ();
|
|
}
|
|
|
|
|
|
|
|
/* member methods */
|
|
pthread::pthread ():verifyable_object (PTHREAD_MAGIC), win32_obj_id (0),
|
|
cancelstate (0), canceltype (0), cancel_event (0),
|
|
joiner (NULL), cleanup_stack (NULL)
|
|
{
|
|
}
|
|
|
|
pthread::~pthread ()
|
|
{
|
|
if (win32_obj_id)
|
|
CloseHandle (win32_obj_id);
|
|
if (cancel_event)
|
|
CloseHandle (cancel_event);
|
|
}
|
|
|
|
void
|
|
pthread::set_thread_id_to_current ()
|
|
{
|
|
thread_id = GetCurrentThreadId ();
|
|
}
|
|
|
|
void
|
|
pthread::precreate (pthread_attr *newattr)
|
|
{
|
|
pthread_mutex *verifyable_mutex_obj = &mutex;
|
|
|
|
/* already running ? */
|
|
if (win32_obj_id)
|
|
return;
|
|
|
|
if (newattr)
|
|
{
|
|
attr.joinable = newattr->joinable;
|
|
attr.contentionscope = newattr->contentionscope;
|
|
attr.inheritsched = newattr->inheritsched;
|
|
attr.stacksize = newattr->stacksize;
|
|
}
|
|
|
|
if (!pthread_mutex::is_good_object (&verifyable_mutex_obj))
|
|
{
|
|
thread_printf ("New thread object access mutex is not valid. this %p",
|
|
this);
|
|
magic = 0;
|
|
return;
|
|
}
|
|
/* Change the mutex type to NORMAL to speed up mutex operations */
|
|
mutex.type = PTHREAD_MUTEX_NORMAL;
|
|
|
|
cancel_event = ::CreateEvent (&sec_none_nih, TRUE, FALSE, NULL);
|
|
if (!cancel_event)
|
|
{
|
|
system_printf ("couldn't create cancel event, this %p LastError %E", this);
|
|
/* we need the event for correct behaviour */
|
|
magic = 0;
|
|
return;
|
|
}
|
|
}
|
|
|
|
void
|
|
pthread::create (void *(*func) (void *), pthread_attr *newattr,
|
|
void *threadarg)
|
|
{
|
|
precreate (newattr);
|
|
if (!magic)
|
|
return;
|
|
function = func;
|
|
arg = threadarg;
|
|
|
|
win32_obj_id = ::CreateThread (&sec_none_nih, attr.stacksize,
|
|
(LPTHREAD_START_ROUTINE) thread_init_wrapper,
|
|
this, CREATE_SUSPENDED, &thread_id);
|
|
|
|
if (!win32_obj_id)
|
|
{
|
|
thread_printf ("CreateThread failed: this %p LastError %E", this);
|
|
magic = 0;
|
|
}
|
|
else {
|
|
postcreate ();
|
|
ResumeThread (win32_obj_id);
|
|
}
|
|
}
|
|
|
|
void
|
|
pthread::postcreate ()
|
|
{
|
|
InterlockedIncrement (&MT_INTERFACE->threadcount);
|
|
/* FIXME: set the priority appropriately for system contention scope */
|
|
if (attr.inheritsched == PTHREAD_EXPLICIT_SCHED)
|
|
{
|
|
/* FIXME: set the scheduling settings for the new thread */
|
|
/* sched_thread_setparam (win32_obj_id, attr.schedparam); */
|
|
}
|
|
}
|
|
|
|
void
|
|
pthread::exit (void *value_ptr)
|
|
{
|
|
class pthread *thread = this;
|
|
|
|
// run cleanup handlers
|
|
pop_all_cleanup_handlers ();
|
|
|
|
pthread_key::run_all_destructors ();
|
|
|
|
mutex.lock ();
|
|
// cleanup if thread is in detached state and not joined
|
|
if (equal (joiner, thread))
|
|
delete this;
|
|
else
|
|
{
|
|
return_ptr = value_ptr;
|
|
mutex.unlock ();
|
|
}
|
|
|
|
if (InterlockedDecrement (&MT_INTERFACE->threadcount) == 0)
|
|
::exit (0);
|
|
else
|
|
ExitThread (0);
|
|
}
|
|
|
|
int
|
|
pthread::cancel (void)
|
|
{
|
|
class pthread *thread = this;
|
|
class pthread *self = pthread::self ();
|
|
|
|
mutex.lock ();
|
|
|
|
if (canceltype == PTHREAD_CANCEL_DEFERRED ||
|
|
cancelstate == PTHREAD_CANCEL_DISABLE)
|
|
{
|
|
// cancel deferred
|
|
mutex.unlock ();
|
|
SetEvent (cancel_event);
|
|
return 0;
|
|
}
|
|
|
|
else if (equal (thread, self))
|
|
{
|
|
mutex.unlock ();
|
|
cancel_self ();
|
|
return 0; // Never reached
|
|
}
|
|
|
|
// cancel asynchronous
|
|
SuspendThread (win32_obj_id);
|
|
if (WaitForSingleObject (win32_obj_id, 0) == WAIT_TIMEOUT)
|
|
{
|
|
CONTEXT context;
|
|
context.ContextFlags = CONTEXT_CONTROL;
|
|
GetThreadContext (win32_obj_id, &context);
|
|
context.Eip = (DWORD) pthread::static_cancel_self;
|
|
SetThreadContext (win32_obj_id, &context);
|
|
}
|
|
mutex.unlock ();
|
|
ResumeThread (win32_obj_id);
|
|
|
|
return 0;
|
|
/*
|
|
TODO: insert pthread_testcancel into the required functions
|
|
the required function list is: * indicates done, X indicates not present in cygwin.
|
|
aio_suspend ()
|
|
*close ()
|
|
*creat ()
|
|
fcntl ()
|
|
fsync ()
|
|
getmsg ()
|
|
getpmsg ()
|
|
lockf ()
|
|
mq_receive ()
|
|
mq_send ()
|
|
msgrcv ()
|
|
msgsnd ()
|
|
msync ()
|
|
nanosleep ()
|
|
open ()
|
|
*pause ()
|
|
poll ()
|
|
pread ()
|
|
*pthread_cond_timedwait ()
|
|
*pthread_cond_wait ()
|
|
*pthread_join ()
|
|
*pthread_testcancel ()
|
|
putmsg ()
|
|
putpmsg ()
|
|
pwrite ()
|
|
read ()
|
|
readv ()
|
|
select ()
|
|
*sem_wait ()
|
|
*sigpause ()
|
|
*sigsuspend ()
|
|
sigtimedwait ()
|
|
sigwait ()
|
|
sigwaitinfo ()
|
|
*sleep ()
|
|
*system ()
|
|
tcdrain ()
|
|
*usleep ()
|
|
*wait ()
|
|
*wait3()
|
|
waitid ()
|
|
*waitpid ()
|
|
write ()
|
|
writev ()
|
|
|
|
the optional list is:
|
|
catclose ()
|
|
catgets ()
|
|
catopen ()
|
|
closedir ()
|
|
closelog ()
|
|
ctermid ()
|
|
dbm_close ()
|
|
dbm_delete ()
|
|
dbm_fetch ()
|
|
dbm_nextkey ()
|
|
dbm_open ()
|
|
dbm_store ()
|
|
dlclose ()
|
|
dlopen ()
|
|
endgrent ()
|
|
endpwent ()
|
|
endutxent ()
|
|
fclose ()
|
|
fcntl ()
|
|
fflush ()
|
|
fgetc ()
|
|
fgetpos ()
|
|
fgets ()
|
|
fgetwc ()
|
|
fgetws ()
|
|
fopen ()
|
|
fprintf ()
|
|
fputc ()
|
|
fputs ()
|
|
fputwc ()
|
|
fputws ()
|
|
fread ()
|
|
freopen ()
|
|
fscanf ()
|
|
fseek ()
|
|
fseeko ()
|
|
fsetpos ()
|
|
ftell ()
|
|
ftello ()
|
|
ftw ()
|
|
fwprintf ()
|
|
fwrite ()
|
|
fwscanf ()
|
|
getc ()
|
|
getc_unlocked ()
|
|
getchar ()
|
|
getchar_unlocked ()
|
|
getcwd ()
|
|
getdate ()
|
|
getgrent ()
|
|
getgrgid ()
|
|
getgrgid_r ()
|
|
getgrnam ()
|
|
getgrnam_r ()
|
|
getlogin ()
|
|
getlogin_r ()
|
|
getpwent ()
|
|
*getpwnam ()
|
|
*getpwnam_r ()
|
|
*getpwuid ()
|
|
*getpwuid_r ()
|
|
gets ()
|
|
getutxent ()
|
|
getutxid ()
|
|
getutxline ()
|
|
getw ()
|
|
getwc ()
|
|
getwchar ()
|
|
getwd ()
|
|
glob ()
|
|
iconv_close ()
|
|
iconv_open ()
|
|
ioctl ()
|
|
lseek ()
|
|
mkstemp ()
|
|
nftw ()
|
|
opendir ()
|
|
openlog ()
|
|
pclose ()
|
|
perror ()
|
|
popen ()
|
|
printf ()
|
|
putc ()
|
|
putc_unlocked ()
|
|
putchar ()
|
|
putchar_unlocked ()
|
|
puts ()
|
|
pututxline ()
|
|
putw ()
|
|
putwc ()
|
|
putwchar ()
|
|
readdir ()
|
|
readdir_r ()
|
|
remove ()
|
|
rename ()
|
|
rewind ()
|
|
rewinddir ()
|
|
scanf ()
|
|
seekdir ()
|
|
semop ()
|
|
setgrent ()
|
|
setpwent ()
|
|
setutxent ()
|
|
strerror ()
|
|
syslog ()
|
|
tmpfile ()
|
|
tmpnam ()
|
|
ttyname ()
|
|
ttyname_r ()
|
|
ungetc ()
|
|
ungetwc ()
|
|
unlink ()
|
|
vfprintf ()
|
|
vfwprintf ()
|
|
vprintf ()
|
|
vwprintf ()
|
|
wprintf ()
|
|
wscanf ()
|
|
|
|
Note that this applies to fcntl () for any value of the cmd argument.

And we must not introduce cancellation points anywhere else that's part of
the POSIX or Open Group specs.
|
|
*/
|
|
}
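/* Illustrative sketch, not part of the implementation: with deferred
   cancellation, a long-running loop becomes cancellable by calling
   pthread_testcancel () between work items.  work_item () below is a
   hypothetical application function used only for illustration.

     void *worker (void *arg)
     {
       pthread_setcanceltype (PTHREAD_CANCEL_DEFERRED, NULL);
       for (;;)
         {
           work_item ();           // application work, hypothetical
           pthread_testcancel ();  // deferred cancellation point
         }
       return NULL;                // not reached
     }
*/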
|
|
|
|
void
|
|
pthread::testcancel (void)
|
|
{
|
|
if (cancelstate == PTHREAD_CANCEL_DISABLE)
|
|
return;
|
|
|
|
if (WaitForSingleObject (cancel_event, 0) == WAIT_OBJECT_0)
|
|
cancel_self ();
|
|
}
|
|
|
|
void
|
|
pthread::static_cancel_self (void)
|
|
{
|
|
pthread::self ()->cancel_self ();
|
|
}
|
|
|
|
|
|
DWORD
|
|
pthread::cancelable_wait (HANDLE object, DWORD timeout, const bool do_cancel)
|
|
{
|
|
DWORD res;
|
|
HANDLE wait_objects[2];
|
|
pthread_t thread = self ();
|
|
|
|
if (!is_good_object (&thread) || thread->cancelstate == PTHREAD_CANCEL_DISABLE)
|
|
return WaitForSingleObject (object, timeout);
|
|
|
|
// Do not change the wait order
|
|
// The object must have higher priority than the cancel event,
|
|
// because WaitForMultipleObjects will return the smallest index
|
|
// if both objects are signaled
|
|
wait_objects[0] = object;
|
|
wait_objects[1] = thread->cancel_event;
|
|
|
|
res = WaitForMultipleObjects (2, wait_objects, FALSE, timeout);
|
|
if (do_cancel && res == WAIT_CANCELED)
|
|
pthread::static_cancel_self ();
|
|
return res;
|
|
}
|
|
|
|
int
|
|
pthread::setcancelstate (int state, int *oldstate)
|
|
{
|
|
int result = 0;
|
|
|
|
mutex.lock ();
|
|
|
|
if (state != PTHREAD_CANCEL_ENABLE && state != PTHREAD_CANCEL_DISABLE)
|
|
result = EINVAL;
|
|
else
|
|
{
|
|
if (oldstate)
|
|
*oldstate = cancelstate;
|
|
cancelstate = state;
|
|
}
|
|
|
|
mutex.unlock ();
|
|
|
|
return result;
|
|
}
|
|
|
|
int
|
|
pthread::setcanceltype (int type, int *oldtype)
|
|
{
|
|
int result = 0;
|
|
|
|
mutex.lock ();
|
|
|
|
if (type != PTHREAD_CANCEL_DEFERRED && type != PTHREAD_CANCEL_ASYNCHRONOUS)
|
|
result = EINVAL;
|
|
else
|
|
{
|
|
if (oldtype)
|
|
*oldtype = canceltype;
|
|
canceltype = type;
|
|
}
|
|
|
|
mutex.unlock ();
|
|
|
|
return result;
|
|
}
|
|
|
|
void
|
|
pthread::push_cleanup_handler (__pthread_cleanup_handler *handler)
|
|
{
|
|
if (this != self ())
|
|
// TODO: do it?
|
|
api_fatal ("Attempt to push a cleanup handler across threads");
|
|
handler->next = cleanup_stack;
|
|
InterlockedExchangePointer (&cleanup_stack, handler);
|
|
}
|
|
|
|
void
|
|
pthread::pop_cleanup_handler (int const execute)
|
|
{
|
|
if (this != self ())
|
|
// TODO: send a signal or something to the thread ?
|
|
api_fatal ("Attempt to execute a cleanup handler across threads");
|
|
|
|
mutex.lock ();
|
|
|
|
if (cleanup_stack != NULL)
|
|
{
|
|
__pthread_cleanup_handler *handler = cleanup_stack;
|
|
|
|
if (execute)
|
|
(*handler->function) (handler->arg);
|
|
cleanup_stack = handler->next;
|
|
}
|
|
|
|
mutex.unlock ();
|
|
}
|
|
|
|
void
|
|
pthread::pop_all_cleanup_handlers ()
|
|
{
|
|
while (cleanup_stack != NULL)
|
|
pop_cleanup_handler (1);
|
|
}
|
|
|
|
void
|
|
pthread::cancel_self ()
|
|
{
|
|
exit (PTHREAD_CANCELED);
|
|
}
|
|
|
|
DWORD
|
|
pthread::get_thread_id ()
|
|
{
|
|
return thread_id;
|
|
}
|
|
|
|
void
|
|
pthread::init_current_thread ()
|
|
{
|
|
cancel_event = ::CreateEvent (&sec_none_nih, TRUE, FALSE, NULL);
|
|
if (!DuplicateHandle (GetCurrentProcess (), GetCurrentThread (),
|
|
GetCurrentProcess (), &win32_obj_id,
|
|
0, FALSE, DUPLICATE_SAME_ACCESS))
|
|
win32_obj_id = NULL;
|
|
set_thread_id_to_current ();
|
|
set_tls_self_pointer (this);
|
|
}
|
|
|
|
/* static members */
|
|
bool
|
|
pthread_attr::is_good_object (pthread_attr_t const *attr)
|
|
{
|
|
if (verifyable_object_isvalid (attr, PTHREAD_ATTR_MAGIC) != VALID_OBJECT)
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
/* instance members */
|
|
|
|
pthread_attr::pthread_attr ():verifyable_object (PTHREAD_ATTR_MAGIC),
|
|
joinable (PTHREAD_CREATE_JOINABLE), contentionscope (PTHREAD_SCOPE_PROCESS),
|
|
inheritsched (PTHREAD_INHERIT_SCHED), stacksize (0)
|
|
{
|
|
schedparam.sched_priority = 0;
|
|
}
|
|
|
|
pthread_attr::~pthread_attr ()
|
|
{
|
|
}
|
|
|
|
bool
|
|
pthread_condattr::is_good_object (pthread_condattr_t const *attr)
|
|
{
|
|
if (verifyable_object_isvalid (attr, PTHREAD_CONDATTR_MAGIC) != VALID_OBJECT)
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
pthread_condattr::pthread_condattr ():verifyable_object
|
|
(PTHREAD_CONDATTR_MAGIC), shared (PTHREAD_PROCESS_PRIVATE)
|
|
{
|
|
}
|
|
|
|
pthread_condattr::~pthread_condattr ()
|
|
{
|
|
}
|
|
|
|
List<pthread_cond> pthread_cond::conds;
|
|
|
|
/* This is used for cond creation protection within a single process only */
|
|
native_mutex NO_COPY pthread_cond::cond_initialization_lock;
|
|
|
|
/* We can only be called once.
|
|
TODO: (no rush) use a non copied memory section to
|
|
hold an initialization flag. */
|
|
void
|
|
pthread_cond::init_mutex ()
|
|
{
|
|
if (!cond_initialization_lock.init ())
|
|
api_fatal ("Could not create win32 Mutex for pthread cond static initializer support.");
|
|
}
|
|
|
|
pthread_cond::pthread_cond (pthread_condattr *attr) :
|
|
verifyable_object (PTHREAD_COND_MAGIC),
|
|
shared (0), waiting (0), pending (0), sem_wait (NULL),
|
|
mtx_cond(NULL), next (NULL)
|
|
{
|
|
pthread_mutex *verifyable_mutex_obj;
|
|
|
|
if (attr)
|
|
if (attr->shared != PTHREAD_PROCESS_PRIVATE)
|
|
{
|
|
magic = 0;
|
|
return;
|
|
}
|
|
|
|
verifyable_mutex_obj = &mtx_in;
|
|
if (!pthread_mutex::is_good_object (&verifyable_mutex_obj))
|
|
{
|
|
thread_printf ("Internal cond mutex is not valid. this %p", this);
|
|
magic = 0;
|
|
return;
|
|
}
|
|
/*
|
|
* Change the mutex type to NORMAL.
|
|
* This mutex MUST be of type normal
|
|
*/
|
|
mtx_in.type = PTHREAD_MUTEX_NORMAL;
|
|
|
|
verifyable_mutex_obj = &mtx_out;
|
|
if (!pthread_mutex::is_good_object (&verifyable_mutex_obj))
|
|
{
|
|
thread_printf ("Internal cond mutex is not valid. this %p", this);
|
|
magic = 0;
|
|
return;
|
|
}
|
|
/* Change the mutex type to NORMAL to speed up mutex operations */
|
|
mtx_out.type = PTHREAD_MUTEX_NORMAL;
|
|
|
|
sem_wait = ::CreateSemaphore (&sec_none_nih, 0, LONG_MAX, NULL);
|
|
if (!sem_wait)
|
|
{
|
|
debug_printf ("CreateSemaphore failed. %E");
|
|
magic = 0;
|
|
return;
|
|
}
|
|
|
|
conds.insert (this);
|
|
}
|
|
|
|
pthread_cond::~pthread_cond ()
|
|
{
|
|
if (sem_wait)
|
|
CloseHandle (sem_wait);
|
|
|
|
conds.remove (this);
|
|
}
|
|
|
|
void
|
|
pthread_cond::unblock (const bool all)
|
|
{
|
|
unsigned long releaseable;
|
|
|
|
/*
|
|
* Block outgoing threads (and avoid simultaneous unblocks)
|
|
*/
|
|
mtx_out.lock ();
|
|
|
|
releaseable = waiting - pending;
|
|
if (releaseable)
|
|
{
|
|
unsigned long released;
|
|
|
|
if (!pending)
|
|
{
|
|
/*
|
|
* Block incoming threads until all waiting threads are released.
|
|
*/
|
|
mtx_in.lock ();
|
|
|
|
/*
|
|
* Calculate releaseable again because threads can enter until
|
|
* the semaphore has been taken, but they can not leave, therefore pending
|
|
* is unchanged and releaseable can only get higher
|
|
*/
|
|
releaseable = waiting - pending;
|
|
}
|
|
|
|
released = all ? releaseable : 1;
|
|
pending += released;
|
|
/*
|
|
* Signal threads
|
|
*/
|
|
::ReleaseSemaphore (sem_wait, released, NULL);
|
|
}
|
|
|
|
/*
|
|
* And let the threads release.
|
|
*/
|
|
mtx_out.unlock ();
|
|
}
|
|
|
|
int
|
|
pthread_cond::wait (pthread_mutex_t mutex, DWORD dwMilliseconds)
|
|
{
|
|
DWORD rv;
|
|
|
|
mtx_in.lock ();
|
|
if (InterlockedIncrement ((long *)&waiting) == 1)
|
|
mtx_cond = mutex;
|
|
else if (mtx_cond != mutex)
|
|
{
|
|
InterlockedDecrement ((long *)&waiting);
|
|
mtx_in.unlock ();
|
|
return EINVAL;
|
|
}
|
|
mtx_in.unlock ();
|
|
|
|
/*
|
|
* Release the mutex and wait on semaphore
|
|
*/
|
|
++mutex->condwaits;
|
|
mutex->unlock ();
|
|
|
|
rv = pthread::cancelable_wait (sem_wait, dwMilliseconds, false);
|
|
|
|
mtx_out.lock ();
|
|
|
|
if (rv != WAIT_OBJECT_0)
|
|
{
|
|
/*
|
|
* It might happen that a signal is sent while the thread got canceled
|
|
* or timed out. Try to take one.
|
|
* If the thread gets one, then a signal|broadcast is in progress.
|
|
*/
|
|
if (WaitForSingleObject (sem_wait, 0) == WAIT_OBJECT_0)
|
|
/*
|
|
* thread got cancelled or timed out while signalling is in progress.
|
|
* Set wait result back to signaled
|
|
*/
|
|
rv = WAIT_OBJECT_0;
|
|
}
|
|
|
|
InterlockedDecrement ((long *)&waiting);
|
|
|
|
if (rv == WAIT_OBJECT_0 && --pending == 0)
|
|
/*
|
|
* All signaled threads are released,
|
|
* new threads can enter Wait
|
|
*/
|
|
mtx_in.unlock ();
|
|
|
|
mtx_out.unlock ();
|
|
|
|
mutex->lock ();
|
|
--mutex->condwaits;
|
|
|
|
if (rv == WAIT_CANCELED)
|
|
pthread::static_cancel_self ();
|
|
else if (rv == WAIT_TIMEOUT)
|
|
return ETIMEDOUT;
|
|
|
|
return 0;
|
|
}
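/* Illustrative usage sketch, not part of the implementation: callers are
   expected to hold the mutex and re-check their predicate in a loop, since
   a waiter can wake without the predicate holding (e.g. after a broadcast
   another thread consumed first).  "queue", "queue_lock" and "queue_cond"
   are hypothetical application objects.

     pthread_mutex_lock (&queue_lock);
     while (queue_is_empty (&queue))
       pthread_cond_wait (&queue_cond, &queue_lock);
     item = queue_pop (&queue);
     pthread_mutex_unlock (&queue_lock);
*/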
|
|
|
|
void
|
|
pthread_cond::_fixup_after_fork ()
|
|
{
|
|
waiting = pending = 0;
|
|
mtx_cond = NULL;
|
|
|
|
/* Unlock mutexes that may have been left locked */
|
|
mtx_in.unlock ();
|
|
mtx_out.unlock ();
|
|
|
|
sem_wait = ::CreateSemaphore (&sec_none_nih, 0, LONG_MAX, NULL);
|
|
if (!sem_wait)
|
|
api_fatal ("pthread_cond::_fixup_after_fork () failed to recreate win32 semaphore");
|
|
}
|
|
|
|
bool
|
|
pthread_rwlockattr::is_good_object (pthread_rwlockattr_t const *attr)
|
|
{
|
|
if (verifyable_object_isvalid (attr, PTHREAD_RWLOCKATTR_MAGIC) != VALID_OBJECT)
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
pthread_rwlockattr::pthread_rwlockattr ():verifyable_object
|
|
(PTHREAD_RWLOCKATTR_MAGIC), shared (PTHREAD_PROCESS_PRIVATE)
|
|
{
|
|
}
|
|
|
|
pthread_rwlockattr::~pthread_rwlockattr ()
|
|
{
|
|
}
|
|
|
|
List<pthread_rwlock> pthread_rwlock::rwlocks;
|
|
|
|
/* This is used for rwlock creation protection within a single process only */
|
|
native_mutex NO_COPY pthread_rwlock::rwlock_initialization_lock;
|
|
|
|
/* We can only be called once.
|
|
TODO: (no rush) use a non copied memory section to
|
|
hold an initialization flag. */
|
|
void
|
|
pthread_rwlock::init_mutex ()
|
|
{
|
|
if (!rwlock_initialization_lock.init ())
|
|
api_fatal ("Could not create win32 Mutex for pthread rwlock static initializer support.");
|
|
}
|
|
|
|
pthread_rwlock::pthread_rwlock (pthread_rwlockattr *attr) :
|
|
verifyable_object (PTHREAD_RWLOCK_MAGIC),
|
|
shared (0), waiting_readers (0), waiting_writers (0), writer (NULL),
|
|
readers (NULL), mtx (NULL), cond_readers (NULL), cond_writers (NULL),
|
|
next (NULL)
|
|
{
|
|
pthread_mutex *verifyable_mutex_obj = &mtx;
|
|
pthread_cond *verifyable_cond_obj;
|
|
|
|
if (attr)
|
|
if (attr->shared != PTHREAD_PROCESS_PRIVATE)
|
|
{
|
|
magic = 0;
|
|
return;
|
|
}
|
|
|
|
if (!pthread_mutex::is_good_object (&verifyable_mutex_obj))
|
|
{
|
|
thread_printf ("Internal rwlock mutex is not valid. this %p", this);
|
|
magic = 0;
|
|
return;
|
|
}
|
|
/* Change the mutex type to NORMAL to speed up mutex operations */
|
|
mtx.type = PTHREAD_MUTEX_NORMAL;
|
|
|
|
verifyable_cond_obj = &cond_readers;
|
|
if (!pthread_cond::is_good_object (&verifyable_cond_obj))
|
|
{
|
|
thread_printf ("Internal rwlock readers cond is not valid. this %p", this);
|
|
magic = 0;
|
|
return;
|
|
}
|
|
|
|
verifyable_cond_obj = &cond_writers;
|
|
if (!pthread_cond::is_good_object (&verifyable_cond_obj))
|
|
{
|
|
thread_printf ("Internal rwlock writers cond is not valid. this %p", this);
|
|
magic = 0;
|
|
return;
|
|
}
|
|
|
|
|
|
rwlocks.insert (this);
|
|
}
|
|
|
|
pthread_rwlock::~pthread_rwlock ()
|
|
{
|
|
rwlocks.remove (this);
|
|
}
|
|
|
|
int
|
|
pthread_rwlock::rdlock ()
|
|
{
|
|
int result = 0;
|
|
struct RWLOCK_READER *reader;
|
|
pthread_t self = pthread::self ();
|
|
|
|
mtx.lock ();
|
|
|
|
if (lookup_reader (self))
|
|
{
|
|
result = EDEADLK;
|
|
goto DONE;
|
|
}
|
|
|
|
reader = new struct RWLOCK_READER;
|
|
if (!reader)
|
|
{
|
|
result = EAGAIN;
|
|
goto DONE;
|
|
}
|
|
|
|
while (writer || waiting_writers)
|
|
{
|
|
pthread_cleanup_push (pthread_rwlock::rdlock_cleanup, this);
|
|
|
|
++waiting_readers;
|
|
cond_readers.wait (&mtx);
|
|
--waiting_readers;
|
|
|
|
pthread_cleanup_pop (0);
|
|
}
|
|
|
|
reader->thread = self;
|
|
add_reader (reader);
|
|
|
|
DONE:
|
|
mtx.unlock ();
|
|
|
|
return result;
|
|
}
|
|
|
|
int
|
|
pthread_rwlock::tryrdlock ()
|
|
{
|
|
int result = 0;
|
|
pthread_t self = pthread::self ();
|
|
|
|
mtx.lock ();
|
|
|
|
if (writer || waiting_writers || lookup_reader (self))
|
|
result = EBUSY;
|
|
else
|
|
{
|
|
struct RWLOCK_READER *reader = new struct RWLOCK_READER;
|
|
if (reader)
|
|
{
|
|
reader->thread = self;
|
|
add_reader (reader);
|
|
}
|
|
else
|
|
result = EAGAIN;
|
|
}
|
|
|
|
mtx.unlock ();
|
|
|
|
return result;
|
|
}
|
|
|
|
int
|
|
pthread_rwlock::wrlock ()
|
|
{
|
|
int result = 0;
|
|
pthread_t self = pthread::self ();
|
|
|
|
mtx.lock ();
|
|
|
|
if (writer == self || lookup_reader (self))
|
|
{
|
|
result = EDEADLK;
|
|
goto DONE;
|
|
}
|
|
|
|
while (writer || readers)
|
|
{
|
|
pthread_cleanup_push (pthread_rwlock::wrlock_cleanup, this);
|
|
|
|
++waiting_writers;
|
|
cond_writers.wait (&mtx);
|
|
--waiting_writers;
|
|
|
|
pthread_cleanup_pop (0);
|
|
}
|
|
|
|
writer = self;
|
|
|
|
DONE:
|
|
mtx.unlock ();
|
|
|
|
return result;
|
|
}
|
|
|
|
int
|
|
pthread_rwlock::trywrlock ()
|
|
{
|
|
int result = 0;
|
|
pthread_t self = pthread::self ();
|
|
|
|
mtx.lock ();
|
|
|
|
if (writer || readers)
|
|
result = EBUSY;
|
|
else
|
|
writer = self;
|
|
|
|
mtx.unlock ();
|
|
|
|
return result;
|
|
}
|
|
|
|
int
|
|
pthread_rwlock::unlock ()
|
|
{
|
|
int result = 0;
|
|
pthread_t self = pthread::self ();
|
|
|
|
mtx.lock ();
|
|
|
|
if (writer)
|
|
{
|
|
if (writer != self)
|
|
{
|
|
result = EPERM;
|
|
goto DONE;
|
|
}
|
|
|
|
writer = NULL;
|
|
}
|
|
else
|
|
{
|
|
struct RWLOCK_READER *reader = lookup_reader (self);
|
|
|
|
if (!reader)
|
|
{
|
|
result = EPERM;
|
|
goto DONE;
|
|
}
|
|
|
|
remove_reader (reader);
|
|
delete reader;
|
|
}
|
|
|
|
release ();
|
|
|
|
DONE:
|
|
mtx.unlock ();
|
|
|
|
return result;
|
|
}
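/* Illustrative usage sketch, not part of the implementation: a reader takes
   the lock shared, a writer takes it exclusively.  "table" and "table_lock"
   are hypothetical application objects.

     pthread_rwlock_rdlock (&table_lock);
     value = table_find (&table, key);      // many readers may run here
     pthread_rwlock_unlock (&table_lock);

     pthread_rwlock_wrlock (&table_lock);
     table_insert (&table, key, value);     // writers are exclusive
     pthread_rwlock_unlock (&table_lock);
*/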
|
|
|
|
void
|
|
pthread_rwlock::add_reader (struct RWLOCK_READER *rd)
|
|
{
|
|
rd->next = (struct RWLOCK_READER *)
|
|
InterlockedExchangePointer (&readers, rd);
|
|
}
|
|
|
|
void
|
|
pthread_rwlock::remove_reader (struct RWLOCK_READER *rd)
|
|
{
|
|
if (readers == rd)
|
|
InterlockedExchangePointer (&readers, rd->next);
|
|
else
|
|
{
|
|
struct RWLOCK_READER *temp = readers;
|
|
while (temp->next && temp->next != rd)
|
|
temp = temp->next;
|
|
/* but there may be a race between the loop above and this statement */
|
|
InterlockedExchangePointer (&temp->next, rd->next);
|
|
}
|
|
}
|
|
|
|
struct pthread_rwlock::RWLOCK_READER *
|
|
pthread_rwlock::lookup_reader (pthread_t thread)
|
|
{
|
|
struct RWLOCK_READER *temp = readers;
|
|
|
|
while (temp && temp->thread != thread)
|
|
temp = temp->next;
|
|
|
|
return temp;
|
|
}
|
|
|
|
void
|
|
pthread_rwlock::rdlock_cleanup (void *arg)
|
|
{
|
|
pthread_rwlock *rwlock = (pthread_rwlock *) arg;
|
|
|
|
--(rwlock->waiting_readers);
|
|
rwlock->release ();
|
|
rwlock->mtx.unlock ();
|
|
}
|
|
|
|
void
|
|
pthread_rwlock::wrlock_cleanup (void *arg)
|
|
{
|
|
pthread_rwlock *rwlock = (pthread_rwlock *) arg;
|
|
|
|
--(rwlock->waiting_writers);
|
|
rwlock->release ();
|
|
rwlock->mtx.unlock ();
|
|
}
|
|
|
|
void
|
|
pthread_rwlock::_fixup_after_fork ()
|
|
{
|
|
pthread_t self = pthread::self ();
|
|
struct RWLOCK_READER **temp = &readers;
|
|
|
|
waiting_readers = 0;
|
|
waiting_writers = 0;
|
|
|
|
/* Unlock the mutex if it was left locked */
|
|
mtx.unlock ();
|
|
/*
|
|
* Remove all readers except self
|
|
*/
|
|
while (*temp)
|
|
{
|
|
if ((*temp)->thread == self)
|
|
temp = &((*temp)->next);
|
|
else
|
|
{
|
|
struct RWLOCK_READER *cur = *temp;
|
|
*temp = (*temp)->next;
|
|
delete cur;
|
|
}
|
|
}
|
|
}
|
|
|
|
/* pthread_key */
|
|
/* static members */
|
|
/* This stores pthread_key information across fork() boundaries */
|
|
List<pthread_key> pthread_key::keys;
|
|
|
|
bool
|
|
pthread_key::is_good_object (pthread_key_t const *key)
|
|
{
|
|
if (verifyable_object_isvalid (key, PTHREAD_KEY_MAGIC) != VALID_OBJECT)
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
/* non-static members */
|
|
|
|
pthread_key::pthread_key (void (*aDestructor) (void *)):verifyable_object (PTHREAD_KEY_MAGIC), destructor (aDestructor)
|
|
{
|
|
tls_index = TlsAlloc ();
|
|
if (tls_index == TLS_OUT_OF_INDEXES)
|
|
magic = 0;
|
|
else
|
|
keys.insert (this);
|
|
}
|
|
|
|
pthread_key::~pthread_key ()
|
|
{
|
|
/* We may need to make the list code lock the list during operations
|
|
*/
|
|
if (magic != 0)
|
|
{
|
|
keys.remove (this);
|
|
TlsFree (tls_index);
|
|
}
|
|
}
|
|
|
|
int
|
|
pthread_key::set (const void *value)
|
|
{
|
|
/* the OS function doesn't perform error checking */
|
|
TlsSetValue (tls_index, (void *) value);
|
|
return 0;
|
|
}
|
|
|
|
void *
|
|
pthread_key::get () const
|
|
{
|
|
int saved_error = ::GetLastError ();
|
|
void *result = TlsGetValue (tls_index);
|
|
::SetLastError (saved_error);
|
|
return result;
|
|
}
|
|
|
|
void
|
|
pthread_key::save_key_to_buffer ()
|
|
{
|
|
fork_buf = get ();
|
|
}
|
|
|
|
void
|
|
pthread_key::recreate_key_from_buffer ()
|
|
{
|
|
tls_index = TlsAlloc ();
|
|
if (tls_index == TLS_OUT_OF_INDEXES)
|
|
api_fatal ("pthread_key::recreate_key_from_buffer () failed to reallocate Tls storage");
|
|
set (fork_buf);
|
|
}
|
|
|
|
void
|
|
pthread_key::run_destructor ()
|
|
{
|
|
if (destructor)
|
|
{
|
|
void *oldValue = get ();
|
|
if (oldValue)
|
|
{
|
|
set (NULL);
|
|
destructor (oldValue);
|
|
}
|
|
}
|
|
}
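/* Illustrative usage sketch, not part of the implementation: a key created
   with a destructor gets its per-thread value cleaned up automatically when
   the thread exits, via run_all_destructors () in pthread::exit ().  The
   names below are hypothetical.

     static pthread_key_t buf_key;

     static void free_buf (void *p) { free (p); }

     void init_once (void)
     {
       pthread_key_create (&buf_key, free_buf);
     }

     char *thread_buffer (void)
     {
       char *buf = (char *) pthread_getspecific (buf_key);
       if (!buf)
         {
           buf = (char *) malloc (BUFSIZ);
           pthread_setspecific (buf_key, buf);
         }
       return buf;
     }
*/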
|
|
|
|
/* pshared mutexes:

   REMOVED FROM CURRENT.  These can be reinstated with the daemon, when all
   the gymnastics can be a lot easier.

   The mutex_t (size 4) is not used as a verifyable object because we cannot
   guarantee the same address space for all processes.
   We use the following:
   high bit set (never a valid address).
   second byte is reserved for the priority.
   third byte is reserved.
   fourth byte is the mutex id.  (max 255 cygwin mutexes system wide).
   Creating mutexes does get slower and slower, but as creation is a one-time
   job, it should never become an issue.

   And if you're looking at this and thinking, why not an array in cygwin for
   all mutexes - you incur a penalty on _every_ mutex call and you have to
   serialise them all.  ...  Bad karma.

   Option 2?  Put everything in userspace and update the ABI?
   - bad karma as well - the HANDLE, while identical across processes,
   isn't duplicated, it's reopened. */
|
|
|
|
/* static members */
|
|
bool
|
|
pthread_mutex::is_good_object (pthread_mutex_t const *mutex)
|
|
{
|
|
if (verifyable_object_isvalid (mutex, PTHREAD_MUTEX_MAGIC) != VALID_OBJECT)
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
bool
|
|
pthread_mutex::is_good_initializer (pthread_mutex_t const *mutex)
|
|
{
|
|
if (verifyable_object_isvalid (mutex, PTHREAD_MUTEX_MAGIC, PTHREAD_MUTEX_INITIALIZER) != VALID_STATIC_OBJECT)
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
bool
|
|
pthread_mutex::is_good_initializer_or_object (pthread_mutex_t const *mutex)
|
|
{
|
|
if (verifyable_object_isvalid (mutex, PTHREAD_MUTEX_MAGIC, PTHREAD_MUTEX_INITIALIZER) == INVALID_OBJECT)
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
bool
|
|
pthread_mutex::is_good_initializer_or_bad_object (pthread_mutex_t const *mutex)
|
|
{
|
|
verifyable_object_state objectState = verifyable_object_isvalid (mutex, PTHREAD_MUTEX_MAGIC, PTHREAD_MUTEX_INITIALIZER);
|
|
if (objectState == VALID_OBJECT)
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
bool
|
|
pthread_mutex::can_be_unlocked (pthread_mutex_t const *mutex)
|
|
{
|
|
pthread_t self = pthread::self ();
|
|
|
|
if (!is_good_object (mutex))
|
|
return false;
|
|
/*
|
|
* Check if the mutex is owned by the current thread and can be unlocked
|
|
*/
|
|
return ((*mutex)->recursion_counter == 1 && pthread::equal ((*mutex)->owner, self));
|
|
}
|
|
|
|
List<pthread_mutex> pthread_mutex::mutexes;
|
|
|
|
/* This is used for mutex creation protection within a single process only */
|
|
native_mutex NO_COPY pthread_mutex::mutex_initialization_lock;
|
|
|
|
/* We can only be called once.
|
|
TODO: (no rush) use a non copied memory section to
|
|
hold an initialization flag. */
|
|
void
|
|
pthread_mutex::init_mutex ()
|
|
{
|
|
if (!mutex_initialization_lock.init ())
|
|
api_fatal ("Could not create win32 Mutex for pthread mutex static initializer support.");
|
|
}
|
|
|
|
pthread_mutex::pthread_mutex (pthread_mutexattr *attr) :
|
|
verifyable_object (PTHREAD_MUTEX_MAGIC),
|
|
lock_counter (0),
|
|
win32_obj_id (NULL), recursion_counter (0),
|
|
condwaits (0), owner (NULL), type (PTHREAD_MUTEX_DEFAULT),
|
|
pshared (PTHREAD_PROCESS_PRIVATE)
|
|
{
|
|
win32_obj_id = ::CreateSemaphore (&sec_none_nih, 0, LONG_MAX, NULL);
|
|
if (!win32_obj_id)
|
|
{
|
|
magic = 0;
|
|
return;
|
|
}
|
|
/*attr checked in the C call */
|
|
if (attr)
|
|
{
|
|
if (attr->pshared == PTHREAD_PROCESS_SHARED)
|
|
{
|
|
// fail
|
|
magic = 0;
|
|
return;
|
|
}
|
|
|
|
type = attr->mutextype;
|
|
}
|
|
|
|
mutexes.insert (this);
|
|
}
|
|
|
|
pthread_mutex::~pthread_mutex ()
|
|
{
|
|
if (win32_obj_id)
|
|
CloseHandle (win32_obj_id);
|
|
|
|
mutexes.remove (this);
|
|
}
|
|
|
|
int
|
|
pthread_mutex::_lock (pthread_t self)
|
|
{
|
|
int result = 0;
|
|
|
|
if (InterlockedIncrement ((long *)&lock_counter) == 1)
|
|
set_owner (self);
|
|
else if (type != PTHREAD_MUTEX_NORMAL && pthread::equal (owner, self))
|
|
{
|
|
InterlockedDecrement ((long *) &lock_counter);
|
|
if (type == PTHREAD_MUTEX_RECURSIVE)
|
|
result = lock_recursive ();
|
|
else
|
|
result = EDEADLK;
|
|
}
|
|
else
|
|
{
|
|
WaitForSingleObject (win32_obj_id, INFINITE);
|
|
set_owner (self);
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
int
|
|
pthread_mutex::_trylock (pthread_t self)
|
|
{
|
|
int result = 0;
|
|
|
|
if (InterlockedCompareExchange ((long *)&lock_counter, 1, 0 ) == 0)
|
|
set_owner (self);
|
|
else if (type == PTHREAD_MUTEX_RECURSIVE && pthread::equal (owner, self))
|
|
result = lock_recursive ();
|
|
else
|
|
result = EBUSY;
|
|
|
|
return result;
|
|
}
|
|
|
|
int
|
|
pthread_mutex::_unlock (pthread_t self)
|
|
{
|
|
if (!pthread::equal (owner, self))
|
|
return EPERM;
|
|
|
|
if (--recursion_counter == 0)
|
|
{
|
|
owner = NULL;
|
|
if (InterlockedDecrement ((long *)&lock_counter))
|
|
// Another thread is waiting
|
|
::ReleaseSemaphore (win32_obj_id, 1, NULL);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
pthread_mutex::_destroy (pthread_t self)
|
|
{
|
|
if (condwaits || _trylock (self))
|
|
// Do not destroy a condwaited or locked mutex
|
|
return EBUSY;
|
|
else if (recursion_counter != 1)
|
|
{
|
|
// Do not destroy a recursive locked mutex
|
|
--recursion_counter;
|
|
return EBUSY;
|
|
}
|
|
|
|
delete this;
|
|
return 0;
|
|
}
|
|
|
|
void
|
|
pthread_mutex::_fixup_after_fork ()
|
|
{
|
|
debug_printf ("mutex %x in _fixup_after_fork", this);
|
|
if (pshared != PTHREAD_PROCESS_PRIVATE)
|
|
api_fatal ("pthread_mutex::_fixup_after_fork () doesn'tunderstand PROCESS_SHARED mutex's");
|
|
|
|
if (owner == NULL)
|
|
/* mutex has no owner, reset to initial */
|
|
lock_counter = 0;
|
|
else if (lock_counter != 0)
|
|
/* All waiting threads are gone after a fork */
|
|
lock_counter = 1;
|
|
|
|
win32_obj_id = ::CreateSemaphore (&sec_none_nih, 0, LONG_MAX, NULL);
|
|
if (!win32_obj_id)
|
|
api_fatal ("pthread_mutex::_fixup_after_fork () failed to recreate win32 semaphore for mutex");
|
|
|
|
condwaits = 0;
|
|
}
|
|
|
|
bool
|
|
pthread_mutexattr::is_good_object (pthread_mutexattr_t const * attr)
|
|
{
|
|
if (verifyable_object_isvalid (attr, PTHREAD_MUTEXATTR_MAGIC) != VALID_OBJECT)
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
pthread_mutexattr::pthread_mutexattr ():verifyable_object (PTHREAD_MUTEXATTR_MAGIC),
|
|
pshared (PTHREAD_PROCESS_PRIVATE), mutextype (PTHREAD_MUTEX_DEFAULT)
|
|
{
|
|
}
|
|
|
|
pthread_mutexattr::~pthread_mutexattr ()
|
|
{
|
|
}
|
|
|
|
List<semaphore> semaphore::semaphores;
|
|
|
|
semaphore::semaphore (int pshared, unsigned int value):verifyable_object (SEM_MAGIC)
|
|
{
|
|
this->win32_obj_id = ::CreateSemaphore (&sec_none_nih, value, LONG_MAX,
|
|
NULL);
|
|
if (!this->win32_obj_id)
|
|
magic = 0;
|
|
this->shared = pshared;
|
|
currentvalue = value;
|
|
|
|
semaphores.insert (this);
|
|
}
|
|
|
|
semaphore::~semaphore ()
|
|
{
|
|
if (win32_obj_id)
|
|
CloseHandle (win32_obj_id);
|
|
|
|
semaphores.remove (this);
|
|
}
|
|
|
|
void
|
|
semaphore::_post ()
|
|
{
|
|
/* we can't use the currentvalue, because the wait functions don't let us access it */
|
|
ReleaseSemaphore (win32_obj_id, 1, NULL);
|
|
currentvalue++;
|
|
}
|
|
|
|
int
|
|
semaphore::_trywait ()
|
|
{
|
|
/* FIXME: signals should be able to interrupt semaphores...
|
|
*We probably need WaitForMultipleObjects here.
|
|
*/
|
|
if (WaitForSingleObject (win32_obj_id, 0) == WAIT_TIMEOUT)
|
|
{
|
|
set_errno (EAGAIN);
|
|
return -1;
|
|
}
|
|
currentvalue--;
|
|
return 0;
|
|
}
|
|
|
|
void
|
|
semaphore::_wait ()
|
|
{
|
|
switch (pthread::cancelable_wait (win32_obj_id, INFINITE))
|
|
{
|
|
case WAIT_OBJECT_0:
|
|
currentvalue--;
|
|
break;
|
|
default:
|
|
debug_printf ("cancelable_wait failed. %E");
|
|
return;
|
|
}
|
|
}
|
|
|
|
void
|
|
semaphore::_fixup_after_fork ()
|
|
{
|
|
debug_printf ("sem %x in _fixup_after_fork", this);
|
|
if (shared != PTHREAD_PROCESS_PRIVATE)
|
|
api_fatal ("doesn't understand PROCESS_SHARED semaphores variables");
|
|
/* FIXME: duplicate code here and in the constructor. */
|
|
this->win32_obj_id = ::CreateSemaphore (&sec_none_nih, currentvalue, LONG_MAX, NULL);
|
|
if (!win32_obj_id)
|
|
api_fatal ("failed to create new win32 semaphore");
|
|
}
|
|
|
|
verifyable_object::verifyable_object (long verifyer):
|
|
magic (verifyer)
|
|
{
|
|
}
|
|
|
|
verifyable_object::~verifyable_object ()
|
|
{
|
|
magic = 0;
|
|
}
|
|
|
|
/* Generic memory access routine - where should it live? */
|
|
int __stdcall
|
|
check_valid_pointer (void const *pointer)
|
|
{
|
|
if (!pointer || IsBadWritePtr ((void *) pointer, sizeof (verifyable_object)))
|
|
return EFAULT;
|
|
return 0;
|
|
}
|
|
|
|
verifyable_object_state
|
|
verifyable_object_isvalid (void const * objectptr, long magic, void *static_ptr)
|
|
{
|
|
verifyable_object **object = (verifyable_object **)objectptr;
|
|
if (check_valid_pointer (object))
|
|
return INVALID_OBJECT;
|
|
if (static_ptr && *object == static_ptr)
|
|
return VALID_STATIC_OBJECT;
|
|
if (!*object)
|
|
return INVALID_OBJECT;
|
|
if (check_valid_pointer (*object))
|
|
return INVALID_OBJECT;
|
|
if ((*object)->magic != magic)
|
|
return INVALID_OBJECT;
|
|
return VALID_OBJECT;
|
|
}
|
|
|
|
verifyable_object_state
|
|
verifyable_object_isvalid (void const * objectptr, long magic)
|
|
{
|
|
return verifyable_object_isvalid (objectptr, magic, NULL);
|
|
}
|
|
|
|
/* Pthreads */
|
|
void *
|
|
pthread::thread_init_wrapper (void *_arg)
|
|
{
|
|
// Setup the local/global storage of this thread
|
|
|
|
pthread *thread = (pthread *) _arg;
|
|
struct __reent_t local_reent;
|
|
struct _winsup_t local_winsup;
|
|
struct _reent local_clib = _REENT_INIT (local_clib);
|
|
|
|
struct sigaction _sigs[NSIG];
|
|
sigset_t _sig_mask; /* one set for everything to ignore. */
|
|
LONG _sigtodo[NSIG + __SIGOFFSET];
|
|
|
|
// setup signal structures
|
|
thread->sigs = _sigs;
|
|
thread->sigmask = &_sig_mask;
|
|
thread->sigtodo = _sigtodo;
|
|
|
|
memset (&local_winsup, 0, sizeof (struct _winsup_t));
|
|
|
|
local_reent._clib = &local_clib;
|
|
local_reent._winsup = &local_winsup;
|
|
|
|
local_winsup._process_logmask = LOG_UPTO (LOG_DEBUG);
|
|
|
|
MT_INTERFACE->reent_key.set (&local_reent);
|
|
|
|
thread->set_thread_id_to_current ();
|
|
set_tls_self_pointer (thread);
|
|
|
|
thread->mutex.lock ();
|
|
// if thread is detached force cleanup on exit
|
|
if (thread->attr.joinable == PTHREAD_CREATE_DETACHED && thread->joiner == NULL)
|
|
thread->joiner = thread;
|
|
thread->mutex.unlock ();
|
|
|
|
#ifdef _CYG_THREAD_FAILSAFE
|
|
if (_REENT == _impure_ptr)
|
|
system_printf ("local storage for thread isn't setup correctly");
|
|
#endif
|
|
|
|
thread_printf ("started thread %p %p %p %p %p %p", _arg, &local_clib,
|
|
_impure_ptr, thread, thread->function, thread->arg);
|
|
|
|
// call the user's thread
|
|
void *ret = thread->function (thread->arg);
|
|
|
|
thread->exit (ret);
|
|
|
|
#if 0
|
|
// ??? This code only runs if the thread exits by returning.
|
|
// it's all now in __pthread_exit ();
|
|
#endif
|
|
/* never reached */
|
|
return 0;
|
|
}
|
|
|
|
bool
|
|
pthread::is_good_object (pthread_t const *thread)
|
|
{
|
|
if (verifyable_object_isvalid (thread, PTHREAD_MAGIC) != VALID_OBJECT)
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
unsigned long
|
|
pthread::getsequence_np ()
|
|
{
|
|
return get_thread_id ();
|
|
}
|
|
|
|
int
|
|
pthread::create (pthread_t *thread, const pthread_attr_t *attr,
|
|
void *(*start_routine) (void *), void *arg)
|
|
{
|
|
DECLARE_TLS_STORAGE;
|
|
if (attr && !pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
|
|
*thread = new pthread ();
|
|
(*thread)->create (start_routine, attr ? *attr : NULL, arg);
|
|
if (!is_good_object (thread))
|
|
{
|
|
delete (*thread);
|
|
*thread = NULL;
|
|
return EAGAIN;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
pthread::once (pthread_once_t *once_control, void (*init_routine) (void))
|
|
{
|
|
// already done ?
|
|
if (once_control->state)
|
|
return 0;
|
|
|
|
pthread_mutex_lock (&once_control->mutex);
|
|
/* Here we must set a cancellation handler to unlock the mutex if needed */
/* but a cancellation handler is not the right thing.  We need this in the
   thread cleanup routine.  Assumption: a thread can only be in one
   pthread_once routine at a time.  Store a mutex_t * in the pthread
   structure; if that's non-null, unlock it on pthread_exit ().
 */
|
|
if (!once_control->state)
|
|
{
|
|
init_routine ();
|
|
once_control->state = 1;
|
|
}
|
|
/* Here we must remove our cancellation handler */
|
|
pthread_mutex_unlock (&once_control->mutex);
|
|
return 0;
|
|
}
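/* Illustrative usage sketch, not part of the implementation: pthread_once
   guarantees init_routine runs exactly once even when several threads race
   through the same code path.  init_tables () is hypothetical.

     static pthread_once_t init_guard = PTHREAD_ONCE_INIT;

     static void init_tables (void) { ... }   // one-time setup, hypothetical

     void use_tables (void)
     {
       pthread_once (&init_guard, init_tables);
       // tables are initialized from here on
     }
*/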
|
|
|
|
int
|
|
pthread::cancel (pthread_t thread)
|
|
{
|
|
if (!is_good_object (&thread))
|
|
return ESRCH;
|
|
|
|
return thread->cancel ();
|
|
}
|
|
|
|
/* Races in pthread_atfork:
   We are race safe in that any additions to the lists are made via
   InterlockedExchangePointer.
   However, if the user application doesn't perform synchronisation of some
   sort, it's not guaranteed that a near-simultaneous call to pthread_atfork
   and fork will result in the new atfork handlers being called.
   More rigorous internal synchronisation isn't needed as the user program
   isn't guaranteeing their own state.

   As far as multiple calls to pthread_atfork go, the worst case is that
   simultaneous calls will result in an indeterminate order for parent and
   child calls (what gets inserted first isn't guaranteed).

   There is one potential race... Does the result of InterlockedExchangePointer
   get committed to the return location _before_ any context switches can
   occur?  If yes, we're safe; if no, we're not. */
|
|
void
|
|
pthread::atforkprepare (void)
|
|
{
|
|
MT_INTERFACE->fixup_before_fork ();
|
|
|
|
callback *cb = MT_INTERFACE->pthread_prepare;
|
|
while (cb)
|
|
{
|
|
cb->cb ();
|
|
cb = cb->next;
|
|
}
|
|
}
|
|
|
|
void
|
|
pthread::atforkparent (void)
|
|
{
|
|
callback *cb = MT_INTERFACE->pthread_parent;
|
|
while (cb)
|
|
{
|
|
cb->cb ();
|
|
cb = cb->next;
|
|
}
|
|
}
|
|
|
|
void
|
|
pthread::atforkchild (void)
|
|
{
|
|
MT_INTERFACE->fixup_after_fork ();
|
|
|
|
callback *cb = MT_INTERFACE->pthread_child;
|
|
while (cb)
|
|
{
|
|
cb->cb ();
|
|
cb = cb->next;
|
|
}
|
|
}
|
|
|
|
/* Register a set of functions to run before and after fork.
   prepare calls are called in LI-FC (last in, first called) order.
   parent and child calls are called in FI-FC (first in, first called) order. */
|
|
int
|
|
pthread::atfork (void (*prepare)(void), void (*parent)(void), void (*child)(void))
|
|
{
|
|
callback *prepcb = NULL, *parentcb = NULL, *childcb = NULL;
|
|
if (prepare)
|
|
{
|
|
prepcb = new callback;
|
|
if (!prepcb)
|
|
return ENOMEM;
|
|
}
|
|
if (parent)
|
|
{
|
|
parentcb = new callback;
|
|
if (!parentcb)
|
|
{
|
|
if (prepcb)
|
|
delete prepcb;
|
|
return ENOMEM;
|
|
}
|
|
}
|
|
if (child)
|
|
{
|
|
childcb = new callback;
|
|
if (!childcb)
|
|
{
|
|
if (prepcb)
|
|
delete prepcb;
|
|
if (parentcb)
|
|
delete parentcb;
|
|
return ENOMEM;
|
|
}
|
|
}
|
|
|
|
if (prepcb)
|
|
{
|
|
prepcb->cb = prepare;
|
|
prepcb->next = (callback *) InterlockedExchangePointer ((LONG *) &MT_INTERFACE->pthread_prepare, (long int) prepcb);
|
|
}
|
|
if (parentcb)
|
|
{
|
|
parentcb->cb = parent;
|
|
callback **t = &MT_INTERFACE->pthread_parent;
|
|
while (*t)
|
|
t = &(*t)->next;
|
|
/* t = pointer to last next in the list */
|
|
parentcb->next = (callback *) InterlockedExchangePointer ((LONG *) t, (long int) parentcb);
|
|
}
|
|
if (childcb)
|
|
{
|
|
childcb->cb = child;
|
|
callback **t = &MT_INTERFACE->pthread_child;
|
|
while (*t)
|
|
t = &(*t)->next;
|
|
/* t = pointer to last next in the list */
|
|
childcb->next = (callback *) InterlockedExchangePointer ((LONG *) t, (long int) childcb);
|
|
}
|
|
return 0;
|
|
}
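/* Illustrative usage sketch, not part of the implementation: a library that
   owns a lock can keep it consistent across fork () by registering handlers.
   "lib_lock" and the lib_* functions are hypothetical.

     static pthread_mutex_t lib_lock = PTHREAD_MUTEX_INITIALIZER;

     static void lib_prepare (void) { pthread_mutex_lock (&lib_lock); }
     static void lib_release (void) { pthread_mutex_unlock (&lib_lock); }

     void lib_init (void)
     {
       pthread_atfork (lib_prepare, lib_release, lib_release);
     }
*/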
|
|
|
|
extern "C" int
|
|
pthread_attr_init (pthread_attr_t *attr)
|
|
{
|
|
if (check_valid_pointer (attr))
|
|
return EINVAL;
|
|
*attr = new pthread_attr;
|
|
if (!pthread_attr::is_good_object (attr))
|
|
{
|
|
delete (*attr);
|
|
*attr = NULL;
|
|
return EAGAIN;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
extern "C" int
|
|
pthread_attr_getinheritsched (const pthread_attr_t *attr,
|
|
int *inheritsched)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
*inheritsched = (*attr)->inheritsched;
|
|
return 0;
|
|
}
|
|
|
|
extern "C" int
|
|
pthread_attr_getschedparam (const pthread_attr_t *attr,
|
|
struct sched_param *param)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
*param = (*attr)->schedparam;
|
|
return 0;
|
|
}
|
|
|
|
/* From a pure code point of view, this should call a helper in sched.cc,
|
|
to allow for someone adding scheduler policy changes to win32 in the future.
|
|
However that's extremely unlikely, so short and sweet will do us */
|
|
extern "C" int
|
|
pthread_attr_getschedpolicy (const pthread_attr_t *attr, int *policy)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
*policy = SCHED_FIFO;
|
|
return 0;
|
|
}
|
|
|
|
|
|
extern "C" int
|
|
pthread_attr_getscope (const pthread_attr_t *attr, int *contentionscope)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
*contentionscope = (*attr)->contentionscope;
|
|
return 0;
|
|
}
|
|
|
|
extern "C" int
|
|
pthread_attr_setdetachstate (pthread_attr_t *attr, int detachstate)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
if (detachstate < 0 || detachstate > 1)
|
|
return EINVAL;
|
|
(*attr)->joinable = detachstate;
|
|
return 0;
|
|
}
|
|
|
|
extern "C" int
|
|
pthread_attr_getdetachstate (const pthread_attr_t *attr, int *detachstate)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
*detachstate = (*attr)->joinable;
|
|
return 0;
|
|
}
|
|
|
|
extern "C" int
|
|
pthread_attr_setinheritsched (pthread_attr_t *attr, int inheritsched)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
if (inheritsched != PTHREAD_INHERIT_SCHED
|
|
&& inheritsched != PTHREAD_EXPLICIT_SCHED)
|
|
return ENOTSUP;
|
|
(*attr)->inheritsched = inheritsched;
|
|
return 0;
|
|
}
|
|
|
|
extern "C" int
|
|
pthread_attr_setschedparam (pthread_attr_t *attr,
|
|
const struct sched_param *param)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
if (!valid_sched_parameters (param))
|
|
return ENOTSUP;
|
|
(*attr)->schedparam = *param;
|
|
return 0;
|
|
}
|
|
|
|
/* See __pthread_attr_getschedpolicy for some notes */
|
|
extern "C" int
|
|
pthread_attr_setschedpolicy (pthread_attr_t *attr, int policy)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
if (policy != SCHED_FIFO)
|
|
return ENOTSUP;
|
|
return 0;
|
|
}
|
|
|
|
extern "C" int
|
|
pthread_attr_setscope (pthread_attr_t *attr, int contentionscope)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
if (contentionscope != PTHREAD_SCOPE_SYSTEM
|
|
&& contentionscope != PTHREAD_SCOPE_PROCESS)
|
|
return EINVAL;
|
|
/* In future, we may be able to support system scope by escalating the thread
|
|
priority to exceed the priority class. For now we only support PROCESS scope. */
|
|
if (contentionscope != PTHREAD_SCOPE_PROCESS)
|
|
return ENOTSUP;
|
|
(*attr)->contentionscope = contentionscope;
|
|
return 0;
|
|
}
|
|
|
|
extern "C" int
|
|
pthread_attr_setstacksize (pthread_attr_t *attr, size_t size)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
(*attr)->stacksize = size;
|
|
return 0;
|
|
}
|
|
|
|
extern "C" int
|
|
pthread_attr_getstacksize (const pthread_attr_t *attr, size_t *size)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
*size = (*attr)->stacksize;
|
|
return 0;
|
|
}
|
|
|
|
extern "C" int
|
|
pthread_attr_destroy (pthread_attr_t *attr)
|
|
{
|
|
if (!pthread_attr::is_good_object (attr))
|
|
return EINVAL;
|
|
delete (*attr);
|
|
*attr = NULL;
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
pthread::join (pthread_t *thread, void **return_val)
|
|
{
|
|
pthread_t joiner = self ();
|
|
|
|
joiner->testcancel ();
|
|
|
|
// Initialize return val with NULL
|
|
if (return_val)
|
|
*return_val = NULL;
|
|
|
|
if (!is_good_object (&joiner))
|
|
return EINVAL;
|
|
|
|
if (!is_good_object (thread))
|
|
return ESRCH;
|
|
|
|
if (equal (*thread,joiner))
|
|
return EDEADLK;
|
|
|
|
(*thread)->mutex.lock ();
|
|
|
|
if ((*thread)->attr.joinable == PTHREAD_CREATE_DETACHED)
|
|
{
|
|
(*thread)->mutex.unlock ();
|
|
return EINVAL;
|
|
}
|
|
else
|
|
{
|
|
(*thread)->joiner = joiner;
|
|
(*thread)->attr.joinable = PTHREAD_CREATE_DETACHED;
|
|
(*thread)->mutex.unlock ();
|
|
|
|
switch (cancelable_wait ((*thread)->win32_obj_id, INFINITE, false))
|
|
{
|
|
case WAIT_OBJECT_0:
|
|
if (return_val)
|
|
*return_val = (*thread)->return_ptr;
|
|
delete (*thread);
|
|
break;
|
|
case WAIT_CANCELED:
|
|
// set joined thread back to joinable since we got canceled
|
|
(*thread)->joiner = NULL;
|
|
(*thread)->attr.joinable = PTHREAD_CREATE_JOINABLE;
|
|
joiner->cancel_self ();
|
|
// never reached
|
|
break;
|
|
default:
|
|
// should never happen
|
|
return EINVAL;
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
}
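/* Illustrative usage sketch, not part of the implementation: the common
   create/join round trip that exercises the joiner bookkeeping above.
   task () and run_task () are hypothetical.

     void *task (void *arg)
     {
       return (void *) 42;   // value handed back through pthread_join
     }

     void run_task (void)
     {
       pthread_t t;
       void *result;

       if (pthread_create (&t, NULL, task, NULL) == 0
           && pthread_join (t, &result) == 0)
         {
           // result == (void *) 42 here
         }
     }
*/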
|
|
|
|
int
|
|
pthread::detach (pthread_t *thread)
|
|
{
|
|
if (!is_good_object (thread))
|
|
return ESRCH;
|
|
|
|
(*thread)->mutex.lock ();
|
|
if ((*thread)->attr.joinable == PTHREAD_CREATE_DETACHED)
|
|
{
|
|
(*thread)->mutex.unlock ();
|
|
return EINVAL;
|
|
}
|
|
|
|
// check if thread is still alive
|
|
if (WaitForSingleObject ((*thread)->win32_obj_id, 0) == WAIT_TIMEOUT)
|
|
{
|
|
// force cleanup on exit
|
|
(*thread)->joiner = *thread;
|
|
(*thread)->attr.joinable = PTHREAD_CREATE_DETACHED;
|
|
(*thread)->mutex.unlock ();
|
|
}
|
|
else
|
|
{
|
|
// thread has already terminated.
|
|
(*thread)->mutex.unlock ();
|
|
delete (*thread);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
pthread::suspend (pthread_t *thread)
|
|
{
|
|
if (!is_good_object (thread))
|
|
return ESRCH;
|
|
|
|
if ((*thread)->suspended == false)
|
|
{
|
|
(*thread)->suspended = true;
|
|
SuspendThread ((*thread)->win32_obj_id);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
|
|
int
|
|
pthread::resume (pthread_t *thread)
|
|
{
|
|
if (!is_good_object (thread))
|
|
return ESRCH;
|
|
|
|
if ((*thread)->suspended == true)
|
|
ResumeThread ((*thread)->win32_obj_id);
|
|
(*thread)->suspended = false;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/* provided for source level compatibility.
|
|
See http://www.opengroup.org/onlinepubs/007908799/xsh/pthread_getconcurrency.html
|
|
*/
|
|
extern "C" int
|
|
pthread_getconcurrency (void)
|
|
{
|
|
return MT_INTERFACE->concurrency;
|
|
}
|
|
|
|
/* keep this in sync with sched.cc */
|
|
extern "C" int
|
|
pthread_getschedparam (pthread_t thread, int *policy,
|
|
struct sched_param *param)
|
|
{
|
|
if (!pthread::is_good_object (&thread))
|
|
return ESRCH;
|
|
*policy = SCHED_FIFO;
|
|
/* we don't return the current effective priority, we return the current
|
|
requested priority */
|
|
*param = thread->attr.schedparam;
|
|
return 0;
|
|
}
|
|
|
|
/* Thread SpecificData */
|
|
extern "C" int
|
|
pthread_key_create (pthread_key_t *key, void (*destructor) (void *))
|
|
{
|
|
/* The opengroup docs don't define if we should check this or not,
|
|
but creation is relatively rare. */
|
|
if (pthread_key::is_good_object (key))
|
|
return EBUSY;
|
|
|
|
*key = new pthread_key (destructor);
|
|
|
|
if (!pthread_key::is_good_object (key))
|
|
{
|
|
delete (*key);
|
|
*key = NULL;
|
|
return EAGAIN;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
extern "C" int
|
|
pthread_key_delete (pthread_key_t key)
|
|
{
|
|
if (!pthread_key::is_good_object (&key))
|
|
return EINVAL;
|
|
|
|
delete (key);
|
|
return 0;
|
|
}
|
|
|
|
/* provided for source level compatibility. See
|
|
http://www.opengroup.org/onlinepubs/007908799/xsh/pthread_getconcurrency.html
|
|
*/
|
|
extern "C" int
|
|
pthread_setconcurrency (int new_level)
|
|
{
|
|
if (new_level < 0)
|
|
return EINVAL;
|
|
MT_INTERFACE->concurrency = new_level;
|
|
return 0;
|
|
}
|
|
|
|
/* keep synchronised with sched.cc */
|
|
extern "C" int
|
|
pthread_setschedparam (pthread_t thread, int policy,
|
|
const struct sched_param *param)
|
|
{
|
|
if (!pthread::is_good_object (&thread))
|
|
return ESRCH;
|
|
if (policy != SCHED_FIFO)
|
|
return ENOTSUP;
|
|
if (!param)
|
|
return EINVAL;
|
|
int rv =
|
|
sched_set_thread_priority (thread->win32_obj_id, param->sched_priority);
|
|
if (!rv)
|
|
thread->attr.schedparam.sched_priority = param->sched_priority;
|
|
return rv;
|
|
}
|
|
|
|
|
|
extern "C" int
|
|
pthread_setspecific (pthread_key_t key, const void *value)
|
|
{
|
|
if (!pthread_key::is_good_object (&key))
|
|
return EINVAL;
|
|
(key)->set (value);
|
|
return 0;
|
|
}
|
|
|
|
extern "C" void *
|
|
pthread_getspecific (pthread_key_t key)
|
|
{
|
|
if (!pthread_key::is_good_object (&key))
|
|
return NULL;
|
|
|
|
return (key)->get ();
|
|
|
|
}

/* Thread synchronisation */
bool
pthread_cond::is_good_object (pthread_cond_t const *cond)
{
  if (verifyable_object_isvalid (cond, PTHREAD_COND_MAGIC) != VALID_OBJECT)
    return false;
  return true;
}

bool
pthread_cond::is_good_initializer (pthread_cond_t const *cond)
{
  if (verifyable_object_isvalid (cond, PTHREAD_COND_MAGIC, PTHREAD_COND_INITIALIZER) != VALID_STATIC_OBJECT)
    return false;
  return true;
}

bool
pthread_cond::is_good_initializer_or_object (pthread_cond_t const *cond)
{
  if (verifyable_object_isvalid (cond, PTHREAD_COND_MAGIC, PTHREAD_COND_INITIALIZER) == INVALID_OBJECT)
    return false;
  return true;
}

bool
pthread_cond::is_good_initializer_or_bad_object (pthread_cond_t const *cond)
{
  verifyable_object_state objectState = verifyable_object_isvalid (cond, PTHREAD_COND_MAGIC, PTHREAD_COND_INITIALIZER);
  if (objectState == VALID_OBJECT)
    return false;
  return true;
}

extern "C" int
pthread_cond_destroy (pthread_cond_t *cond)
{
  if (pthread_cond::is_good_initializer (cond))
    return 0;
  if (!pthread_cond::is_good_object (cond))
    return EINVAL;

  /* reads are atomic */
  if ((*cond)->waiting)
    return EBUSY;

  delete (*cond);
  *cond = NULL;

  return 0;
}

int
pthread_cond::init (pthread_cond_t *cond, const pthread_condattr_t *attr)
{
  if (attr && !pthread_condattr::is_good_object (attr))
    return EINVAL;
  if (!cond_initialization_lock.lock ())
    return EINVAL;

  if (!is_good_initializer_or_bad_object (cond))
    {
      cond_initialization_lock.unlock ();
      return EBUSY;
    }

  *cond = new pthread_cond (attr ? (*attr) : NULL);
  if (!is_good_object (cond))
    {
      delete (*cond);
      *cond = NULL;
      cond_initialization_lock.unlock ();
      return EAGAIN;
    }
  cond_initialization_lock.unlock ();
  return 0;
}

extern "C" int
pthread_cond_broadcast (pthread_cond_t *cond)
{
  if (pthread_cond::is_good_initializer (cond))
    return 0;
  if (!pthread_cond::is_good_object (cond))
    return EINVAL;

  (*cond)->unblock (true);

  return 0;
}

extern "C" int
pthread_cond_signal (pthread_cond_t *cond)
{
  if (pthread_cond::is_good_initializer (cond))
    return 0;
  if (!pthread_cond::is_good_object (cond))
    return EINVAL;

  (*cond)->unblock (false);

  return 0;
}

static int
__pthread_cond_dowait (pthread_cond_t *cond, pthread_mutex_t *mutex,
                       DWORD waitlength)
{
  if (!pthread_mutex::is_good_object (mutex))
    return EINVAL;
  if (!pthread_mutex::can_be_unlocked (mutex))
    return EPERM;

  if (pthread_cond::is_good_initializer (cond))
    pthread_cond::init (cond, NULL);
  if (!pthread_cond::is_good_object (cond))
    return EINVAL;

  return (*cond)->wait (*mutex, waitlength);
}

extern "C" int
pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
                        const struct timespec *abstime)
{
  struct timeval tv;
  long waitlength;

  pthread_testcancel ();

  if (check_valid_pointer (abstime))
    return EINVAL;

  gettimeofday (&tv, NULL);
  waitlength = abstime->tv_sec * 1000 + abstime->tv_nsec / (1000 * 1000);
  waitlength -= tv.tv_sec * 1000 + tv.tv_usec / 1000;
  if (waitlength < 0)
    return ETIMEDOUT;
  return __pthread_cond_dowait (cond, mutex, waitlength);
}
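/* The abstime argument above is an absolute wall-clock time, converted
   here to a relative wait in milliseconds.  A minimal caller-side sketch
   of building such a deadline (now plus 500 ms) with gettimeofday:

     struct timeval now;
     struct timespec deadline;

     gettimeofday (&now, NULL);
     deadline.tv_sec = now.tv_sec;
     deadline.tv_nsec = now.tv_usec * 1000 + 500 * 1000 * 1000;
     if (deadline.tv_nsec >= 1000 * 1000 * 1000)
       {
         deadline.tv_sec++;
         deadline.tv_nsec -= 1000 * 1000 * 1000;
       }
     // then: pthread_cond_timedwait (&cond, &mutex, &deadline);
*/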

extern "C" int
pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
{
  pthread_testcancel ();

  return __pthread_cond_dowait (cond, mutex, INFINITE);
}
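/* A minimal usage sketch of the condition variable calls, assuming only
   the standard pthread API.  The wait is always done in a loop that
   re-checks the predicate with the mutex held, since wakeups may be
   spurious or may race with other consumers.

     static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
     static pthread_cond_t ready = PTHREAD_COND_INITIALIZER;
     static int have_work;

     void *
     consumer (void *arg)
     {
       pthread_mutex_lock (&lock);
       while (!have_work)
         pthread_cond_wait (&ready, &lock);
       have_work = 0;
       pthread_mutex_unlock (&lock);
       return NULL;
     }

     void
     producer (void)
     {
       pthread_mutex_lock (&lock);
       have_work = 1;
       pthread_cond_signal (&ready);
       pthread_mutex_unlock (&lock);
     }
*/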

extern "C" int
pthread_condattr_init (pthread_condattr_t *condattr)
{
  if (check_valid_pointer (condattr))
    return EINVAL;
  *condattr = new pthread_condattr;
  if (!pthread_condattr::is_good_object (condattr))
    {
      delete (*condattr);
      *condattr = NULL;
      return EAGAIN;
    }
  return 0;
}

extern "C" int
pthread_condattr_getpshared (const pthread_condattr_t *attr, int *pshared)
{
  if (!pthread_condattr::is_good_object (attr))
    return EINVAL;
  *pshared = (*attr)->shared;
  return 0;
}

extern "C" int
pthread_condattr_setpshared (pthread_condattr_t *attr, int pshared)
{
  if (!pthread_condattr::is_good_object (attr))
    return EINVAL;
  if ((pshared < 0) || (pshared > 1))
    return EINVAL;
  /* shared cond vars not currently supported */
  if (pshared != PTHREAD_PROCESS_PRIVATE)
    return EINVAL;
  (*attr)->shared = pshared;
  return 0;
}

extern "C" int
pthread_condattr_destroy (pthread_condattr_t *condattr)
{
  if (!pthread_condattr::is_good_object (condattr))
    return EINVAL;
  delete (*condattr);
  *condattr = NULL;
  return 0;
}

/* RW locks */
bool
pthread_rwlock::is_good_object (pthread_rwlock_t const *rwlock)
{
  if (verifyable_object_isvalid (rwlock, PTHREAD_RWLOCK_MAGIC) != VALID_OBJECT)
    return false;
  return true;
}

bool
pthread_rwlock::is_good_initializer (pthread_rwlock_t const *rwlock)
{
  if (verifyable_object_isvalid (rwlock, PTHREAD_RWLOCK_MAGIC, PTHREAD_RWLOCK_INITIALIZER) != VALID_STATIC_OBJECT)
    return false;
  return true;
}

bool
pthread_rwlock::is_good_initializer_or_object (pthread_rwlock_t const *rwlock)
{
  if (verifyable_object_isvalid (rwlock, PTHREAD_RWLOCK_MAGIC, PTHREAD_RWLOCK_INITIALIZER) == INVALID_OBJECT)
    return false;
  return true;
}

bool
pthread_rwlock::is_good_initializer_or_bad_object (pthread_rwlock_t const *rwlock)
{
  verifyable_object_state objectState = verifyable_object_isvalid (rwlock, PTHREAD_RWLOCK_MAGIC, PTHREAD_RWLOCK_INITIALIZER);
  if (objectState == VALID_OBJECT)
    return false;
  return true;
}

extern "C" int
pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
  if (pthread_rwlock::is_good_initializer (rwlock))
    return 0;
  if (!pthread_rwlock::is_good_object (rwlock))
    return EINVAL;

  if ((*rwlock)->writer || (*rwlock)->readers ||
      (*rwlock)->waiting_readers || (*rwlock)->waiting_writers)
    return EBUSY;

  delete (*rwlock);
  *rwlock = NULL;

  return 0;
}

int
pthread_rwlock::init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
  if (attr && !pthread_rwlockattr::is_good_object (attr))
    return EINVAL;
  if (!rwlock_initialization_lock.lock ())
    return EINVAL;

  if (!is_good_initializer_or_bad_object (rwlock))
    {
      rwlock_initialization_lock.unlock ();
      return EBUSY;
    }

  *rwlock = new pthread_rwlock (attr ? (*attr) : NULL);
  if (!is_good_object (rwlock))
    {
      delete (*rwlock);
      *rwlock = NULL;
      rwlock_initialization_lock.unlock ();
      return EAGAIN;
    }
  rwlock_initialization_lock.unlock ();
  return 0;
}

extern "C" int
pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
  pthread_testcancel ();

  if (pthread_rwlock::is_good_initializer (rwlock))
    pthread_rwlock::init (rwlock, NULL);
  if (!pthread_rwlock::is_good_object (rwlock))
    return EINVAL;

  return (*rwlock)->rdlock ();
}

extern "C" int
pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
  if (pthread_rwlock::is_good_initializer (rwlock))
    pthread_rwlock::init (rwlock, NULL);
  if (!pthread_rwlock::is_good_object (rwlock))
    return EINVAL;

  return (*rwlock)->tryrdlock ();
}

extern "C" int
pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
  pthread_testcancel ();

  if (pthread_rwlock::is_good_initializer (rwlock))
    pthread_rwlock::init (rwlock, NULL);
  if (!pthread_rwlock::is_good_object (rwlock))
    return EINVAL;

  return (*rwlock)->wrlock ();
}

extern "C" int
pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
  if (pthread_rwlock::is_good_initializer (rwlock))
    pthread_rwlock::init (rwlock, NULL);
  if (!pthread_rwlock::is_good_object (rwlock))
    return EINVAL;

  return (*rwlock)->trywrlock ();
}

extern "C" int
pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
  if (pthread_rwlock::is_good_initializer (rwlock))
    return 0;
  if (!pthread_rwlock::is_good_object (rwlock))
    return EINVAL;

  return (*rwlock)->unlock ();
}
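/* A minimal usage sketch of the rwlock calls above, assuming only the
   standard pthread API.  table_lookup and table_update are hypothetical
   helpers; many readers may hold the lock at once, while a writer holds
   it exclusively.

     static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

     int
     lookup (int key)
     {
       pthread_rwlock_rdlock (&table_lock);
       int value = table_lookup (key);
       pthread_rwlock_unlock (&table_lock);
       return value;
     }

     void
     update (int key, int value)
     {
       pthread_rwlock_wrlock (&table_lock);
       table_update (key, value);
       pthread_rwlock_unlock (&table_lock);
     }
*/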

extern "C" int
pthread_rwlockattr_init (pthread_rwlockattr_t *rwlockattr)
{
  if (check_valid_pointer (rwlockattr))
    return EINVAL;
  *rwlockattr = new pthread_rwlockattr;
  if (!pthread_rwlockattr::is_good_object (rwlockattr))
    {
      delete (*rwlockattr);
      *rwlockattr = NULL;
      return EAGAIN;
    }
  return 0;
}

extern "C" int
pthread_rwlockattr_getpshared (const pthread_rwlockattr_t *attr, int *pshared)
{
  if (!pthread_rwlockattr::is_good_object (attr))
    return EINVAL;
  *pshared = (*attr)->shared;
  return 0;
}

extern "C" int
pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attr, int pshared)
{
  if (!pthread_rwlockattr::is_good_object (attr))
    return EINVAL;
  if ((pshared < 0) || (pshared > 1))
    return EINVAL;
  /* shared rwlock vars not currently supported */
  if (pshared != PTHREAD_PROCESS_PRIVATE)
    return EINVAL;
  (*attr)->shared = pshared;
  return 0;
}

extern "C" int
pthread_rwlockattr_destroy (pthread_rwlockattr_t *rwlockattr)
{
  if (!pthread_rwlockattr::is_good_object (rwlockattr))
    return EINVAL;
  delete (*rwlockattr);
  *rwlockattr = NULL;
  return 0;
}

/* Thread signal */
extern "C" int
pthread_kill (pthread_t thread, int sig)
{
  // lock myself, for the use of thread2signal
  // two different kills might clash: FIXME

  if (!pthread::is_good_object (&thread))
    return EINVAL;

  if (thread->sigs)
    myself->setthread2signal (thread);

  int rval = raise (sig);

  // unlock myself
  return rval;
}

extern "C" int
pthread_sigmask (int operation, const sigset_t *set, sigset_t *old_set)
{
  pthread *thread = pthread::self ();

  // lock this myself, for the use of thread2signal
  // two different kills might clash: FIXME

  if (thread->sigs)
    myself->setthread2signal (thread);

  int rval = sigprocmask (operation, set, old_set);

  // unlock this myself

  return rval;
}

/* ID */

extern "C" int
pthread_equal (pthread_t t1, pthread_t t2)
{
  return pthread::equal (t1, t2);
}

/* Mutexes */

/* FIXME: there's a potential race with PTHREAD_MUTEX_INITIALIZER:
   the mutex is not actually inited until the first use.
   So two threads trying to lock/trylock may collide.
   Solution: we need a global mutex on mutex creation, or possibly simply
   on all constructors that allow INITIALIZER macros.
   The lock should be very small: only around the init routine, not
   every test, or all mutex access will be synchronised.  */

int
pthread_mutex::init (pthread_mutex_t *mutex,
                     const pthread_mutexattr_t *attr)
{
  if ((attr && !pthread_mutexattr::is_good_object (attr)) || check_valid_pointer (mutex))
    return EINVAL;
  if (!mutex_initialization_lock.lock ())
    return EINVAL;

  if (!is_good_initializer_or_bad_object (mutex))
    {
      mutex_initialization_lock.unlock ();
      return EBUSY;
    }

  *mutex = new pthread_mutex (attr ? (*attr) : NULL);
  if (!is_good_object (mutex))
    {
      delete (*mutex);
      *mutex = NULL;
      mutex_initialization_lock.unlock ();
      return EAGAIN;
    }
  mutex_initialization_lock.unlock ();
  return 0;
}

extern "C" int
pthread_mutex_getprioceiling (const pthread_mutex_t *mutex,
                              int *prioceiling)
{
  pthread_mutex_t *themutex = (pthread_mutex_t *) mutex;
  if (pthread_mutex::is_good_initializer (mutex))
    pthread_mutex::init ((pthread_mutex_t *) mutex, NULL);
  if (!pthread_mutex::is_good_object (themutex))
    return EINVAL;
  /* We don't define _POSIX_THREAD_PRIO_PROTECT because we don't currently
     support mutex priorities.

     We can support mutex priorities in the future though:
     Store a priority with each mutex.
     When the mutex is obtained, set the thread priority as appropriate.
     When the mutex is released, reset the thread priority.  */
  return ENOSYS;
}

extern "C" int
pthread_mutex_lock (pthread_mutex_t *mutex)
{
  pthread_mutex_t *themutex = mutex;
  /* This could be simplified via is_good_initializer_or_object
     and is_good_initializer, but in a performance critical call like
     this... no.  */
  switch (verifyable_object_isvalid (themutex, PTHREAD_MUTEX_MAGIC, PTHREAD_MUTEX_INITIALIZER))
    {
    case INVALID_OBJECT:
      return EINVAL;
      break;
    case VALID_STATIC_OBJECT:
      if (pthread_mutex::is_good_initializer (mutex))
        {
          int rv = pthread_mutex::init (mutex, NULL);
          if (rv && rv != EBUSY)
            return rv;
        }
      /* No else needed.  If it's been initialized while we waited,
         we can just attempt to lock it.  */
      break;
    case VALID_OBJECT:
      break;
    }
  return (*themutex)->lock ();
}
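/* A minimal usage sketch of the on-demand initialization handled above:
   a statically initialized mutex is turned into a real object by the
   first lock attempt, so callers never invoke pthread_mutex_init
   explicitly.

     static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
     static long counter;

     long
     next_id (void)
     {
       pthread_mutex_lock (&counter_lock);   // first lock creates the mutex
       long id = ++counter;
       pthread_mutex_unlock (&counter_lock);
       return id;
     }
*/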

extern "C" int
pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  pthread_mutex_t *themutex = mutex;
  if (pthread_mutex::is_good_initializer (mutex))
    pthread_mutex::init (mutex, NULL);
  if (!pthread_mutex::is_good_object (themutex))
    return EINVAL;
  return (*themutex)->trylock ();
}

extern "C" int
pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  if (pthread_mutex::is_good_initializer (mutex))
    pthread_mutex::init (mutex, NULL);
  if (!pthread_mutex::is_good_object (mutex))
    return EINVAL;
  return (*mutex)->unlock ();
}

extern "C" int
pthread_mutex_destroy (pthread_mutex_t *mutex)
{
  int rv;

  if (pthread_mutex::is_good_initializer (mutex))
    return 0;
  if (!pthread_mutex::is_good_object (mutex))
    return EINVAL;

  rv = (*mutex)->destroy ();
  if (rv)
    return rv;

  *mutex = NULL;
  return 0;
}

extern "C" int
pthread_mutex_setprioceiling (pthread_mutex_t *mutex, int prioceiling,
                              int *old_ceiling)
{
  pthread_mutex_t *themutex = mutex;
  if (pthread_mutex::is_good_initializer (mutex))
    pthread_mutex::init (mutex, NULL);
  if (!pthread_mutex::is_good_object (themutex))
    return EINVAL;
  return ENOSYS;
}

/* Win32 doesn't support mutex priorities - see pthread_mutex_getprioceiling
   for more detail */
extern "C" int
pthread_mutexattr_getprotocol (const pthread_mutexattr_t *attr,
                               int *protocol)
{
  if (!pthread_mutexattr::is_good_object (attr))
    return EINVAL;
  return ENOSYS;
}

extern "C" int
pthread_mutexattr_getpshared (const pthread_mutexattr_t *attr,
                              int *pshared)
{
  if (!pthread_mutexattr::is_good_object (attr))
    return EINVAL;
  *pshared = (*attr)->pshared;
  return 0;
}

extern "C" int
pthread_mutexattr_gettype (const pthread_mutexattr_t *attr, int *type)
{
  if (!pthread_mutexattr::is_good_object (attr))
    return EINVAL;
  *type = (*attr)->mutextype;
  return 0;
}

/* FIXME: write and test process shared mutexes.  */
extern "C" int
pthread_mutexattr_init (pthread_mutexattr_t *attr)
{
  if (pthread_mutexattr::is_good_object (attr))
    return EBUSY;

  *attr = new pthread_mutexattr ();
  if (!pthread_mutexattr::is_good_object (attr))
    {
      delete (*attr);
      *attr = NULL;
      return ENOMEM;
    }
  return 0;
}

extern "C" int
pthread_mutexattr_destroy (pthread_mutexattr_t *attr)
{
  if (!pthread_mutexattr::is_good_object (attr))
    return EINVAL;
  delete (*attr);
  *attr = NULL;
  return 0;
}

/* Win32 doesn't support mutex priorities */
extern "C" int
pthread_mutexattr_setprotocol (pthread_mutexattr_t *attr, int protocol)
{
  if (!pthread_mutexattr::is_good_object (attr))
    return EINVAL;
  return ENOSYS;
}

/* Win32 doesn't support mutex priorities */
extern "C" int
pthread_mutexattr_setprioceiling (pthread_mutexattr_t *attr,
                                  int prioceiling)
{
  if (!pthread_mutexattr::is_good_object (attr))
    return EINVAL;
  return ENOSYS;
}

extern "C" int
pthread_mutexattr_getprioceiling (const pthread_mutexattr_t *attr,
                                  int *prioceiling)
{
  if (!pthread_mutexattr::is_good_object (attr))
    return EINVAL;
  return ENOSYS;
}

extern "C" int
pthread_mutexattr_setpshared (pthread_mutexattr_t *attr, int pshared)
{
  if (!pthread_mutexattr::is_good_object (attr))
    return EINVAL;
  /* We don't use pshared for anything as yet.  We need to test
     PROCESS_SHARED functionality.  */
  if (pshared != PTHREAD_PROCESS_PRIVATE)
    return EINVAL;
  (*attr)->pshared = pshared;
  return 0;
}

/* see pthread_mutexattr_gettype */
extern "C" int
pthread_mutexattr_settype (pthread_mutexattr_t *attr, int type)
{
  if (!pthread_mutexattr::is_good_object (attr))
    return EINVAL;

  switch (type)
    {
    case PTHREAD_MUTEX_ERRORCHECK:
    case PTHREAD_MUTEX_RECURSIVE:
    case PTHREAD_MUTEX_NORMAL:
      (*attr)->mutextype = type;
      break;
    default:
      return EINVAL;
    }

  return 0;
}
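/* A minimal usage sketch of the attribute calls above, assuming the
   standard pthread API: create a recursive mutex so the same thread may
   re-acquire it without deadlocking.  The attribute object may be
   destroyed once the mutex has been initialized.

     pthread_mutexattr_t attr;
     pthread_mutex_t m;

     pthread_mutexattr_init (&attr);
     pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
     pthread_mutex_init (&m, &attr);
     pthread_mutexattr_destroy (&attr);
*/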

/* Semaphores */

/* static members */
bool
semaphore::is_good_object (sem_t const * sem)
{
  if (verifyable_object_isvalid (sem, SEM_MAGIC) != VALID_OBJECT)
    return false;
  return true;
}

int
semaphore::init (sem_t *sem, int pshared, unsigned int value)
{
  /* opengroup calls this undefined */
  if (is_good_object (sem))
    return EBUSY;

  if (value > SEM_VALUE_MAX)
    return EINVAL;

  *sem = new semaphore (pshared, value);

  if (!is_good_object (sem))
    {
      delete (*sem);
      *sem = NULL;
      return EAGAIN;
    }
  return 0;
}

int
semaphore::destroy (sem_t *sem)
{
  if (!is_good_object (sem))
    return EINVAL;

  /* FIXME - new feature - test for busy against threads... */

  delete (*sem);
  *sem = NULL;
  return 0;
}

int
semaphore::wait (sem_t *sem)
{
  pthread_testcancel ();

  if (!is_good_object (sem))
    {
      set_errno (EINVAL);
      return -1;
    }

  (*sem)->_wait ();
  return 0;
}

int
semaphore::trywait (sem_t *sem)
{
  if (!is_good_object (sem))
    {
      set_errno (EINVAL);
      return -1;
    }

  return (*sem)->_trywait ();
}

int
semaphore::post (sem_t *sem)
{
  if (!is_good_object (sem))
    return EINVAL;

  (*sem)->_post ();
  return 0;
}
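/* A minimal usage sketch, assuming the usual sem_init/sem_wait/sem_post
   entry points that sit on top of these static members: a counting
   semaphore limiting concurrent users of a resource to four.

     static sem_t slots;

     void
     setup (void)
     {
       sem_init (&slots, 0, 4);   // count 4, not process-shared
     }

     void
     use_slot (void)
     {
       sem_wait (&slots);         // blocks while the count is zero
       // ... use the resource ...
       sem_post (&slots);
     }
*/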

/* pthread_null */
pthread *
pthread_null::get_null_pthread ()
{
  /* because of weird entry points */
  _instance.magic = 0;
  return &_instance;
}

pthread_null::pthread_null ()
{
  attr.joinable = PTHREAD_CREATE_DETACHED;
  /* Mark ourselves as invalid */
  magic = 0;
}

pthread_null::~pthread_null ()
{
}

void
pthread_null::create (void *(*)(void *), pthread_attr *, void *)
{
}

void
pthread_null::exit (void *value_ptr)
{
  ExitThread (0);
}

int
pthread_null::cancel ()
{
  return 0;
}

void
pthread_null::testcancel ()
{
}

int
pthread_null::setcancelstate (int state, int *oldstate)
{
  return EINVAL;
}

int
pthread_null::setcanceltype (int type, int *oldtype)
{
  return EINVAL;
}

void
pthread_null::push_cleanup_handler (__pthread_cleanup_handler *handler)
{
}

void
pthread_null::pop_cleanup_handler (int const execute)
{
}

unsigned long
pthread_null::getsequence_np ()
{
  return 0;
}

pthread_null pthread_null::_instance;

#endif // _MT_SAFE