Unify names of all lock objects

In preparation for the patch that would allow retargeting of locking
routines, rename all lock objects to follow this pattern:

"__<name>_[recursive_]mutex".

The following locks were renamed:
__dd_hash_lock -> __dd_hash_mutex
__sfp_lock -> __sfp_recursive_mutex
__sinit_lock -> __sinit_recursive_mutex
__atexit_lock -> __atexit_recursive_mutex
_arc4random_mutex -> __arc4random_mutex
__env_lock_object -> __env_recursive_mutex
__malloc_lock_object -> __malloc_recursive_mutex
__atexit_mutex -> __at_quick_exit_mutex
__tz_lock_object -> __tz_mutex
This commit is contained in:
Freddie Chopin 2017-01-29 10:27:17 +01:00 committed by Jeff Johnston
parent 4e46ff3e81
commit 0eeb4c1d32
9 changed files with 40 additions and 40 deletions

View File

@ -71,7 +71,7 @@ static long dd_loccnt = 1; /* Index of entry for sequential readdir's */
static struct ddloc *dd_hash[NDIRHASH]; /* Hash list heads for ddlocs */
#ifdef HAVE_DD_LOCK
__LOCK_INIT(static, __dd_hash_lock);
__LOCK_INIT(static, __dd_hash_mutex);
#endif
/*
@ -92,7 +92,7 @@ _DEFUN(telldir, (dirp),
#ifdef HAVE_DD_LOCK
__lock_acquire_recursive(dirp->dd_lock);
__lock_acquire(__dd_hash_lock);
__lock_acquire(__dd_hash_mutex);
#endif
index = dd_loccnt++;
lp->loc_index = index;
@ -102,7 +102,7 @@ _DEFUN(telldir, (dirp),
lp->loc_next = dd_hash[LOCHASH(index)];
dd_hash[LOCHASH(index)] = lp;
#ifdef HAVE_DD_LOCK
__lock_release(__dd_hash_lock);
__lock_release(__dd_hash_mutex);
__lock_release_recursive(dirp->dd_lock);
#endif
return (index);
@ -124,7 +124,7 @@ _DEFUN(_seekdir, (dirp, loc),
struct dirent *dp;
#ifdef HAVE_DD_LOCK
__lock_acquire(__dd_hash_lock);
__lock_acquire(__dd_hash_mutex);
#endif
if (loc != 0) {
prevlp = &dd_hash[LOCHASH(loc)];
@ -137,7 +137,7 @@ _DEFUN(_seekdir, (dirp, loc),
}
if (lp == NULL) {
#ifdef HAVE_DD_LOCK
__lock_release(__dd_hash_lock);
__lock_release(__dd_hash_mutex);
#endif
return;
}
@ -163,7 +163,7 @@ found:
dirp->dd_loc = 0;
}
#ifdef HAVE_DD_LOCK
__lock_release(__dd_hash_lock);
__lock_release(__dd_hash_mutex);
#endif
}
@ -175,7 +175,7 @@ _DEFUN(_cleanupdir, (dirp),
int i;
#ifdef HAVE_DD_LOCK
__lock_acquire(__dd_hash_lock);
__lock_acquire(__dd_hash_mutex);
#endif
for (i = 0; i < NDIRHASH; ++i) {
struct ddloc head;
@ -200,7 +200,7 @@ _DEFUN(_cleanupdir, (dirp),
dd_hash[i] = head.loc_next;
}
#ifdef HAVE_DD_LOCK
__lock_release(__dd_hash_lock);
__lock_release(__dd_hash_mutex);
#endif
}

View File

@ -261,31 +261,31 @@ _DEFUN(__sinit, (s),
#ifndef __SINGLE_THREAD__
__LOCK_INIT_RECURSIVE(static, __sfp_lock);
__LOCK_INIT_RECURSIVE(static, __sinit_lock);
__LOCK_INIT_RECURSIVE(static, __sfp_recursive_mutex);
__LOCK_INIT_RECURSIVE(static, __sinit_recursive_mutex);
_VOID
_DEFUN_VOID(__sfp_lock_acquire)
{
__lock_acquire_recursive (__sfp_lock);
__lock_acquire_recursive (__sfp_recursive_mutex);
}
_VOID
_DEFUN_VOID(__sfp_lock_release)
{
__lock_release_recursive (__sfp_lock);
__lock_release_recursive (__sfp_recursive_mutex);
}
_VOID
_DEFUN_VOID(__sinit_lock_acquire)
{
__lock_acquire_recursive (__sinit_lock);
__lock_acquire_recursive (__sinit_recursive_mutex);
}
_VOID
_DEFUN_VOID(__sinit_lock_release)
{
__lock_release_recursive (__sinit_lock);
__lock_release_recursive (__sinit_recursive_mutex);
}
/* Walkable file locking routine. */

View File

@ -48,7 +48,7 @@ const void * __atexit_dummy = &__call_exitprocs;
#endif
#ifndef __SINGLE_THREAD__
extern _LOCK_RECURSIVE_T __atexit_lock;
extern _LOCK_RECURSIVE_T __atexit_recursive_mutex;
#endif
#ifdef _REENT_GLOBAL_ATEXIT
@ -74,7 +74,7 @@ _DEFUN (__register_exitproc,
register struct _atexit *p;
#ifndef __SINGLE_THREAD__
__lock_acquire_recursive(__atexit_lock);
__lock_acquire_recursive(__atexit_recursive_mutex);
#endif
p = _GLOBAL_ATEXIT;
@ -91,7 +91,7 @@ _DEFUN (__register_exitproc,
{
#ifndef _ATEXIT_DYNAMIC_ALLOC
#ifndef __SINGLE_THREAD__
__lock_release_recursive(__atexit_lock);
__lock_release_recursive(__atexit_recursive_mutex);
#endif
return -1;
#else
@ -100,7 +100,7 @@ _DEFUN (__register_exitproc,
if (!malloc)
{
#ifndef __SINGLE_THREAD__
__lock_release_recursive(__atexit_lock);
__lock_release_recursive(__atexit_recursive_mutex);
#endif
return -1;
}
@ -109,7 +109,7 @@ _DEFUN (__register_exitproc,
if (p == NULL)
{
#ifndef __SINGLE_THREAD__
__lock_release_recursive(__atexit_lock);
__lock_release_recursive(__atexit_recursive_mutex);
#endif
return -1;
}
@ -133,7 +133,7 @@ _DEFUN (__register_exitproc,
{
#ifndef _ATEXIT_DYNAMIC_ALLOC
#ifndef __SINGLE_THREAD__
__lock_release_recursive(__atexit_lock);
__lock_release_recursive(__atexit_recursive_mutex);
#endif
return -1;
#else
@ -143,7 +143,7 @@ _DEFUN (__register_exitproc,
if (args == NULL)
{
#ifndef __SINGLE_THREAD__
__lock_release(__atexit_lock);
__lock_release(__atexit_recursive_mutex);
#endif
return -1;
}
@ -163,7 +163,7 @@ _DEFUN (__register_exitproc,
}
p->_fns[p->_ind++] = fn;
#ifndef __SINGLE_THREAD__
__lock_release_recursive(__atexit_lock);
__lock_release_recursive(__atexit_recursive_mutex);
#endif
return 0;
}

View File

@ -11,7 +11,7 @@
/* Make this a weak reference to avoid pulling in free. */
void free(void *) _ATTRIBUTE((__weak__));
__LOCK_INIT_RECURSIVE(, __atexit_lock);
__LOCK_INIT_RECURSIVE(, __atexit_recursive_mutex);
#ifdef _REENT_GLOBAL_ATEXIT
struct _atexit *_global_atexit = _NULL;
@ -75,7 +75,7 @@ _DEFUN (__call_exitprocs, (code, d),
#ifndef __SINGLE_THREAD__
__lock_acquire_recursive(__atexit_lock);
__lock_acquire_recursive(__atexit_recursive_mutex);
#endif
restart:
@ -157,7 +157,7 @@ _DEFUN (__call_exitprocs, (code, d),
#endif
}
#ifndef __SINGLE_THREAD__
__lock_release_recursive(__atexit_lock);
__lock_release_recursive(__atexit_recursive_mutex);
#endif
}

View File

@ -39,11 +39,11 @@
#ifndef _ARC4_LOCK_INIT
#define _ARC4_LOCK_INIT __LOCK_INIT(static, _arc4random_mutex);
#define _ARC4_LOCK_INIT __LOCK_INIT(static, __arc4random_mutex);
#define _ARC4_LOCK() __lock_acquire(_arc4random_mutex)
#define _ARC4_LOCK() __lock_acquire(__arc4random_mutex)
#define _ARC4_UNLOCK() __lock_release(_arc4random_mutex)
#define _ARC4_UNLOCK() __lock_release(__arc4random_mutex)
#endif /* _ARC4_LOCK_INIT */

View File

@ -39,7 +39,7 @@ that it already holds.
#include <sys/lock.h>
#ifndef __SINGLE_THREAD__
__LOCK_INIT_RECURSIVE(static, __env_lock_object);
__LOCK_INIT_RECURSIVE(static, __env_recursive_mutex);
#endif
void
@ -47,7 +47,7 @@ __env_lock (ptr)
struct _reent *ptr;
{
#ifndef __SINGLE_THREAD__
__lock_acquire_recursive (__env_lock_object);
__lock_acquire_recursive (__env_recursive_mutex);
#endif
}
@ -56,6 +56,6 @@ __env_unlock (ptr)
struct _reent *ptr;
{
#ifndef __SINGLE_THREAD__
__lock_release_recursive (__env_lock_object);
__lock_release_recursive (__env_recursive_mutex);
#endif
}

View File

@ -40,7 +40,7 @@ that it already holds.
#include <sys/lock.h>
#ifndef __SINGLE_THREAD__
__LOCK_INIT_RECURSIVE(static, __malloc_lock_object);
__LOCK_INIT_RECURSIVE(static, __malloc_recursive_mutex);
#endif
void
@ -48,7 +48,7 @@ __malloc_lock (ptr)
struct _reent *ptr;
{
#ifndef __SINGLE_THREAD__
__lock_acquire_recursive (__malloc_lock_object);
__lock_acquire_recursive (__malloc_recursive_mutex);
#endif
}
@ -57,7 +57,7 @@ __malloc_unlock (ptr)
struct _reent *ptr;
{
#ifndef __SINGLE_THREAD__
__lock_release_recursive (__malloc_lock_object);
__lock_release_recursive (__malloc_recursive_mutex);
#endif
}

View File

@ -44,7 +44,7 @@ struct quick_exit_handler {
/**
* Lock protecting the handlers list.
*/
__LOCK_INIT(static, __atexit_mutex);
__LOCK_INIT(static, __at_quick_exit_mutex);
/**
* Stack of cleanup handlers. These will be invoked in reverse order when
*/
@ -60,10 +60,10 @@ at_quick_exit(void (*func)(void))
if (NULL == h)
return (1);
h->cleanup = func;
__lock_acquire(__atexit_mutex);
__lock_acquire(__at_quick_exit_mutex);
h->next = handlers;
handlers = h;
__lock_release(__atexit_mutex);
__lock_release(__at_quick_exit_mutex);
return (0);
}

View File

@ -36,14 +36,14 @@ until the corresponding <<__tz_unlock>> call on the same thread is made.
#include <sys/lock.h>
#ifndef __SINGLE_THREAD__
__LOCK_INIT(static, __tz_lock_object);
__LOCK_INIT(static, __tz_mutex);
#endif
_VOID
_DEFUN_VOID (__tz_lock)
{
#ifndef __SINGLE_THREAD__
__lock_acquire(__tz_lock_object);
__lock_acquire(__tz_mutex);
#endif
}
@ -51,6 +51,6 @@ _VOID
_DEFUN_VOID (__tz_unlock)
{
#ifndef __SINGLE_THREAD__
__lock_release(__tz_lock_object);
__lock_release(__tz_mutex);
#endif
}