Drop using _tlsbase and _tlstop in favor of access via NtCurrentTeb.

* cygtls.h (_tlsbase): Remove.  Replace throughout with
        NtCurrentTeb()->Tib.StackBase.
        (_tlstop): Remove. Replace throughout with
        NtCurrentTeb()->Tib.StackLimit.
        * dcrt0.cc (child_info_fork::alloc_stack): Move definition of local
        teb variable up to be used throughout.
        * include/cygwin/config.h (__getreent): Use inline function on both
        architectures.

Signed-off-by: Corinna Vinschen <corinna@vinschen.de>
This commit is contained in:
Corinna Vinschen 2015-12-02 12:11:06 +01:00
parent 8e6213210d
commit 7b0c063f12
7 changed files with 43 additions and 42 deletions

View File

@@ -1,3 +1,14 @@
2015-12-02 Corinna Vinschen <corinna@vinschen.de>
* cygtls.h (_tlsbase): Remove. Replace throughout with
NtCurrentTeb()->Tib.StackBase.
(_tlstop): Remove. Replace throughout with
NtCurrentTeb()->Tib.StackLimit.
* dcrt0.cc (child_info_fork::alloc_stack): Move definition of local
teb variable up to be used throughout.
* include/cygwin/config.h (__getreent): Use inline function on both
architectures.
2015-11-29 Corinna Vinschen <corinna@vinschen.de> 2015-11-29 Corinna Vinschen <corinna@vinschen.de>
* uinfo.cc (pwdgrp::fetch_account_from_windows): Only create 1-5-32-x * uinfo.cc (pwdgrp::fetch_account_from_windows): Only create 1-5-32-x

View File

@@ -286,18 +286,7 @@ private:
#include "cygerrno.h" #include "cygerrno.h"
#include "ntdll.h" #include "ntdll.h"
#ifdef __x86_64__ #define _my_tls (*((_cygtls *) ((PBYTE) NtCurrentTeb()->Tib.StackBase - CYGTLS_PADSIZE)))
/* When just using a "gs:X" asm for the x86_64 code, gcc wrongly creates
pc-relative instructions. However, NtCurrentTeb() is inline assembler
anyway, so using it here should be fast enough on x86_64. */
#define _tlsbase (NtCurrentTeb()->Tib.StackBase)
#define _tlstop (NtCurrentTeb()->Tib.StackLimit)
#else
extern PVOID _tlsbase __asm__ ("%fs:4");
extern PVOID _tlstop __asm__ ("%fs:8");
#endif
#define _my_tls (*((_cygtls *) ((char *)_tlsbase - CYGTLS_PADSIZE)))
extern _cygtls *_main_tls; extern _cygtls *_main_tls;
extern _cygtls *_sig_tls; extern _cygtls *_sig_tls;

View File

@@ -411,15 +411,16 @@ child_info_fork::alloc_stack ()
{ {
/* Make sure not to try a hard allocation if we have been forked off from /* Make sure not to try a hard allocation if we have been forked off from
the main thread of a Cygwin process which has been started from a 64 bit the main thread of a Cygwin process which has been started from a 64 bit
parent. In that case the _tlsbase of the forked child is not the same parent. In that case the StackBase of the forked child is not the same
as the _tlsbase of the parent (== stackbottom), but only because the as the StackBase of the parent (== stackbottom), but only because the
stack of the parent has been slightly rearranged. See comment in stack of the parent has been slightly rearranged. See comment in
wow64_revert_to_original_stack for details. We check here if the wow64_revert_to_original_stack for details. We check here if the
parent stack fits into the child stack. */ parent stack fits into the child stack. */
if (_tlsbase != stackbottom PTEB teb = NtCurrentTeb ();
if (teb->Tib.StackBase != stackbottom
&& (!wincap.is_wow64 () && (!wincap.is_wow64 ()
|| stacktop < NtCurrentTeb ()->DeallocationStack || stacktop < teb->DeallocationStack
|| stackbottom > _tlsbase)) || stackbottom > teb->Tib.StackBase))
{ {
void *stack_ptr; void *stack_ptr;
size_t stacksize; size_t stacksize;
@@ -432,10 +433,10 @@ child_info_fork::alloc_stack ()
stacksize = (PBYTE) stackbottom - (PBYTE) stackaddr; stacksize = (PBYTE) stackbottom - (PBYTE) stackaddr;
if (!VirtualAlloc (stackaddr, stacksize, MEM_RESERVE, PAGE_NOACCESS)) if (!VirtualAlloc (stackaddr, stacksize, MEM_RESERVE, PAGE_NOACCESS))
{ {
PTEB teb = NtCurrentTeb ();
api_fatal ("fork: can't reserve memory for parent stack " api_fatal ("fork: can't reserve memory for parent stack "
"%p - %p, (child has %p - %p), %E", "%p - %p, (child has %p - %p), %E",
stackaddr, stackbottom, teb->DeallocationStack, _tlsbase); stackaddr, stackbottom, teb->DeallocationStack,
teb->Tib.StackBase);
} }
/* Commit the area commited in parent. */ /* Commit the area commited in parent. */
stacksize = (PBYTE) stackbottom - (PBYTE) stacktop; stacksize = (PBYTE) stackbottom - (PBYTE) stacktop;
@@ -471,9 +472,10 @@ child_info_fork::alloc_stack ()
/* Fork has been called from main thread. Simply commit the region /* Fork has been called from main thread. Simply commit the region
of the stack commited in the parent but not yet commited in the of the stack commited in the parent but not yet commited in the
child and create new guardpages. */ child and create new guardpages. */
if (_tlstop > stacktop) if (NtCurrentTeb()->Tib.StackLimit > stacktop)
{ {
SIZE_T commitsize = (PBYTE) _tlstop - (PBYTE) stacktop; SIZE_T commitsize = (PBYTE) NtCurrentTeb()->Tib.StackLimit
- (PBYTE) stacktop;
if (!VirtualAlloc (stacktop, commitsize, MEM_COMMIT, PAGE_READWRITE)) if (!VirtualAlloc (stacktop, commitsize, MEM_COMMIT, PAGE_READWRITE))
api_fatal ("can't commit child memory for stack %p(%ly), %E", api_fatal ("can't commit child memory for stack %p(%ly), %E",
stacktop, commitsize); stacktop, commitsize);
@@ -482,14 +484,14 @@ child_info_fork::alloc_stack ()
MEM_COMMIT, PAGE_READWRITE | PAGE_GUARD)) MEM_COMMIT, PAGE_READWRITE | PAGE_GUARD))
api_fatal ("fork: couldn't allocate new stack guard page %p, %E", api_fatal ("fork: couldn't allocate new stack guard page %p, %E",
guardpage); guardpage);
_tlstop = stacktop; NtCurrentTeb()->Tib.StackLimit = stacktop;
} }
stackaddr = 0; stackaddr = 0;
/* This only affects forked children of a process started from a native /* This only affects forked children of a process started from a native
64 bit process, but it doesn't hurt to do it unconditionally. Fix 64 bit process, but it doesn't hurt to do it unconditionally. Fix
StackBase in the child to be the same as in the parent, so that the StackBase in the child to be the same as in the parent, so that the
computation of _my_tls is correct. */ computation of _my_tls is correct. */
_tlsbase = (PVOID) stackbottom; teb->Tib.StackBase = (PVOID) stackbottom;
} }
} }
@@ -918,8 +920,8 @@ dll_crt0_1 (void *)
this step. */ this step. */
if (fork_info->stackaddr) if (fork_info->stackaddr)
{ {
_tlsbase = (PVOID) fork_info->stackbottom; NtCurrentTeb()->Tib.StackBase = (PVOID) fork_info->stackbottom;
_tlstop = (PVOID) fork_info->stacktop; NtCurrentTeb()->Tib.StackLimit = (PVOID) fork_info->stacktop;
} }
/* Not resetting _my_tls.incyg here because presumably fork will overwrite /* Not resetting _my_tls.incyg here because presumably fork will overwrite

View File

@@ -307,7 +307,7 @@ frok::parent (volatile char * volatile stack_here)
ch.forker_finished = forker_finished; ch.forker_finished = forker_finished;
ch.stackbottom = _tlsbase; ch.stackbottom = NtCurrentTeb()->Tib.StackBase;
ch.stackaddr = NtCurrentTeb ()->DeallocationStack; ch.stackaddr = NtCurrentTeb ()->DeallocationStack;
if (!ch.stackaddr) if (!ch.stackaddr)
{ {
@@ -315,7 +315,7 @@ frok::parent (volatile char * volatile stack_here)
stack. If so, the entire stack is committed anyway and StackLimit stack. If so, the entire stack is committed anyway and StackLimit
points to the allocation address of the stack. Mark in guardsize that points to the allocation address of the stack. Mark in guardsize that
we must not set up guard pages. */ we must not set up guard pages. */
ch.stackaddr = ch.stacktop = _tlstop; ch.stackaddr = ch.stacktop = NtCurrentTeb()->Tib.StackLimit;
ch.guardsize = (size_t) -1; ch.guardsize = (size_t) -1;
} }
else else

View File

@@ -40,20 +40,19 @@ extern "C" {
#ifdef _COMPILING_NEWLIB #ifdef _COMPILING_NEWLIB
#ifdef __x86_64__ #ifdef __x86_64__
#include "../tlsoffsets64.h" #include "../tlsoffsets64.h"
/* We would like to use just "%gs:8", but on x86_64 gcc uses pc-relative
addressing and translates "gs:8" into the wrong addressing mode. */
static inline char *___getreent (void)
{
register char *ret;
__asm __volatile__ ("movq %%gs:8,%0" : "=r" (ret));
return ret + tls_local_clib;
}
#define __getreent() ((struct _reent *) ___getreent())
#else #else
#include "../tlsoffsets.h" #include "../tlsoffsets.h"
extern char *_tlsbase __asm__ ("%fs:4");
#define __getreent() (struct _reent *)(_tlsbase + tls_local_clib)
#endif #endif
extern inline struct _reent *__getreent (void)
{
register char *ret;
#ifdef __x86_64__
__asm __volatile__ ("movq %%gs:8,%0" : "=r" (ret));
#else
__asm __volatile__ ("movl %%fs:4,%0" : "=r" (ret));
#endif
return (struct _reent *) (ret + tls_local_clib);
}
#endif /* _COMPILING_NEWLIB */ #endif /* _COMPILING_NEWLIB */
#ifdef __x86_64__ #ifdef __x86_64__

View File

@@ -42,7 +42,7 @@ munge_threadfunc ()
if (!threadfunc_ix[0]) if (!threadfunc_ix[0])
{ {
char **peb; char **peb;
char **top = (char **) _tlsbase; char **top = (char **) NtCurrentTeb()->Tib.StackBase;
for (peb = ebp, i = 0; peb < top && i < 7; peb++) for (peb = ebp, i = 0; peb < top && i < 7; peb++)
if (*peb == search_for) if (*peb == search_for)
threadfunc_ix[i++] = peb - ebp; threadfunc_ix[i++] = peb - ebp;

View File

@@ -171,10 +171,10 @@ wow64_revert_to_original_stack (PVOID &allocationbase)
accordingly, and return the new, 16 byte aligned address for the accordingly, and return the new, 16 byte aligned address for the
stack pointer. The second half of the stack move is done by the stack pointer. The second half of the stack move is done by the
caller _dll_crt0. */ caller _dll_crt0. */
_tlsbase = (char *) newbase; NtCurrentTeb()->Tib.StackBase = (char *) newbase;
_tlstop = (char *) newtop; NtCurrentTeb()->Tib.StackLimit = (char *) newtop;
_main_tls = &_my_tls; _main_tls = &_my_tls;
return PTR_ADD (_tlsbase, -16); return PTR_ADD (NtCurrentTeb()->Tib.StackBase, -16);
} }
/* Respawn WOW64 process. This is only called if we can't reuse the original /* Respawn WOW64 process. This is only called if we can't reuse the original