* gendef (sigdelayed): 64 bit only: Push CPU flags before aligning
stack to avoid changing flag values.
Corinna Vinschen 2014-10-24 13:40:02 +00:00
parent 63a2c2c204
commit d3779269ca
2 changed files with 14 additions and 5 deletions
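
Why the flags have to be saved this early: per the log message, aligning the
stack changes the flag values, so an interrupted thread's flags were already
clobbered by the time the old code reached its pushf, which sat after the
alignment inside the register-save block (second gendef hunk). The new code
pushes the flags right after setting up %rbp and restores them from -8(%rbp)
on the way out. A minimal sketch of that pairing, condensed from the hunks
below ("..." stands for the unchanged alignment, register saves and handler
call):

	pushq	%rbp
	movq	%rsp,%rbp	# frame pointer; the pushed flags land at -8(%rbp)
	pushf			# save flags before %rsp gets realigned
	...			# align %rsp, save registers, run the handler
	movq	%rbp,%rsp	# drop the aligned save area
	subq	\$8, %rsp	# point %rsp back at the saved flags
	popf			# restore the interrupted thread's flags
	popq	%rbp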

ChangeLog
@@ -1,3 +1,9 @@
+2014-10-24  Kai Tietz  <corinna@vinschen.de>
+	    Corinna Vinschen  <corinna@vinschen.de>
+
+	* gendef (sigdelayed): 64 bit only: Push CPU flags before aligning
+	stack to avoid changing flag values.
+
 2014-10-22  Yaakov Selkowitz  <yselkowi@redhat.com>
 
 	* common.din (stime): Export.

gendef
@@ -187,11 +187,14 @@ _sigbe:	# return here after cygwin syscall
 	.global	sigdelayed
 	.seh_proc sigdelayed
 sigdelayed:
+	.seh_pushreg %rbp
 	pushq	%r10			# used for return address injection
 	.seh_pushreg %rbp
 	pushq	%rbp
 	.seh_pushreg %rbp
 	movq	%rsp,%rbp
+	.seh_pushreg %rax		# fake, there's no .seh_pushreg for the flags
+	pushf
 	# stack is aligned or unaligned on entry!
 	# make sure it is aligned from here on
 	# We could be called from an interrupted thread which doesn't know
@@ -224,9 +227,8 @@ sigdelayed:
 	.seh_pushreg %rbx
 	pushq	%rax
 	.seh_pushreg %rax
-	pushf
-	subq	\$0x130,%rsp
-	.seh_stackalloc 0x130
+	subq	\$0x128,%rsp
+	.seh_stackalloc 0x128
 	fnstcw	0x120(%rsp)
 	movdqa	%xmm15,0x110(%rsp)
 	movdqa	%xmm14,0x100(%rsp)
@@ -288,8 +290,7 @@ sigdelayed:
 	movdqa	0x110(%rsp),%xmm15
 	fninit
 	fldcw	0x120(%rsp)
-	addq	\$0x130,%rsp
-	popf
+	addq	\$0x128,%rsp
 	popq	%rax
 	popq	%rbx
 	popq	%rcx
@@ -304,6 +305,8 @@ sigdelayed:
 	popq	%r14
 	popq	%r15
 	movq	%rbp,%rsp
+	subq	\$8, %rsp
+	popf
 	popq	%rbp
 	xchgq	%r10,(%rsp)
 	ret