From 27570b59eda95f93f62c30d343f9c913a9d2a137 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Nov 2012 10:14:11 +0100
Subject: [PATCH 37/62] powerpc: Add support for lazy preemption

Implement the powerpc pieces for lazy preempt.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/powerpc/Kconfig                   | 1 +
 arch/powerpc/include/asm/thread_info.h | 8 ++++++++
 arch/powerpc/kernel/interrupt.c        | 8 ++++++--
 3 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6050e6e10d32..0eff864d6ec3 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -242,6 +242,7 @@ config PPC
 	select HAVE_PERF_EVENTS_NMI		if PPC64
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_LAZY
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE
 	select HAVE_RSEQ
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index af58f1ed3952..520864de8bb2 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -53,6 +53,8 @@
 struct thread_info {
 	int		preempt_count;		/* 0 => preemptable,
 						   <0 => BUG */
+	int		preempt_lazy_count;	/* 0 => preemptable,
+						   <0 => BUG */
 #ifdef CONFIG_SMP
 	unsigned int	cpu;
 #endif
@@ -77,6 +79,7 @@ struct thread_info {
 #define INIT_THREAD_INFO(tsk)			\
 {						\
 	.preempt_count = INIT_PREEMPT_COUNT,	\
+	.preempt_lazy_count = 0,		\
 	.flags =	0,			\
 }
 
@@ -102,6 +105,7 @@ void arch_setup_new_exec(void);
 #define TIF_PATCH_PENDING	6	/* pending live patching update */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SINGLESTEP		8	/* singlestepping active */
+#define TIF_NEED_RESCHED_LAZY	9	/* lazy rescheduling necessary */
 #define TIF_SECCOMP		10	/* secure computing */
 #define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR		12	/* Force successful syscall return */
@@ -117,6 +121,7 @@ void arch_setup_new_exec(void);
 #define TIF_POLLING_NRFLAG	19	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_32BIT		20	/* 32 bit binary */
 
+
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -128,6 +133,7 @@ void arch_setup_new_exec(void);
 #define _TIF_PATCH_PENDING	(1<<TIF_PATCH_PENDING)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
+#define _TIF_NEED_RESCHED_LAZY	(1<<TIF_NEED_RESCHED_LAZY)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_RESTOREALL		(1<<TIF_RESTOREALL)
 #define _TIF_NOERROR		(1<<TIF_NOERROR)
@@ -141,10 +147,12 @@ void arch_setup_new_exec(void);
 				 _TIF_SYSCALL_EMU)
 
 #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+				 _TIF_NEED_RESCHED_LAZY | \
 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
 				 _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
 				 _TIF_NOTIFY_SIGNAL)
 #define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)
+#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
 
 /* Bits in local_flags */
 /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index cf770d86c03c..2c454731c250 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -186,7 +186,7 @@ interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
 	ti_flags = read_thread_flags();
 	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
 		local_irq_enable();
-		if (ti_flags & _TIF_NEED_RESCHED) {
+		if (ti_flags & _TIF_NEED_RESCHED_MASK) {
 			schedule();
 		} else {
 			/*
@@ -397,11 +397,15 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
 	/* Returning to a kernel context with local irqs enabled. */
 	WARN_ON_ONCE(!(regs->msr & MSR_EE));
 again:
-	if (IS_ENABLED(CONFIG_PREEMPT)) {
+	if (IS_ENABLED(CONFIG_PREEMPTION)) {
 		/* Return to preemptible kernel context */
 		if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
 			if (preempt_count() == 0)
 				preempt_schedule_irq();
+		} else if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED_LAZY)) {
+			if ((preempt_count() == 0) &&
+			    (current_thread_info()->preempt_lazy_count == 0))
+				preempt_schedule_irq();
 		}
 	}
 
-- 
2.43.0