!970 update Kernel-rt:preempt-RT to openEuler 5.10.0-144.0.0

From: @r2018 
Reviewed-by: @guohaocs2c 
Signed-off-by: @guohaocs2c
This commit is contained in:
openeuler-ci-bot 2023-03-10 09:26:00 +00:00 committed by Gitee
commit 0254ad5cec
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
2 changed files with 153 additions and 167 deletions

View File

@ -1,6 +1,6 @@
From bc25ef53fb7619733a689039f66e5e4438a91522 Mon Sep 17 00:00:00 2001
From 6ec3a78026d8b2eec230d6c06fec79e0d066b4f9 Mon Sep 17 00:00:00 2001
From: liyulei <liyulei@kylinos.cn>
Date: Mon, 6 Feb 2023 18:19:04 +0800
Date: Fri, 10 Mar 2023 16:22:16 +0800
Subject: [PATCH] apply preempt RT patch
---
@ -26,10 +26,9 @@ Subject: [PATCH] apply preempt RT patch
arch/arm/include/asm/irq.h | 2 +
arch/arm/include/asm/kmap_types.h | 10 -
arch/arm/include/asm/spinlock_types.h | 4 -
arch/arm/include/asm/thread_info.h | 10 +-
arch/arm/include/asm/thread_info.h | 12 +-
arch/arm/kernel/asm-offsets.c | 1 +
arch/arm/kernel/entry-armv.S | 19 +-
arch/arm/kernel/entry-common.S | 9 +-
arch/arm/kernel/signal.c | 3 +-
arch/arm/kernel/smp.c | 2 -
arch/arm/mm/Makefile | 1 -
@ -41,7 +40,7 @@ Subject: [PATCH] apply preempt RT patch
arch/arm64/include/asm/hardirq.h | 7 +-
arch/arm64/include/asm/preempt.h | 28 +-
arch/arm64/include/asm/spinlock_types.h | 4 -
arch/arm64/include/asm/thread_info.h | 7 +-
arch/arm64/include/asm/thread_info.h | 8 +-
arch/arm64/kernel/asm-offsets.c | 1 +
arch/arm64/kernel/entry.S | 13 +-
arch/arm64/kernel/fpsimd.c | 14 +-
@ -251,7 +250,7 @@ Subject: [PATCH] apply preempt RT patch
include/linux/dcache.h | 4 +-
include/linux/debug_locks.h | 3 +-
include/linux/delay.h | 6 +
include/linux/entry-common.h | 2 +-
include/linux/entry-common.h | 3 +-
include/linux/eventfd.h | 11 +-
include/linux/fs.h | 2 +-
include/linux/hardirq.h | 7 +-
@ -410,7 +409,7 @@ Subject: [PATCH] apply preempt RT patch
net/sched/sch_generic.c | 10 +
net/sunrpc/svc_xprt.c | 4 +-
net/xfrm/xfrm_state.c | 3 +-
406 files changed, 9015 insertions(+), 4804 deletions(-)
405 files changed, 9011 insertions(+), 4803 deletions(-)
delete mode 100644 arch/alpha/include/asm/kmap_types.h
delete mode 100644 arch/arc/include/asm/kmap_types.h
delete mode 100644 arch/arm/include/asm/kmap_types.h
@ -1247,7 +1246,7 @@ index 5976958647fe..a37c0803954b 100644
typedef struct {
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 9f7ca79cc76a..9f31470f695a 100644
index 070375d96e24..c636a1bf754a 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -55,6 +55,7 @@ struct cpu_context_save {
@ -1258,19 +1257,22 @@ index 9f7ca79cc76a..9f31470f695a 100644
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
__u32 cpu; /* cpu */
@@ -145,8 +146,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
@@ -147,9 +148,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
-#define TIF_PATCH_PENDING 8 /* pending live patching update */
-#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
+
+#define TIF_NEED_RESCHED_LAZY 7
+#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
+#define TIF_PATCH_PENDING 9 /* pending live patching update */
+#define TIF_NOTIFY_SIGNAL 10 /* signal notifications exist */
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
@@ -155,6 +157,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
@@ -158,6 +161,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@ -1278,16 +1280,15 @@ index 9f7ca79cc76a..9f31470f695a 100644
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
@@ -171,7 +174,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
@@ -175,7 +179,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
* Change these and you break ASM code in entry-common.S
*/
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NEED_RESCHED_LAZY)
- _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | _TIF_NEED_RESCHED_LAZY | \
_TIF_NOTIFY_SIGNAL)
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 70993af22d80..024c65c3a0f2 100644
--- a/arch/arm/kernel/asm-offsets.c
@ -1341,36 +1342,8 @@ index 4332e5950042..efb2d0755ce7 100644
#endif
__und_fault:
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7a2e63dfb4d9..37c91a55027e 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -54,7 +54,9 @@ __ret_fast_syscall:
cmp r2, r1
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
+ bne fast_work_pending
+ tst r1, #_TIF_SECCOMP
bne fast_work_pending
@@ -92,8 +94,11 @@ __ret_fast_syscall:
cmp r2, r1
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
+ bne do_slower_path
+ tst r1, #_TIF_SECCOMP
beq no_work_pending
+do_slower_path:
UNWIND(.fnend )
ENDPROC(ret_fast_syscall)
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 2f81d3af5f9a..6e69f7b3d581 100644
index a3a38d0a4c85..f04ccf19ab1f 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -649,7 +649,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
@ -1598,7 +1571,7 @@ index 187fab227b50..000000000000
- return (void *)vaddr;
-}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a0bba8e5426a..c528009516fd 100644
index 6ccc012f62c2..6b3a32110ef4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -78,6 +78,7 @@ config ARM64
@ -1708,7 +1681,7 @@ index 18782f0c4721..6672b05350b4 100644
#include <asm-generic/qrwlock_types.h>
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index af49b6190aee..0ad7b958c566 100644
index dd8d27ea7e78..6464a3224372 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -27,6 +27,7 @@ struct thread_info {
@ -1719,30 +1692,33 @@ index af49b6190aee..0ad7b958c566 100644
union {
u64 preempt_count; /* 0 => preemptible, <0 => bug */
struct {
@@ -69,6 +70,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
@@ -70,6 +71,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
#define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */
+#define TIF_NEED_RESCHED_LAZY 6
#define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */
+#define TIF_NEED_RESCHED_LAZY 7
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
@@ -101,14 +103,17 @@ void arch_release_task_struct(struct task_struct *tsk);
@@ -102,6 +104,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_SVE (1 << TIF_SVE)
#define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT)
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_32BIT_AARCH64 (1 << TIF_32BIT_AARCH64)
#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
@@ -109,9 +112,12 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
- _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT)
+ _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
+ _TIF_NEED_RESCHED_LAZY)
- _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
+ _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | _TIF_NEED_RESCHED_LAZY |\
_TIF_NOTIFY_SIGNAL)
+
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
_TIF_SYSCALL_EMU)
@ -1844,7 +1820,7 @@ index 9a8f7c256117..c0753dcdb22a 100644
static DEFINE_PER_CPU(call_single_data_t, cpu_backtrace_csd) =
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index e5e2f1e888a2..c5fd06d5285b 100644
index 17cb54d1e420..7f4a034530bd 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -694,7 +694,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
@ -1857,7 +1833,7 @@ index e5e2f1e888a2..c5fd06d5285b 100644
local_daif_restore(DAIF_PROCCTX_NOIRQ);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 7527ac19332f..7d4b7d6d097d 100644
index a94acb502237..5d1e308e54f8 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -860,7 +860,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
@ -2931,7 +2907,7 @@ index 1c8460e23583..b1653c160bab 100644
canary ^= LINUX_VERSION_CODE;
canary &= CANARY_MASK;
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 7480fbc4d79d..0f7ccf38a014 100644
index f4f4564c62f9..5b7e9bb9ff03 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -54,6 +54,8 @@
@ -2943,7 +2919,7 @@ index 7480fbc4d79d..0f7ccf38a014 100644
#ifdef CONFIG_SMP
unsigned int cpu;
#endif
@@ -106,11 +108,12 @@ void arch_setup_new_exec(void);
@@ -107,11 +109,12 @@ void arch_setup_new_exec(void);
#define TIF_SINGLESTEP 8 /* singlestepping active */
#define TIF_NOHZ 9 /* in adaptive nohz mode */
#define TIF_SECCOMP 10 /* secure computing */
@ -2959,7 +2935,7 @@ index 7480fbc4d79d..0f7ccf38a014 100644
#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
for stack store? */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
@@ -119,6 +122,9 @@ void arch_setup_new_exec(void);
@@ -120,6 +123,9 @@ void arch_setup_new_exec(void);
#endif
#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_32BIT 20 /* 32 bit binary */
@ -2969,7 +2945,7 @@ index 7480fbc4d79d..0f7ccf38a014 100644
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -138,6 +144,7 @@ void arch_setup_new_exec(void);
@@ -140,6 +146,7 @@ void arch_setup_new_exec(void);
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
@ -2977,18 +2953,20 @@ index 7480fbc4d79d..0f7ccf38a014 100644
#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
@@ -145,8 +152,10 @@ void arch_setup_new_exec(void);
@@ -147,10 +154,12 @@ void arch_setup_new_exec(void);
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_RESTORE_TM | _TIF_PATCH_PENDING)
+ _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
+ _TIF_NEED_RESCHED_LAZY)
- _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
+ _TIF_RESTORE_TM | _TIF_PATCH_PENDING | _TIF_NEED_RESCHED_LAZY | \
_TIF_NOTIFY_SIGNAL)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
#define TLF_NAPPING 0 /* idle thread enabled NAP mode */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 760f656efcf6..0a351a99d090 100644
--- a/arch/powerpc/kernel/asm-offsets.c
@ -4894,7 +4872,7 @@ index 7fb482f0f25b..3df0a95c9e13 100644
canary += tsc + (tsc << 32UL);
canary &= CANARY_MASK;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index a225c6e2ca6d..414c90f04bc3 100644
index 2b3c980698a3..c67e0be1e68d 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -60,6 +60,8 @@ struct thread_info {
@ -4924,23 +4902,23 @@ index a225c6e2ca6d..414c90f04bc3 100644
#endif
/*
@@ -99,6 +106,7 @@ struct thread_info {
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
#define TIF_IA32 17 /* IA32 compatibility process */
#define TIF_SLD 18 /* Restore split lock detection on context switch */
+#define TIF_NEED_RESCHED_LAZY 19 /* lazy rescheduling necessary */
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
@@ -128,6 +136,7 @@ struct thread_info {
#define _TIF_NOTSC (1 << TIF_NOTSC)
@@ -109,6 +116,7 @@ struct thread_info {
#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
#define TIF_X32 30 /* 32-bit native x86-64 binary */
+#define TIF_NEED_RESCHED_LAZY 31 /* lazy rescheduling necessary */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -130,6 +138,7 @@ struct thread_info {
#define _TIF_IA32 (1 << TIF_IA32)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_SLD (1 << TIF_SLD)
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
@@ -160,6 +169,8 @@ struct thread_info {
@@ -162,6 +171,8 @@ struct thread_info {
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
@ -5085,10 +5063,10 @@ index 440eed558558..7cfc4e6b7c94 100644
}
+#endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bdc7b9c1f82a..d24459dd8f69 100644
index 53ef53d5b414..5d35b8e5346c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8229,6 +8229,14 @@ int kvm_arch_init(void *opaque)
@@ -8266,6 +8266,14 @@ int kvm_arch_init(void *opaque)
goto out;
}
@ -5438,7 +5416,7 @@ index 673196fe862e..0735ca5e8f86 100644
kmap_waitqueues_init();
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c02e42071615..6791698ebca1 100644
index 663c9f5d6556..4025e1bfab2e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -44,7 +44,7 @@
@ -6572,7 +6550,7 @@ index 7845fa5de79e..043e058bb27c 100644
#include "hv_trace.h"
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 5d820037e291..ef5e12364119 100644
index 514279dac7cb..cda7c8b90659 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -22,6 +22,7 @@
@ -8350,10 +8328,10 @@ index 7436a17a20c1..45a821a8cc46 100644
inode->dirtied_when = 0;
diff --git a/fs/namei.c b/fs/namei.c
index 07c00ade4c1a..a53e99d66dfa 100644
index efed178cbf59..0a9034c97a31 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1528,7 +1528,7 @@ static struct dentry *__lookup_slow(const struct qstr *name,
@@ -1538,7 +1538,7 @@ static struct dentry *__lookup_slow(const struct qstr *name,
{
struct dentry *dentry, *old;
struct inode *inode = dir->d_inode;
@ -8362,7 +8340,7 @@ index 07c00ade4c1a..a53e99d66dfa 100644
/* Don't go there if it's already dead */
if (unlikely(IS_DEADDIR(inode)))
@@ -3023,7 +3023,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
@@ -3035,7 +3035,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
@ -8765,18 +8743,21 @@ index e8607992c68a..cd24f34b4ad0 100644
+
#endif /* defined(_LINUX_DELAY_H) */
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index d8e1c798dc9d..d2aca09f7027 100644
index de029656de13..6f262f3d63a4 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -70,7 +70,7 @@
@@ -70,9 +70,10 @@
#define EXIT_TO_USER_MODE_WORK \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | \
+ _TIF_NEED_RESCHED_MASK | _TIF_PATCH_PENDING | \
- _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
+ _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |_TIF_NEED_RESCHED_MASK | \
ARCH_EXIT_TO_USER_MODE_WORK)
+
/**
* arch_check_user_regs - Architecture specific sanity check for user mode regs
* @regs: Pointer to currents pt_regs
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index dc4fd8a6644d..836b4c021a0a 100644
--- a/include/linux/eventfd.h
@ -8816,7 +8797,7 @@ index dc4fd8a6644d..836b4c021a0a 100644
#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 45ea1243118c..3fb43df18354 100644
index 98236a86cca0..b63177f6db27 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -719,7 +719,7 @@ struct inode {
@ -11175,7 +11156,7 @@ index 4c715be48717..9323af8a9244 100644
* lock for reading
*/
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a84372945f9e..58e834355ee4 100644
index 4c6e8c5183fb..dc0ae903665c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -36,6 +36,7 @@
@ -11547,7 +11528,7 @@ index 3038a0610407..fff1656c6b6f 100644
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 26c431883c69..c91fa019e28d 100644
index d16c8bd085f3..d7248f71dcc3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -298,6 +298,7 @@ struct sk_buff_head {
@ -11558,7 +11539,7 @@ index 26c431883c69..c91fa019e28d 100644
};
struct sk_buff;
@@ -1924,6 +1925,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
@@ -1929,6 +1930,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
__skb_queue_head_init(list);
}
@ -12548,7 +12529,7 @@ index 9144e0f09a30..464d14b2aca3 100644
spinlock_t xfrm_policy_lock;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 250569d8df65..c8b8dba10d9a 100644
index 73c699355470..8568edc0b633 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -10,6 +10,7 @@
@ -12732,7 +12713,7 @@ index e62a623031ea..b95f8784c4e4 100644
This option turns the kernel into a real-time kernel by replacing
various locking primitives (spinlocks, rwlocks, etc.) with
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index dc1f782f8e0a..879204826e1b 100644
index 60489cc5a92a..0a7c0a5635a1 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -351,7 +351,7 @@ void cpuset_read_unlock(void)
@ -12846,7 +12827,7 @@ index dc1f782f8e0a..879204826e1b 100644
}
free_cpumasks(NULL, &tmpmask);
@@ -2457,7 +2457,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
@@ -2456,7 +2456,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;
@ -12855,7 +12836,7 @@ index dc1f782f8e0a..879204826e1b 100644
switch (type) {
case FILE_CPULIST:
@@ -2479,7 +2479,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
@@ -2478,7 +2478,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
ret = -EINVAL;
}
@ -12864,7 +12845,7 @@ index dc1f782f8e0a..879204826e1b 100644
return ret;
}
@@ -2792,14 +2792,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
@@ -2791,14 +2791,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpuset_inc();
@ -12881,7 +12862,7 @@ index dc1f782f8e0a..879204826e1b 100644
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
@@ -2826,12 +2826,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
@@ -2825,12 +2825,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
}
rcu_read_unlock();
@ -12896,7 +12877,7 @@ index dc1f782f8e0a..879204826e1b 100644
out_unlock:
percpu_up_write(&cpuset_rwsem);
put_online_cpus();
@@ -2887,7 +2887,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
@@ -2886,7 +2886,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
percpu_down_write(&cpuset_rwsem);
@ -12905,7 +12886,7 @@ index dc1f782f8e0a..879204826e1b 100644
if (is_in_v2_mode()) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2898,7 +2898,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
@@ -2897,7 +2897,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
@ -12914,7 +12895,7 @@ index dc1f782f8e0a..879204826e1b 100644
percpu_up_write(&cpuset_rwsem);
}
@@ -2995,12 +2995,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
@@ -2994,12 +2994,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
{
bool is_empty;
@ -12929,7 +12910,7 @@ index dc1f782f8e0a..879204826e1b 100644
/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -3037,10 +3037,10 @@ hotplug_update_tasks(struct cpuset *cs,
@@ -3036,10 +3036,10 @@ hotplug_update_tasks(struct cpuset *cs,
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
@ -12942,7 +12923,7 @@ index dc1f782f8e0a..879204826e1b 100644
if (cpus_updated)
update_tasks_cpumask(cs);
@@ -3107,10 +3107,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
@@ -3106,10 +3106,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
(parent->partition_root_state == PRS_ERROR))) {
if (cs->nr_subparts_cpus) {
@ -12955,7 +12936,7 @@ index dc1f782f8e0a..879204826e1b 100644
compute_effective_cpumask(&new_cpus, cs, parent);
}
@@ -3124,9 +3124,9 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
@@ -3123,9 +3123,9 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
cpumask_empty(&new_cpus)) {
update_parent_subparts_cpumask(cs, partcmd_disable,
NULL, tmp);
@ -12967,7 +12948,7 @@ index dc1f782f8e0a..879204826e1b 100644
}
cpuset_force_rebuild();
}
@@ -3206,7 +3206,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
@@ -3205,7 +3205,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
@ -12976,7 +12957,7 @@ index dc1f782f8e0a..879204826e1b 100644
if (!on_dfl)
cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
/*
@@ -3226,17 +3226,17 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
@@ -3225,17 +3225,17 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
}
}
cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
@ -12997,7 +12978,7 @@ index dc1f782f8e0a..879204826e1b 100644
update_tasks_nodemask(&top_cpuset);
}
@@ -3340,11 +3340,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
@@ -3339,11 +3339,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
unsigned long flags;
@ -13011,7 +12992,7 @@ index dc1f782f8e0a..879204826e1b 100644
}
/**
@@ -3405,11 +3405,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
@@ -3404,11 +3404,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
nodemask_t mask;
unsigned long flags;
@ -13025,7 +13006,7 @@ index dc1f782f8e0a..879204826e1b 100644
return mask;
}
@@ -3501,14 +3501,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
@@ -3500,14 +3500,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
return true;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
@ -13129,7 +13110,7 @@ index 4e09fab52faf..1f5c577b926e 100644
skip--;
continue;
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 2228de39bb4f..4d29b123f0d5 100644
index a028b28daed5..382c0284a07f 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -2,6 +2,7 @@
@ -13140,7 +13121,7 @@ index 2228de39bb4f..4d29b123f0d5 100644
#include <linux/livepatch.h>
#include <linux/audit.h>
@@ -148,9 +149,17 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
@@ -156,9 +157,17 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
local_irq_enable_exit_to_user(ti_work);
@ -13159,7 +13140,7 @@ index 2228de39bb4f..4d29b123f0d5 100644
if (ti_work & _TIF_UPROBE)
uprobe_notify_resume(regs);
@@ -201,6 +210,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
@@ -209,6 +218,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
/* Ensure that the address limit is intact and no locks are held */
addr_limit_user_check();
@ -13167,7 +13148,7 @@ index 2228de39bb4f..4d29b123f0d5 100644
lockdep_assert_irqs_disabled();
lockdep_sys_exit();
}
@@ -360,7 +370,7 @@ void irqentry_exit_cond_resched(void)
@@ -368,7 +378,7 @@ void irqentry_exit_cond_resched(void)
rcu_irq_exit_check_preempt();
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
WARN_ON_ONCE(!on_thread_stack());
@ -13177,7 +13158,7 @@ index 2228de39bb4f..4d29b123f0d5 100644
}
}
diff --git a/kernel/exit.c b/kernel/exit.c
index d612cb5b5943..f1c818aa5eed 100644
index 26a81ea63156..c15ca545011a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -153,7 +153,7 @@ static void __exit_signal(struct task_struct *tsk)
@ -13190,7 +13171,7 @@ index d612cb5b5943..f1c818aa5eed 100644
spin_unlock(&sighand->siglock);
diff --git a/kernel/fork.c b/kernel/fork.c
index a01cda37dd25..d66c7f0fa43f 100644
index 0e141623a95d..7cb01624ae0c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -42,6 +42,7 @@
@ -13246,20 +13227,21 @@ index a01cda37dd25..d66c7f0fa43f 100644
io_uring_free(tsk);
cgroup_free(tsk);
task_numa_free(tsk, true);
@@ -950,10 +973,12 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->splice_pipe = NULL;
@@ -951,11 +974,13 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->task_frag.page = NULL;
tsk->wake_q.next = NULL;
tsk->pf_io_worker = NULL;
+ tsk->wake_q_sleeper.next = NULL;
account_kernel_stack(tsk, 1);
kcov_task_init(tsk);
+ kmap_local_fork(tsk);
+ kmap_local_fork(tsk);
#ifdef CONFIG_FAULT_INJECTION
tsk->fail_nth = 0;
@@ -2076,6 +2101,7 @@ static __latent_entropy struct task_struct *copy_process(
#endif
@@ -2079,6 +2104,7 @@ static __latent_entropy struct task_struct *copy_process(
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
@ -19458,7 +19440,7 @@ index b1d7aef10e6a..5e11e30f45eb 100644
This option boosts the priority of preempted RCU readers that
block the current preemptible RCU grace period for too long.
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4e6a44683248..8937a7a2b33f 100644
index 4d20763aea33..c85b881c99d9 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -100,8 +100,10 @@ static struct rcu_state rcu_state = {
@ -19490,7 +19472,7 @@ index 849f0aa99333..dd94a602a6d2 100644
#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 62d14fba4ca6..37a6eaceaad5 100644
index a2ea3b0ab334..e7a295c4fd0a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -65,7 +65,11 @@ const_debug unsigned int sysctl_sched_features =
@ -22132,7 +22114,7 @@ index e1c655f928c7..f230b1ac7f91 100644
list_splice_init(&q->task_list, &tmp);
while (!list_empty(&tmp)) {
diff --git a/kernel/signal.c b/kernel/signal.c
index d874c96315a6..2b8ba0d38444 100644
index cf498d949f2f..9996986c22fa 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -20,6 +20,7 @@
@ -23324,10 +23306,10 @@ index 7042544c5bde..c711eb334811 100644
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a27605c17f07..f27c35376159 100644
index 9db32d9739f5..663f295f9948 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4818,9 +4818,7 @@ void show_workqueue_state(void)
@@ -4816,9 +4816,7 @@ void show_workqueue_state(void)
* drivers that queue work while holding locks
* also taken in their write paths.
*/
@ -23337,7 +23319,7 @@ index a27605c17f07..f27c35376159 100644
}
raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
/*
@@ -4844,7 +4842,6 @@ void show_workqueue_state(void)
@@ -4842,7 +4840,6 @@ void show_workqueue_state(void)
* queue work while holding locks also taken in their write
* paths.
*/
@ -23345,7 +23327,7 @@ index a27605c17f07..f27c35376159 100644
pr_info("pool %d:", pool->id);
pr_cont_pool_info(pool);
pr_cont(" hung=%us workers=%d",
@@ -4859,7 +4856,6 @@ void show_workqueue_state(void)
@@ -4857,7 +4854,6 @@ void show_workqueue_state(void)
first = false;
}
pr_cont("\n");
@ -23353,7 +23335,7 @@ index a27605c17f07..f27c35376159 100644
next_pool:
raw_spin_unlock_irqrestore(&pool->lock, flags);
/*
@@ -4951,6 +4947,10 @@ static void unbind_workers(int cpu)
@@ -4949,6 +4945,10 @@ static void unbind_workers(int cpu)
pool->flags |= POOL_DISASSOCIATED;
raw_spin_unlock_irq(&pool->lock);
@ -23365,7 +23347,7 @@ index a27605c17f07..f27c35376159 100644
/*
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 10e425c30486..68857da74e6f 100644
index f53afec6f7ae..b285b57741c2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1406,7 +1406,7 @@ config DEBUG_ATOMIC_SLEEP
@ -23729,7 +23711,7 @@ index 78a630bbd03d..d27a80502204 100644
if ((wait_state != TASK_RUNNING ||
diff --git a/mm/Kconfig b/mm/Kconfig
index be7fd4ed2c4f..03a22fc5ae3b 100644
index f66457168de9..fccd4ebdbc1f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -404,7 +404,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
@ -24060,7 +24042,7 @@ index efe38ab479b5..ad72e587ce54 100644
#if defined(HASHED_PAGE_VIRTUAL)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b2c4bc4bb591..8c5d1d0c62dd 100644
index 635cb8b65b86..a6fbb7f7a297 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -67,6 +67,7 @@
@ -24102,7 +24084,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
}
/**
@@ -2172,6 +2183,7 @@ void unlock_page_memcg(struct page *page)
@@ -2169,6 +2180,7 @@ void unlock_page_memcg(struct page *page)
EXPORT_SYMBOL(unlock_page_memcg);
struct memcg_stock_pcp {
@ -24110,7 +24092,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
struct mem_cgroup *cached; /* this never be root cgroup */
unsigned int nr_pages;
@@ -2223,7 +2235,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
@@ -2220,7 +2232,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
if (nr_pages > MEMCG_CHARGE_BATCH)
return ret;
@ -24119,7 +24101,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
stock = this_cpu_ptr(&memcg_stock);
if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -2231,7 +2243,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
@@ -2228,7 +2240,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
ret = true;
}
@ -24128,7 +24110,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
return ret;
}
@@ -2266,14 +2278,14 @@ static void drain_local_stock(struct work_struct *dummy)
@@ -2263,14 +2275,14 @@ static void drain_local_stock(struct work_struct *dummy)
* The only protection from memory hotplug vs. drain_stock races is
* that we always operate on local CPU stock here with IRQ disabled
*/
@ -24145,7 +24127,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
}
/*
@@ -2285,7 +2297,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
@@ -2282,7 +2294,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
struct memcg_stock_pcp *stock;
unsigned long flags;
@ -24154,7 +24136,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached != memcg) { /* reset if necessary */
@@ -2298,7 +2310,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
@@ -2295,7 +2307,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
if (stock->nr_pages > MEMCG_CHARGE_BATCH)
drain_stock(stock);
@ -24163,7 +24145,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
}
/*
@@ -2318,7 +2330,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
@@ -2315,7 +2327,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
* as well as workers from this path always operate on the local
* per-cpu data. CPU up doesn't touch memcg_stock at all.
*/
@ -24172,7 +24154,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
@@ -2341,7 +2353,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
@@ -2338,7 +2350,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
schedule_work_on(cpu, &stock->work);
}
}
@ -24181,7 +24163,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
mutex_unlock(&percpu_charge_mutex);
}
@@ -3143,7 +3155,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
@@ -3140,7 +3152,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
unsigned long flags;
bool ret = false;
@ -24190,7 +24172,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
stock = this_cpu_ptr(&memcg_stock);
if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
@@ -3151,7 +3163,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
@@ -3148,7 +3160,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
ret = true;
}
@ -24199,7 +24181,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
return ret;
}
@@ -3207,7 +3219,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
@@ -3204,7 +3216,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
struct memcg_stock_pcp *stock;
unsigned long flags;
@ -24208,7 +24190,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached_objcg != objcg) { /* reset if necessary */
@@ -3221,7 +3233,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
@@ -3218,7 +3230,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
if (stock->nr_bytes > PAGE_SIZE)
drain_obj_stock(stock);
@ -24217,7 +24199,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
}
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
@@ -6312,12 +6324,12 @@ static int mem_cgroup_move_account(struct page *page,
@@ -6309,12 +6321,12 @@ static int mem_cgroup_move_account(struct page *page,
ret = 0;
@ -24232,7 +24214,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
out_unlock:
unlock_page(page);
out:
@@ -7294,10 +7306,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
@@ -7291,10 +7303,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
css_get(&memcg->css);
commit_charge(page, memcg);
@ -24245,7 +24227,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
/*
* Cgroup1's unified memory+swap counter has been charged with the
@@ -7353,11 +7365,11 @@ static void uncharge_batch(const struct uncharge_gather *ug)
@@ -7350,11 +7362,11 @@ static void uncharge_batch(const struct uncharge_gather *ug)
memcg_oom_recover(ug->memcg);
}
@ -24259,7 +24241,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
/* drop reference from uncharge_page */
css_put(&ug->memcg->css);
@@ -7529,10 +7541,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
@@ -7526,10 +7538,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
css_get(&memcg->css);
commit_charge(newpage, memcg);
@ -24272,7 +24254,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -7662,9 +7674,13 @@ static int __init mem_cgroup_init(void)
@@ -7659,9 +7671,13 @@ static int __init mem_cgroup_init(void)
cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
memcg_hotplug_cpu_dead);
@ -24289,7 +24271,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
for_each_node(node) {
struct mem_cgroup_tree_per_node *rtpn;
@@ -7715,6 +7731,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
@@ -7712,6 +7728,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
@ -24297,7 +24279,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
@@ -7760,9 +7777,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
@@ -7757,9 +7774,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
@ -24312,7 +24294,7 @@ index b2c4bc4bb591..8c5d1d0c62dd 100644
css_put(&memcg->css);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d58ddd6e7f73..2ae33a303830 100644
index 12da70f39e0c..bb19b160ecd8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
@ -24668,7 +24650,7 @@ index d58ddd6e7f73..2ae33a303830 100644
return NULL;
}
@@ -9288,7 +9358,7 @@ void zone_pcp_reset(struct zone *zone)
@@ -9336,7 +9406,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@ -24677,7 +24659,7 @@ index d58ddd6e7f73..2ae33a303830 100644
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
@@ -9297,7 +9367,7 @@ void zone_pcp_reset(struct zone *zone)
@@ -9345,7 +9415,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
@ -25159,7 +25141,7 @@ index 8414c345127b..d937f8673193 100644
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
diff --git a/mm/slub.c b/mm/slub.c
index ad44734dbf72..84a55c8bb3d6 100644
index ec1c3a376d36..559fcc2a3fbf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -458,7 +458,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
@ -25652,10 +25634,10 @@ index ad44734dbf72..84a55c8bb3d6 100644
for (i = 0; i < t.count; i++) {
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d7a68eb0db42..ebe4de5f90d9 100644
index e27cd716ca95..1285e5cb072a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1887,7 +1887,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
@@ -1889,7 +1889,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@ -25664,7 +25646,7 @@ index d7a68eb0db42..ebe4de5f90d9 100644
void *vaddr;
node = numa_node_id();
@@ -1924,11 +1924,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
@@ -1926,11 +1926,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
return ERR_PTR(err);
}
@ -25679,7 +25661,7 @@ index d7a68eb0db42..ebe4de5f90d9 100644
return vaddr;
}
@@ -1993,6 +1994,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
@@ -1995,6 +1996,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
@ -25687,7 +25669,7 @@ index d7a68eb0db42..ebe4de5f90d9 100644
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
@@ -2007,7 +2009,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
@@ -2009,7 +2011,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
order = get_order(size);
rcu_read_lock();
@ -25697,7 +25679,7 @@ index d7a68eb0db42..ebe4de5f90d9 100644
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
@@ -2030,7 +2033,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
@@ -2032,7 +2035,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
break;
}
@ -26324,10 +26306,10 @@ index e491b083b348..ef432cea2e10 100644
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b)
diff --git a/net/core/sock.c b/net/core/sock.c
index 56a927b9b372..28acdfbcdd22 100644
index d8d42ff15d20..d23b79afa25f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3059,12 +3059,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
@@ -3068,12 +3068,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
@ -26341,7 +26323,7 @@ index 56a927b9b372..28acdfbcdd22 100644
}
EXPORT_SYMBOL(lock_sock_nested);
@@ -3113,12 +3112,11 @@ bool lock_sock_fast(struct sock *sk)
@@ -3122,12 +3121,11 @@ bool lock_sock_fast(struct sock *sk)
__lock_sock(sk);
sk->sk_lock.owned = 1;
@ -26356,10 +26338,10 @@ index 56a927b9b372..28acdfbcdd22 100644
}
EXPORT_SYMBOL(lock_sock_fast);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 6e18aa417782..2d538f14edf8 100644
index 54e2309315eb..ca72dffaa71d 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1264,7 +1264,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
@@ -1270,7 +1270,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
@ -26369,7 +26351,7 @@ index 6e18aa417782..2d538f14edf8 100644
err = -EOPNOTSUPP;
if (sch->flags & TCQ_F_MQROOT) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 68f1e89430b3..455f9b21b264 100644
index ecdd9e83f2f4..73b5aa797645 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -578,7 +578,11 @@ struct Qdisc noop_qdisc = {

View File

@ -10,9 +10,9 @@
%global upstream_version 5.10
%global upstream_sublevel 0
%global devel_release 142
%global devel_release 144
%global maintenance_release .0.0
%global pkg_release .57
%global pkg_release .58
%global rt_release .rt62
%define with_debuginfo 1
@ -886,6 +886,10 @@ fi
%endif
%changelog
* Fri Mar 10 2023 liyulei <liyulei@kylinos.cn> - 5.10.0-144.0.0.58
- update kernel-rt version to 5.10.0-144.0.0
* Wed Feb 22 2023 wuchunguang <wuchunguang@kylinos.cn> - 5.10.0-142.0.0.57
- update kernel-rt version to 5.10.0-142.0.0