diff --git a/0001-apply-preempt-RT-patch.patch b/0001-apply-preempt-RT-patch.patch index 144ba66..721a437 100644 --- a/0001-apply-preempt-RT-patch.patch +++ b/0001-apply-preempt-RT-patch.patch @@ -1,7 +1,7 @@ -From 1b2f0db721f982be172160f744943c28c1d39f96 Mon Sep 17 00:00:00 2001 -From: zhangyu -Date: Sat, 19 Nov 2022 13:40:57 +0800 -Subject: [PATCH] [rt-patch] +From bc25ef53fb7619733a689039f66e5e4438a91522 Mon Sep 17 00:00:00 2001 +From: liyulei +Date: Mon, 6 Feb 2023 18:19:04 +0800 +Subject: [PATCH] apply preempt RT patch --- .../Expedited-Grace-Periods.rst | 4 +- @@ -44,7 +44,7 @@ Subject: [PATCH] [rt-patch] arch/arm64/include/asm/thread_info.h | 7 +- arch/arm64/kernel/asm-offsets.c | 1 + arch/arm64/kernel/entry.S | 13 +- - arch/arm64/kernel/fpsimd.c | 18 +- + arch/arm64/kernel/fpsimd.c | 14 +- arch/arm64/kernel/ipi_nmi.c | 2 - arch/arm64/kernel/signal.c | 2 +- arch/arm64/kvm/arm.c | 6 +- @@ -143,7 +143,6 @@ Subject: [PATCH] [rt-patch] arch/x86/include/asm/signal.h | 13 + arch/x86/include/asm/stackprotector.h | 8 +- arch/x86/include/asm/thread_info.h | 11 + - arch/x86/kernel/cpu/mshyperv.c | 2 +- arch/x86/kernel/crash_dump_32.c | 48 +- arch/x86/kernel/fpu/core.c | 12 + arch/x86/kernel/irq_32.c | 2 + @@ -158,7 +157,7 @@ Subject: [PATCH] [rt-patch] arch/xtensa/include/asm/spinlock_types.h | 4 - arch/xtensa/mm/highmem.c | 46 +- block/blk-mq.c | 124 +- - crypto/cryptd.c | 12 +- + crypto/cryptd.c | 15 +- drivers/atm/eni.c | 2 +- drivers/block/zram/zram_drv.c | 36 + drivers/block/zram/zram_drv.h | 1 + @@ -219,7 +218,7 @@ Subject: [PATCH] [rt-patch] drivers/tty/serial/8250/8250_port.c | 92 +- drivers/tty/serial/amba-pl011.c | 17 +- drivers/tty/serial/omap-serial.c | 12 +- - drivers/tty/tty_buffer.c | 2 - + drivers/tty/tty_buffer.c | 4 +- fs/afs/dir_silly.c | 2 +- fs/aio.c | 3 +- fs/btrfs/ctree.h | 1 - @@ -261,7 +260,7 @@ Subject: [PATCH] [rt-patch] include/linux/interrupt.h | 34 +- include/linux/io-mapping.h | 28 +- include/linux/irq_cpustat.h | 28 - - include/linux/irq_work.h | 13 + + include/linux/irq_work.h | 8 + include/linux/irqdesc.h | 1 + include/linux/irqflags.h | 23 +- include/linux/kernel.h | 26 +- @@ -326,9 +325,10 @@ Subject: [PATCH] [rt-patch] kernel/exit.c | 2 +- kernel/fork.c | 28 +- kernel/futex.c | 87 +- - kernel/irq/manage.c | 11 +- + kernel/irq/handle.c | 8 + + kernel/irq/manage.c | 16 +- kernel/irq/spurious.c | 8 + - kernel/irq_work.c | 136 +- + kernel/irq_work.c | 134 +- kernel/kexec_core.c | 1 - kernel/ksysfs.c | 12 + kernel/kthread.c | 16 +- @@ -337,35 +337,34 @@ Subject: [PATCH] [rt-patch] kernel/locking/mutex-rt.c | 224 +++ kernel/locking/rtmutex-debug.c | 102 - kernel/locking/rtmutex-debug.h | 11 - - kernel/locking/rtmutex.c | 941 +++++++-- + kernel/locking/rtmutex.c | 939 +++++++-- kernel/locking/rtmutex.h | 7 - kernel/locking/rtmutex_common.h | 36 +- kernel/locking/rwlock-rt.c | 334 ++++ - kernel/locking/rwsem-rt.c | 317 ++++ + kernel/locking/rwsem-rt.c | 317 +++ kernel/locking/rwsem.c | 6 + kernel/locking/spinlock.c | 7 + kernel/locking/spinlock_debug.c | 5 + kernel/notifier.c | 12 +- kernel/panic.c | 33 +- kernel/printk/Makefile | 1 - - kernel/printk/internal.h | 37 - - kernel/printk/printk.c | 1680 +++++++++-------- - kernel/printk/printk_safe.c | 425 ----- + kernel/printk/internal.h | 4 - + kernel/printk/printk.c | 1708 +++++++++-------- + kernel/printk/printk_safe.c | 349 +--- kernel/ptrace.c | 32 +- kernel/rcu/Kconfig | 4 +- kernel/rcu/tree.c | 4 +- kernel/rcu/update.c | 4 +- - kernel/sched/core.c | 1278 ++++++++++--- + kernel/sched/core.c | 1274 +++++++++--- 
kernel/sched/cpudeadline.c | 4 +- kernel/sched/cpupri.c | 4 +- kernel/sched/cputime.c | 36 +- kernel/sched/deadline.c | 47 +- kernel/sched/fair.c | 16 +- kernel/sched/features.h | 8 + - kernel/sched/rt.c | 81 +- - kernel/sched/sched.h | 80 +- + kernel/sched/rt.c | 83 +- + kernel/sched/sched.h | 73 +- kernel/sched/swait.c | 1 + - kernel/sched/topology.c | 1 + kernel/signal.c | 105 +- kernel/smp.c | 14 +- kernel/softirq.c | 428 ++++- @@ -377,7 +376,7 @@ Subject: [PATCH] [rt-patch] kernel/trace/trace.h | 19 - kernel/trace/trace_events.c | 2 + kernel/trace/trace_output.c | 19 +- - kernel/workqueue.c | 4 + + kernel/workqueue.c | 8 +- lib/Kconfig.debug | 2 +- lib/bug.c | 1 + lib/cpumask.c | 18 + @@ -392,7 +391,7 @@ Subject: [PATCH] [rt-patch] mm/Kconfig | 5 +- mm/highmem.c | 262 ++- mm/memcontrol.c | 67 +- - mm/page_alloc.c | 184 +- + mm/page_alloc.c | 180 +- mm/shmem.c | 31 +- mm/slab.c | 90 +- mm/slab.h | 2 +- @@ -402,7 +401,6 @@ Subject: [PATCH] [rt-patch] mm/workingset.c | 5 +- mm/z3fold.c | 17 +- mm/zsmalloc.c | 85 +- - mm/zswap.c | 1 + net/Kconfig | 2 +- net/core/dev.c | 33 +- net/core/gen_estimator.c | 6 +- @@ -412,7 +410,7 @@ Subject: [PATCH] [rt-patch] net/sched/sch_generic.c | 10 + net/sunrpc/svc_xprt.c | 4 +- net/xfrm/xfrm_state.c | 3 +- - 409 files changed, 9000 insertions(+), 4911 deletions(-) + 406 files changed, 9015 insertions(+), 4804 deletions(-) delete mode 100644 arch/alpha/include/asm/kmap_types.h delete mode 100644 arch/arc/include/asm/kmap_types.h delete mode 100644 arch/arm/include/asm/kmap_types.h @@ -448,7 +446,7 @@ Subject: [PATCH] [rt-patch] create mode 100644 kernel/locking/rwsem-rt.c diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst -index 72f0f6fbd..6f89cf1e5 100644 +index 72f0f6fbd53c..6f89cf1e567d 100644 --- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst +++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst @@ -38,7 +38,7 @@ sections. @@ -470,7 +468,7 @@ index 72f0f6fbd..6f89cf1e5 100644 shown in the following diagram: diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst -index 1ae79a10a..17d38480e 100644 +index 1ae79a10a8de..17d38480ef5c 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.rst +++ b/Documentation/RCU/Design/Requirements/Requirements.rst @@ -78,7 +78,7 @@ RCU treats a nested set as one big RCU read-side critical section. @@ -569,7 +567,7 @@ index 1ae79a10a..17d38480e 100644 for voluntary context switches. diff --git a/Documentation/RCU/checklist.rst b/Documentation/RCU/checklist.rst -index 2efed9926..7ed495604 100644 +index 2efed9926c3f..7ed4956043bd 100644 --- a/Documentation/RCU/checklist.rst +++ b/Documentation/RCU/checklist.rst @@ -214,7 +214,7 @@ over a rather long period of time, but improvements are always welcome! 
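[Illustration, not part of the patch: the RCU documentation hunks above all track one PREEMPT_RT consequence, namely that plain rcu_read_lock() sections become preemptible, so updaters pair with synchronize_rcu()/call_rcu() rather than spin-waiting on readers. A minimal sketch of that pairing follows; my_data, my_ptr, my_read_val() and my_update_val() are invented names, while the rcu_*(), kmalloc() and kfree() calls are the stock kernel APIs the documents refer to.]

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_data {
        int val;
};

static struct my_data __rcu *my_ptr;

/* Read side: may be preempted on PREEMPT_RT, so writers must not spin. */
static int my_read_val(void)
{
        struct my_data *p;
        int val = 0;

        rcu_read_lock();
        p = rcu_dereference(my_ptr);
        if (p)
                val = p->val;
        rcu_read_unlock();

        return val;
}

/* Update side: publish the new version, then wait out pre-existing readers. */
static void my_update_val(int val)
{
        struct my_data *newp, *oldp;

        newp = kmalloc(sizeof(*newp), GFP_KERNEL);
        if (!newp)
                return;
        newp->val = val;

        oldp = rcu_replace_pointer(my_ptr, newp, true);
        synchronize_rcu();      /* all readers that saw oldp have finished */
        kfree(oldp);
}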
@@ -582,7 +580,7 @@ index 2efed9926..7ed495604 100644 then the corresponding readers my use rcu_read_lock() and rcu_read_unlock(), rcu_read_lock_bh() and rcu_read_unlock_bh(), diff --git a/Documentation/RCU/rcubarrier.rst b/Documentation/RCU/rcubarrier.rst -index f64f4413a..3b4a24877 100644 +index f64f4413a47c..3b4a24877496 100644 --- a/Documentation/RCU/rcubarrier.rst +++ b/Documentation/RCU/rcubarrier.rst @@ -9,7 +9,7 @@ RCU (read-copy update) is a synchronization mechanism that can be thought @@ -608,7 +606,7 @@ index f64f4413a..3b4a24877 100644 Therefore, on_each_cpu() disables preemption across its call to smp_call_function() and also across the local call to diff --git a/Documentation/RCU/stallwarn.rst b/Documentation/RCU/stallwarn.rst -index c9ab6af4d..e97d1b487 100644 +index c9ab6af4d3be..e97d1b4876ef 100644 --- a/Documentation/RCU/stallwarn.rst +++ b/Documentation/RCU/stallwarn.rst @@ -25,7 +25,7 @@ warnings: @@ -630,7 +628,7 @@ index c9ab6af4d..e97d1b487 100644 read-side critical section. This is especially damaging if that low-priority task is not permitted to run on any other CPU, diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst -index fb3ff76c3..3b2b1479f 100644 +index fb3ff76c3e73..3b2b1479fd0f 100644 --- a/Documentation/RCU/whatisRCU.rst +++ b/Documentation/RCU/whatisRCU.rst @@ -684,7 +684,7 @@ Quick Quiz #1: @@ -674,10 +672,10 @@ index fb3ff76c3..3b2b1479f 100644 read-side critical sections. It also permits spinlocks blocking while in RCU read-side critical diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt -index 1d7650717..130306c51 100644 +index 8a1a25216da6..4779442505d5 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt -@@ -4314,6 +4314,10 @@ +@@ -4345,6 +4345,10 @@ value, meaning that RCU_SOFTIRQ is used by default. Specify rcutree.use_softirq=0 to use rcuc kthreads. @@ -688,7 +686,7 @@ index 1d7650717..130306c51 100644 rcutree.rcu_fanout_exact= [KNL] Disable autobalancing of the rcu_node combining tree. This is used by rcutorture, and might -@@ -4692,6 +4696,13 @@ +@@ -4723,6 +4727,13 @@ only normal grace-period primitives. No effect on CONFIG_TINY_RCU kernels. @@ -703,7 +701,7 @@ index 1d7650717..130306c51 100644 Set time in jiffies during which RCU tasks will avoid sending IPIs, starting with the beginning diff --git a/Documentation/driver-api/io-mapping.rst b/Documentation/driver-api/io-mapping.rst -index a966239f0..a7830c594 100644 +index a966239f04e4..a7830c59481f 100644 --- a/Documentation/driver-api/io-mapping.rst +++ b/Documentation/driver-api/io-mapping.rst @@ -20,78 +20,64 @@ A mapping object is created during driver initialization using:: @@ -825,7 +823,7 @@ index a966239f0..a7830c594 100644 -performs an IPI to inform all processors about the new mapping. This results -in a significant performance penalty. 
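[Illustration, not part of the patch: the rewritten io-mapping.rst above replaces the atomic mapping API with io_mapping_map_local_wc(). A sketch of the usage it describes; write_reg() and its parameters are invented, while the io_mapping_*() and writel() calls are the real APIs named in the document.]

#include <linux/io.h>
#include <linux/io-mapping.h>

/*
 * Unlike the removed io_mapping_map_atomic_wc(), the _local_ variant
 * leaves preemption and pagefaults enabled; only CPU migration is
 * disabled, which is why nested mappings must be undone in reverse
 * (stack) order.
 */
static void write_reg(struct io_mapping *iomap, unsigned long offset, u32 val)
{
        void __iomem *vaddr;

        vaddr = io_mapping_map_local_wc(iomap, offset);
        writel(val, vaddr);
        io_mapping_unmap_local(vaddr);
}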
diff --git a/arch/Kconfig b/arch/Kconfig -index 7800502d9..5fd528b87 100644 +index f6d455c5a897..aa0ea1c2f046 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -50,6 +50,7 @@ config OPROFILE @@ -836,7 +834,7 @@ index 7800502d9..5fd528b87 100644 select RING_BUFFER select RING_BUFFER_ALLOW_SWAP help -@@ -673,6 +674,12 @@ config HAVE_TIF_NOHZ +@@ -683,6 +684,12 @@ config HAVE_TIF_NOHZ config HAVE_VIRT_CPU_ACCOUNTING bool @@ -849,7 +847,7 @@ index 7800502d9..5fd528b87 100644 config ARCH_HAS_SCALED_CPUTIME bool -@@ -687,7 +694,6 @@ config HAVE_VIRT_CPU_ACCOUNTING_GEN +@@ -697,7 +704,6 @@ config HAVE_VIRT_CPU_ACCOUNTING_GEN some 32-bit arches may require multiple accesses, so proper locking is needed to protect against concurrent accesses. @@ -859,7 +857,7 @@ index 7800502d9..5fd528b87 100644 help diff --git a/arch/alpha/include/asm/kmap_types.h b/arch/alpha/include/asm/kmap_types.h deleted file mode 100644 -index 651714b45..000000000 +index 651714b45729..000000000000 --- a/arch/alpha/include/asm/kmap_types.h +++ /dev/null @@ -1,15 +0,0 @@ @@ -879,7 +877,7 @@ index 651714b45..000000000 - -#endif diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h -index 1d5716bc0..6883bc952 100644 +index 1d5716bc060b..6883bc952d22 100644 --- a/arch/alpha/include/asm/spinlock_types.h +++ b/arch/alpha/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ @@ -894,7 +892,7 @@ index 1d5716bc0..6883bc952 100644 volatile unsigned int lock; } arch_spinlock_t; diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig -index 0a89cc9de..d8804001d 100644 +index 0a89cc9def65..d8804001d550 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -507,6 +507,7 @@ config LINUX_RAM_BASE @@ -906,7 +904,7 @@ index 0a89cc9de..d8804001d 100644 With ARC 2G:2G address split, only upper 2G is directly addressable by kernel. Enable this to potentially allow access to rest of 2G and PAE diff --git a/arch/arc/include/asm/highmem.h b/arch/arc/include/asm/highmem.h -index 6e5eafb3a..a6b8e2c35 100644 +index 6e5eafb3afdd..a6b8e2c352c4 100644 --- a/arch/arc/include/asm/highmem.h +++ b/arch/arc/include/asm/highmem.h @@ -9,17 +9,29 @@ @@ -961,7 +959,7 @@ index 6e5eafb3a..a6b8e2c35 100644 #endif diff --git a/arch/arc/include/asm/kmap_types.h b/arch/arc/include/asm/kmap_types.h deleted file mode 100644 -index fecf7851e..000000000 +index fecf7851ec32..000000000000 --- a/arch/arc/include/asm/kmap_types.h +++ /dev/null @@ -1,14 +0,0 @@ @@ -980,7 +978,7 @@ index fecf7851e..000000000 - */ -#endif diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c -index 1b9f473c6..c79912a6b 100644 +index 1b9f473c6369..c79912a6b196 100644 --- a/arch/arc/mm/highmem.c +++ b/arch/arc/mm/highmem.c @@ -36,9 +36,8 @@ @@ -1059,7 +1057,7 @@ index 1b9f473c6..c79912a6b 100644 + alloc_kmap_pgtable(FIXMAP_BASE); } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index 9096aa34e..9457b01b6 100644 +index 9096aa34e482..9457b01b63ef 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -31,6 +31,7 @@ config ARM @@ -1104,7 +1102,7 @@ index 9096aa34e..9457b01b6 100644 The address space of ARM processors is only 4 Gigabytes large and it has to accommodate user address space, kernel address diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h -index 9575b4040..707068f85 100644 +index 9575b404019c..707068f852c2 100644 --- a/arch/arm/include/asm/fixmap.h +++ b/arch/arm/include/asm/fixmap.h @@ -7,14 +7,14 @@ @@ -1125,7 +1123,7 @@ index 9575b4040..707068f85 100644 /* Support writing RO kernel text via kprobes, jump labels, etc. 
*/ FIX_TEXT_POKE0, diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h -index b95848ed2..706efafbf 100644 +index b95848ed2bc7..706efafbf972 100644 --- a/arch/arm/include/asm/hardirq.h +++ b/arch/arm/include/asm/hardirq.h @@ -2,16 +2,11 @@ @@ -1149,7 +1147,7 @@ index b95848ed2..706efafbf 100644 #endif /* __ASM_HARDIRQ_H */ diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h -index 31811be38..b22dffa8c 100644 +index 31811be38d78..b22dffa8c7eb 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -2,7 +2,8 @@ @@ -1205,7 +1203,7 @@ index 31811be38..b22dffa8c 100644 #endif diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h -index 54b0180c8..36d040c68 100644 +index 54b0180c8aeb..36d040c68163 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -31,6 +31,8 @@ void handle_IRQ(unsigned int, struct pt_regs *); @@ -1219,7 +1217,7 @@ index 54b0180c8..36d040c68 100644 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h deleted file mode 100644 -index 5590940ee..000000000 +index 5590940ee43d..000000000000 --- a/arch/arm/include/asm/kmap_types.h +++ /dev/null @@ -1,10 +0,0 @@ @@ -1234,7 +1232,7 @@ index 5590940ee..000000000 - -#endif diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h -index 597695864..a37c08039 100644 +index 5976958647fe..a37c0803954b 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ @@ -1249,7 +1247,7 @@ index 597695864..a37c08039 100644 typedef struct { diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h -index 9f7ca79cc..0bf67b7f0 100644 +index 9f7ca79cc76a..9f31470f695a 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -55,6 +55,7 @@ struct cpu_context_save { @@ -1266,7 +1264,7 @@ index 9f7ca79cc..0bf67b7f0 100644 #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ -#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ -#define TIF_PATCH_PENDING 8 /* pending live patching update */ -+#define TIF_NEED_RESCHED_LAZY 7 ++#define TIF_NEED_RESCHED_LAZY 7 +#define TIF_SECCOMP 8 /* seccomp syscall filtering active */ +#define TIF_PATCH_PENDING 9 /* pending live patching update */ @@ -1291,7 +1289,7 @@ index 9f7ca79cc..0bf67b7f0 100644 #endif /* __KERNEL__ */ #endif /* __ASM_ARM_THREAD_INFO_H */ diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c -index 70993af22..024c65c3a 100644 +index 70993af22d80..024c65c3a0f2 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -43,6 +43,7 @@ int main(void) @@ -1303,7 +1301,7 @@ index 70993af22..024c65c3a 100644 DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S -index 4332e5950..efb2d0755 100644 +index 4332e5950042..efb2d0755ce7 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -207,11 +207,18 @@ __irq_svc: @@ -1344,7 +1342,7 @@ index 4332e5950..efb2d0755 100644 __und_fault: diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S -index 7a2e63dfb..02fae4a70 100644 +index 7a2e63dfb4d9..37c91a55027e 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -54,7 +54,9 @@ 
__ret_fast_syscall: @@ -1352,9 +1350,9 @@ index 7a2e63dfb..02fae4a70 100644 blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK -+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) -+ bne fast_work_pending -+ tst r1, #_TIF_SECCOMP ++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) ++ bne fast_work_pending ++ tst r1, #_TIF_SECCOMP bne fast_work_pending @@ -1363,16 +1361,16 @@ index 7a2e63dfb..02fae4a70 100644 blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK -+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) -+ bne do_slower_path -+ tst r1, #_TIF_SECCOMP ++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) ++ bne do_slower_path ++ tst r1, #_TIF_SECCOMP beq no_work_pending +do_slower_path: UNWIND(.fnend ) ENDPROC(ret_fast_syscall) diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c -index 2f81d3af5..6e69f7b3d 100644 +index 2f81d3af5f9a..6e69f7b3d581 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -649,7 +649,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) @@ -1386,7 +1384,7 @@ index 2f81d3af5..6e69f7b3d 100644 } else { if (unlikely(!user_mode(regs))) diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c -index d94e39a21..44383bfbf 100644 +index d94e39a21698..44383bfbf98b 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -671,9 +671,7 @@ static void do_handle_IPI(int ipinr) @@ -1400,7 +1398,7 @@ index d94e39a21..44383bfbf 100644 default: diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile -index 4536159bc..3510503bc 100644 +index 4536159bc8fa..3510503bc5e6 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -21,7 +21,6 @@ KASAN_SANITIZE_physaddr.o := n @@ -1412,7 +1410,7 @@ index 4536159bc..3510503bc 100644 obj-$(CONFIG_ARM_PV_FIXUP) += pv-fixup-asm.o diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c -index 10f909744..fd9e3e740 100644 +index 10f909744d5e..fd9e3e740b3d 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -49,9 +49,9 @@ static inline unsigned long l2_get_va(unsigned long paddr) @@ -1437,7 +1435,7 @@ index 10f909744..fd9e3e740 100644 } diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c -index 581473165..f34845da3 100644 +index 5814731653d9..f34845da3522 100644 --- a/arch/arm/mm/cache-xsc3l2.c +++ b/arch/arm/mm/cache-xsc3l2.c @@ -59,7 +59,7 @@ static inline void l2_unmap_va(unsigned long va) @@ -1459,7 +1457,7 @@ index 581473165..f34845da3 100644 return va + (pa_offset >> (32 - PAGE_SHIFT)); #else diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c -index 91965fb04..d34166682 100644 +index 91965fb043de..d34166682b7f 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, @@ -1474,7 +1472,7 @@ index 91965fb04..d34166682 100644 diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c deleted file mode 100644 -index 187fab227..000000000 +index 187fab227b50..000000000000 --- a/arch/arm/mm/highmem.c +++ /dev/null @@ -1,121 +0,0 @@ @@ -1600,10 +1598,10 @@ index 187fab227..000000000 - return (void *)vaddr; -} diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index df28741e4..0ed0f28b8 100644 +index a0bba8e5426a..c528009516fd 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig -@@ -76,6 
+76,7 @@ config ARM64 +@@ -78,6 +78,7 @@ config ARM64 select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG) select ARCH_SUPPORTS_NUMA_BALANCING @@ -1611,7 +1609,7 @@ index df28741e4..0ed0f28b8 100644 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT select ARCH_WANT_DEFAULT_BPF_JIT select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT -@@ -179,6 +180,7 @@ config ARM64 +@@ -182,6 +183,7 @@ config ARM64 select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP @@ -1619,17 +1617,16 @@ index df28741e4..0ed0f28b8 100644 select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUTEX_CMPXCHG if FUTEX -@@ -202,6 +204,7 @@ config ARM64 +@@ -205,6 +207,7 @@ config ARM64 select PCI_DOMAINS_GENERIC if PCI select PCI_ECAM if (ACPI && PCI) select PCI_SYSCALL if PCI -+ select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM ++ select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM select POWER_RESET select POWER_SUPPLY select SPARSE_IRQ - diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h -index 5ffa4bacd..cbfa7b6f2 100644 +index 5ffa4bacdad3..cbfa7b6f2e09 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -13,11 +13,8 @@ @@ -1647,7 +1644,7 @@ index 5ffa4bacd..cbfa7b6f2 100644 #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h -index e83f0982b..7a5770d82 100644 +index e83f0982b99c..7a5770d825b9 100644 --- a/arch/arm64/include/asm/preempt.h +++ b/arch/arm64/include/asm/preempt.h @@ -70,17 +70,43 @@ static inline bool __preempt_count_dec_and_test(void) @@ -1696,7 +1693,7 @@ index e83f0982b..7a5770d82 100644 void preempt_schedule_notrace(void); #define __preempt_schedule_notrace() preempt_schedule_notrace() diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h -index 18782f0c4..6672b0535 100644 +index 18782f0c4721..6672b05350b4 100644 --- a/arch/arm64/include/asm/spinlock_types.h +++ b/arch/arm64/include/asm/spinlock_types.h @@ -5,10 +5,6 @@ @@ -1711,7 +1708,7 @@ index 18782f0c4..6672b0535 100644 #include diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h -index af49b6190..f8ba7a6ec 100644 +index af49b6190aee..0ad7b958c566 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -27,6 +27,7 @@ struct thread_info { @@ -1726,7 +1723,7 @@ index af49b6190..f8ba7a6ec 100644 #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ #define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */ -+#define TIF_NEED_RESCHED_LAZY 6 ++#define TIF_NEED_RESCHED_LAZY 6 #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ @@ -1734,7 +1731,7 @@ index af49b6190..f8ba7a6ec 100644 #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_SVE (1 << TIF_SVE) #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) -+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) ++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_32BIT_AARCH64 (1 << TIF_32BIT_AARCH64) #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) @@ -1745,24 +1742,24 @@ index af49b6190..f8ba7a6ec 100644 + _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \ + 
_TIF_NEED_RESCHED_LAZY) -+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) ++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ _TIF_SYSCALL_EMU) diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c -index 5f59e24c9..d8d41b5f9 100644 +index 5f59e24c95d3..4f522206c47a 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -31,6 +31,7 @@ int main(void) DEFINE(TSK_TI_CPU, offsetof(struct task_struct, thread_info.cpu)); DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); -+ DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count)); ++ DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count)); #ifdef CONFIG_ARM64_SW_TTBR0_PAN DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); #endif diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S -index 64145bfab..70abdfd6f 100644 +index 64145bfab48f..4cdbba7202af 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -521,9 +521,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING @@ -1772,42 +1769,24 @@ index 64145bfab..70abdfd6f 100644 - cbnz x24, 1f // preempt count != 0 || NMI return path - bl arm64_preempt_schedule_irq // irq en/disable is done inside + -+ cbz x24, 1f // (need_resched + count) == 0 -+ cbnz w24, 2f // count != 0 ++ cbz x24, 1f // (need_resched + count) == 0 ++ cbnz w24, 2f // count != 0 + -+ ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count -+ cbnz w24, 2f // preempt lazy count != 0 ++ ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count ++ cbnz w24, 2f // preempt lazy count != 0 + -+ ldr x0, [tsk, #TSK_TI_FLAGS] // get flags -+ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling? ++ ldr x0, [tsk, #TSK_TI_FLAGS] // get flags ++ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling? 
1: -+ bl arm64_preempt_schedule_irq // irq en/disable is done inside ++ bl arm64_preempt_schedule_irq // irq en/disable is done inside +2: #endif mov x0, sp diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c -index 5335a6bd1..aa631771e 100644 +index 5335a6bd1a0d..84520f11667d 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c -@@ -180,7 +180,7 @@ static void __get_cpu_fpsimd_context(void) - */ - static void get_cpu_fpsimd_context(void) - { -- local_bh_disable(); -+ preempt_disable(); - __get_cpu_fpsimd_context(); - } - -@@ -201,7 +201,7 @@ static void __put_cpu_fpsimd_context(void) - static void put_cpu_fpsimd_context(void) - { - __put_cpu_fpsimd_context(); -- local_bh_enable(); -+ preempt_enable(); - } - - static bool have_cpu_fpsimd_context(void) @@ -226,6 +226,16 @@ static void sve_free(struct task_struct *task) __sve_free(task); } @@ -1851,7 +1830,7 @@ index 5335a6bd1..aa631771e 100644 /* diff --git a/arch/arm64/kernel/ipi_nmi.c b/arch/arm64/kernel/ipi_nmi.c -index 2cf28e511..fc58fada5 100644 +index 9a8f7c256117..c0753dcdb22a 100644 --- a/arch/arm64/kernel/ipi_nmi.c +++ b/arch/arm64/kernel/ipi_nmi.c @@ -35,9 +35,7 @@ void arm64_send_nmi(cpumask_t *mask) @@ -1863,9 +1842,9 @@ index 2cf28e511..fc58fada5 100644 - printk_safe_exit(); } - static void arm64_send_ipi(cpumask_t *mask) + static DEFINE_PER_CPU(call_single_data_t, cpu_backtrace_csd) = diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c -index e5e2f1e88..c5fd06d52 100644 +index e5e2f1e888a2..c5fd06d5285b 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -694,7 +694,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, @@ -1878,10 +1857,10 @@ index e5e2f1e88..c5fd06d52 100644 local_daif_restore(DAIF_PROCCTX_NOIRQ); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c -index 384cc56a6..d5fd6e303 100644 +index 7527ac19332f..7d4b7d6d097d 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c -@@ -821,7 +821,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +@@ -860,7 +860,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) * involves poking the GIC, which must be done in a * non-preemptible context. 
*/ @@ -1890,7 +1869,7 @@ index 384cc56a6..d5fd6e303 100644 kvm_pmu_flush_hwstate(vcpu); -@@ -845,7 +845,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +@@ -884,7 +884,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_timer_sync_user(vcpu); kvm_vgic_sync_hwstate(vcpu); local_irq_enable(); @@ -1899,7 +1878,7 @@ index 384cc56a6..d5fd6e303 100644 continue; } -@@ -924,7 +924,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +@@ -963,7 +963,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) /* Exit types that need handling before we can be preempted */ handle_exit_early(vcpu, ret); @@ -1909,7 +1888,7 @@ index 384cc56a6..d5fd6e303 100644 /* * The ARMv8 architecture doesn't give the hypervisor diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig -index 7bf0a617e..c9f2533cc 100644 +index 7bf0a617e94c..c9f2533cc53d 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -286,6 +286,7 @@ config NR_CPUS @@ -1921,7 +1900,7 @@ index 7bf0a617e..c9f2533cc 100644 config FORCE_MAX_ZONEORDER diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h -index 81f9477d5..4b589cc20 100644 +index 81f9477d5330..4b589cc20900 100644 --- a/arch/csky/include/asm/fixmap.h +++ b/arch/csky/include/asm/fixmap.h @@ -8,7 +8,7 @@ @@ -1943,7 +1922,7 @@ index 81f9477d5..4b589cc20 100644 __end_of_fixed_addresses }; diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h -index 14645e3d5..1f4ed3f4c 100644 +index 14645e3d5cd5..1f4ed3f4c0d9 100644 --- a/arch/csky/include/asm/highmem.h +++ b/arch/csky/include/asm/highmem.h @@ -9,7 +9,7 @@ @@ -1970,7 +1949,7 @@ index 14645e3d5..1f4ed3f4c 100644 #endif /* __KERNEL__ */ diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c -index 89c10800a..4161df3c6 100644 +index 89c10800a002..4161df3c6c15 100644 --- a/arch/csky/mm/highmem.c +++ b/arch/csky/mm/highmem.c @@ -9,8 +9,6 @@ @@ -2067,7 +2046,7 @@ index 89c10800a..4161df3c6 100644 - kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr); -} diff --git a/arch/hexagon/include/asm/spinlock_types.h b/arch/hexagon/include/asm/spinlock_types.h -index 19d233497..de72fb230 100644 +index 19d233497ba5..de72fb23016d 100644 --- a/arch/hexagon/include/asm/spinlock_types.h +++ b/arch/hexagon/include/asm/spinlock_types.h @@ -8,10 +8,6 @@ @@ -2083,7 +2062,7 @@ index 19d233497..de72fb230 100644 } arch_spinlock_t; diff --git a/arch/ia64/include/asm/kmap_types.h b/arch/ia64/include/asm/kmap_types.h deleted file mode 100644 -index 5c268cf7c..000000000 +index 5c268cf7c2bd..000000000000 --- a/arch/ia64/include/asm/kmap_types.h +++ /dev/null @@ -1,13 +0,0 @@ @@ -2101,7 +2080,7 @@ index 5c268cf7c..000000000 - -#endif /* _ASM_IA64_KMAP_TYPES_H */ diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h -index 6e345fefc..681408d68 100644 +index 6e345fefcdca..681408d6816f 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ @@ -2116,7 +2095,7 @@ index 6e345fefc..681408d68 100644 volatile unsigned int lock; } arch_spinlock_t; diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c -index 7abc5f37b..733e0e332 100644 +index 7abc5f37bfaf..733e0e3324b8 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -138,12 +138,8 @@ void vtime_account_kernel(struct task_struct *tsk) @@ -2155,7 +2134,7 @@ index 7abc5f37b..733e0e332 100644 static irqreturn_t diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig -index 33925ffed..7f6ca0ab4 100644 +index 
33925ffed68f..7f6ca0ab4f81 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -155,6 +155,7 @@ config XILINX_UNCACHED_SHADOW @@ -2167,7 +2146,7 @@ index 33925ffed..7f6ca0ab4 100644 The address space of Microblaze processors is only 4 Gigabytes large and it has to accommodate user address space, kernel address diff --git a/arch/microblaze/include/asm/fixmap.h b/arch/microblaze/include/asm/fixmap.h -index 0379ce522..e6e9288bf 100644 +index 0379ce5229e3..e6e9288bff76 100644 --- a/arch/microblaze/include/asm/fixmap.h +++ b/arch/microblaze/include/asm/fixmap.h @@ -20,7 +20,7 @@ @@ -2189,7 +2168,7 @@ index 0379ce522..e6e9288bf 100644 __end_of_fixed_addresses }; diff --git a/arch/microblaze/include/asm/highmem.h b/arch/microblaze/include/asm/highmem.h -index 284ca8fb5..4418633fb 100644 +index 284ca8fb54c1..4418633fb163 100644 --- a/arch/microblaze/include/asm/highmem.h +++ b/arch/microblaze/include/asm/highmem.h @@ -25,7 +25,6 @@ @@ -2213,7 +2192,7 @@ index 284ca8fb5..4418633fb 100644 #endif /* _ASM_HIGHMEM_H */ diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile -index 1b16875ce..8ced71100 100644 +index 1b16875cea70..8ced71100047 100644 --- a/arch/microblaze/mm/Makefile +++ b/arch/microblaze/mm/Makefile @@ -6,4 +6,3 @@ @@ -2223,7 +2202,7 @@ index 1b16875ce..8ced71100 100644 -obj-$(CONFIG_HIGHMEM) += highmem.o diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c deleted file mode 100644 -index 92e089041..000000000 +index 92e0890416c9..000000000000 --- a/arch/microblaze/mm/highmem.c +++ /dev/null @@ -1,78 +0,0 @@ @@ -2306,7 +2285,7 @@ index 92e089041..000000000 -} -EXPORT_SYMBOL(kunmap_atomic_high); diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c -index 4a0c30ced..498eaa4d3 100644 +index 4a0c30ced72b..498eaa4d3978 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -49,17 +49,11 @@ unsigned long lowmem_size; @@ -2328,7 +2307,7 @@ index 4a0c30ced..498eaa4d3 100644 static void highmem_setup(void) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig -index 896a29df1..1b3593d53 100644 +index 896a29df1a6d..1b3593d53978 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2727,6 +2727,7 @@ config WAR_MIPS34K_MISSED_ITLB @@ -2340,7 +2319,7 @@ index 896a29df1..1b3593d53 100644 config CPU_SUPPORTS_HIGHMEM bool diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h -index 743535be7..beea14761 100644 +index 743535be7528..beea14761cef 100644 --- a/arch/mips/include/asm/fixmap.h +++ b/arch/mips/include/asm/fixmap.h @@ -17,7 +17,7 @@ @@ -2362,7 +2341,7 @@ index 743535be7..beea14761 100644 __end_of_fixed_addresses }; diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h -index 9f021cf51..1716181ea 100644 +index 9f021cf51aa7..1716181ea66d 100644 --- a/arch/mips/include/asm/highmem.h +++ b/arch/mips/include/asm/highmem.h @@ -24,7 +24,7 @@ @@ -2390,7 +2369,7 @@ index 9f021cf51..1716181ea 100644 diff --git a/arch/mips/include/asm/kmap_types.h b/arch/mips/include/asm/kmap_types.h deleted file mode 100644 -index 16665dc24..000000000 +index 16665dc2431b..000000000000 --- a/arch/mips/include/asm/kmap_types.h +++ /dev/null @@ -1,13 +0,0 @@ @@ -2408,7 +2387,7 @@ index 16665dc24..000000000 - -#endif diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c -index 01b2bd95b..9aba83e1e 100644 +index 01b2bd95ba1f..9aba83e1eeb4 100644 --- a/arch/mips/kernel/crash_dump.c +++ b/arch/mips/kernel/crash_dump.c @@ -5,8 +5,6 @@ @@ -2480,7 +2459,7 @@ index 
01b2bd95b..9aba83e1e 100644 -} -arch_initcall(kdump_buf_page_init); diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c -index 5fec7f45d..57e2f08f0 100644 +index 5fec7f45d79a..57e2f08f00d0 100644 --- a/arch/mips/mm/highmem.c +++ b/arch/mips/mm/highmem.c @@ -8,8 +8,6 @@ @@ -2572,7 +2551,7 @@ index 5fec7f45d..57e2f08f0 100644 - kmap_pte = virt_to_kpte(kmap_vstart); -} diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c -index 07e84a774..bc80893e5 100644 +index 07e84a774938..bc80893e5c0f 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -36,7 +36,6 @@ @@ -2594,7 +2573,7 @@ index 07e84a774..bc80893e5 100644 max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; #endif diff --git a/arch/nds32/Kconfig.cpu b/arch/nds32/Kconfig.cpu -index f88a12fdf..c10759952 100644 +index f88a12fdf0f3..c10759952485 100644 --- a/arch/nds32/Kconfig.cpu +++ b/arch/nds32/Kconfig.cpu @@ -157,6 +157,7 @@ config HW_SUPPORT_UNALIGNMENT_ACCESS @@ -2606,7 +2585,7 @@ index f88a12fdf..c10759952 100644 The address space of Andes processors is only 4 Gigabytes large and it has to accommodate user address space, kernel address diff --git a/arch/nds32/include/asm/fixmap.h b/arch/nds32/include/asm/fixmap.h -index 5a4bf11e5..2fa09a2de 100644 +index 5a4bf11e5800..2fa09a2de428 100644 --- a/arch/nds32/include/asm/fixmap.h +++ b/arch/nds32/include/asm/fixmap.h @@ -6,7 +6,7 @@ @@ -2628,7 +2607,7 @@ index 5a4bf11e5..2fa09a2de 100644 FIX_EARLYCON_MEM_BASE, __end_of_fixed_addresses diff --git a/arch/nds32/include/asm/highmem.h b/arch/nds32/include/asm/highmem.h -index fe986d0e6..16159a871 100644 +index fe986d0e6e3f..16159a8716f2 100644 --- a/arch/nds32/include/asm/highmem.h +++ b/arch/nds32/include/asm/highmem.h @@ -5,7 +5,6 @@ @@ -2668,7 +2647,7 @@ index fe986d0e6..16159a871 100644 #endif diff --git a/arch/nds32/mm/Makefile b/arch/nds32/mm/Makefile -index 897ecaf5c..14fb2e8eb 100644 +index 897ecaf5cf54..14fb2e8eb036 100644 --- a/arch/nds32/mm/Makefile +++ b/arch/nds32/mm/Makefile @@ -3,7 +3,6 @@ obj-y := extable.o tlb.o fault.o init.o mmap.o \ @@ -2681,7 +2660,7 @@ index 897ecaf5c..14fb2e8eb 100644 CFLAGS_REMOVE_proc.o = $(CC_FLAGS_FTRACE) diff --git a/arch/nds32/mm/highmem.c b/arch/nds32/mm/highmem.c deleted file mode 100644 -index 4284cd59e..000000000 +index 4284cd59e21a..000000000000 --- a/arch/nds32/mm/highmem.c +++ /dev/null @@ -1,48 +0,0 @@ @@ -2734,7 +2713,7 @@ index 4284cd59e..000000000 -} -EXPORT_SYMBOL(kunmap_atomic_high); diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c -index 5e88c351e..f3fa02b88 100644 +index 5e88c351e6a4..f3fa02b8838a 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -33,7 +33,6 @@ @@ -2746,7 +2725,7 @@ index 5e88c351e..f3fa02b88 100644 #include #include diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c -index a978590d8..5aed97a18 100644 +index a978590d802d..5aed97a18bac 100644 --- a/arch/openrisc/mm/ioremap.c +++ b/arch/openrisc/mm/ioremap.c @@ -15,7 +15,6 @@ @@ -2758,7 +2737,7 @@ index a978590d8..5aed97a18 100644 #include #include diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h -index 7f7039516..fad29aa6f 100644 +index 7f7039516e53..fad29aa6f45f 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h @@ -32,7 +32,6 @@ typedef struct { @@ -2771,7 +2750,7 @@ index 7f7039516..fad29aa6f 100644 #define ack_bad_irq(irq) WARN(1, "unexpected IRQ trap at vector %02x\n", irq) diff --git a/arch/parisc/include/asm/kmap_types.h b/arch/parisc/include/asm/kmap_types.h deleted file mode 100644 
-index 3e70b5cd1..000000000 +index 3e70b5cd1123..000000000000 --- a/arch/parisc/include/asm/kmap_types.h +++ /dev/null @@ -1,13 +0,0 @@ @@ -2789,7 +2768,7 @@ index 3e70b5cd1..000000000 - -#endif diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig -index ed06e0c32..a0cf63581 100644 +index ed06e0c32727..a0cf63581f24 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -146,6 +146,7 @@ config PPC @@ -2825,7 +2804,7 @@ index ed06e0c32..a0cf63581 100644 source "kernel/Kconfig.hz" diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h -index cf091c4c2..7371f7e23 100644 +index cf091c4c22e5..7371f7e23c35 100644 --- a/arch/powerpc/include/asm/cmpxchg.h +++ b/arch/powerpc/include/asm/cmpxchg.h @@ -5,7 +5,7 @@ @@ -2838,7 +2817,7 @@ index cf091c4c2..7371f7e23 100644 #ifdef __BIG_ENDIAN #define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE) diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h -index 897cc6875..a832aeafe 100644 +index 897cc68758d4..a832aeafe560 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -20,7 +20,7 @@ @@ -2860,7 +2839,7 @@ index 897cc6875..a832aeafe 100644 #ifdef CONFIG_PPC_8xx /* For IMMR we need an aligned 512K area */ diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h -index 104026f7d..80a5ae771 100644 +index 104026f7d6bc..80a5ae771c65 100644 --- a/arch/powerpc/include/asm/highmem.h +++ b/arch/powerpc/include/asm/highmem.h @@ -24,12 +24,10 @@ @@ -2890,7 +2869,7 @@ index 104026f7d..80a5ae771 100644 #endif /* _ASM_HIGHMEM_H */ diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h deleted file mode 100644 -index c8fa182d4..000000000 +index c8fa182d48c8..000000000000 --- a/arch/powerpc/include/asm/kmap_types.h +++ /dev/null @@ -1,13 +0,0 @@ @@ -2908,7 +2887,7 @@ index c8fa182d4..000000000 -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_KMAP_TYPES_H */ diff --git a/arch/powerpc/include/asm/simple_spinlock_types.h b/arch/powerpc/include/asm/simple_spinlock_types.h -index 0f3cdd8fa..d45561e9e 100644 +index 0f3cdd8faa95..d45561e9e6ba 100644 --- a/arch/powerpc/include/asm/simple_spinlock_types.h +++ b/arch/powerpc/include/asm/simple_spinlock_types.h @@ -2,7 +2,7 @@ @@ -2921,7 +2900,7 @@ index 0f3cdd8fa..d45561e9e 100644 #endif diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h -index c5d742f18..cc6922a01 100644 +index c5d742f18021..cc6922a011ba 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ @@ -2936,7 +2915,7 @@ index c5d742f18..cc6922a01 100644 #include #include diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h -index 1c8460e23..b1653c160 100644 +index 1c8460e23583..b1653c160bab 100644 --- a/arch/powerpc/include/asm/stackprotector.h +++ b/arch/powerpc/include/asm/stackprotector.h @@ -24,7 +24,11 @@ static __always_inline void boot_init_stack_canary(void) @@ -2952,15 +2931,15 @@ index 1c8460e23..b1653c160 100644 canary ^= LINUX_VERSION_CODE; canary &= CANARY_MASK; diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h -index 7480fbc4d..7eb0963e4 100644 +index 7480fbc4d79d..0f7ccf38a014 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -54,6 +54,8 @@ struct thread_info { int preempt_count; /* 0 => preemptable, <0 => BUG */ -+ int 
preempt_lazy_count; /* 0 => preemptable, -+ <0 => BUG */ ++ int preempt_lazy_count; /* 0 => preemptable, ++ <0 => BUG */ #ifdef CONFIG_SMP unsigned int cpu; #endif @@ -3011,7 +2990,7 @@ index 7480fbc4d..7eb0963e4 100644 /* Bits in local_flags */ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c -index 760f656ef..0a351a99d 100644 +index 760f656efcf6..0a351a99d090 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -187,6 +187,7 @@ int main(void) @@ -3023,7 +3002,7 @@ index 760f656ef..0a351a99d 100644 #ifdef CONFIG_PPC64 OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S -index 459f5d00b..fc9517a97 100644 +index 459f5d00b990..fc9517a97640 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -414,7 +414,9 @@ ret_from_syscall: @@ -3102,7 +3081,7 @@ index 459f5d00b..fc9517a97 100644 andi. r0,r9,_TIF_USER_WORK_MASK beq restore_user diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S -index e91d3ba1e..5ce64cfe6 100644 +index e91d3ba1e208..5ce64cfe69d4 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -1080,7 +1080,7 @@ _GLOBAL(ret_from_except_lite) @@ -3147,7 +3126,7 @@ index e91d3ba1e..5ce64cfe6 100644 restore: /* diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c -index e8a548447..5ad4f27cb 100644 +index e8a548447dd6..5ad4f27cba10 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -753,10 +753,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_mostly; @@ -3164,7 +3143,7 @@ index e8a548447..5ad4f27cb 100644 irq_hw_number_t virq_to_hw(unsigned int virq) { diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S -index 717e658b9..08ee95ad6 100644 +index 717e658b90fd..08ee95ad6593 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -31,6 +31,7 @@ @@ -3184,7 +3163,7 @@ index 717e658b9..08ee95ad6 100644 /* * void call_do_irq(struct pt_regs *regs, void *sp); diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S -index 070465825..a6b33f7b3 100644 +index 070465825c21..a6b33f7b3264 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -27,6 +27,7 @@ @@ -3204,7 +3183,7 @@ index 070465825..a6b33f7b3 100644 _GLOBAL(call_do_irq) mflr r0 diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c -index 532f22637..1ef55f4b3 100644 +index 532f22637783..1ef55f4b389a 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -73,7 +73,8 @@ static const char *nvram_os_partitions[] = { @@ -3245,7 +3224,7 @@ index 532f22637..1ef55f4b3 100644 err_type = ERR_TYPE_KERNEL_PANIC; oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c -index 310bcd768..ae3212dcf 100644 +index 310bcd768cd5..ae3212dcf562 100644 --- a/arch/powerpc/kernel/syscall_64.c +++ b/arch/powerpc/kernel/syscall_64.c @@ -193,7 +193,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, @@ -3284,7 +3263,7 @@ index 310bcd768..ae3212dcf 100644 } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c -index 1d20f0f77..7e0a497a3 100644 +index 1d20f0f77a92..7e0a497a36ee 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -312,12 +312,11 
@@ static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct, @@ -3382,7 +3361,7 @@ index 1d20f0f77..7e0a497a3 100644 struct cpu_accounting_data *acct) { diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c -index d2f6b2e30..6a028453f 100644 +index d2f6b2e30b6a..6a028453f587 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -173,7 +173,6 @@ extern void panic_flush_kmsg_start(void) @@ -3413,7 +3392,7 @@ index d2f6b2e30..6a028453f 100644 IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "", debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c -index 75b2a6c4d..db40e20d0 100644 +index 75b2a6c4db5a..db40e20d0c54 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -185,11 +185,6 @@ static void watchdog_smp_panic(int cpu, u64 tb) @@ -3429,7 +3408,7 @@ index 75b2a6c4d..db40e20d0 100644 trigger_allbutself_cpu_backtrace(); diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c -index c9a889880..d488311ef 100644 +index c9a889880214..d488311efab1 100644 --- a/arch/powerpc/kexec/crash.c +++ b/arch/powerpc/kexec/crash.c @@ -311,9 +311,6 @@ void default_machine_crash_shutdown(struct pt_regs *regs) @@ -3443,7 +3422,7 @@ index c9a889880..d488311ef 100644 * This function is only called after the system * has panicked or is otherwise in a critical state. diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig -index 549591d9a..efb5bfe93 100644 +index 549591d9aaa2..efb5bfe93f70 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -178,6 +178,7 @@ config KVM_E500MC @@ -3455,7 +3434,7 @@ index 549591d9a..efb5bfe93 100644 select HAVE_KVM_IRQFD select HAVE_KVM_IRQ_ROUTING diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile -index 55b4a8bd4..3b4e9e4e2 100644 +index 55b4a8bd408a..3b4e9e4e25ea 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -16,7 +16,6 @@ obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o @@ -3468,7 +3447,7 @@ index 55b4a8bd4..3b4e9e4e2 100644 obj-$(CONFIG_KASAN) += kasan/ diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c deleted file mode 100644 -index 624b4438a..000000000 +index 624b4438aff9..000000000000 --- a/arch/powerpc/mm/highmem.c +++ /dev/null @@ -1,67 +0,0 @@ @@ -3540,7 +3519,7 @@ index 624b4438a..000000000 -} -EXPORT_SYMBOL(kunmap_atomic_high); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c -index 1ed276d23..ae7c136ed 100644 +index 1ed276d2305f..ae7c136ed188 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -63,11 +63,6 @@ @@ -3565,7 +3544,7 @@ index 1ed276d23..ae7c136ed 100644 printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n", diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c -index 6c3bc4b4d..ec862846b 100644 +index 6c3bc4b4da98..ec862846bc82 100644 --- a/arch/powerpc/platforms/powernv/opal-kmsg.c +++ b/arch/powerpc/platforms/powernv/opal-kmsg.c @@ -20,7 +20,8 @@ @@ -3579,7 +3558,7 @@ index 6c3bc4b4d..ec862846b 100644 /* * Outside of a panic context the pollers will continue to run, diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c -index 245f1f8df..f05555dde 100644 +index 245f1f8df656..f05555dde8e7 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -24,6 +24,7 @@ @@ -3674,7 +3653,7 @@ index 245f1f8df..f05555dde 100644 } diff --git 
a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c -index 5559edf36..d62b8e053 100644 +index 5559edf36756..d62b8e053d4c 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -3005,7 +3005,7 @@ print_address(unsigned long addr) @@ -3699,7 +3678,7 @@ index 5559edf36..d62b8e053 100644 printf("%s", buf); } diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig -index 7461318e1..517cbf603 100644 +index 7461318e10fb..517cbf60335f 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -183,6 +183,7 @@ config S390 @@ -3711,7 +3690,7 @@ index 7461318e1..517cbf603 100644 select IOMMU_SUPPORT if PCI select MODULES_USE_ELF_RELA diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h -index cfed272e4..8e28e8176 100644 +index cfed272e4fd5..8e28e8176ec8 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ @@ -3726,7 +3705,7 @@ index cfed272e4..8e28e8176 100644 int lock; } __attribute__ ((aligned (4))) arch_spinlock_t; diff --git a/arch/s390/include/asm/vtime.h b/arch/s390/include/asm/vtime.h -index 3622d4ebc..fac6a6798 100644 +index 3622d4ebc73a..fac6a67988eb 100644 --- a/arch/s390/include/asm/vtime.h +++ b/arch/s390/include/asm/vtime.h @@ -2,7 +2,6 @@ @@ -3738,7 +3717,7 @@ index 3622d4ebc..fac6a6798 100644 #endif /* _S390_VTIME_H */ diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c -index 579ec3a8c..9b3c5978b 100644 +index 579ec3a8c816..9b3c5978b668 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -223,35 +223,50 @@ void vtime_flush(struct task_struct *tsk) @@ -3811,7 +3790,7 @@ index 579ec3a8c..9b3c5978b 100644 * Sorted add to a list. List is linear searched until first bigger * element is found. diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h -index f38adc189..b07fbc7f7 100644 +index f38adc189b83..b07fbc7f7bc6 100644 --- a/arch/sh/include/asm/fixmap.h +++ b/arch/sh/include/asm/fixmap.h @@ -13,9 +13,6 @@ @@ -3837,7 +3816,7 @@ index f38adc189..b07fbc7f7 100644 /* * FIX_IOREMAP entries are useful for mapping physical address diff --git a/arch/sh/include/asm/hardirq.h b/arch/sh/include/asm/hardirq.h -index edaea3559..9fe4495a8 100644 +index edaea3559a23..9fe4495a8e90 100644 --- a/arch/sh/include/asm/hardirq.h +++ b/arch/sh/include/asm/hardirq.h @@ -2,16 +2,10 @@ @@ -3863,7 +3842,7 @@ index edaea3559..9fe4495a8 100644 #endif /* __ASM_SH_HARDIRQ_H */ diff --git a/arch/sh/include/asm/kmap_types.h b/arch/sh/include/asm/kmap_types.h deleted file mode 100644 -index b78107f92..000000000 +index b78107f923dd..000000000000 --- a/arch/sh/include/asm/kmap_types.h +++ /dev/null @@ -1,15 +0,0 @@ @@ -3883,7 +3862,7 @@ index b78107f92..000000000 - -#endif diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h -index e82369f28..22ca9a98b 100644 +index e82369f286a2..22ca9a98bbb8 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ @@ -3898,7 +3877,7 @@ index e82369f28..22ca9a98b 100644 volatile unsigned int lock; } arch_spinlock_t; diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c -index 5717c7cbd..5db7af565 100644 +index 5717c7cbdd97..5db7af565dec 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -44,7 +44,7 @@ int arch_show_interrupts(struct seq_file *p, int prec) @@ -3927,7 +3906,7 @@ index 5717c7cbd..5db7af565 100644 static inline void handle_one_irq(unsigned int irq) { diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c 
-index 9c3d32b80..f5beecdac 100644 +index 9c3d32b80038..f5beecdac693 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -186,7 +186,7 @@ BUILD_TRAP_HANDLER(nmi) @@ -3940,7 +3919,7 @@ index 9c3d32b80..f5beecdac 100644 switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) { case NOTIFY_OK: diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c -index 3348e0c4d..0db6919af 100644 +index 3348e0c4d769..0db6919af8d3 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -362,9 +362,6 @@ void __init mem_init(void) @@ -3966,7 +3945,7 @@ index 3348e0c4d..0db6919af 100644 (VMALLOC_END - VMALLOC_START) >> 20, diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig -index 530b7ec5d..a38d00d8b 100644 +index 530b7ec5d3ca..a38d00d8b783 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -139,6 +139,7 @@ config MMU @@ -3978,7 +3957,7 @@ index 530b7ec5d..a38d00d8b 100644 config ZONE_DMA bool diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h -index 6c35f0d27..875116209 100644 +index 6c35f0d27ee1..875116209ec1 100644 --- a/arch/sparc/include/asm/highmem.h +++ b/arch/sparc/include/asm/highmem.h @@ -24,7 +24,6 @@ @@ -4012,7 +3991,7 @@ index 6c35f0d27..875116209 100644 #endif /* _ASM_HIGHMEM_H */ diff --git a/arch/sparc/include/asm/kmap_types.h b/arch/sparc/include/asm/kmap_types.h deleted file mode 100644 -index 55a99b6bd..000000000 +index 55a99b6bd91e..000000000000 --- a/arch/sparc/include/asm/kmap_types.h +++ /dev/null @@ -1,11 +0,0 @@ @@ -4028,7 +4007,7 @@ index 55a99b6bd..000000000 - -#endif diff --git a/arch/sparc/include/asm/vaddrs.h b/arch/sparc/include/asm/vaddrs.h -index 84d054b07..4fec0341e 100644 +index 84d054b07a6f..4fec0341e2a8 100644 --- a/arch/sparc/include/asm/vaddrs.h +++ b/arch/sparc/include/asm/vaddrs.h @@ -32,13 +32,13 @@ @@ -4048,7 +4027,7 @@ index 84d054b07..4fec0341e 100644 __end_of_fixed_addresses }; diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c -index 3ec9f1402..eb21682ab 100644 +index 3ec9f1402aad..eb21682abfcb 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs) @@ -4068,7 +4047,7 @@ index 3ec9f1402..eb21682ab 100644 #ifdef CONFIG_HOTPLUG_CPU void fixup_irqs(void) diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile -index b078205b7..68db1f859 100644 +index b078205b70e0..68db1f859b02 100644 --- a/arch/sparc/mm/Makefile +++ b/arch/sparc/mm/Makefile @@ -15,6 +15,3 @@ obj-$(CONFIG_SPARC32) += leon_mm.o @@ -4080,7 +4059,7 @@ index b078205b7..68db1f859 100644 -obj-$(CONFIG_HIGHMEM) += highmem.o diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c deleted file mode 100644 -index 8f2a2afb0..000000000 +index 8f2a2afb048a..000000000000 --- a/arch/sparc/mm/highmem.c +++ /dev/null @@ -1,115 +0,0 @@ @@ -4200,7 +4179,7 @@ index 8f2a2afb0..000000000 -} -EXPORT_SYMBOL(kunmap_atomic_high); diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c -index 0070f8b9a..a03caa5f6 100644 +index 0070f8b9a753..a03caa5f6628 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -971,8 +971,6 @@ void __init srmmu_paging_init(void) @@ -4213,7 +4192,7 @@ index 0070f8b9a..a03caa5f6 100644 unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; diff --git a/arch/um/include/asm/fixmap.h b/arch/um/include/asm/fixmap.h -index 2c697a145..2efac5827 100644 +index 2c697a145ac1..2efac5827188 100644 --- a/arch/um/include/asm/fixmap.h +++ b/arch/um/include/asm/fixmap.h @@ -3,7 +3,6 @@ @@ -4225,7 +4204,7 @@ index 
2c697a145..2efac5827 100644 #include #include diff --git a/arch/um/include/asm/hardirq.h b/arch/um/include/asm/hardirq.h -index b426796d2..52e2c3626 100644 +index b426796d26fd..52e2c36267a9 100644 --- a/arch/um/include/asm/hardirq.h +++ b/arch/um/include/asm/hardirq.h @@ -2,22 +2,7 @@ @@ -4254,7 +4233,7 @@ index b426796d2..52e2c3626 100644 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h deleted file mode 100644 -index b0bd12de1..000000000 +index b0bd12de1d23..000000000000 --- a/arch/um/include/asm/kmap_types.h +++ /dev/null @@ -1,13 +0,0 @@ @@ -4272,7 +4251,7 @@ index b0bd12de1..000000000 - -#endif diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c -index e4abac6c9..173999422 100644 +index e4abac6c9727..173999422ed8 100644 --- a/arch/um/kernel/kmsg_dump.c +++ b/arch/um/kernel/kmsg_dump.c @@ -1,15 +1,19 @@ @@ -4315,7 +4294,7 @@ index e4abac6c9..173999422 100644 static struct kmsg_dumper kmsg_dumper = { diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index c5fda7ab3..649c5bdc9 100644 +index 7e53f43447b4..521c550aa7d7 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -16,6 +16,7 @@ config X86_32 @@ -4334,7 +4313,7 @@ index c5fda7ab3..649c5bdc9 100644 select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS -@@ -217,6 +219,7 @@ config X86 +@@ -219,6 +221,7 @@ config X86 select HAVE_PCI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP @@ -4343,7 +4322,7 @@ index c5fda7ab3..649c5bdc9 100644 select HAVE_POSIX_CPU_TIMERS_TASK_WORK select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c -index be891fdf8..29c716ed1 100644 +index be891fdf8d17..29c716ed103f 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -379,14 +379,14 @@ static int ecb_encrypt(struct skcipher_request *req) @@ -4438,7 +4417,7 @@ index be891fdf8..29c716ed1 100644 return err; } diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c -index 384ccb00f..2f8df8ef8 100644 +index 384ccb00f9e1..2f8df8ef8644 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -46,7 +46,7 @@ static inline void cast5_fpu_end(bool fpu_enabled) @@ -4521,7 +4500,7 @@ index 384ccb00f..2f8df8ef8 100644 ctr_crypt_final(&walk, ctx); err = skcipher_walk_done(&walk, 0); diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c -index d3d91a0ab..6d0774721 100644 +index d3d91a0abf88..6d0774721514 100644 --- a/arch/x86/crypto/glue_helper.c +++ b/arch/x86/crypto/glue_helper.c @@ -24,7 +24,7 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, @@ -4631,7 +4610,7 @@ index d3d91a0ab..6d0774721 100644 nbytes = walk.nbytes; } diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h -index 77217bd29..8eba66a33 100644 +index 77217bd292bd..8eba66a33e39 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -31,7 +31,7 @@ @@ -4661,10 +4640,10 @@ index 77217bd29..8eba66a33 100644 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte); diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h -index 8b9bfaad6..d31b08865 100644 +index 1b37f1d3ab8b..2eb9b9f94393 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h -@@ -28,6 +28,7 @@ extern void kernel_fpu_begin_mask(unsigned int kfpu_mask); +@@ -30,6 +30,7 @@ extern void kernel_fpu_begin_mask(unsigned int kfpu_mask); extern void 
kernel_fpu_end(void); extern bool irq_fpu_usable(void); extern void fpregs_mark_activate(void); @@ -4672,9 +4651,9 @@ index 8b9bfaad6..d31b08865 100644 /* Code that is unaware of kernel_fpu_begin_mask() can use this */ static inline void kernel_fpu_begin(void) -@@ -40,17 +41,32 @@ static inline void kernel_fpu_begin(void) +@@ -42,17 +43,32 @@ static inline void kernel_fpu_begin(void) * A context switch will (and softirq might) save CPU's FPU registers to - * fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in + * fpu->fpstate.regs and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in * a random state. + * + * local_bh_disable() protects against both preemption and soft interrupts @@ -4710,7 +4689,7 @@ index 8b9bfaad6..d31b08865 100644 #ifdef CONFIG_X86_DEBUG_FPU diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h -index 0f420b24e..032e02085 100644 +index 0f420b24e0fc..032e020853aa 100644 --- a/arch/x86/include/asm/highmem.h +++ b/arch/x86/include/asm/highmem.h @@ -23,7 +23,6 @@ @@ -4743,7 +4722,7 @@ index 0f420b24e..032e02085 100644 unsigned long end_pfn); diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h -index bacf68c4d..e2de092fc 100644 +index bacf68c4d70e..e2de092fc38c 100644 --- a/arch/x86/include/asm/iomap.h +++ b/arch/x86/include/asm/iomap.h @@ -9,19 +9,14 @@ @@ -4772,7 +4751,7 @@ index bacf68c4d..e2de092fc 100644 #endif /* _ASM_X86_IOMAP_H */ diff --git a/arch/x86/include/asm/kmap_types.h b/arch/x86/include/asm/kmap_types.h deleted file mode 100644 -index 04ab8266e..000000000 +index 04ab8266e347..000000000000 --- a/arch/x86/include/asm/kmap_types.h +++ /dev/null @@ -1,13 +0,0 @@ @@ -4790,7 +4769,7 @@ index 04ab8266e..000000000 - -#endif /* _ASM_X86_KMAP_TYPES_H */ diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h -index b30b56d47..9632218bf 100644 +index b30b56d47619..9632218bfd41 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -43,7 +43,6 @@ @@ -4802,7 +4781,7 @@ index b30b56d47..9632218bf 100644 #include diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h -index 2380df419..aacfaad6c 100644 +index 2380df419a82..a3b73de276c3 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -90,21 +90,54 @@ static __always_inline void __preempt_count_sub(int val) @@ -4857,13 +4836,13 @@ index 2380df419..aacfaad6c 100644 #ifdef CONFIG_PREEMPTION - +#ifdef CONFIG_PREEMPT_RT -+ extern void preempt_schedule_lock(void); ++ extern void preempt_schedule_lock(void); +#endif extern asmlinkage void preempt_schedule(void); extern asmlinkage void preempt_schedule_thunk(void); diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h -index 6fd8410a3..f3bf2f515 100644 +index 6fd8410a3910..f3bf2f515edb 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h @@ -28,6 +28,19 @@ typedef struct { @@ -4887,7 +4866,7 @@ index 6fd8410a3..f3bf2f515 100644 typedef sigset_t compat_sigset_t; #endif diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h -index 7fb482f0f..3df0a95c9 100644 +index 7fb482f0f25b..3df0a95c9e13 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -65,7 +65,7 @@ @@ -4915,15 +4894,15 @@ index 7fb482f0f..3df0a95c9 100644 canary += tsc + (tsc << 32UL); canary &= CANARY_MASK; diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h -index 
a225c6e2c..ad34b468a 100644 +index a225c6e2ca6d..414c90f04bc3 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -60,6 +60,8 @@ struct thread_info { #ifdef CONFIG_SMP u32 cpu; /* current CPU */ #endif -+ int preempt_lazy_count; /* 0 => lazy preemptable -+ <0 => BUG */ ++ int preempt_lazy_count; /* 0 => lazy preemptable ++ <0 => BUG */ KABI_RESERVE(1) KABI_RESERVE(2) }; @@ -4931,7 +4910,7 @@ index a225c6e2c..ad34b468a 100644 #define INIT_THREAD_INFO(tsk) \ { \ .flags = 0, \ -+ .preempt_lazy_count = 0, \ ++ .preempt_lazy_count = 0, \ } #else /* !__ASSEMBLY__ */ @@ -4939,8 +4918,8 @@ index a225c6e2c..ad34b468a 100644 #include +#define GET_THREAD_INFO(reg) \ -+ _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \ -+ _ASM_SUB $(THREAD_SIZE),reg ; ++ _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \ ++ _ASM_SUB $(THREAD_SIZE),reg ; + #endif @@ -4970,21 +4949,8 @@ index a225c6e2c..ad34b468a 100644 #define STACK_WARN (THREAD_SIZE/8) /* -diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c -index 021cd0677..aedced2b0 100644 ---- a/arch/x86/kernel/cpu/mshyperv.c -+++ b/arch/x86/kernel/cpu/mshyperv.c -@@ -84,7 +84,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0) - inc_irq_stat(hyperv_stimer0_count); - if (hv_stimer0_handler) - hv_stimer0_handler(); -- add_interrupt_randomness(HYPERV_STIMER0_VECTOR); -+ add_interrupt_randomness(HYPERV_STIMER0_VECTOR); - ack_APIC_irq(); - - set_irq_regs(old_regs); diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c -index 33ee47670..5fcac46aa 100644 +index 33ee47670b99..5fcac46aaf6b 100644 --- a/arch/x86/kernel/crash_dump_32.c +++ b/arch/x86/kernel/crash_dump_32.c @@ -13,8 +13,6 @@ @@ -5062,12 +5028,12 @@ index 33ee47670..5fcac46aa 100644 } -arch_initcall(kdump_buf_page_init); diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c -index 571220ac8..d315d45b6 100644 +index 69b5581d4469..18f8e04048dc 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c -@@ -159,6 +159,18 @@ void kernel_fpu_end(void) +@@ -82,6 +82,18 @@ bool irq_fpu_usable(void) } - EXPORT_SYMBOL_GPL(kernel_fpu_end); + EXPORT_SYMBOL(irq_fpu_usable); +void kernel_fpu_resched(void) +{ @@ -5082,10 +5048,10 @@ index 571220ac8..d315d45b6 100644 +EXPORT_SYMBOL_GPL(kernel_fpu_resched); + /* - * Save the FPU state (mark it for reload if necessary): - * + * Save the FPU register state in fpu->fpstate->regs. The register state is + * preserved. 
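The fpu/core.c hunk above introduces kernel_fpu_resched(), which drops and re-takes the FPU section when a reschedule is pending. A minimal usage sketch follows (not part of the patch): a hypothetical driver bounding the length of its FPU-owned region so a higher-priority RT task can preempt between chunks. The chunk size and the function name example_simd_copy() are invented for illustration.

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/fpu/api.h>

#define EXAMPLE_CHUNK	4096	/* arbitrary; tune to the latency budget */

static void example_simd_copy(u8 *dst, const u8 *src, size_t len)
{
	kernel_fpu_begin();
	while (len) {
		size_t n = min_t(size_t, len, EXAMPLE_CHUNK);

		memcpy(dst, src, n);	/* stand-in for the real SIMD body */
		dst += n;
		src += n;
		len -= n;

		/* Let a waiting higher-priority task run between chunks. */
		if (len)
			kernel_fpu_resched();
	}
	kernel_fpu_end();
}

The point of the helper is that the caller never leaves the begin/end bracket itself; kernel_fpu_resched() only yields if should_resched() says a reschedule is actually due.
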
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c -index 0b79efc87..93c6b88b3 100644 +index 0b79efc87be5..93c6b88b382a 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -131,6 +131,7 @@ int irq_init_percpu_irqstack(unsigned int cpu) @@ -5105,7 +5071,7 @@ index 0b79efc87..93c6b88b3 100644 void __handle_irq(struct irq_desc *desc, struct pt_regs *regs) { diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c -index 440eed558..7cfc4e6b7 100644 +index 440eed558558..7cfc4e6b7c94 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -72,7 +72,9 @@ int irq_init_percpu_irqstack(unsigned int cpu) @@ -5119,10 +5085,10 @@ index 440eed558..7cfc4e6b7 100644 } +#endif diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 757891cd8..6904aca58 100644 +index bdc7b9c1f82a..d24459dd8f69 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c -@@ -8180,6 +8180,14 @@ int kvm_arch_init(void *opaque) +@@ -8229,6 +8229,14 @@ int kvm_arch_init(void *opaque) goto out; } @@ -5135,10 +5101,10 @@ index 757891cd8..6904aca58 100644 +#endif + r = -ENOMEM; - x86_fpu_cache = kmem_cache_create("x86_fpu", sizeof(struct fpu), - __alignof__(struct fpu), SLAB_ACCOUNT, + + x86_emulator_cache = kvm_alloc_emulator_cache(); diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c -index 075fe5131..2c54b76d8 100644 +index 075fe51317b0..2c54b76d8f84 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -4,65 +4,6 @@ @@ -5208,7 +5174,7 @@ index 075fe5131..2c54b76d8 100644 { struct zone *zone; diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c -index 7c055259d..da31c2635 100644 +index 7c055259de3a..da31c2635ee4 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -394,19 +394,6 @@ kernel_physical_mapping_init(unsigned long start, @@ -5241,7 +5207,7 @@ index 7c055259d..da31c2635 100644 * NOTE: at this point the bootmem allocator is fully available. */ diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c -index f60398aeb..9aaa756dd 100644 +index f60398aeb644..9aaa756ddf21 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -44,28 +44,7 @@ void iomap_free(resource_size_t base, unsigned long size) @@ -5314,7 +5280,7 @@ index f60398aeb..9aaa756dd 100644 -EXPORT_SYMBOL_GPL(iounmap_atomic); +EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot); diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig -index 87e08ad38..03cbf6b53 100644 +index 87e08ad38ea7..03cbf6b53622 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -666,6 +666,7 @@ endchoice @@ -5326,7 +5292,7 @@ index 87e08ad38..03cbf6b53 100644 Linux can use the full amount of RAM in the system by default. 
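The highmem conversions above (x86, sparc, um, xtensa) replace the per-architecture kmap_atomic() implementations with the generic KMAP_LOCAL infrastructure. As an illustrative sketch, assuming the backported API, this is the pattern callers migrate to; unlike kmap_atomic(), kmap_local_page() leaves preemption and page faults enabled, so the mapped section may be preempted on PREEMPT_RT. The function name is hypothetical.

#include <linux/highmem.h>
#include <linux/string.h>

static void example_clear_range(struct page *page, size_t off, size_t len)
{
	char *addr = kmap_local_page(page);

	/* May be preempted here on RT; the mapping is CPU-local, not atomic. */
	memset(addr + off, 0, len);
	kunmap_local(addr);
}

Mappings remain stack-like (last mapped, first unmapped), which is why the conversions above can drop the old per-CPU kmap_atomic slot bookkeeping.
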
However, the default MMUv2 setup only maps the diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h -index a06ffb0c6..92049b61c 100644 +index a06ffb0c61c7..92049b61c351 100644 --- a/arch/xtensa/include/asm/fixmap.h +++ b/arch/xtensa/include/asm/fixmap.h @@ -16,7 +16,7 @@ @@ -5348,7 +5314,7 @@ index a06ffb0c6..92049b61c 100644 __end_of_fixed_addresses }; diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h -index eac503215..0fc3b1ceb 100644 +index eac503215f17..0fc3b1cebc56 100644 --- a/arch/xtensa/include/asm/highmem.h +++ b/arch/xtensa/include/asm/highmem.h @@ -16,9 +16,8 @@ @@ -5379,7 +5345,7 @@ index eac503215..0fc3b1ceb 100644 #endif diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h -index 64c938925..dc846323b 100644 +index 64c9389254f1..dc846323b1cd 100644 --- a/arch/xtensa/include/asm/spinlock_types.h +++ b/arch/xtensa/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ @@ -5394,7 +5360,7 @@ index 64c938925..dc846323b 100644 #include diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c -index 673196fe8..0735ca5e8 100644 +index 673196fe862e..0735ca5e8f86 100644 --- a/arch/xtensa/mm/highmem.c +++ b/arch/xtensa/mm/highmem.c @@ -12,8 +12,6 @@ @@ -5472,7 +5438,7 @@ index 673196fe8..0735ca5e8 100644 kmap_waitqueues_init(); } diff --git a/block/blk-mq.c b/block/blk-mq.c -index 484f65d0b..f68b30d70 100644 +index c02e42071615..6791698ebca1 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -44,7 +44,7 @@ @@ -5484,7 +5450,7 @@ index 484f65d0b..f68b30d70 100644 static void blk_mq_poll_stats_start(struct request_queue *q); static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); -@@ -655,80 +655,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) +@@ -657,80 +657,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) } EXPORT_SYMBOL(blk_mq_end_request); @@ -5573,7 +5539,7 @@ index 484f65d0b..f68b30d70 100644 } static inline bool blk_mq_complete_need_ipi(struct request *rq) -@@ -738,6 +687,14 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) +@@ -740,6 +689,14 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) if (!IS_ENABLED(CONFIG_SMP) || !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) return false; @@ -5588,7 +5554,7 @@ index 484f65d0b..f68b30d70 100644 /* same CPU or cache domain? 
Complete locally */ if (cpu == rq->mq_ctx->cpu || -@@ -749,6 +706,31 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) +@@ -751,6 +708,31 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) return cpu_online(rq->mq_ctx->cpu); } @@ -5620,7 +5586,7 @@ index 484f65d0b..f68b30d70 100644 bool blk_mq_complete_request_remote(struct request *rq) { WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); -@@ -761,15 +743,15 @@ bool blk_mq_complete_request_remote(struct request *rq) +@@ -763,15 +745,15 @@ bool blk_mq_complete_request_remote(struct request *rq) return false; if (blk_mq_complete_need_ipi(rq)) { @@ -5635,15 +5601,15 @@ index 484f65d0b..f68b30d70 100644 } - return true; -+ if (rq->q->nr_hw_queues == 1) { -+ blk_mq_raise_softirq(rq); -+ return true; -+ } -+ return false; ++ if (rq->q->nr_hw_queues == 1) { ++ blk_mq_raise_softirq(rq); ++ return true; ++ } ++ return false; } EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote); -@@ -1683,14 +1665,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, +@@ -1686,14 +1668,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, return; if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { @@ -5661,7 +5627,7 @@ index 484f65d0b..f68b30d70 100644 } /* -@@ -4215,7 +4197,7 @@ static int __init blk_mq_init(void) +@@ -4222,7 +4204,7 @@ static int __init blk_mq_init(void) int i; for_each_possible_cpu(i) @@ -5671,7 +5637,7 @@ index 484f65d0b..f68b30d70 100644 cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, diff --git a/crypto/cryptd.c b/crypto/cryptd.c -index 668095eca..d46645d5b 100644 +index 668095eca0fa..02ea3d0fe51d 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -36,6 +36,7 @@ static struct workqueue_struct *cryptd_wq; @@ -5690,7 +5656,7 @@ index 668095eca..d46645d5b 100644 } pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen); return 0; -@@ -133,8 +135,8 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, +@@ -133,8 +135,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, struct cryptd_cpu_queue *cpu_queue; refcount_t *refcnt; @@ -5698,10 +5664,12 @@ index 668095eca..d46645d5b 100644 - cpu_queue = this_cpu_ptr(queue->cpu_queue); + cpu_queue = raw_cpu_ptr(queue->cpu_queue); + spin_lock_bh(&cpu_queue->qlock); ++ // cpu = smp_processor_id(); ++ err = crypto_enqueue_request(&cpu_queue->queue, request); refcnt = crypto_tfm_ctx(request->tfm); -@@ -150,7 +152,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, +@@ -150,7 +154,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, refcount_inc(refcnt); out: @@ -5710,7 +5678,7 @@ index 668095eca..d46645d5b 100644 return err; } -@@ -167,10 +169,10 @@ static void cryptd_queue_worker(struct work_struct *work) +@@ -167,10 +171,11 @@ static void cryptd_queue_worker(struct work_struct *work) /* * Only handle one request at a time to avoid hogging crypto workqueue. 
*/ @@ -5720,11 +5688,12 @@ index 668095eca..d46645d5b 100644 req = crypto_dequeue_request(&cpu_queue->queue); - local_bh_enable(); + spin_unlock_bh(&cpu_queue->qlock); ++ if (!req) return; diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c -index 9fcc49be4..a31ffe16e 100644 +index 9fcc49be499f..a31ffe16e626 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2056,7 +2056,7 @@ static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb) @@ -5737,7 +5706,7 @@ index 9fcc49be4..a31ffe16e 100644 tasklet_enable(&ENI_DEV(vcc->dev)->task); if (res == enq_ok) return 0; diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c -index 0636df6b6..1a7523cef 100644 +index 40df7f994b89..4821c94277e8 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -59,6 +59,40 @@ static void zram_free_page(struct zram *zram, size_t index); @@ -5798,7 +5767,7 @@ index 0636df6b6..1a7523cef 100644 } diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h -index f2fd46daa..7e4dd447e 100644 +index f2fd46daa760..7e4dd447e1dd 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -63,6 +63,7 @@ struct zram_table_entry { @@ -5810,7 +5779,7 @@ index f2fd46daa..7e4dd447e 100644 ktime_t ac_time; #endif diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c -index b99e1941c..dc4c0a0a5 100644 +index b99e1941c52c..dc4c0a0a5129 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -20,7 +20,6 @@ @@ -5822,7 +5791,7 @@ index b99e1941c..dc4c0a0a5 100644 static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space, u8 *buf, size_t bufsiz) diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c -index 4ed6e6602..c2bd0d40b 100644 +index 4ed6e660273a..c2bd0d40b5fc 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -50,6 +50,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da @@ -5876,7 +5845,7 @@ index 4ed6e6602..c2bd0d40b 100644 return 0; } diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c -index 9811c4095..17c9d8251 100644 +index 9811c40956e5..17c9d825188b 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -2545,7 +2545,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) @@ -5898,7 +5867,7 @@ index 9811c4095..17c9d8251 100644 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { context_tasklet((unsigned long)&ctx->context); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c -index c406de008..7792bca9e 100644 +index c406de00883a..7792bca9ee76 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -66,7 +66,7 @@ struct mm_struct efi_mm = { @@ -5921,7 +5890,7 @@ index c406de008..7792bca9e 100644 set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c -index 12f7128b7..a65061e3e 100644 +index 12f7128b777f..a65061e3e1d3 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -118,7 +118,8 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) @@ -5971,7 +5940,7 @@ index 12f7128b7..a65061e3e 100644 if (intel_vgpu_active(dev_priv)) return; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c -index 0c083af5a..2abf043d3 100644 +index 0c083af5a59d..2abf043d3d9d 
100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -1080,7 +1080,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer @@ -6003,7 +5972,7 @@ index 0c083af5a..2abf043d3 100644 cache->vaddr = (unsigned long)vaddr; diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c -index 0040b4765..3f4f85478 100644 +index 0040b4765a54..3f4f854786f2 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -342,10 +342,9 @@ void intel_breadcrumbs_park(struct intel_breadcrumbs *b) @@ -6020,7 +5989,7 @@ index 0040b4765..3f4f85478 100644 GEM_BUG_ON(!list_empty(&b->signalers)); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c -index f7b2e07e2..313d8a28e 100644 +index f7b2e07e2229..313d8a28e776 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -60,9 +60,10 @@ static int __engine_unpark(struct intel_wakeref *wf) @@ -6047,7 +6016,7 @@ index f7b2e07e2..313d8a28e 100644 #else diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c -index 58276694c..88944c3b1 100644 +index 58276694c848..88944c3b1bc8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -355,22 +355,15 @@ gtt_user_read(struct io_mapping *mapping, @@ -6109,7 +6078,7 @@ index 58276694c..88944c3b1 100644 /** diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c -index 759f523c6..7339a42ab 100644 +index 759f523c6a6b..7339a42ab2b8 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -847,6 +847,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, @@ -6129,7 +6098,7 @@ index 759f523c6..7339a42ab 100644 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h -index a4addcc64..396b65986 100644 +index a4addcc64978..396b6598694d 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -2,6 +2,10 @@ @@ -6153,7 +6122,7 @@ index a4addcc64..396b65986 100644 TP_PROTO(struct i915_request *rq), TP_ARGS(rq) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c -index 412e21604..432493183 100644 +index 412e21604a05..432493183d20 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -57,12 +57,12 @@ static void trash_stolen(struct drm_i915_private *i915) @@ -6172,7 +6141,7 @@ index 412e21604..432493183 100644 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c -index 65e28c4cd..ca483285f 100644 +index 65e28c4cd4ce..ca483285f267 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1201,9 +1201,9 @@ static int igt_ggtt_page(void *arg) @@ -6200,7 +6169,7 @@ index 65e28c4cd..ca483285f 100644 if (val != n) { pr_err("insert page failed: found %d, expected %d\n", diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h -index 6c5bbff12..411f91ee2 100644 +index 6c5bbff12eb4..411f91ee20fa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h @@ -60,19 
+60,19 @@ fbmem_fini(struct io_mapping *fb) @@ -6228,7 +6197,7 @@ index 6c5bbff12..411f91ee2 100644 static inline bool diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c -index 60ab7151b..93f92ccd4 100644 +index 60ab7151b84d..93f92ccd42e5 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c @@ -124,12 +124,12 @@ qxl_image_init_helper(struct qxl_device *qdev, @@ -6305,7 +6274,7 @@ index 60ab7151b..93f92ccd4 100644 return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c -index 5cea6eea7..785023081 100644 +index 5cea6eea72ab..785023081b79 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -89,11 +89,11 @@ apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) @@ -6374,7 +6343,7 @@ index 5cea6eea7..785023081 100644 } diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c -index 544a9e4df..5ee5171d4 100644 +index 544a9e4df2a8..5ee5171d46ef 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -173,8 +173,8 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) @@ -6416,7 +6385,7 @@ index 544a9e4df..5ee5171d4 100644 fallback: qxl_bo_kunmap(bo); diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h -index 5762ea40d..6ae89b1b3 100644 +index 5762ea40d047..6ae89b1b36f4 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -89,8 +89,8 @@ extern int qxl_bo_create(struct qxl_device *qdev, @@ -6431,7 +6400,7 @@ index 5762ea40d..6ae89b1b3 100644 extern void qxl_bo_unref(struct qxl_bo **bo); extern int qxl_bo_pin(struct qxl_bo *bo); diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c -index b2a475a0c..b665a33b4 100644 +index b2a475a0ca4a..b665a33b449b 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -414,7 +414,7 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev, @@ -6453,7 +6422,7 @@ index b2a475a0c..b665a33b4 100644 void qxl_release_fence_buffer_objects(struct qxl_release *release) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c -index 07d23a1e6..add8e6044 100644 +index 07d23a1e62a0..add8e60440b2 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1828,6 +1828,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, @@ -6473,7 +6442,7 @@ index 07d23a1e6..add8e6044 100644 /* Decode into vertical and horizontal scanout position. 
*/ *vpos = position & 0x1fff; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c -index fb2a25f84..164b9a015 100644 +index 20a5434c6853..17b2df35ef1b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -181,13 +181,15 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, @@ -6517,7 +6486,7 @@ index fb2a25f84..164b9a015 100644 return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c -index e8d66182c..71dba228f 100644 +index e8d66182cd7b..71dba228f68e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -375,12 +375,12 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d, @@ -6591,7 +6560,7 @@ index e8d66182c..71dba228f 100644 return ret; } diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h -index 7845fa5de..043e058bb 100644 +index 7845fa5de79e..043e058bb27c 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -19,6 +19,7 @@ @@ -6603,7 +6572,7 @@ index 7845fa5de..043e058bb 100644 #include "hv_trace.h" diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c -index 5d820037e..ef5e12364 100644 +index 5d820037e291..ef5e12364119 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -22,6 +22,7 @@ @@ -6643,7 +6612,7 @@ index 5d820037e..ef5e12364 100644 if (bytes_written) hyperv_report_panic_msg(panic_pa, bytes_written); diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig -index d45aba3e1..6e890131d 100644 +index ce9429ca6dde..29ccbd6acf43 100644 --- a/drivers/leds/trigger/Kconfig +++ b/drivers/leds/trigger/Kconfig @@ -64,6 +64,7 @@ config LEDS_TRIGGER_BACKLIGHT @@ -6655,7 +6624,7 @@ index d45aba3e1..6e890131d 100644 This allows LEDs to be controlled by active CPUs. 
This shows the active CPUs across an array of LEDs so you can see which diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c -index 758da34fb..d7bf991d9 100644 +index 454d90b785b9..61bea746917e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2217,8 +2217,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) @@ -6688,7 +6657,7 @@ index 758da34fb..d7bf991d9 100644 } diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h -index 5c05acf20..665fe138a 100644 +index 5c05acf20e1f..665fe138ab4f 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -635,6 +635,7 @@ struct r5conf { @@ -6700,7 +6669,7 @@ index 5c05acf20..665fe138a 100644 void *scribble; /* space for constructing buffer * lists and performing address diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c -index 774970bfc..6bc2c728a 100644 +index 774970bfcf85..6bc2c728adb7 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -267,7 +267,8 @@ static void find_next_position(struct mtdoops_context *cxt) @@ -6723,7 +6692,7 @@ index 774970bfc..6bc2c728a 100644 if (reason != KMSG_DUMP_OOPS) { diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c -index 98df38fe5..12d085405 100644 +index 98df38fe553c..12d085405bd0 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -332,7 +332,7 @@ static int __init arc_rimi_init(void) @@ -6745,7 +6714,7 @@ index 98df38fe5..12d085405 100644 #ifndef MODULE diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h -index 22a49c6d7..5d4a4c7ef 100644 +index 22a49c6d7ae6..5d4a4c7efbbf 100644 --- a/drivers/net/arcnet/arcdevice.h +++ b/drivers/net/arcnet/arcdevice.h @@ -298,6 +298,10 @@ struct arcnet_local { @@ -6770,7 +6739,7 @@ index 22a49c6d7..5d4a4c7ef 100644 int arcnet_open(struct net_device *dev); int arcnet_close(struct net_device *dev); diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c -index e04efc0a5..d76dd7d14 100644 +index e04efc0a5c97..d76dd7d14299 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -387,10 +387,44 @@ static void arcnet_timer(struct timer_list *t) @@ -6893,7 +6862,7 @@ index e04efc0a5..d76dd7d14 100644 return retval; } diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c -index f983c4ce6..be618e4b9 100644 +index f983c4ce6b07..be618e4b9ed5 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -169,7 +169,7 @@ static int __init com20020_init(void) @@ -6915,7 +6884,7 @@ index f983c4ce6..be618e4b9 100644 #ifndef MODULE diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c -index 9f44e2e45..b4f8798d8 100644 +index 9f44e2e458df..b4f8798d8c50 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -294,7 +294,7 @@ static void com20020pci_remove(struct pci_dev *pdev) @@ -6928,7 +6897,7 @@ index 9f44e2e45..b4f8798d8 100644 } diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c -index cf607ffcf..9cc5eb6a8 100644 +index cf607ffcf358..9cc5eb6a8e90 100644 --- a/drivers/net/arcnet/com20020_cs.c +++ b/drivers/net/arcnet/com20020_cs.c @@ -177,7 +177,7 @@ static void com20020_detach(struct pcmcia_device *link) @@ -6941,7 +6910,7 @@ index cf607ffcf..9cc5eb6a8 100644 dev_dbg(&link->dev, "kfree2...\n"); kfree(info); diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c -index cf214b730..3856b447d 100644 +index cf214b730671..3856b447d38e 100644 --- 
a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -396,7 +396,7 @@ static int __init com90io_init(void) @@ -6963,7 +6932,7 @@ index cf214b730..3856b447d 100644 module_init(com90io_init) diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c -index 3dc3d533c..d8dfb9ea0 100644 +index 3dc3d533cb19..d8dfb9ea0de8 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -554,7 +554,7 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem, @@ -6985,7 +6954,7 @@ index 3dc3d533c..d8dfb9ea0 100644 } diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h -index 647506064..0321be773 100644 +index 6475060649e9..0321be77366c 100644 --- a/drivers/net/ethernet/chelsio/cxgb/common.h +++ b/drivers/net/ethernet/chelsio/cxgb/common.h @@ -238,7 +238,6 @@ struct adapter { @@ -7023,7 +6992,7 @@ index 647506064..0321be773 100644 void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat, int speed, int duplex, int pause); diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c -index 1311eac9e..c827273c4 100644 +index 1311eac9eef2..c827273c4bd7 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -211,9 +211,10 @@ static int cxgb_up(struct adapter *adapter) @@ -7102,7 +7071,7 @@ index 1311eac9e..c827273c4 100644 mac_stats_task); diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c -index 2d9c2b5a6..cda01f22c 100644 +index 2d9c2b5a690a..cda01f22c71c 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -940,10 +940,11 @@ void t1_sge_intr_clear(struct sge *sge) @@ -7198,7 +7167,7 @@ index 2d9c2b5a6..cda01f22c 100644 /* diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h -index a1ba591b3..716705b96 100644 +index a1ba591b3431..716705b96f26 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.h +++ b/drivers/net/ethernet/chelsio/cxgb/sge.h @@ -74,6 +74,7 @@ struct sge *t1_sge_create(struct adapter *, struct sge_params *); @@ -7219,7 +7188,7 @@ index a1ba591b3..716705b96 100644 void t1_sge_intr_disable(struct sge *); void t1_sge_intr_clear(struct sge *); diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c -index ea0f8741d..310add28f 100644 +index ea0f8741d7cf..310add28fcf5 100644 --- a/drivers/net/ethernet/chelsio/cxgb/subr.c +++ b/drivers/net/ethernet/chelsio/cxgb/subr.c @@ -170,7 +170,7 @@ void t1_link_changed(adapter_t *adapter, int port_id) @@ -7347,7 +7316,7 @@ index ea0f8741d..310add28f 100644 #ifdef CONFIG_CHELSIO_T1_1G if (!t1_is_asic(adapter)) diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c -index e3a885891..df0eab479 100644 +index e3a8858915b3..df0eab479d51 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -963,7 +963,7 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue) @@ -7360,7 +7329,7 @@ index e3a885891..df0eab479 100644 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x " "TxFrameId %2.2x," diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c -index 4185ca3dd..cf5c33d0f 100644 +index 4185ca3dd575..cf5c33d0fbdb 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1265,9 +1265,9 @@ jme_stop_shutdown_timer(struct jme_adapter *jme) @@ -7410,7 +7379,7 @@ 
index 4185ca3dd..cf5c33d0f 100644 jme->reg_ghc = 0; diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h -index a2c3b00d9..2af76329b 100644 +index a2c3b00d939d..2af76329b4a2 100644 --- a/drivers/net/ethernet/jme.h +++ b/drivers/net/ethernet/jme.h @@ -411,7 +411,7 @@ struct jme_adapter { @@ -7423,7 +7392,7 @@ index a2c3b00d9..2af76329b 100644 unsigned long flags; u32 reg_txcs; diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c -index 71e2ada86..72e2e71aa 100644 +index 71e2ada86793..72e2e71aac0e 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -251,7 +251,7 @@ void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc) @@ -7436,10 +7405,10 @@ index 71e2ada86..72e2e71aa 100644 /* Find first taken slot. */ for (slot = 0; slot < ATH_BCBUF; slot++) { diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c -index a070e69bb..1fea850af 100644 +index 4353443b89d8..03e2569da934 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c -@@ -1457,7 +1457,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +@@ -1522,7 +1522,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) * Prevents hv_pci_onchannelcallback() from running concurrently * in the tasklet. */ @@ -7449,7 +7418,7 @@ index a070e69bb..1fea850af 100644 /* * Since this function is called with IRQ locks held, can't diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c -index 0f9274960..dc97e4f1f 100644 +index 0f9274960dc6..dc97e4f1f4ad 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1452,11 +1452,11 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, @@ -7507,7 +7476,7 @@ index 0f9274960..dc97e4f1f 100644 } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c -index 5ea426eff..0d6b9acc7 100644 +index 5ea426effa60..0d6b9acc7cf8 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -828,7 +828,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) @@ -7529,7 +7498,7 @@ index 5ea426eff..0d6b9acc7 100644 list_for_each_entry_safe(fcf, next, &del_list, list) { /* Removes fcf from current list */ diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c -index 4261380af..65160eaaa 100644 +index 4261380af97b..65160eaaa929 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -826,10 +826,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, @@ -7546,10 +7515,10 @@ index 4261380af..65160eaaa 100644 /* peek cache of free slot */ if (pool->left != FC_XID_UNKNOWN) { diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h -index 34aa2714f..42cd2baa7 100644 +index b6dc9003b8c4..d5cf70ead85b 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h -@@ -131,12 +131,55 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value) +@@ -153,12 +153,55 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value) up->dl_write(up, value); } @@ -7606,7 +7575,7 @@ index 34aa2714f..42cd2baa7 100644 return true; } -@@ -145,7 +188,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up) +@@ -167,7 +210,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up) if (!(up->ier & UART_IER_THRI)) return false; up->ier &= ~UART_IER_THRI; @@ -7616,7 +7585,7 @@ index 
34aa2714f..42cd2baa7 100644 } diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c -index 98ce484f1..ceba24927 100644 +index 98ce484f1089..ceba24927c1e 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -275,10 +275,8 @@ static void serial8250_backup_timeout(struct timer_list *t) @@ -7665,7 +7634,7 @@ index 98ce484f1..ceba24927 100644 .device = uart_console_device, .setup = univ8250_console_setup, diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c -index fbcc90c31..b33cb454c 100644 +index fbcc90c31ca1..b33cb454ce03 100644 --- a/drivers/tty/serial/8250/8250_fsl.c +++ b/drivers/tty/serial/8250/8250_fsl.c @@ -60,9 +60,18 @@ int fsl8250_handle_irq(struct uart_port *port) @@ -7688,7 +7657,7 @@ index fbcc90c31..b33cb454c 100644 port->ops->stop_rx(port); } else { diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c -index 988bf6bcc..bcd26d672 100644 +index 988bf6bcce42..bcd26d672539 100644 --- a/drivers/tty/serial/8250/8250_ingenic.c +++ b/drivers/tty/serial/8250/8250_ingenic.c @@ -146,6 +146,8 @@ OF_EARLYCON_DECLARE(x1000_uart, "ingenic,x1000-uart", @@ -7714,7 +7683,7 @@ index 988bf6bcc..bcd26d672 100644 if (ier & UART_IER_MSI) value |= UART_MCR_MDCE | UART_MCR_FCM; diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c -index de48a5846..d246f2755 100644 +index de48a58460f4..d246f2755fed 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -222,12 +222,37 @@ static void mtk8250_shutdown(struct uart_port *port) @@ -7758,10 +7727,10 @@ index de48a5846..d246f2755 100644 static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c -index 43884e8b5..74ce9ef73 100644 +index 9d60418e4adb..d35c0506b013 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c -@@ -762,7 +762,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) +@@ -741,7 +741,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) serial_out(p, UART_EFR, UART_EFR_ECB); serial_out(p, UART_LCR, 0); } @@ -7770,7 +7739,7 @@ index 43884e8b5..74ce9ef73 100644 if (p->capabilities & UART_CAP_EFR) { serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B); serial_out(p, UART_EFR, efr); -@@ -1436,7 +1436,7 @@ static void serial8250_stop_rx(struct uart_port *port) +@@ -1415,7 +1415,7 @@ static void serial8250_stop_rx(struct uart_port *port) up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); up->port.read_status_mask &= ~UART_LSR_DR; @@ -7779,7 +7748,7 @@ index 43884e8b5..74ce9ef73 100644 serial8250_rpm_put(up); } -@@ -1466,7 +1466,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p) +@@ -1445,7 +1445,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p) serial8250_clear_and_reinit_fifos(p); p->ier |= UART_IER_RLSI | UART_IER_RDI; @@ -7788,7 +7757,7 @@ index 43884e8b5..74ce9ef73 100644 } } EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx); -@@ -1708,7 +1708,7 @@ static void serial8250_disable_ms(struct uart_port *port) +@@ -1687,7 +1687,7 @@ static void serial8250_disable_ms(struct uart_port *port) mctrl_gpio_disable_ms(up->gpios); up->ier &= ~UART_IER_MSI; @@ -7797,7 +7766,7 @@ index 43884e8b5..74ce9ef73 100644 } static void serial8250_enable_ms(struct uart_port *port) -@@ -1724,7 +1724,7 @@ static void serial8250_enable_ms(struct uart_port *port) +@@ -1703,7 +1703,7 @@ 
static void serial8250_enable_ms(struct uart_port *port) up->ier |= UART_IER_MSI; serial8250_rpm_get(up); @@ -7806,7 +7775,7 @@ index 43884e8b5..74ce9ef73 100644 serial8250_rpm_put(up); } -@@ -2142,14 +2142,7 @@ static void serial8250_put_poll_char(struct uart_port *port, +@@ -2121,14 +2121,7 @@ static void serial8250_put_poll_char(struct uart_port *port, struct uart_8250_port *up = up_to_u8250p(port); serial8250_rpm_get(up); @@ -7822,7 +7791,7 @@ index 43884e8b5..74ce9ef73 100644 wait_for_xmitr(up, BOTH_EMPTY); /* -@@ -2162,7 +2155,7 @@ static void serial8250_put_poll_char(struct uart_port *port, +@@ -2141,7 +2134,7 @@ static void serial8250_put_poll_char(struct uart_port *port, * and restore the IER */ wait_for_xmitr(up, BOTH_EMPTY); @@ -7831,7 +7800,7 @@ index 43884e8b5..74ce9ef73 100644 serial8250_rpm_put(up); } -@@ -2465,7 +2458,7 @@ void serial8250_do_shutdown(struct uart_port *port) +@@ -2444,7 +2437,7 @@ void serial8250_do_shutdown(struct uart_port *port) */ spin_lock_irqsave(&port->lock, flags); up->ier = 0; @@ -7840,7 +7809,7 @@ index 43884e8b5..74ce9ef73 100644 spin_unlock_irqrestore(&port->lock, flags); synchronize_irq(port->irq); -@@ -2821,7 +2814,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, +@@ -2800,7 +2793,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, if (up->capabilities & UART_CAP_RTOIE) up->ier |= UART_IER_RTOIE; @@ -7849,7 +7818,7 @@ index 43884e8b5..74ce9ef73 100644 if (up->capabilities & UART_CAP_EFR) { unsigned char efr = 0; -@@ -3289,7 +3282,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults); +@@ -3268,7 +3261,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults); #ifdef CONFIG_SERIAL_8250_CONSOLE @@ -7858,7 +7827,7 @@ index 43884e8b5..74ce9ef73 100644 { struct uart_8250_port *up = up_to_u8250p(port); -@@ -3297,6 +3290,18 @@ static void serial8250_console_putchar(struct uart_port *port, int ch) +@@ -3276,6 +3269,18 @@ static void serial8250_console_putchar(struct uart_port *port, int ch) serial_port_out(port, UART_TX, ch); } @@ -7877,7 +7846,7 @@ index 43884e8b5..74ce9ef73 100644 /* * Restore serial console when h/w power-off detected */ -@@ -3318,6 +3323,32 @@ static void serial8250_console_restore(struct uart_8250_port *up) +@@ -3297,6 +3302,32 @@ static void serial8250_console_restore(struct uart_8250_port *up) serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); } @@ -7910,7 +7879,7 @@ index 43884e8b5..74ce9ef73 100644 /* * Print a string to the serial port trying not to disturb * any possible real use of the port... 
-@@ -3334,24 +3365,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3313,24 +3344,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, struct uart_port *port = &up->port; unsigned long flags; unsigned int ier; @@ -7937,7 +7906,7 @@ index 43884e8b5..74ce9ef73 100644 /* check scratch reg to see if port powered off during system sleep */ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) { -@@ -3365,7 +3384,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3344,7 +3363,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, mdelay(port->rs485.delay_rts_before_send); } @@ -7947,7 +7916,7 @@ index 43884e8b5..74ce9ef73 100644 /* * Finally, wait for transmitter to become empty -@@ -3378,8 +3399,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3357,8 +3378,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, if (em485->tx_stopped) up->rs485_stop_tx(up); } @@ -7957,7 +7926,7 @@ index 43884e8b5..74ce9ef73 100644 /* * The receive handling will happen properly because the -@@ -3391,8 +3411,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3370,8 +3390,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, if (up->msr_saved_flags) serial8250_modem_status(up); @@ -7967,7 +7936,7 @@ index 43884e8b5..74ce9ef73 100644 } static unsigned int probe_baud(struct uart_port *port) -@@ -3412,6 +3431,7 @@ static unsigned int probe_baud(struct uart_port *port) +@@ -3391,6 +3410,7 @@ static unsigned int probe_baud(struct uart_port *port) int serial8250_console_setup(struct uart_port *port, char *options, bool probe) { @@ -7975,7 +7944,7 @@ index 43884e8b5..74ce9ef73 100644 int baud = 9600; int bits = 8; int parity = 'n'; -@@ -3421,6 +3441,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe) +@@ -3400,6 +3420,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe) if (!port->iobase && !port->membase) return -ENODEV; @@ -7985,10 +7954,10 @@ index 43884e8b5..74ce9ef73 100644 uart_parse_options(options, &baud, &parity, &bits, &flow); else if (probe) diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c -index 75aa943f6..2ad3ae943 100644 +index d80e8064b75e..4c24c5197b62 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c -@@ -2321,18 +2321,24 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) +@@ -2268,18 +2268,24 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) { struct uart_amba_port *uap = amba_ports[co->index]; unsigned int old_cr = 0, new_cr; @@ -8017,7 +7986,7 @@ index 75aa943f6..2ad3ae943 100644 /* * First save the CR then disable the interrupts -@@ -2358,8 +2364,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) +@@ -2305,8 +2311,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) pl011_write(old_cr, uap, REG_CR); if (locked) @@ -8028,7 +7997,7 @@ index 75aa943f6..2ad3ae943 100644 clk_disable(uap->clk); } diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c -index 84e815808..342005ed5 100644 +index 84e8158088cd..342005ed5ebf 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -1311,13 +1311,10 @@ serial_omap_console_write(struct console *co, const char *s, @@ -8059,22 +8028,24 @@ index 
84e815808..342005ed5 100644 static int __init diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c -index c3abcd043..0ab374ec7 100644 +index c3abcd0439f9..2479ea4c843b 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c -@@ -172,10 +172,8 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size) +@@ -172,10 +172,10 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size) have queued and recycle that ? */ if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit) return NULL; - printk_safe_enter(); ++ p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC | __GFP_NOWARN); - printk_safe_exit(); ++ if (p == NULL) return NULL; diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c -index dae9a57d7..9a6a0ec4d 100644 +index dae9a57d7ec0..9a6a0ec4d1fb 100644 --- a/fs/afs/dir_silly.c +++ b/fs/afs/dir_silly.c @@ -239,7 +239,7 @@ int afs_silly_iput(struct dentry *dentry, struct inode *inode) @@ -8087,7 +8058,7 @@ index dae9a57d7..9a6a0ec4d 100644 _enter("%p{%pd},%llx", dentry, dentry, vnode->fid.vnode); diff --git a/fs/aio.c b/fs/aio.c -index 5e5333d72..cc16ac777 100644 +index 5e5333d72c69..cc16ac777f0d 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -43,7 +43,6 @@ @@ -8108,7 +8079,7 @@ index 5e5333d72..cc16ac777 100644 INIT_WORK(&req->work, aio_poll_put_work); schedule_work(&req->work); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h -index bcc6848bb..fabbf6cc4 100644 +index bcc6848bb6d6..fabbf6cc45bf 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -17,7 +17,6 @@ @@ -8120,7 +8091,7 @@ index bcc6848bb..fabbf6cc4 100644 #include #include diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c -index 799be3a5d..d5165a7da 100644 +index 799be3a5d25e..d5165a7da071 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -81,7 +81,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, @@ -8133,7 +8104,7 @@ index 799be3a5d..d5165a7da 100644 cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); diff --git a/fs/dcache.c b/fs/dcache.c -index f5b78cc80..b2e0d1a07 100644 +index f5b78cc80a00..b2e0d1a07644 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2566,9 +2566,10 @@ EXPORT_SYMBOL(d_rehash); @@ -8220,7 +8191,7 @@ index f5b78cc80..b2e0d1a07 100644 hlist_bl_unlock(b); INIT_HLIST_NODE(&dentry->d_u.d_alias); diff --git a/fs/eventfd.c b/fs/eventfd.c -index df466ef81..9035ca60b 100644 +index df466ef81ddd..9035ca60bfcf 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -25,8 +25,6 @@ @@ -8260,7 +8231,7 @@ index df466ef81..9035ca60b 100644 return n; diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h -index 64aa552b2..7dae569da 100644 +index 64aa552b296d..7dae569dafb9 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -95,7 +95,6 @@ extern unsigned fscache_debug; @@ -8272,7 +8243,7 @@ index 64aa552b2..7dae569da 100644 extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n); diff --git a/fs/fscache/main.c b/fs/fscache/main.c -index 4207f98e4..85f8cf3a3 100644 +index 4207f98e405f..85f8cf3a323d 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -41,8 +41,6 @@ struct kobject *fscache_root; @@ -8303,7 +8274,7 @@ index 4207f98e4..85f8cf3a3 100644 if (ret < 0) goto error_proc; diff --git a/fs/fscache/object.c b/fs/fscache/object.c -index cb2146e02..fb9794dce 100644 +index cb2146e02cd5..fb9794dce721 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -807,6 +807,8 @@ void fscache_object_destroy(struct fscache_object *object) @@ -8353,7 +8324,7 @@ index 
cb2146e02..fb9794dce 100644 return fscache_object_congested(); } diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c -index bc2678323..3176913fa 100644 +index bc267832310c..3176913fae6c 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -158,7 +158,7 @@ static int fuse_direntplus_link(struct file *file, @@ -8366,7 +8337,7 @@ index bc2678323..3176913fa 100644 if (!o->nodeid) { /* diff --git a/fs/inode.c b/fs/inode.c -index 7436a17a2..45a821a8c 100644 +index 7436a17a20c1..45a821a8cc46 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -158,7 +158,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) @@ -8379,10 +8350,10 @@ index 7436a17a2..45a821a8c 100644 inode->dirtied_when = 0; diff --git a/fs/namei.c b/fs/namei.c -index 4b55e176c..2c2684aa9 100644 +index 07c00ade4c1a..a53e99d66dfa 100644 --- a/fs/namei.c +++ b/fs/namei.c -@@ -1526,7 +1526,7 @@ static struct dentry *__lookup_slow(const struct qstr *name, +@@ -1528,7 +1528,7 @@ static struct dentry *__lookup_slow(const struct qstr *name, { struct dentry *dentry, *old; struct inode *inode = dir->d_inode; @@ -8391,7 +8362,7 @@ index 4b55e176c..2c2684aa9 100644 /* Don't go there if it's already dead */ if (unlikely(IS_DEADDIR(inode))) -@@ -3021,7 +3021,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file, +@@ -3023,7 +3023,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file, struct dentry *dentry; int error, create_error = 0; umode_t mode = op->mode; @@ -8401,7 +8372,7 @@ index 4b55e176c..2c2684aa9 100644 if (unlikely(IS_DEADDIR(dir_inode))) return ERR_PTR(-ENOENT); diff --git a/fs/namespace.c b/fs/namespace.c -index 6e76f2a72..dbd1119a5 100644 +index 6e76f2a72cfc..dbd1119a539f 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -14,6 +14,7 @@ @@ -8427,7 +8398,7 @@ index 6e76f2a72..dbd1119a5 100644 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. 
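The lookup-path hunks above (cifs, fuse, namei, and nfs below) all make the same conversion: the on-stack wait queue passed to d_alloc_parallel() becomes a simple waitqueue, matching the prototype change in include/linux/dcache.h further down. A sketch of the resulting pattern, loosely modeled on the patched __lookup_slow() and with hypothetical filesystem code around it:

#include <linux/dcache.h>
#include <linux/swait.h>
#include <linux/err.h>

static struct dentry *example_lookup_slow(struct dentry *parent,
					  const struct qstr *name)
{
	DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
	struct dentry *dentry;

	dentry = d_alloc_parallel(parent, name, &wq);
	if (IS_ERR(dentry) || !d_in_lookup(dentry))
		return dentry;	/* error, or a parallel lookup won the race */

	/* ... a real ->lookup() would read the inode and splice it in ... */

	d_lookup_done(dentry);	/* wakes any waiters sleeping on @wq */
	return dentry;
}

The swait head must stay live until d_lookup_done() runs, which is why every caller converted above completes the lookup before its stack frame unwinds.
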
So we must not load that until diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c -index 9f88ca7b2..bc8a78ecf 100644 +index 9f88ca7b2001..bc8a78ecfe1c 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -484,7 +484,7 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry, @@ -8449,7 +8420,7 @@ index 9f88ca7b2..bc8a78ecf 100644 struct dentry *res; struct iattr attr = { .ia_valid = ATTR_OPEN }; diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c -index b27ebdcce..f86c98a7e 100644 +index b27ebdccef70..f86c98a7ed04 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -13,7 +13,7 @@ @@ -8471,7 +8442,7 @@ index b27ebdcce..f86c98a7e 100644 status = -EBUSY; spin_lock(&dentry->d_lock); diff --git a/fs/proc/array.c b/fs/proc/array.c -index 18a4588c3..decaa7768 100644 +index 18a4588c35be..decaa7768044 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -384,9 +384,9 @@ static inline void task_context_switch_counts(struct seq_file *m, @@ -8487,17 +8458,17 @@ index 18a4588c3..decaa7768 100644 static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm) diff --git a/fs/proc/base.c b/fs/proc/base.c -index 9b4666e75..01667b0eb 100644 +index 98bfd18e61bc..d8bcf815117d 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c -@@ -96,6 +96,7 @@ - #include +@@ -97,6 +97,7 @@ #include #include -+#include #include ++#include #include #include "internal.h" + #include "fd.h" @@ -2161,7 +2162,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, child = d_hash_and_lookup(dir, &qname); @@ -8508,7 +8479,7 @@ index 9b4666e75..01667b0eb 100644 if (IS_ERR(child)) goto end_instantiate; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c -index df435cd91..eb19a3429 100644 +index df435cd91a5b..eb19a342909c 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -684,7 +684,7 @@ static bool proc_sys_fill_cache(struct file *file, @@ -8521,7 +8492,7 @@ index df435cd91..eb19a3429 100644 if (IS_ERR(child)) return false; diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c -index ce03c3dbb..5c2c14d5f 100644 +index ce03c3dbb5c3..5c2c14d5f6fc 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -384,7 +384,8 @@ void pstore_record_init(struct pstore_record *record, @@ -8544,7 +8515,7 @@ index ce03c3dbb..5c2c14d5f 100644 break; diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild -index d1300c6e0..267f6dfb8 100644 +index d1300c6e0a47..267f6dfb8960 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -30,7 +30,7 @@ mandatory-y += irq.h @@ -8557,7 +8528,7 @@ index d1300c6e0..267f6dfb8 100644 mandatory-y += linkage.h mandatory-y += local.h diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h -index d14214dfc..7317e8258 100644 +index d14214dfc10b..7317e8258b48 100644 --- a/include/asm-generic/hardirq.h +++ b/include/asm-generic/hardirq.h @@ -7,9 +7,13 @@ @@ -8577,7 +8548,7 @@ index d14214dfc..7317e8258 100644 #ifndef ack_bad_irq diff --git a/include/asm-generic/kmap_size.h b/include/asm-generic/kmap_size.h new file mode 100644 -index 000000000..9d6c7786a +index 000000000000..9d6c7786a645 --- /dev/null +++ b/include/asm-generic/kmap_size.h @@ -0,0 +1,12 @@ @@ -8595,7 +8566,7 @@ index 000000000..9d6c7786a +#endif diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h deleted file mode 100644 -index 9f95b7b63..000000000 +index 9f95b7b63d19..000000000000 --- a/include/asm-generic/kmap_types.h +++ /dev/null @@ -1,11 +0,0 @@ @@ -8611,7 +8582,7 @@ index 9f95b7b63..000000000 - -#endif diff --git 
a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h -index b4d43a4af..ac255e889 100644 +index b4d43a4af5f7..ac255e889462 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -79,6 +79,9 @@ static __always_inline bool should_resched(int preempt_offset) @@ -8625,10 +8596,10 @@ index b4d43a4af..ac255e889 100644 #define __preempt_schedule() preempt_schedule() extern asmlinkage void preempt_schedule_notrace(void); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index e4bcb11d6..669d276e4 100644 +index 171884608cad..03351f985871 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h -@@ -164,7 +164,7 @@ struct request { +@@ -166,7 +166,7 @@ struct request { */ union { struct hlist_node hash; /* merge hash */ @@ -8638,7 +8609,7 @@ index e4bcb11d6..669d276e4 100644 /* diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h -index a19519f42..eed86eb0a 100644 +index a19519f4241d..eed86eb0a1de 100644 --- a/include/linux/bottom_half.h +++ b/include/linux/bottom_half.h @@ -4,7 +4,7 @@ @@ -8662,7 +8633,7 @@ index a19519f42..eed86eb0a 100644 + #endif /* _LINUX_BH_H */ diff --git a/include/linux/console.h b/include/linux/console.h -index bc2a749e6..027278792 100644 +index bc2a749e6f0d..027278792eea 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -16,6 +16,7 @@ @@ -8707,10 +8678,10 @@ index bc2a749e6..027278792 100644 + #endif /* _LINUX_CONSOLE_H */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h -index 5571bfc2e..82a43ee0b 100644 +index b540e5a60ea9..dacf87c92fc1 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h -@@ -155,6 +155,7 @@ enum cpuhp_state { +@@ -159,6 +159,7 @@ enum cpuhp_state { CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, CPUHP_AP_ONLINE_IDLE, @@ -8719,7 +8690,7 @@ index 5571bfc2e..82a43ee0b 100644 CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_IRQ_AFFINITY_ONLINE, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h -index 0159986ac..c53364c42 100644 +index 0159986ac9ce..c53364c4296d 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -199,6 +199,11 @@ static inline int cpumask_any_and_distribute(const struct cpumask *src1p, @@ -8743,7 +8714,7 @@ index 0159986ac..c53364c42 100644 /** * for_each_cpu - iterate over every cpu in a mask diff --git a/include/linux/dcache.h b/include/linux/dcache.h -index 4bb8b1759..c5821c04a 100644 +index 4bb8b1759438..c5821c04ab88 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -108,7 +108,7 @@ struct dentry { @@ -8765,7 +8736,7 @@ index 4bb8b1759..c5821c04a 100644 extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); extern struct dentry * d_exact_alias(struct dentry *, struct inode *); diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h -index edb5c186b..3f49e6516 100644 +index edb5c186b0b7..3f49e65169c6 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -3,8 +3,7 @@ @@ -8779,7 +8750,7 @@ index edb5c186b..3f49e6516 100644 struct task_struct; diff --git a/include/linux/delay.h b/include/linux/delay.h -index e8607992c..cd24f34b4 100644 +index e8607992c68a..cd24f34b4ad0 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -88,4 +88,10 @@ static inline void fsleep(unsigned long usecs) @@ -8794,7 +8765,7 @@ index e8607992c..cd24f34b4 100644 + #endif /* defined(_LINUX_DELAY_H) */ diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h -index d8e1c798d..d2aca09f7 100644 +index 
d8e1c798dc9d..d2aca09f7027 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -70,7 +70,7 @@ @@ -8807,7 +8778,7 @@ index d8e1c798d..d2aca09f7 100644 /** diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h -index dc4fd8a66..836b4c021 100644 +index dc4fd8a6644d..836b4c021a0a 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -14,6 +14,7 @@ @@ -8845,7 +8816,7 @@ index dc4fd8a66..836b4c021 100644 #endif diff --git a/include/linux/fs.h b/include/linux/fs.h -index 45ea12431..3fb43df18 100644 +index 45ea1243118c..3fb43df18354 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -719,7 +719,7 @@ struct inode { @@ -8858,7 +8829,7 @@ index 45ea12431..3fb43df18 100644 __u32 i_generation; diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h -index 754f67ac4..76878b357 100644 +index 754f67ac4326..76878b357ffa 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -6,6 +6,7 @@ @@ -8908,7 +8879,7 @@ index 754f67ac4..76878b357 100644 } while (0) diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h new file mode 100644 -index 000000000..f9bc6acd3 +index 000000000000..f9bc6acd3679 --- /dev/null +++ b/include/linux/highmem-internal.h @@ -0,0 +1,222 @@ @@ -9135,7 +9106,7 @@ index 000000000..f9bc6acd3 + +#endif diff --git a/include/linux/highmem.h b/include/linux/highmem.h -index 6b27af8fe..ec1edaf12 100644 +index c3b75b4a8fc1..d12f7a5fc853 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -11,217 +11,137 @@ @@ -9464,7 +9435,7 @@ index 6b27af8fe..ec1edaf12 100644 #ifndef clear_user_highpage static inline void clear_user_highpage(struct page *page, unsigned long vaddr) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index 22240a8c3..fc162c252 100644 +index 22240a8c3a1e..fc162c2525ea 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -542,7 +542,7 @@ struct softirq_action @@ -9530,7 +9501,7 @@ index 22240a8c3..fc162c252 100644 { tasklet_disable_nosync(t); diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h -index c75e4d3d8..4bb8223f2 100644 +index c75e4d3d8833..4bb8223f2f82 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -60,22 +60,20 @@ io_mapping_fini(struct io_mapping *mapping) @@ -9602,7 +9573,7 @@ index c75e4d3d8..4bb8223f2 100644 io_mapping_create_wc(resource_size_t base, diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h deleted file mode 100644 -index 6e8895cd4..000000000 +index 6e8895cd4d92..000000000000 --- a/include/linux/irq_cpustat.h +++ /dev/null @@ -1,28 +0,0 @@ @@ -9635,7 +9606,7 @@ index 6e8895cd4..000000000 - -#endif /* __irq_cpustat_h */ diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h -index ec2a47a81..255d2dfec 100644 +index ec2a47a81e42..9448e2bfc602 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -3,6 +3,7 @@ @@ -9650,46 +9621,30 @@ index ec2a47a81..255d2dfec 100644 struct irq_work { struct __call_single_node node; void (*func)(struct irq_work *); -+ struct rcuwait irqwait; ++ struct rcuwait irqwait; }; #define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \ .node = { .u_flags = (_flags), }, \ .func = (_func), \ -+ .irqwait = __RCUWAIT_INITIALIZER(irqwait), \ ++ .irqwait = __RCUWAIT_INITIALIZER(irqwait), \ } #define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0) -@@ -30,10 +33,15 @@ struct irq_work { - #define DEFINE_IRQ_WORK(name, _f) \ - struct irq_work name = IRQ_WORK_INIT(_f) - -+#define 
IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0) -+#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY) -+#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ) -+ - static inline - void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) - { - *work = IRQ_WORK_INIT(func); -+ rcuwait_init(&work->irqwait); - } - - static inline bool irq_work_is_pending(struct irq_work *work) -@@ -46,6 +54,11 @@ static inline bool irq_work_is_busy(struct irq_work *work) +@@ -46,6 +49,11 @@ static inline bool irq_work_is_busy(struct irq_work *work) return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY; } +static inline bool irq_work_is_hard(struct irq_work *work) +{ -+ return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ; ++ return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ; +} + bool irq_work_queue(struct irq_work *work); bool irq_work_queue_on(struct irq_work *work, int cpu); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h -index dc1b213ae..9bbcd8cba 100644 +index dc1b213ae941..9bbcd8cbac50 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -68,6 +68,7 @@ struct irq_desc { @@ -9701,7 +9656,7 @@ index dc1b213ae..9bbcd8cba 100644 struct cpumask *percpu_enabled; const struct cpumask *percpu_affinity; diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h -index fef2d43a7..741aa2008 100644 +index fef2d43a7a1d..741aa2008a34 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -71,14 +71,6 @@ do { \ @@ -9742,7 +9697,7 @@ index fef2d43a7..741aa2008 100644 defined(CONFIG_PREEMPT_TRACER) extern void stop_critical_timings(void); diff --git a/include/linux/kernel.h b/include/linux/kernel.h -index 78a0907f0..e6270bfa6 100644 +index 78a0907f0b04..e6270bfa6105 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -220,6 +220,7 @@ static __always_inline void might_resched(void) @@ -9808,7 +9763,7 @@ index 78a0907f0..e6270bfa6 100644 * abs - return absolute value of an argument * @x: the value. If it is unsigned type, it is converted to signed type first. 
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h -index 3378bcbe5..86673930c 100644 +index 3378bcbe585e..86673930c8ea 100644 --- a/include/linux/kmsg_dump.h +++ b/include/linux/kmsg_dump.h @@ -29,6 +29,18 @@ enum kmsg_dump_reason { @@ -9905,7 +9860,7 @@ index 3378bcbe5..86673930c 100644 } diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h -index 3f02b8186..1b8ae0349 100644 +index 3f02b818625e..1b8ae034946f 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -7,13 +7,39 @@ @@ -10065,7 +10020,7 @@ index 3f02b8186..1b8ae0349 100644 + +#endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index 1c22e294f..41aed4e91 100644 +index 99ae1f9a9019..11929a9d7d3d 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -13,6 +13,7 @@ @@ -10076,7 +10031,7 @@ index 1c22e294f..41aed4e91 100644 #include #include #include -@@ -577,6 +578,9 @@ struct mm_struct { +@@ -603,6 +604,9 @@ struct mm_struct { bool tlb_flush_batched; #endif struct uprobes_state uprobes_state; @@ -10087,7 +10042,7 @@ index 1c22e294f..41aed4e91 100644 atomic_long_t hugetlb_usage; #endif diff --git a/include/linux/mutex.h b/include/linux/mutex.h -index 4d671fba3..90923d300 100644 +index 4d671fba3cab..90923d3008fc 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -22,6 +22,20 @@ @@ -10152,7 +10107,7 @@ index 4d671fba3..90923d300 100644 #endif /* __LINUX_MUTEX_H */ diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h new file mode 100644 -index 000000000..f0b2e07cd +index 000000000000..f0b2e07cd5c5 --- /dev/null +++ b/include/linux/mutex_rt.h @@ -0,0 +1,130 @@ @@ -10287,7 +10242,7 @@ index 000000000..f0b2e07cd + +#endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h -index 5491ad5f4..cd9e5b3f1 100644 +index 5491ad5f48a9..cd9e5b3f1831 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1675,7 +1675,7 @@ struct nfs_unlinkdata { @@ -10300,7 +10255,7 @@ index 5491ad5f4..cd9e5b3f1 100644 struct nfs_fattr dir_attr; long timeout; diff --git a/include/linux/notifier.h b/include/linux/notifier.h -index 2fb373a5c..723bc2df6 100644 +index 2fb373a5c1ed..723bc2df6388 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -58,7 +58,7 @@ struct notifier_block { @@ -10331,7 +10286,7 @@ index 2fb373a5c..723bc2df6 100644 #define BLOCKING_NOTIFIER_INIT(name) { \ .rwsem = __RWSEM_INITIALIZER((name).rwsem), \ diff --git a/include/linux/pid.h b/include/linux/pid.h -index 34afff2dc..514dd026c 100644 +index 34afff2dc888..514dd026c6b8 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -3,6 +3,7 @@ @@ -10343,7 +10298,7 @@ index 34afff2dc..514dd026c 100644 #include diff --git a/include/linux/preempt.h b/include/linux/preempt.h -index 7d9c1c0e1..7b5b2ed55 100644 +index 7d9c1c0e149c..7b5b2ed55531 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -77,31 +77,37 @@ @@ -10629,7 +10584,7 @@ index 7d9c1c0e1..7b5b2ed55 100644 + #endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h -index 7d787f91d..9331b131b 100644 +index 7d787f91db92..9331b131ba25 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -46,6 +46,12 @@ static inline const char *printk_skip_headers(const char *buffer) @@ -10698,7 +10653,7 @@ index 7d787f91d..9331b131b 100644 * ratelimited messages with local ratelimit_state, * no local ratelimit_state used in the !PRINTK case diff --git a/include/linux/rbtree.h 
b/include/linux/rbtree.h -index e0b300de8..fa6b6badd 100644 +index d31ecaf4fdd3..e711efc2e009 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -19,19 +19,9 @@ @@ -10746,7 +10701,7 @@ index e0b300de8..fa6b6badd 100644 /* Same as rb_first(), but O(1) */ diff --git a/include/linux/rbtree_type.h b/include/linux/rbtree_type.h new file mode 100644 -index 000000000..77a89dd2c +index 000000000000..77a89dd2c7c6 --- /dev/null +++ b/include/linux/rbtree_type.h @@ -0,0 +1,31 @@ @@ -10782,7 +10737,7 @@ index 000000000..77a89dd2c + +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h -index 095b3b39b..1effcae06 100644 +index 095b3b39bd03..1effcae06ea1 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -54,6 +54,11 @@ void __rcu_read_unlock(void); @@ -10817,7 +10772,7 @@ index 095b3b39b..1effcae06 100644 RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \ "Illegal context switch in RCU-sched read-side critical section"); \ diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h -index 6fd615a0e..b02009f53 100644 +index 6fd615a0eea9..b02009f53026 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -14,11 +14,15 @@ @@ -10923,7 +10878,7 @@ index 6fd615a0e..b02009f53 100644 extern void rt_mutex_unlock(struct rt_mutex *lock); diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h new file mode 100644 -index 000000000..aafdb0a68 +index 000000000000..aafdb0a685d5 --- /dev/null +++ b/include/linux/rwlock_rt.h @@ -0,0 +1,109 @@ @@ -11037,7 +10992,7 @@ index 000000000..aafdb0a68 + +#endif diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h -index 3bd03e180..0ad226b5d 100644 +index 3bd03e18061c..0ad226b5d8fd 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -1,6 +1,10 @@ @@ -11053,7 +11008,7 @@ index 3bd03e180..0ad226b5d 100644 * and initializers diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h new file mode 100644 -index 000000000..4762391d6 +index 000000000000..4762391d659b --- /dev/null +++ b/include/linux/rwlock_types_rt.h @@ -0,0 +1,56 @@ @@ -11115,7 +11070,7 @@ index 000000000..4762391d6 +#endif diff --git a/include/linux/rwsem-rt.h b/include/linux/rwsem-rt.h new file mode 100644 -index 000000000..0ba8aae9a +index 000000000000..0ba8aae9a198 --- /dev/null +++ b/include/linux/rwsem-rt.h @@ -0,0 +1,70 @@ @@ -11190,7 +11145,7 @@ index 000000000..0ba8aae9a + +#endif diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h -index 4c715be48..9323af8a9 100644 +index 4c715be48717..9323af8a9244 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -16,6 +16,11 @@ @@ -11220,7 +11175,7 @@ index 4c715be48..9323af8a9 100644 * lock for reading */ diff --git a/include/linux/sched.h b/include/linux/sched.h -index d8c974338..df51e1c52 100644 +index a84372945f9e..58e834355ee4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -36,6 +36,7 @@ @@ -11264,21 +11219,21 @@ index d8c974338..df51e1c52 100644 /* * set_special_state() should be used for those states when the blocking task * can not use the regular condition based wait-loop. 
In that case we must -@@ -679,6 +682,13 @@ struct wake_q_node { - struct task_struct_resvd { +@@ -669,6 +672,13 @@ struct wake_q_node { + struct wake_q_node *next; }; +struct kmap_ctrl { +#ifdef CONFIG_KMAP_LOCAL -+ int idx; -+ pte_t pteval[KM_MAX_IDX]; ++ int idx; ++ pte_t pteval[KM_MAX_IDX]; +#endif +}; + - struct task_struct { - #ifdef CONFIG_THREAD_INFO_IN_TASK - /* -@@ -689,6 +699,8 @@ struct task_struct { + /** + * struct task_struct_resvd - KABI extension struct + */ +@@ -691,6 +701,8 @@ struct task_struct { #endif /* -1 unrunnable, 0 runnable, >0 stopped: */ volatile long state; @@ -11287,7 +11242,7 @@ index d8c974338..df51e1c52 100644 /* * This begins the randomizable portion of task_struct. Only -@@ -761,6 +773,11 @@ struct task_struct { +@@ -763,6 +775,11 @@ struct task_struct { int nr_cpus_allowed; const cpumask_t *cpus_ptr; cpumask_t cpus_mask; @@ -11299,9 +11254,9 @@ index d8c974338..df51e1c52 100644 #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; -@@ -866,6 +883,10 @@ struct task_struct { - /* Stalled due to lack of memory */ - unsigned in_memstall:1; +@@ -871,6 +888,10 @@ struct task_struct { + #ifdef CONFIG_IOMMU_SVA + KABI_FILL_HOLE(unsigned pasid_activated:1) #endif +#ifdef CONFIG_EVENTFD + /* Recursion prevention for eventfd_signal() */ @@ -11310,7 +11265,7 @@ index d8c974338..df51e1c52 100644 unsigned long atomic_flags; /* Flags requiring atomic access. */ -@@ -1007,11 +1028,16 @@ struct task_struct { +@@ -1012,11 +1033,16 @@ struct task_struct { /* Signal handlers: */ struct signal_struct *signal; struct sighand_struct __rcu *sighand; @@ -11327,7 +11282,7 @@ index d8c974338..df51e1c52 100644 unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; -@@ -1038,6 +1064,7 @@ struct task_struct { +@@ -1043,6 +1069,7 @@ struct task_struct { raw_spinlock_t pi_lock; struct wake_q_node wake_q; @@ -11335,7 +11290,7 @@ index d8c974338..df51e1c52 100644 #ifdef CONFIG_RT_MUTEXES /* PI waiters blocked on a rt_mutex held by this task: */ -@@ -1065,6 +1092,9 @@ struct task_struct { +@@ -1070,6 +1097,9 @@ struct task_struct { int softirq_context; int irq_config; #endif @@ -11345,7 +11300,7 @@ index d8c974338..df51e1c52 100644 #ifdef CONFIG_LOCKDEP # define MAX_LOCK_DEPTH 48UL -@@ -1350,6 +1380,7 @@ struct task_struct { +@@ -1355,6 +1385,7 @@ struct task_struct { unsigned int sequential_io; unsigned int sequential_io_avg; #endif @@ -11353,7 +11308,7 @@ index d8c974338..df51e1c52 100644 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; #endif -@@ -1821,6 +1852,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr); +@@ -1832,6 +1863,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); @@ -11361,7 +11316,7 @@ index d8c974338..df51e1c52 100644 extern void wake_up_new_task(struct task_struct *tsk); #ifdef CONFIG_SMP -@@ -1918,6 +1950,89 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) +@@ -1929,6 +1961,89 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } @@ -11452,7 +11407,7 @@ index d8c974338..df51e1c52 100644 * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. 
The return diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h -index 9a62ffdd2..412cdaba3 100644 +index 9a62ffdd296f..412cdaba33eb 100644 --- a/include/linux/sched/hotplug.h +++ b/include/linux/sched/hotplug.h @@ -11,8 +11,10 @@ extern int sched_cpu_activate(unsigned int cpu); @@ -11467,10 +11422,10 @@ index 9a62ffdd2..412cdaba3 100644 #endif diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h -index e3e5e149b..6d39ad0f5 100644 +index f58a2730a130..5f4aa984245e 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h -@@ -49,6 +49,17 @@ static inline void mmdrop(struct mm_struct *mm) +@@ -50,6 +50,17 @@ static inline void mmdrop(struct mm_struct *mm) __mmdrop(mm); } @@ -11489,7 +11444,7 @@ index e3e5e149b..6d39ad0f5 100644 * mmget() - Pin the address space associated with a &struct mm_struct. * @mm: The address space to pin. diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h -index e5af028c0..994c25640 100644 +index e5af028c08b4..994c25640e15 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -39,20 +39,12 @@ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p) @@ -11514,7 +11469,7 @@ index e5af028c0..994c25640 100644 extern void normalize_rt_tasks(void); diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h -index 26a2013ac..6e2dff721 100644 +index 26a2013ac39c..6e2dff721547 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h @@ -58,6 +58,17 @@ static inline bool wake_q_empty(struct wake_q_head *head) @@ -11537,7 +11492,7 @@ index 26a2013ac..6e2dff721 100644 #endif /* _LINUX_SCHED_WAKE_Q_H */ diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h -index 9e6550551..ffef674de 100644 +index 9e655055112d..ffef674deda7 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -7,6 +7,7 @@ @@ -11567,7 +11522,7 @@ index 9e6550551..ffef674de 100644 int serial8250_console_exit(struct uart_port *port); diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h -index 93240799a..df2871ed8 100644 +index 93240799a404..df2871ed82dc 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -32,7 +32,7 @@ struct shmem_sb_info { @@ -11580,7 +11535,7 @@ index 93240799a..df2871ed8 100644 unsigned char huge; /* Whether to try for hugepages */ kuid_t uid; /* Mount uid for root directory */ diff --git a/include/linux/signal.h b/include/linux/signal.h -index b256f9c65..ebf6c515a 100644 +index 3038a0610407..fff1656c6b6f 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -265,6 +265,7 @@ static inline void init_sigpending(struct sigpending *sig) @@ -11592,7 +11547,7 @@ index b256f9c65..ebf6c515a 100644 /* Test if 'sig' is valid signal. 
Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h -index 4739ce5f0..b8815001f 100644 +index 26c431883c69..c91fa019e28d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -298,6 +298,7 @@ struct sk_buff_head { @@ -11603,7 +11558,7 @@ index 4739ce5f0..b8815001f 100644 }; struct sk_buff; -@@ -1914,6 +1915,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) +@@ -1924,6 +1925,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) __skb_queue_head_init(list); } @@ -11617,7 +11572,7 @@ index 4739ce5f0..b8815001f 100644 struct lock_class_key *class) { diff --git a/include/linux/smp.h b/include/linux/smp.h -index 84a0b4828..8348fa412 100644 +index 84a0b4828f66..8348fa4127a0 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -260,6 +260,9 @@ static inline int get_boot_cpu_id(void) @@ -11631,7 +11586,7 @@ index 84a0b4828..8348fa412 100644 * Callback to arch code if there's nosmp or maxcpus=0 on the * boot command line: diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h -index 79897841a..c3c70291b 100644 +index 79897841a2cc..c3c70291b46c 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -309,7 +309,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) @@ -11668,7 +11623,7 @@ index 79897841a..c3c70291b 100644 * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h -index 19a9be9d9..da38149f2 100644 +index 19a9be9d97ee..da38149f2843 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) @@ -11683,7 +11638,7 @@ index 19a9be9d9..da38149f2 100644 #endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h new file mode 100644 -index 000000000..3085132ea +index 000000000000..3085132eae38 --- /dev/null +++ b/include/linux/spinlock_rt.h @@ -0,0 +1,155 @@ @@ -11843,7 +11798,7 @@ index 000000000..3085132ea + +#endif diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h -index b981caafe..8d896d3e1 100644 +index b981caafe8bf..8d896d3e1a01 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,93 +9,15 @@ @@ -11949,7 +11904,7 @@ index b981caafe..8d896d3e1 100644 #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h new file mode 100644 -index 000000000..e4549f0dd +index 000000000000..e4549f0dd197 --- /dev/null +++ b/include/linux/spinlock_types_nort.h @@ -0,0 +1,39 @@ @@ -11994,7 +11949,7 @@ index 000000000..e4549f0dd +#endif diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h new file mode 100644 -index 000000000..1d4a180e9 +index 000000000000..1d4a180e983d --- /dev/null +++ b/include/linux/spinlock_types_raw.h @@ -0,0 +1,65 @@ @@ -12065,7 +12020,7 @@ index 000000000..1d4a180e9 +#endif diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h new file mode 100644 -index 000000000..446da786e +index 000000000000..446da786e5d5 --- /dev/null +++ b/include/linux/spinlock_types_rt.h @@ -0,0 +1,38 @@ @@ -12108,7 +12063,7 @@ index 000000000..446da786e + +#endif diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h 
-index c09b6407a..d9b371fa1 100644 +index c09b6407ae1b..d9b371fa13e0 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -1,7 +1,7 @@ @@ -12121,14 +12076,14 @@ index c09b6407a..d9b371fa1 100644 #endif diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h -index 08ec8e2fd..8eac0050b 100644 +index 2959b9e52af4..f9f7c954baeb 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -25,6 +25,7 @@ typedef int (*cpu_stop_fn_t)(void *arg); struct cpu_stop_work { struct list_head list; /* cpu_stopper->works */ cpu_stop_fn_t fn; -+ unsigned long caller; ++ unsigned long caller; void *arg; struct cpu_stop_done *done; KABI_RESERVE(1) @@ -12151,7 +12106,7 @@ index 08ec8e2fd..8eac0050b 100644 /* diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h -index 19f76d87f..1c89c7d9f 100644 +index 19f76d87f20f..7c841bf0a250 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -36,7 +36,17 @@ static inline long set_restart_fn(struct restart_block *restart, @@ -12160,21 +12115,21 @@ index 19f76d87f..1c89c7d9f 100644 -#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) +#ifdef CONFIG_PREEMPT_LAZY -+#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ -+ test_thread_flag(TIF_NEED_RESCHED_LAZY)) -+#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) -+#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)) ++#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ ++ test_thread_flag(TIF_NEED_RESCHED_LAZY)) ++#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) ++#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)) + +#else -+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) -+#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) -+#define tif_need_resched_lazy() 0 ++#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) ++#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) ++#define tif_need_resched_lazy() 0 +#endif #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES static inline int arch_within_stack_frames(const void * const stack, diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h -index 409385b25..4a0f567d6 100644 +index 409385b25ecb..3b3c9de8247b 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -67,6 +67,8 @@ struct trace_entry { @@ -12199,7 +12154,7 @@ index 409385b25..4a0f567d6 100644 + entry->pid = current->pid; entry->type = type; - entry->flags = trace_ctx >> 16; -+ entry->flags = trace_ctx >> 24; ++ entry->flags = trace_ctx >> 24; } -unsigned int tracing_gen_ctx_flags(unsigned long irqflags); @@ -12261,7 +12216,7 @@ index 409385b25..4a0f567d6 100644 struct trace_event_file; diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h -index e81856c0b..66eb968a0 100644 +index e81856c0ba13..66eb968a09d4 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -66,7 +66,7 @@ @@ -12390,7 +12345,7 @@ index e81856c0b..66eb968a0 100644 #endif return __u64_stats_fetch_retry(syncp, start); diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h -index 322dcbfcc..9a3a10ea3 100644 +index 322dcbfcc933..9a3a10ea3e3c 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -63,7 +63,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states); @@ -12414,7 +12369,7 @@ index 322dcbfcc..9a3a10ea3 100644 static inline void count_vm_events(enum 
vm_event_item item, long delta) diff --git a/include/linux/vtime.h b/include/linux/vtime.h -index 2cdeca062..041d6524d 100644 +index 2cdeca062db3..041d6524d144 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -83,36 +83,46 @@ static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } @@ -12481,7 +12436,7 @@ index 2cdeca062..041d6524d 100644 #endif /* _LINUX_KERNEL_VTIME_H */ diff --git a/include/linux/wait.h b/include/linux/wait.h -index 9b8b08331..33001b534 100644 +index 1663e47681a3..20aae69387aa 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -10,6 +10,7 @@ @@ -12493,7 +12448,7 @@ index 9b8b08331..33001b534 100644 typedef struct wait_queue_entry wait_queue_entry_t; diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h -index 6ecf2a022..3145de598 100644 +index 6ecf2a0220db..3145de598645 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -28,6 +28,14 @@ struct ww_class { @@ -12512,7 +12467,7 @@ index 6ecf2a022..3145de598 100644 struct task_struct *task; unsigned long stamp; diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h -index 1424e02ce..163f8415e 100644 +index 1424e02cef90..163f8415e5db 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -6,6 +6,7 @@ @@ -12560,7 +12515,7 @@ index 1424e02ce..163f8415e 100644 struct gnet_stats_rate_est64 *sample); diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h new file mode 100644 -index 000000000..67710bace +index 000000000000..67710bace741 --- /dev/null +++ b/include/net/net_seq_lock.h @@ -0,0 +1,15 @@ @@ -12580,7 +12535,7 @@ index 000000000..67710bace + +#endif diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h -index 9144e0f09..464d14b2a 100644 +index 9144e0f09a30..464d14b2aca3 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -74,7 +74,7 @@ struct netns_xfrm { @@ -12593,7 +12548,7 @@ index 9144e0f09..464d14b2a 100644 spinlock_t xfrm_policy_lock; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h -index 250569d8d..c8b8dba10 100644 +index 250569d8df65..c8b8dba10d9a 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -10,6 +10,7 @@ @@ -12671,12 +12626,12 @@ index 250569d8d..c8b8dba10 100644 struct Qdisc *root = qdisc_root_sleeping(qdisc); diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h -index eb5ec1fb6..0e5ff10e5 100644 +index eb5ec1fb66b4..122d96db9d26 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h -@@ -705,6 +705,18 @@ DECLARE_TRACE(sched_update_nr_running_tp, - TP_PROTO(struct rq *rq, int change), - TP_ARGS(rq, change)); +@@ -732,6 +732,18 @@ DEFINE_EVENT(psi_memstall_template, psi_memstall_leave, + TP_ARGS(function) + ); +DECLARE_TRACE(sched_migrate_disable_tp, + TP_PROTO(struct task_struct *p), @@ -12690,11 +12645,11 @@ index eb5ec1fb6..0e5ff10e5 100644 + TP_PROTO(struct task_struct *p), + TP_ARGS(p)); + - DECLARE_EVENT_CLASS(psi_memstall_template, + #endif /* _TRACE_SCHED_H */ - TP_PROTO(unsigned long function), + /* This part must be outside protection */ diff --git a/init/Kconfig b/init/Kconfig -index beb4a6d1c..51b3bfe34 100644 +index b137a6c043ac..ea2b1b7043a4 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -866,7 +866,7 @@ config NUMA_BALANCING @@ -12706,7 +12661,7 @@ index beb4a6d1c..51b3bfe34 100644 help This option adds support for automatic NUMA aware memory/task placement. 
The mechanism is quite primitive and is based on migrating memory when -@@ -1000,6 +1000,7 @@ config CFS_BANDWIDTH +@@ -1005,6 +1005,7 @@ config CFS_BANDWIDTH config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" depends on CGROUP_SCHED @@ -12714,7 +12669,7 @@ index beb4a6d1c..51b3bfe34 100644 default n help This feature lets you explicitly allocate real CPU bandwidth -@@ -1962,6 +1963,7 @@ choice +@@ -1977,6 +1978,7 @@ choice config SLAB bool "SLAB" @@ -12722,7 +12677,7 @@ index beb4a6d1c..51b3bfe34 100644 select HAVE_HARDENED_USERCOPY_ALLOCATOR help The regular slab allocator that is established and known to work -@@ -1982,6 +1984,7 @@ config SLUB +@@ -1997,6 +1999,7 @@ config SLUB config SLOB depends on EXPERT bool "SLOB (Simple Allocator)" @@ -12730,7 +12685,7 @@ index beb4a6d1c..51b3bfe34 100644 help SLOB replaces the stock allocator with a drastically simpler allocator. SLOB is generally more space efficient but -@@ -2048,7 +2051,7 @@ config SHUFFLE_PAGE_ALLOCATOR +@@ -2063,7 +2066,7 @@ config SHUFFLE_PAGE_ALLOCATOR config SLUB_CPU_PARTIAL default y @@ -12740,7 +12695,7 @@ index beb4a6d1c..51b3bfe34 100644 help Per cpu partial caches accelerate objects allocation and freeing diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks -index 3de8fd118..4198f0273 100644 +index 3de8fd11873b..4198f0273ecd 100644 --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks @@ -251,7 +251,7 @@ config ARCH_USE_QUEUED_RWLOCKS @@ -12753,7 +12708,7 @@ index 3de8fd118..4198f0273 100644 config ARCH_HAS_MMIOWB bool diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt -index e62a62303..b95f8784c 100644 +index e62a623031ea..b95f8784c4e4 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -1,5 +1,11 @@ @@ -12777,7 +12732,7 @@ index e62a62303..b95f8784c 100644 This option turns the kernel into a real-time kernel by replacing various locking primitives (spinlocks, rwlocks, etc.) 
with diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c -index ad09f3fd3..43ed0a0dd 100644 +index dc1f782f8e0a..879204826e1b 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -351,7 +351,7 @@ void cpuset_read_unlock(void) @@ -13088,7 +13043,7 @@ index ad09f3fd3..43ed0a0dd 100644 } diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c -index d2ae14d0b..7b3bea56d 100644 +index d2ae14d0b9e5..7b3bea56d593 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -156,8 +156,9 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep) @@ -13112,7 +13067,7 @@ index d2ae14d0b..7b3bea56d 100644 /* if @may_sleep, play nice and yield if necessary */ if (may_sleep && (need_resched() || diff --git a/kernel/cpu.c b/kernel/cpu.c -index c06ced18f..10b6287af 100644 +index c06ced18f78a..10b6287afe97 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1662,7 +1662,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { @@ -13139,7 +13094,7 @@ index c06ced18f..10b6287af 100644 [CPUHP_AP_SMPBOOT_THREADS] = { .name = "smpboot/threads:online", diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c -index 4e09fab52..1f5c577b9 100644 +index 4e09fab52faf..1f5c577b926e 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2157,7 +2157,7 @@ static int kdb_dmesg(int argc, const char **argv) @@ -13174,7 +13129,7 @@ index 4e09fab52..1f5c577b9 100644 skip--; continue; diff --git a/kernel/entry/common.c b/kernel/entry/common.c -index cea3957eb..790b0992e 100644 +index 2228de39bb4f..4d29b123f0d5 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -2,6 +2,7 @@ @@ -13222,10 +13177,10 @@ index cea3957eb..790b0992e 100644 } } diff --git a/kernel/exit.c b/kernel/exit.c -index ab900b661..2449246d3 100644 +index d612cb5b5943..f1c818aa5eed 100644 --- a/kernel/exit.c +++ b/kernel/exit.c -@@ -152,7 +152,7 @@ static void __exit_signal(struct task_struct *tsk) +@@ -153,7 +153,7 @@ static void __exit_signal(struct task_struct *tsk) * Do this under ->siglock, we can race with another thread * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 
*/ @@ -13235,7 +13190,7 @@ index ab900b661..2449246d3 100644 spin_unlock(&sighand->siglock); diff --git a/kernel/fork.c b/kernel/fork.c -index 8a2e82781..e70cd01fc 100644 +index a01cda37dd25..d66c7f0fa43f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -42,6 +42,7 @@ @@ -13246,7 +13201,7 @@ index 8a2e82781..e70cd01fc 100644 #include #include #include -@@ -290,7 +291,7 @@ static inline void free_thread_stack(struct task_struct *tsk) +@@ -291,7 +292,7 @@ static inline void free_thread_stack(struct task_struct *tsk) return; } @@ -13255,7 +13210,7 @@ index 8a2e82781..e70cd01fc 100644 return; } #endif -@@ -691,6 +692,19 @@ void __mmdrop(struct mm_struct *mm) +@@ -693,6 +694,19 @@ void __mmdrop(struct mm_struct *mm) } EXPORT_SYMBOL_GPL(__mmdrop); @@ -13275,7 +13230,7 @@ index 8a2e82781..e70cd01fc 100644 static void mmdrop_async_fn(struct work_struct *work) { struct mm_struct *mm; -@@ -732,6 +746,15 @@ void __put_task_struct(struct task_struct *tsk) +@@ -734,6 +748,15 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(refcount_read(&tsk->usage)); WARN_ON(tsk == current); @@ -13291,7 +13246,7 @@ index 8a2e82781..e70cd01fc 100644 io_uring_free(tsk); cgroup_free(tsk); task_numa_free(tsk, true); -@@ -930,10 +953,12 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) +@@ -950,10 +973,12 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; @@ -13304,7 +13259,7 @@ index 8a2e82781..e70cd01fc 100644 #ifdef CONFIG_FAULT_INJECTION tsk->fail_nth = 0; -@@ -2029,6 +2054,7 @@ static __latent_entropy struct task_struct *copy_process( +@@ -2076,6 +2101,7 @@ static __latent_entropy struct task_struct *copy_process( spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); @@ -13313,7 +13268,7 @@ index 8a2e82781..e70cd01fc 100644 p->utime = p->stime = p->gtime = 0; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME diff --git a/kernel/futex.c b/kernel/futex.c -index 98a6e1b80..b2b275bc1 100644 +index 98a6e1b80bfe..b2b275bc1958 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1498,6 +1498,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ @@ -13481,32 +13436,59 @@ index 98a6e1b80..b2b275bc1 100644 if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) ret = 0; +diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c +index 8806444a6855..acbce92f99b8 100644 +--- a/kernel/irq/handle.c ++++ b/kernel/irq/handle.c +@@ -193,9 +193,17 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) + irqreturn_t retval; + unsigned int flags = 0; + ++ struct pt_regs *regs = get_irq_regs(); ++ u64 ip = regs ? 
instruction_pointer(regs) : 0; ++ + retval = __handle_irq_event_percpu(desc, &flags); + ++#ifdef CONFIG_PREEMPT_RT ++ desc->random_ip = ip; ++#else + add_interrupt_randomness(desc->irq_data.irq); ++#endif ++ + + if (!noirqdebug) + note_interrupt(desc, retval); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 239f5084b..13d2b25a5 100644 +index 239f5084bfb2..bc59cb61fbe9 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -1299,7 +1299,7 @@ static int irq_thread(void *data) - struct irq_desc *desc = irq_to_desc(action->irq); - irqreturn_t (*handler_fn)(struct irq_desc *desc, - struct irqaction *action); -- -+ sched_set_fifo(current); +@@ -1302,6 +1302,8 @@ static int irq_thread(void *data) + irq_thread_set_ready(desc, action); ++ sched_set_fifo(current); ++ if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, -@@ -1322,6 +1322,11 @@ static int irq_thread(void *data) + &action->thread_flags)) + handler_fn = irq_forced_thread_fn; +@@ -1322,6 +1324,16 @@ static int irq_thread(void *data) if (action_ret == IRQ_WAKE_THREAD) irq_wake_secondary(desc, action); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) { + migrate_disable(); ++ // add_interrupt_randomness(action->irq, 0, ++ // desc->random_ip ^ (unsigned long) action); ++ + add_interrupt_randomness(action->irq); ++ ++ + migrate_enable(); + } wake_threads_waitq(desc); } -@@ -1467,8 +1472,6 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) +@@ -1467,8 +1479,6 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) if (IS_ERR(t)) return PTR_ERR(t); @@ -13515,7 +13497,7 @@ index 239f5084b..13d2b25a5 100644 /* * We keep the reference to the task struct even if * the thread dies to avoid that the interrupt code -@@ -2857,7 +2860,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state); +@@ -2857,7 +2867,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state); * This call sets the internal irqchip state of an interrupt, * depending on the value of @which. 
* @@ -13525,7 +13507,7 @@ index 239f5084b..13d2b25a5 100644 */ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c -index f865e5f4d..dc7311dd7 100644 +index f865e5f4d382..dc7311dd74b1 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -443,6 +443,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true"); @@ -13551,15 +13533,14 @@ index f865e5f4d..dc7311dd7 100644 printk(KERN_WARNING "Misrouted IRQ fixup and polling support " "enabled\n"); diff --git a/kernel/irq_work.c b/kernel/irq_work.c -index fbff25adb..711bd5e87 100644 +index fbff25adb574..d3466e3bacc1 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c -@@ -18,11 +18,37 @@ +@@ -18,11 +18,36 @@ #include #include #include +#include -+#include #include @@ -13569,55 +13550,34 @@ index fbff25adb..711bd5e87 100644 + +static void wake_irq_workd(void) +{ -+ struct task_struct *tsk = __this_cpu_read(irq_workd); ++ struct task_struct *tsk = __this_cpu_read(irq_workd); + -+ if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk) -+ wake_up_process(tsk); ++ if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk) ++ wake_up_process(tsk); +} + +#ifdef CONFIG_SMP +static void irq_work_wake(struct irq_work *entry) +{ -+ wake_irq_workd(); ++ wake_irq_workd(); +} + +static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) = -+ IRQ_WORK_INIT_HARD(irq_work_wake); ++ IRQ_WORK_INIT_HARD(irq_work_wake); +#endif + +static int irq_workd_should_run(unsigned int cpu) +{ -+ return !llist_empty(this_cpu_ptr(&lazy_list)); ++ return !llist_empty(this_cpu_ptr(&lazy_list)); +} /* * Claim the entry so that no one else will poke at it. -@@ -52,15 +78,30 @@ void __weak arch_irq_work_raise(void) +@@ -52,15 +77,29 @@ void __weak arch_irq_work_raise(void) /* Enqueue on current CPU, work must already be claimed and preempt disabled */ static void __irq_work_queue_local(struct irq_work *work) { -+ struct llist_head *list; -+ bool rt_lazy_work = false; -+ bool lazy_work = false; -+ int work_flags; -+ -+ work_flags = atomic_read(&work->node.a_flags); -+ if (work_flags & IRQ_WORK_LAZY) -+ lazy_work = true; -+ else if (IS_ENABLED(CONFIG_PREEMPT_RT) && -+ !(work_flags & IRQ_WORK_HARD_IRQ)) -+ rt_lazy_work = true; -+ -+ if (lazy_work || rt_lazy_work) -+ list = this_cpu_ptr(&lazy_list); -+ else -+ list = this_cpu_ptr(&raised_list); -+ -+ if (!llist_add(&work->node.llist, list)) -+ return; -+ -+ - /* If the work is "lazy", handle it from next tick if any */ +- /* If the work is "lazy", handle it from next tick if any */ - if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) { - if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) && - tick_nohz_tick_stopped()) @@ -13626,68 +13586,79 @@ index fbff25adb..711bd5e87 100644 - if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list))) - arch_irq_work_raise(); - } -+ if (!lazy_work || tick_nohz_tick_stopped()) -+ arch_irq_work_raise(); ++ struct llist_head *list; ++ bool rt_lazy_work = false; ++ bool lazy_work = false; ++ int work_flags; ++ ++ work_flags = atomic_read(&work->node.a_flags); ++ if (work_flags & IRQ_WORK_LAZY) ++ lazy_work = true; ++ else if (IS_ENABLED(CONFIG_PREEMPT_RT) && ++ !(work_flags & IRQ_WORK_HARD_IRQ)) ++ rt_lazy_work = true; ++ ++ if (lazy_work || rt_lazy_work) ++ list = this_cpu_ptr(&lazy_list); ++ else ++ list = this_cpu_ptr(&raised_list); ++ ++ if (!llist_add(&work->node.llist, list)) ++ return; ++ ++ /* If the work is "lazy", handle it from next tick if any */ ++ if (!lazy_work || 
tick_nohz_tick_stopped()) ++ arch_irq_work_raise(); } /* Enqueue the irq work @work on the current CPU */ -@@ -102,10 +143,28 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) +@@ -102,10 +141,28 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) if (cpu != smp_processor_id()) { /* Arch remote IPI send/receive backend aren't NMI safe */ WARN_ON_ONCE(in_nmi()); +- __smp_call_single_queue(cpu, &work->node.llist); + -+ /* -+ * On PREEMPT_RT the items which are not marked as -+ * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work -+ * item is used on the remote CPU to wake the thread. -+ */ -+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && -+ !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) { ++ /* ++ * On PREEMPT_RT the items which are not marked as ++ * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work ++ * item is used on the remote CPU to wake the thread. ++ */ ++ if (IS_ENABLED(CONFIG_PREEMPT_RT) && ++ !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) { ++ ++ if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu))) ++ goto out; ++ ++ work = &per_cpu(irq_work_wakeup, cpu); ++ if (!irq_work_claim(work)) ++ goto out; ++ } + -+ if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu))) -+ goto out; -+ -+ work = &per_cpu(irq_work_wakeup, cpu); -+ if (!irq_work_claim(work)) -+ goto out; -+ } -+ - __smp_call_single_queue(cpu, &work->node.llist); ++ __smp_call_single_queue(cpu, &work->node.llist); } else { __irq_work_queue_local(work); } -+out: ++out: preempt_enable(); return true; -@@ -120,9 +179,8 @@ bool irq_work_needs_cpu(void) - raised = this_cpu_ptr(&raised_list); - lazy = this_cpu_ptr(&lazy_list); - -- if (llist_empty(raised) || arch_irq_work_has_interrupt()) -- if (llist_empty(lazy)) -- return false; -+ if (llist_empty(raised) && llist_empty(lazy)) -+ return false; - - /* All work should have been flushed before going offline */ - WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); -@@ -153,6 +211,10 @@ void irq_work_single(void *arg) +@@ -153,14 +210,23 @@ void irq_work_single(void *arg) */ flags &= ~IRQ_WORK_PENDING; (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY); + -+ if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) || -+ !arch_irq_work_has_interrupt()) ++ if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) || ++ !arch_irq_work_has_interrupt()) + rcuwait_wake_up(&work->irqwait); } static void irq_work_run_list(struct llist_head *list) -@@ -160,7 +222,12 @@ static void irq_work_run_list(struct llist_head *list) + { struct irq_work *work, *tmp; struct llist_node *llnode; - +- - BUG_ON(!irqs_disabled()); ++ + /* + * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed + * in a per-CPU thread in preemptible context. 
Only the items which are @@ -13697,7 +13668,7 @@ index fbff25adb..711bd5e87 100644 if (llist_empty(list)) return; -@@ -177,7 +244,10 @@ static void irq_work_run_list(struct llist_head *list) +@@ -177,7 +243,10 @@ static void irq_work_run_list(struct llist_head *list) void irq_work_run(void) { irq_work_run_list(this_cpu_ptr(&raised_list)); @@ -13709,12 +13680,11 @@ index fbff25adb..711bd5e87 100644 } EXPORT_SYMBOL_GPL(irq_work_run); -@@ -187,7 +257,11 @@ void irq_work_tick(void) +@@ -187,7 +256,10 @@ void irq_work_tick(void) if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) irq_work_run_list(raised); - irq_work_run_list(this_cpu_ptr(&lazy_list)); -+ + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + irq_work_run_list(this_cpu_ptr(&lazy_list)); + else @@ -13722,18 +13692,18 @@ index fbff25adb..711bd5e87 100644 } /* -@@ -197,8 +271,42 @@ void irq_work_tick(void) +@@ -197,8 +269,42 @@ void irq_work_tick(void) void irq_work_sync(struct irq_work *work) { lockdep_assert_irqs_enabled(); -+ might_sleep(); ++ might_sleep(); + -+ if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) || -+ !arch_irq_work_has_interrupt()) { -+ rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work), -+ TASK_UNINTERRUPTIBLE); -+ return; -+ } ++ if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) || ++ !arch_irq_work_has_interrupt()) { ++ rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work), ++ TASK_UNINTERRUPTIBLE); ++ return; ++ } while (irq_work_is_busy(work)) cpu_relax(); @@ -13765,9 +13735,8 @@ index fbff25adb..711bd5e87 100644 + return 0; +} +early_initcall(irq_work_init_threads); -\ No newline at end of file diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c -index b9a6f4658..c26219f34 100644 +index b9a6f4658f89..c26219f34445 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -984,7 +984,6 @@ void crash_kexec(struct pt_regs *regs) @@ -13779,7 +13748,7 @@ index b9a6f4658..c26219f34 100644 /* diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c -index 35859da8b..dfff31ed6 100644 +index 35859da8bd4f..dfff31ed644a 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -138,6 +138,15 @@ KERNEL_ATTR_RO(vmcoreinfo); @@ -13809,7 +13778,7 @@ index 35859da8b..dfff31ed6 100644 NULL }; diff --git a/kernel/kthread.c b/kernel/kthread.c -index 508fe5278..3ce6a31db 100644 +index 508fe5278285..3ce6a31db7b4 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -264,6 +264,7 @@ EXPORT_SYMBOL_GPL(kthread_parkme); @@ -13857,7 +13826,7 @@ index 508fe5278..3ce6a31db 100644 kfree(create); return task; diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile -index 6d11cfb9b..c7fbf737e 100644 +index 6d11cfb9b41f..c7fbf737e16e 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile @@ -3,7 +3,7 @@ @@ -13896,10 +13865,10 @@ index 6d11cfb9b..c7fbf737e 100644 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c -index b6683cefe..4be12aad9 100644 +index 6cbd2b444476..f2f5defaf4e7 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c -@@ -5412,6 +5412,7 @@ static noinstr void check_flags(unsigned long flags) +@@ -5413,6 +5413,7 @@ static noinstr void check_flags(unsigned long flags) } } @@ -13907,7 +13876,7 @@ index b6683cefe..4be12aad9 100644 /* * We dont accurately track softirq state in e.g. 
* hardirq contexts (such as on 4KSTACKS), so only -@@ -5426,6 +5427,7 @@ static noinstr void check_flags(unsigned long flags) +@@ -5427,6 +5428,7 @@ static noinstr void check_flags(unsigned long flags) DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); } } @@ -13917,7 +13886,7 @@ index b6683cefe..4be12aad9 100644 print_irqtrace_events(current); diff --git a/kernel/locking/mutex-rt.c b/kernel/locking/mutex-rt.c new file mode 100644 -index 000000000..2b849e6b9 +index 000000000000..2b849e6b9b4a --- /dev/null +++ b/kernel/locking/mutex-rt.c @@ -0,0 +1,224 @@ @@ -14146,7 +14115,7 @@ index 000000000..2b849e6b9 +} +EXPORT_SYMBOL(atomic_dec_and_mutex_lock); diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c -index 36e69100e..fb1501003 100644 +index 36e69100e8e0..fb150100335f 100644 --- a/kernel/locking/rtmutex-debug.c +++ b/kernel/locking/rtmutex-debug.c @@ -32,110 +32,12 @@ @@ -14285,7 +14254,7 @@ index 36e69100e..fb1501003 100644 } - diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h -index fc549713b..659e93e25 100644 +index fc549713bba3..659e93e256c6 100644 --- a/kernel/locking/rtmutex-debug.h +++ b/kernel/locking/rtmutex-debug.h @@ -18,20 +18,9 @@ extern void debug_rt_mutex_unlock(struct rt_mutex *lock); @@ -14310,7 +14279,7 @@ index fc549713b..659e93e25 100644 - debug_rt_mutex_print_deadlock(w); -} diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index f00dd928f..40539bc43 100644 +index a82d1176e7c6..8fb8662160dd 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -8,6 +8,11 @@ @@ -14355,8 +14324,8 @@ index f00dd928f..40539bc43 100644 static inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, -@@ -267,6 +279,27 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left, - return 1; +@@ -275,6 +287,27 @@ static inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) + return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b)); } +#define STEAL_NORMAL 0 @@ -14383,7 +14352,7 @@ index f00dd928f..40539bc43 100644 static void rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { -@@ -371,6 +404,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, +@@ -353,6 +386,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, return debug_rt_mutex_detect_deadlock(waiter, chwalk); } @@ -14398,7 +14367,7 @@ index f00dd928f..40539bc43 100644 /* * Max number of times we'll walk the boosting chain: */ -@@ -378,7 +419,8 @@ int max_lock_depth = 1024; +@@ -360,7 +401,8 @@ int max_lock_depth = 1024; static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) { @@ -14408,7 +14377,7 @@ index f00dd928f..40539bc43 100644 } /* -@@ -514,7 +556,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, +@@ -496,7 +538,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * reached or the state of the chain has changed while we * dropped the locks. */ @@ -14417,7 +14386,7 @@ index f00dd928f..40539bc43 100644 goto out_unlock_pi; /* -@@ -597,7 +639,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, +@@ -579,7 +621,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * walk, we detected a deadlock. 
*/ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { @@ -14425,7 +14394,7 @@ index f00dd928f..40539bc43 100644 raw_spin_unlock(&lock->wait_lock); ret = -EDEADLK; goto out_unlock_pi; -@@ -694,13 +735,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, +@@ -676,13 +717,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * follow here. This is the end of the chain we are walking. */ if (!rt_mutex_owner(lock)) { @@ -14444,7 +14413,7 @@ index f00dd928f..40539bc43 100644 raw_spin_unlock_irq(&lock->wait_lock); return 0; } -@@ -801,9 +845,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, +@@ -783,9 +827,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * @task: The task which wants to acquire the lock * @waiter: The waiter that is queued to the lock's wait tree if the * callsite called task_blocked_on_lock(), otherwise NULL @@ -14458,7 +14427,7 @@ index f00dd928f..40539bc43 100644 { lockdep_assert_held(&lock->wait_lock); -@@ -839,12 +885,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +@@ -821,12 +867,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, */ if (waiter) { /* @@ -14474,7 +14443,7 @@ index f00dd928f..40539bc43 100644 /* * We can acquire the lock. Remove the waiter from the * lock waiters tree. -@@ -862,14 +907,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +@@ -844,14 +889,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, */ if (rt_mutex_has_waiters(lock)) { /* @@ -14493,7 +14462,7 @@ index f00dd928f..40539bc43 100644 /* * The current top waiter stays enqueued. We * don't have to change anything in the lock -@@ -916,6 +959,329 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +@@ -898,6 +941,329 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, return 1; } @@ -14823,7 +14792,7 @@ index f00dd928f..40539bc43 100644 /* * Task blocks on lock. * -@@ -948,6 +1314,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, +@@ -930,6 +1296,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, return -EDEADLK; raw_spin_lock(&task->pi_lock); @@ -14846,7 +14815,7 @@ index f00dd928f..40539bc43 100644 waiter->task = task; waiter->lock = lock; waiter->prio = task->prio; -@@ -971,7 +1353,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, +@@ -953,7 +1335,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, rt_mutex_enqueue_pi(owner, waiter); rt_mutex_adjust_prio(owner); @@ -14855,7 +14824,7 @@ index f00dd928f..40539bc43 100644 chain_walk = 1; } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { chain_walk = 1; -@@ -1013,6 +1395,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, +@@ -995,6 +1377,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, * Called with lock->wait_lock held and interrupts disabled. 
*/ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, @@ -14863,7 +14832,7 @@ index f00dd928f..40539bc43 100644 struct rt_mutex *lock) { struct rt_mutex_waiter *waiter; -@@ -1052,7 +1435,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, +@@ -1034,7 +1417,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, * Pairs with preempt_enable() in rt_mutex_postunlock(); */ preempt_disable(); @@ -14875,7 +14844,7 @@ index f00dd928f..40539bc43 100644 raw_spin_unlock(¤t->pi_lock); } -@@ -1067,7 +1453,7 @@ static void remove_waiter(struct rt_mutex *lock, +@@ -1049,7 +1435,7 @@ static void remove_waiter(struct rt_mutex *lock, { bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); @@ -14884,7 +14853,7 @@ index f00dd928f..40539bc43 100644 lockdep_assert_held(&lock->wait_lock); -@@ -1093,7 +1479,8 @@ static void remove_waiter(struct rt_mutex *lock, +@@ -1075,7 +1461,8 @@ static void remove_waiter(struct rt_mutex *lock, rt_mutex_adjust_prio(owner); /* Store the lock on which owner is blocked or NULL */ @@ -14894,7 +14863,7 @@ index f00dd928f..40539bc43 100644 raw_spin_unlock(&owner->pi_lock); -@@ -1129,26 +1516,28 @@ void rt_mutex_adjust_pi(struct task_struct *task) +@@ -1111,26 +1498,28 @@ void rt_mutex_adjust_pi(struct task_struct *task) raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; @@ -14926,7 +14895,7 @@ index f00dd928f..40539bc43 100644 } /** -@@ -1164,7 +1553,8 @@ void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) +@@ -1146,7 +1535,8 @@ void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, @@ -14936,7 +14905,7 @@ index f00dd928f..40539bc43 100644 { int ret = 0; -@@ -1173,24 +1563,23 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, +@@ -1155,24 +1545,23 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, if (try_to_take_rt_mutex(lock, current, waiter)) break; @@ -14972,7 +14941,7 @@ index f00dd928f..40539bc43 100644 schedule(); raw_spin_lock_irq(&lock->wait_lock); -@@ -1211,43 +1600,110 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, +@@ -1193,43 +1582,110 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, if (res != -EDEADLOCK || detect_deadlock) return; @@ -15106,7 +15075,7 @@ index f00dd928f..40539bc43 100644 return 0; } -@@ -1257,16 +1713,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, +@@ -1239,16 +1695,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, if (unlikely(timeout)) hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); @@ -15138,7 +15107,7 @@ index f00dd928f..40539bc43 100644 } /* -@@ -1274,6 +1740,36 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, +@@ -1256,6 +1722,36 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, * unconditionally. We might have to fix that up. */ fixup_rt_mutex_waiters(lock); @@ -15175,7 +15144,7 @@ index f00dd928f..40539bc43 100644 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); -@@ -1334,7 +1830,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) +@@ -1316,7 +1812,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) * Return whether the current task needs to call rt_mutex_postunlock(). 
*/ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, @@ -15185,7 +15154,7 @@ index f00dd928f..40539bc43 100644 { unsigned long flags; -@@ -1388,7 +1885,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, +@@ -1370,7 +1867,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, * * Queue the next waiter for wakeup once we release the wait_lock. */ @@ -15194,7 +15163,7 @@ index f00dd928f..40539bc43 100644 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); return true; /* call rt_mutex_postunlock() */ -@@ -1402,29 +1899,16 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, +@@ -1384,29 +1881,16 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, */ static inline int rt_mutex_fastlock(struct rt_mutex *lock, int state, @@ -15228,22 +15197,20 @@ index f00dd928f..40539bc43 100644 } static inline int -@@ -1440,10 +1924,12 @@ rt_mutex_fasttrylock(struct rt_mutex *lock, +@@ -1422,9 +1906,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lock, /* * Performs the wakeup of the top-waiter and re-enables preemption. */ -void rt_mutex_postunlock(struct wake_q_head *wake_q) +void rt_mutex_postunlock(struct wake_q_head *wake_q, -+ struct wake_q_head *wake_sleeper_q) ++ struct wake_q_head *wake_sleeper_q) { wake_up_q(wake_q); -- -+ wake_up_q_sleeper(wake_sleeper_q); -+ ++ wake_up_q_sleeper(wake_sleeper_q); + /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ preempt_enable(); - } -@@ -1451,23 +1937,46 @@ void rt_mutex_postunlock(struct wake_q_head *wake_q) +@@ -1433,23 +1919,46 @@ void rt_mutex_postunlock(struct wake_q_head *wake_q) static inline void rt_mutex_fastunlock(struct rt_mutex *lock, bool (*slowfn)(struct rt_mutex *lock, @@ -15295,7 +15262,7 @@ index f00dd928f..40539bc43 100644 } #ifdef CONFIG_DEBUG_LOCK_ALLOC -@@ -1508,16 +2017,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); +@@ -1490,16 +1999,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); */ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) { @@ -15313,7 +15280,7 @@ index f00dd928f..40539bc43 100644 } EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); -@@ -1534,36 +2034,17 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) +@@ -1516,36 +2016,17 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) return __rt_mutex_slowtrylock(lock); } @@ -15358,7 +15325,7 @@ index f00dd928f..40539bc43 100644 /** * rt_mutex_trylock - try to lock a rt_mutex -@@ -1580,10 +2061,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) +@@ -1562,10 +2043,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) { int ret; @@ -15370,7 +15337,7 @@ index f00dd928f..40539bc43 100644 if (ret) mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); -@@ -1591,6 +2069,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) +@@ -1573,6 +2051,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) } EXPORT_SYMBOL_GPL(rt_mutex_trylock); @@ -15382,7 +15349,7 @@ index f00dd928f..40539bc43 100644 /** * rt_mutex_unlock - unlock a rt_mutex * -@@ -1599,16 +2082,13 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock); +@@ -1581,16 +2064,13 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock); void __sched rt_mutex_unlock(struct rt_mutex *lock) { mutex_release(&lock->dep_map, _RET_IP_); @@ -15403,7 +15370,7 @@ index f00dd928f..40539bc43 100644 { lockdep_assert_held(&lock->wait_lock); -@@ -1625,23 +2105,35 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, +@@ -1607,23 +2087,35 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, * avoid inversion prior to the wakeup. 
preempt_disable() * therein pairs with rt_mutex_postunlock(). */ @@ -15442,7 +15409,7 @@ index f00dd928f..40539bc43 100644 } /** -@@ -1655,9 +2147,6 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) +@@ -1637,9 +2129,6 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) void rt_mutex_destroy(struct rt_mutex *lock) { WARN_ON(rt_mutex_is_locked(lock)); @@ -15452,7 +15419,7 @@ index f00dd928f..40539bc43 100644 } EXPORT_SYMBOL_GPL(rt_mutex_destroy); -@@ -1680,7 +2169,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name, +@@ -1662,7 +2151,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name, if (name && key) debug_rt_mutex_init(lock, name, key); } @@ -15461,7 +15428,7 @@ index f00dd928f..40539bc43 100644 /** * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a -@@ -1700,6 +2189,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, +@@ -1682,6 +2171,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner) { __rt_mutex_init(lock, NULL, NULL); @@ -15476,7 +15443,7 @@ index f00dd928f..40539bc43 100644 debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); } -@@ -1722,6 +2219,26 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock) +@@ -1704,6 +2201,26 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock) rt_mutex_set_owner(lock, NULL); } @@ -15503,7 +15470,7 @@ index f00dd928f..40539bc43 100644 /** * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task * @lock: the rt_mutex to take -@@ -1752,6 +2269,34 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, +@@ -1734,6 +2251,34 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, if (try_to_take_rt_mutex(lock, task, NULL)) return 1; @@ -15538,7 +15505,7 @@ index f00dd928f..40539bc43 100644 /* We enforce deadlock detection for futexes */ ret = task_blocks_on_rt_mutex(lock, waiter, task, RT_MUTEX_FULL_CHAINWALK); -@@ -1766,7 +2311,8 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, +@@ -1748,7 +2293,8 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, ret = 0; } @@ -15548,7 +15515,7 @@ index f00dd928f..40539bc43 100644 return ret; } -@@ -1851,12 +2397,15 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, +@@ -1833,12 +2379,15 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, raw_spin_lock_irq(&lock->wait_lock); /* sleep on the mutex */ set_current_state(TASK_INTERRUPTIBLE); @@ -15565,7 +15532,7 @@ index f00dd928f..40539bc43 100644 raw_spin_unlock_irq(&lock->wait_lock); return ret; -@@ -1918,3 +2467,97 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, +@@ -1900,3 +2449,97 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, return cleanup; } @@ -15664,7 +15631,7 @@ index f00dd928f..40539bc43 100644 +EXPORT_SYMBOL(__rt_mutex_owner_current); +#endif diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h -index 732f96abf..338ccd291 100644 +index 732f96abf462..338ccd29119a 100644 --- a/kernel/locking/rtmutex.h +++ b/kernel/locking/rtmutex.h @@ -19,15 +19,8 @@ @@ -15684,7 +15651,7 @@ index 732f96abf..338ccd291 100644 enum rtmutex_chainwalk walk) { diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h -index ca6fb4890..248a7d915 100644 +index ca6fb489007b..248a7d91583b 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -15,6 +15,7 @@ @@ -15758,7 +15725,7 @@ index ca6fb4890..248a7d915 100644 # include "rtmutex-debug.h" diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c 
new file mode 100644 -index 000000000..3d2d1f14b +index 000000000000..3d2d1f14b513 --- /dev/null +++ b/kernel/locking/rwlock-rt.c @@ -0,0 +1,334 @@ @@ -16098,7 +16065,7 @@ index 000000000..3d2d1f14b +EXPORT_SYMBOL(__rt_rwlock_init); diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c new file mode 100644 -index 000000000..b61edc4dc +index 000000000000..b61edc4dcb73 --- /dev/null +++ b/kernel/locking/rwsem-rt.c @@ -0,0 +1,317 @@ @@ -16420,7 +16387,7 @@ index 000000000..b61edc4dc + __up_write_unlock(sem, WRITER_BIAS - 1, flags); +} diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c -index cc5cc889b..f7c909ef1 100644 +index cc5cc889b5b7..f7c909ef1261 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -28,6 +28,7 @@ @@ -16460,7 +16427,7 @@ index cc5cc889b..f7c909ef1 100644 } EXPORT_SYMBOL(up_read_non_owner); diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c -index 0ff08380f..45445a2f1 100644 +index 0ff08380f531..45445a2f1799 100644 --- a/kernel/locking/spinlock.c +++ b/kernel/locking/spinlock.c @@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ @@ -16494,7 +16461,7 @@ index 0ff08380f..45445a2f1 100644 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c -index b9d93087e..72e306e0e 100644 +index b9d93087ee66..72e306e0e8a3 100644 --- a/kernel/locking/spinlock_debug.c +++ b/kernel/locking/spinlock_debug.c @@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, @@ -16528,7 +16495,7 @@ index b9d93087e..72e306e0e 100644 + +#endif diff --git a/kernel/notifier.c b/kernel/notifier.c -index 1b019cbca..c20782f07 100644 +index 1b019cbca594..c20782f07643 100644 --- a/kernel/notifier.c +++ b/kernel/notifier.c @@ -142,9 +142,9 @@ int atomic_notifier_chain_register(struct atomic_notifier_head *nh, @@ -16568,7 +16535,7 @@ index 1b019cbca..c20782f07 100644 return ret; } diff --git a/kernel/panic.c b/kernel/panic.c -index d991c3b1b..fa3025e0c 100644 +index d991c3b1b559..fa3025e0c601 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -177,12 +177,28 @@ static void panic_print_sys_info(void) @@ -16663,7 +16630,7 @@ index d991c3b1b..fa3025e0c 100644 /* diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile -index eee3dc9b6..59cb24e25 100644 +index eee3dc9b60a9..59cb24e25f00 100644 --- a/kernel/printk/Makefile +++ b/kernel/printk/Makefile @@ -1,5 +1,4 @@ @@ -16673,49 +16640,35 @@ index eee3dc9b6..59cb24e25 100644 obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o obj-$(CONFIG_PRINTK) += printk_ringbuffer.o diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h -index b1c155328..e69de29bb 100644 +index b1c155328b04..059c3d876e33 100644 --- a/kernel/printk/internal.h +++ b/kernel/printk/internal.h -@@ -1,37 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0-or-later */ --/* -- * internal.h - printk internal definitions -- */ --#include -- --#ifdef CONFIG_PRINTK -- --#define PRINTK_SAFE_CONTEXT_MASK 0x007ffffff --#define PRINTK_NMI_DIRECT_CONTEXT_MASK 0x008000000 --#define PRINTK_NMI_CONTEXT_MASK 0xff0000000 -- --#define PRINTK_NMI_CONTEXT_OFFSET 0x010000000 -- +@@ -12,8 +12,6 @@ + + #define PRINTK_NMI_CONTEXT_OFFSET 0x010000000 + -extern raw_spinlock_t logbuf_lock; - --__printf(4, 0) --int vprintk_store(int facility, int level, -- const struct dev_printk_info *dev_info, -- const char *fmt, va_list args); -- --__printf(1, 0) int vprintk_default(const char *fmt, va_list args); --__printf(1, 0) int 
vprintk_deferred(const char *fmt, va_list args); --__printf(1, 0) int vprintk_func(const char *fmt, va_list args); -- + __printf(4, 0) + int vprintk_store(int facility, int level, + const struct dev_printk_info *dev_info, +@@ -23,7 +21,6 @@ __printf(1, 0) int vprintk_default(const char *fmt, va_list args); + __printf(1, 0) int vprintk_deferred(const char *fmt, va_list args); + __printf(1, 0) int vprintk_func(const char *fmt, va_list args); + -void printk_safe_init(void); --bool printk_percpu_data_ready(void); -- --void defer_console_output(void); -- --#else -- --__printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; } -- + bool printk_percpu_data_ready(void); + + void defer_console_output(void); +@@ -32,6 +29,5 @@ void defer_console_output(void); + + __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; } + -static inline void printk_safe_init(void) { } --static inline bool printk_percpu_data_ready(void) { return false; } --#endif /* CONFIG_PRINTK */ + static inline bool printk_percpu_data_ready(void) { return false; } + #endif /* CONFIG_PRINTK */ diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index ecd28d4fa..5d44477e4 100644 +index ecd28d4fa20e..e95b00f24c75 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -44,6 +44,9 @@ @@ -16736,20 +16689,7 @@ index ecd28d4fa..5d44477e4 100644 int console_printk[4] = { CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */ -@@ -93,12 +95,6 @@ EXPORT_SYMBOL_GPL(console_drivers); - */ - int __read_mostly suppress_printk; - --#ifdef CONFIG_LOCKDEP --static struct lockdep_map console_lock_dep_map = { -- .name = "console_lock" --}; --#endif -- - enum devkmsg_log_bits { - __DEVKMSG_LOG_BIT_ON = 0, - __DEVKMSG_LOG_BIT_OFF, -@@ -227,19 +223,7 @@ static int nr_ext_console_drivers; +@@ -227,19 +229,7 @@ static int nr_ext_console_drivers; static int __down_trylock_console_sem(unsigned long ip) { @@ -16770,7 +16710,7 @@ index ecd28d4fa..5d44477e4 100644 return 1; mutex_acquire(&console_lock_dep_map, 0, 1, ip); return 0; -@@ -248,13 +232,9 @@ static int __down_trylock_console_sem(unsigned long ip) +@@ -248,13 +238,9 @@ static int __down_trylock_console_sem(unsigned long ip) static void __up_console_sem(unsigned long ip) { @@ -16784,7 +16724,7 @@ index ecd28d4fa..5d44477e4 100644 } #define up_console_sem() __up_console_sem(_RET_IP_) -@@ -268,11 +248,6 @@ static void __up_console_sem(unsigned long ip) +@@ -268,11 +254,6 @@ static void __up_console_sem(unsigned long ip) */ static int console_locked, console_suspended; @@ -16796,7 +16736,7 @@ index ecd28d4fa..5d44477e4 100644 /* * Array of consoles built from command line options (console=) */ -@@ -357,61 +332,43 @@ enum log_flags { +@@ -357,61 +338,43 @@ enum log_flags { LOG_CONT = 8, /* text is a fragment of a continuation line */ }; @@ -16880,7 +16820,7 @@ index ecd28d4fa..5d44477e4 100644 #define LOG_LINE_MAX (1024 - PREFIX_MAX) #define LOG_LEVEL(v) ((v) & 0x07) -@@ -449,11 +406,36 @@ static struct printk_ringbuffer *prb = &printk_rb_static; +@@ -449,11 +412,36 @@ static struct printk_ringbuffer *prb = &printk_rb_static; */ static bool __printk_percpu_data_ready __read_mostly; @@ -16918,7 +16858,7 @@ index ecd28d4fa..5d44477e4 100644 /* Return log buffer address */ char *log_buf_addr_get(void) { -@@ -495,52 +477,6 @@ static void truncate_msg(u16 *text_len, u16 *trunc_msg_len) +@@ -495,52 +483,6 @@ static void truncate_msg(u16 *text_len, u16 *trunc_msg_len) *trunc_msg_len = 0; } @@ -16971,7 +16911,7 @@ index ecd28d4fa..5d44477e4 100644 int 
dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT); static int syslog_action_restricted(int type) -@@ -669,7 +605,7 @@ static ssize_t msg_print_ext_body(char *buf, size_t size, +@@ -669,7 +611,7 @@ static ssize_t msg_print_ext_body(char *buf, size_t size, /* /dev/kmsg - userspace message inject/listen interface */ struct devkmsg_user { @@ -16980,7 +16920,7 @@ index ecd28d4fa..5d44477e4 100644 struct ratelimit_state rs; struct mutex lock; char buf[CONSOLE_EXT_LOG_MAX]; -@@ -770,27 +706,22 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, +@@ -770,27 +712,22 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, if (ret) return ret; @@ -17012,7 +16952,7 @@ index ecd28d4fa..5d44477e4 100644 goto out; } -@@ -799,8 +730,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, +@@ -799,8 +736,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, &r->text_buf[0], r->info->text_len, &r->info->dev_info); @@ -17022,7 +16962,7 @@ index ecd28d4fa..5d44477e4 100644 if (len > count) { ret = -EINVAL; -@@ -835,11 +765,10 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) +@@ -835,11 +771,10 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) if (offset) return -ESPIPE; @@ -17035,7 +16975,7 @@ index ecd28d4fa..5d44477e4 100644 break; case SEEK_DATA: /* -@@ -847,16 +776,15 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) +@@ -847,16 +782,15 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) * like issued by 'dmesg -c'. Reading /dev/kmsg itself * changes no global state, and does not clear anything. */ @@ -17054,7 +16994,7 @@ index ecd28d4fa..5d44477e4 100644 return ret; } -@@ -871,15 +799,13 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait) +@@ -871,15 +805,13 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait) poll_wait(file, &log_wait, wait); @@ -17072,7 +17012,7 @@ index ecd28d4fa..5d44477e4 100644 return ret; } -@@ -912,9 +838,7 @@ static int devkmsg_open(struct inode *inode, struct file *file) +@@ -912,9 +844,7 @@ static int devkmsg_open(struct inode *inode, struct file *file) prb_rec_init_rd(&user->record, &user->info, &user->text_buf[0], sizeof(user->text_buf)); @@ -17083,7 +17023,7 @@ index ecd28d4fa..5d44477e4 100644 file->private_data = user; return 0; -@@ -1006,6 +930,9 @@ void log_buf_vmcoreinfo_setup(void) +@@ -1006,6 +936,9 @@ void log_buf_vmcoreinfo_setup(void) VMCOREINFO_SIZE(atomic_long_t); VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter); @@ -17093,7 +17033,7 @@ index ecd28d4fa..5d44477e4 100644 } #endif -@@ -1077,9 +1004,6 @@ static inline void log_buf_add_cpu(void) {} +@@ -1077,9 +1010,6 @@ static inline void log_buf_add_cpu(void) {} static void __init set_percpu_data_ready(void) { @@ -17103,7 +17043,7 @@ index ecd28d4fa..5d44477e4 100644 __printk_percpu_data_ready = true; } -@@ -1119,7 +1043,6 @@ void __init setup_log_buf(int early) +@@ -1119,7 +1049,6 @@ void __init setup_log_buf(int early) struct printk_record r; size_t new_descs_size; size_t new_infos_size; @@ -17111,7 +17051,7 @@ index ecd28d4fa..5d44477e4 100644 char *new_log_buf; unsigned int free; u64 seq; -@@ -1177,8 +1100,6 @@ void __init setup_log_buf(int early) +@@ -1177,8 +1106,6 @@ void __init setup_log_buf(int early) new_descs, ilog2(new_descs_count), new_infos); @@ -17120,7 +17060,7 @@ index ecd28d4fa..5d44477e4 100644 log_buf_len = new_log_buf_len; log_buf = new_log_buf; new_log_buf_len = 0; -@@ -1194,8 +1115,6 
@@ void __init setup_log_buf(int early) +@@ -1194,8 +1121,6 @@ void __init setup_log_buf(int early) */ prb = &printk_rb_dynamic; @@ -17129,7 +17069,7 @@ index ecd28d4fa..5d44477e4 100644 if (seq != prb_next_seq(&printk_rb_static)) { pr_err("dropped %llu messages\n", prb_next_seq(&printk_rb_static) - seq); -@@ -1472,6 +1391,50 @@ static size_t get_record_print_text_size(struct printk_info *info, +@@ -1472,6 +1397,50 @@ static size_t get_record_print_text_size(struct printk_info *info, return ((prefix_len * line_count) + info->text_len + 1); } @@ -17180,7 +17120,7 @@ index ecd28d4fa..5d44477e4 100644 static int syslog_print(char __user *buf, int size) { struct printk_info info; -@@ -1479,19 +1442,19 @@ static int syslog_print(char __user *buf, int size) +@@ -1479,19 +1448,19 @@ static int syslog_print(char __user *buf, int size) char *text; int len = 0; @@ -17204,7 +17144,7 @@ index ecd28d4fa..5d44477e4 100644 break; } if (r.info->seq != syslog_seq) { -@@ -1520,7 +1483,7 @@ static int syslog_print(char __user *buf, int size) +@@ -1520,7 +1489,7 @@ static int syslog_print(char __user *buf, int size) syslog_partial += n; } else n = 0; @@ -17213,7 +17153,7 @@ index ecd28d4fa..5d44477e4 100644 if (!n) break; -@@ -1543,34 +1506,25 @@ static int syslog_print(char __user *buf, int size) +@@ -1543,34 +1512,25 @@ static int syslog_print(char __user *buf, int size) static int syslog_print_all(char __user *buf, int size, bool clear) { struct printk_info info; @@ -17252,7 +17192,7 @@ index ecd28d4fa..5d44477e4 100644 len = 0; prb_for_each_record(seq, prb, seq, &r) { -@@ -1583,20 +1537,20 @@ static int syslog_print_all(char __user *buf, int size, bool clear) +@@ -1583,20 +1543,20 @@ static int syslog_print_all(char __user *buf, int size, bool clear) break; } @@ -17278,7 +17218,7 @@ index ecd28d4fa..5d44477e4 100644 kfree(text); return len; -@@ -1604,9 +1558,21 @@ static int syslog_print_all(char __user *buf, int size, bool clear) +@@ -1604,9 +1564,21 @@ static int syslog_print_all(char __user *buf, int size, bool clear) static void syslog_clear(void) { @@ -17303,7 +17243,7 @@ index ecd28d4fa..5d44477e4 100644 } int do_syslog(int type, char __user *buf, int len, int source) -@@ -1632,8 +1598,9 @@ int do_syslog(int type, char __user *buf, int len, int source) +@@ -1632,8 +1604,9 @@ int do_syslog(int type, char __user *buf, int len, int source) return 0; if (!access_ok(buf, len)) return -EFAULT; @@ -17314,7 +17254,7 @@ index ecd28d4fa..5d44477e4 100644 if (error) return error; error = syslog_print(buf, len); -@@ -1681,10 +1648,10 @@ int do_syslog(int type, char __user *buf, int len, int source) +@@ -1681,10 +1654,10 @@ int do_syslog(int type, char __user *buf, int len, int source) break; /* Number of chars in the log buffer */ case SYSLOG_ACTION_SIZE_UNREAD: @@ -17327,7 +17267,7 @@ index ecd28d4fa..5d44477e4 100644 return 0; } if (info.seq != syslog_seq) { -@@ -1712,7 +1679,7 @@ int do_syslog(int type, char __user *buf, int len, int source) +@@ -1712,7 +1685,7 @@ int do_syslog(int type, char __user *buf, int len, int source) } error -= syslog_partial; } @@ -17336,20 +17276,84 @@ index ecd28d4fa..5d44477e4 100644 break; /* Size of the log buffer */ case SYSLOG_ACTION_SIZE_BUFFER: -@@ -1742,9 +1709,7 @@ static struct lockdep_map console_owner_dep_map = { - }; - #endif +@@ -1731,221 +1704,191 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) + return do_syslog(type, buf, len, SYSLOG_FROM_READER); + } + +-/* +- * Special console_lock variants that help to reduce the risk of soft-lockups. 
+- * They allow to pass console_lock to another printk() call using a busy wait. +- */ ++int printk_delay_msec __read_mostly; + +-#ifdef CONFIG_LOCKDEP +-static struct lockdep_map console_owner_dep_map = { +- .name = "console_owner" +-}; +-#endif ++static inline void printk_delay(int level) ++{ ++ boot_delay_msec(level); ++ ++ if (unlikely(printk_delay_msec)) { ++ int m = printk_delay_msec; -static DEFINE_RAW_SPINLOCK(console_owner_lock); -static struct task_struct *console_owner; -static bool console_waiter; -+int printk_delay_msec __read_mostly; ++ while (m--) { ++ mdelay(1); ++ touch_nmi_watchdog(); ++ } ++ } ++} - #if defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI) - void zap_locks(void) -@@ -1765,187 +1730,171 @@ void zap_locks(void) +-#if defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI) +-void zap_locks(void) ++static bool kernel_sync_mode(void) + { +- if (raw_spin_is_locked(&logbuf_lock)) { +- debug_locks_off(); +- raw_spin_lock_init(&logbuf_lock); +- } ++ return (oops_in_progress || sync_mode); ++} + +- if (raw_spin_is_locked(&console_owner_lock)) { +- raw_spin_lock_init(&console_owner_lock); +- } ++static bool console_can_sync(struct console *con) ++{ ++ if (!(con->flags & CON_ENABLED)) ++ return false; ++ if (con->write_atomic && kernel_sync_mode()) ++ return true; ++ if (con->write_atomic && (con->flags & CON_HANDOVER) && !con->thread) ++ return true; ++ if (con->write && (con->flags & CON_BOOT) && !con->thread) ++ return true; ++ return false; ++} + +- console_owner = NULL; +- console_waiter = false; ++static bool call_sync_console_driver(struct console *con, const char *text, size_t text_len) ++{ ++ if (!(con->flags & CON_ENABLED)) ++ return false; ++ if (con->write_atomic && kernel_sync_mode()) ++ con->write_atomic(con, text, text_len); ++ else if (con->write_atomic && (con->flags & CON_HANDOVER) && !con->thread) ++ con->write_atomic(con, text, text_len); ++ else if (con->write && (con->flags & CON_BOOT) && !con->thread) ++ con->write(con, text, text_len); ++ else ++ return false; + +- sema_init(&console_sem, 1); ++ return true; } - #endif +-#endif -/** - * console_lock_spinning_enable - mark beginning of code where another @@ -17361,23 +17365,22 @@ index ecd28d4fa..5d44477e4 100644 - * ready to hand over the lock at the end of the section. - */ -static void console_lock_spinning_enable(void) -+static inline void printk_delay(int level) ++static bool have_atomic_console(void) { - raw_spin_lock(&console_owner_lock); - console_owner = current; - raw_spin_unlock(&console_owner_lock); -+ boot_delay_msec(level); -+ -+ if (unlikely(printk_delay_msec)) { -+ int m = printk_delay_msec; ++ struct console *con; - /* The waiter may spin on us after setting console_owner */ - spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); -+ while (m--) { -+ mdelay(1); -+ touch_nmi_watchdog(); -+ } ++ for_each_console(con) { ++ if (!(con->flags & CON_ENABLED)) ++ continue; ++ if (con->write_atomic) ++ return true; + } ++ return false; } -/** @@ -17396,55 +17399,33 @@ index ecd28d4fa..5d44477e4 100644 - * Return: 1 if the lock rights were passed, 0 otherwise. 
- */ -static int console_lock_spinning_disable_and_check(void) -+static bool kernel_sync_mode(void) ++static bool print_sync(struct console *con, u64 *seq) { - int waiter; -+ return (oops_in_progress || sync_mode); -+} ++ struct printk_info info; ++ struct printk_record r; ++ size_t text_len; - raw_spin_lock(&console_owner_lock); - waiter = READ_ONCE(console_waiter); - console_owner = NULL; - raw_spin_unlock(&console_owner_lock); -+static bool console_can_sync(struct console *con) -+{ -+ if (!(con->flags & CON_ENABLED)) -+ return false; -+ if (con->write_atomic && kernel_sync_mode()) -+ return true; -+ if (con->write_atomic && (con->flags & CON_HANDOVER) && !con->thread) -+ return true; -+ if (con->write && (con->flags & CON_BOOT) && !con->thread) -+ return true; -+ return false; -+} ++ prb_rec_init_rd(&r, &info, &con->sync_buf[0], sizeof(con->sync_buf)); - if (!waiter) { - spin_release(&console_owner_dep_map, _THIS_IP_); - return 0; - } -+static bool call_sync_console_driver(struct console *con, const char *text, size_t text_len) -+{ -+ if (!(con->flags & CON_ENABLED)) -+ return false; -+ if (con->write_atomic && kernel_sync_mode()) -+ con->write_atomic(con, text, text_len); -+ else if (con->write_atomic && (con->flags & CON_HANDOVER) && !con->thread) -+ con->write_atomic(con, text, text_len); -+ else if (con->write && (con->flags & CON_BOOT) && !con->thread) -+ con->write(con, text, text_len); -+ else ++ if (!prb_read_valid(prb, *seq, &r)) + return false; - /* The waiter is now free to continue */ - WRITE_ONCE(console_waiter, false); -+ return true; -+} ++ text_len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); - spin_release(&console_owner_dep_map, _THIS_IP_); -+static bool have_atomic_console(void) -+{ -+ struct console *con; ++ if (!call_sync_console_driver(con, &con->sync_buf[0], text_len)) ++ return false; - /* - * Hand off console_lock to waiter. 
The waiter will perform @@ -17452,14 +17433,8 @@ index ecd28d4fa..5d44477e4 100644 - */ - mutex_release(&console_lock_dep_map, _THIS_IP_); - return 1; -+ for_each_console(con) { -+ if (!(con->flags & CON_ENABLED)) -+ continue; -+ if (con->write_atomic) -+ return true; -+ } -+ return false; - } +-} ++ *seq = r.info->seq; -/** - * console_trylock_spinning - try to get console_lock by busy waiting @@ -17472,23 +17447,24 @@ index ecd28d4fa..5d44477e4 100644 - * Return: 1 if we got the lock, 0 othrewise - */ -static int console_trylock_spinning(void) -+static bool print_sync(struct console *con, u64 *seq) - { +-{ - struct task_struct *owner = NULL; - bool waiter; - bool spin = false; - unsigned long flags; -+ struct printk_info info; -+ struct printk_record r; -+ size_t text_len; ++ touch_softlockup_watchdog_sync(); ++ clocksource_touch_watchdog(); ++ rcu_cpu_stall_reset(); ++ touch_nmi_watchdog(); - if (console_trylock()) - return 1; -+ prb_rec_init_rd(&r, &info, &con->sync_buf[0], sizeof(con->sync_buf)); ++ if (text_len) ++ printk_delay(r.info->level); - printk_safe_enter_irqsave(flags); -+ if (!prb_read_valid(prb, *seq, &r)) -+ return false; ++ return true; ++} - raw_spin_lock(&console_owner_lock); - owner = READ_ONCE(console_owner); @@ -17498,7 +17474,10 @@ index ecd28d4fa..5d44477e4 100644 - spin = true; - } - raw_spin_unlock(&console_owner_lock); -+ text_len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); ++static void print_sync_until(struct console *con, u64 seq) ++{ ++ unsigned int flags; ++ u64 printk_seq; - /* - * If there is an active printk() writing to the @@ -17512,9 +17491,17 @@ index ecd28d4fa..5d44477e4 100644 - if (!spin) { - printk_safe_exit_irqrestore(flags); - return 0; -- } -+ if (!call_sync_console_driver(con, &con->sync_buf[0], text_len)) -+ return false; ++ console_atomic_lock(&flags); ++ for (;;) { ++ printk_seq = atomic64_read(&con->printk_seq); ++ if (printk_seq >= seq) ++ break; ++ if (!print_sync(con, &printk_seq)) ++ break; ++ atomic64_set(&con->printk_seq, printk_seq + 1); + } ++ console_atomic_unlock(flags); ++} - /* We spin waiting for the owner to release us */ - spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); @@ -17522,7 +17509,13 @@ index ecd28d4fa..5d44477e4 100644 - while (READ_ONCE(console_waiter)) - cpu_relax(); - spin_release(&console_owner_dep_map, _THIS_IP_); -+ *seq = r.info->seq; ++#if defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI) ++void zap_locks(void) ++{ ++// if (raw_spin_is_locked(&logbuf_lock)) { ++// debug_locks_off(); ++// raw_spin_lock_init(&logbuf_lock); ++// } - printk_safe_exit_irqrestore(flags); - /* @@ -17532,17 +17525,17 @@ index ecd28d4fa..5d44477e4 100644 - * complain. 
- */ - mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_); -+ touch_softlockup_watchdog_sync(); -+ clocksource_touch_watchdog(); -+ rcu_cpu_stall_reset(); -+ touch_nmi_watchdog(); ++// if (raw_spin_is_locked(&console_owner_lock)) { ++// raw_spin_lock_init(&console_owner_lock); ++// } - return 1; -+ if (text_len) -+ printk_delay(r.info->level); ++// console_owner = NULL; ++// console_waiter = false; + -+ return true; ++// sema_init(&console_sem, 1); } ++#endif -/* - * Call the console drivers, asking them to write out @@ -17551,47 +17544,36 @@ index ecd28d4fa..5d44477e4 100644 - */ -static void call_console_drivers(const char *ext_text, size_t ext_len, - const char *text, size_t len) -+static void print_sync_until(struct console *con, u64 seq) - { +-{ - static char dropped_text[64]; - size_t dropped_len = 0; - struct console *con; -+ unsigned int flags; -+ u64 printk_seq; -+ -+ console_atomic_lock(&flags); -+ for (;;) { -+ printk_seq = atomic64_read(&con->printk_seq); -+ if (printk_seq >= seq) -+ break; -+ if (!print_sync(con, &printk_seq)) -+ break; -+ atomic64_set(&con->printk_seq, printk_seq + 1); -+ } -+ console_atomic_unlock(flags); -+} - -- trace_console_rcuidle(text, len); +#ifdef CONFIG_PRINTK_NMI +#define NUM_RECURSION_CTX 2 +#else +#define NUM_RECURSION_CTX 1 +#endif -- if (!console_drivers) -- return; +- trace_console_rcuidle(text, len); +struct printk_recursion { + char count[NUM_RECURSION_CTX]; +}; +- if (!console_drivers) +- return; ++static DEFINE_PER_CPU(struct printk_recursion, percpu_printk_recursion); ++static char printk_recursion_count[NUM_RECURSION_CTX]; + - if (console_dropped) { - dropped_len = snprintf(dropped_text, sizeof(dropped_text), - "** %lu printk messages dropped **\n", - console_dropped); - console_dropped = 0; - } -+static DEFINE_PER_CPU(struct printk_recursion, percpu_printk_recursion); -+static char printk_recursion_count[NUM_RECURSION_CTX]; ++static char *printk_recursion_counter(void) ++{ ++ struct printk_recursion *rec; ++ char *count; - for_each_console(con) { - if (exclusive_console && con != exclusive_console) @@ -17610,11 +17592,6 @@ index ecd28d4fa..5d44477e4 100644 - con->write(con, dropped_text, dropped_len); - con->write(con, text, len); - } -+static char *printk_recursion_counter(void) -+{ -+ struct printk_recursion *rec; -+ char *count; -+ + if (!printk_percpu_data_ready()) { + count = &printk_recursion_count[0]; + } else { @@ -17622,16 +17599,16 @@ index ecd28d4fa..5d44477e4 100644 + + count = &rec->count[0]; } -+ +-} + +-int printk_delay_msec __read_mostly; +#ifdef CONFIG_PRINTK_NMI + if (in_nmi()) + count++; +#endif + + return count; - } - --int printk_delay_msec __read_mostly; ++} -static inline void printk_delay(void) +static bool printk_enter_irqsave(unsigned long *flags) @@ -17666,7 +17643,7 @@ index ecd28d4fa..5d44477e4 100644 } static inline u32 printk_caller_id(void) -@@ -1954,144 +1903,248 @@ static inline u32 printk_caller_id(void) +@@ -1954,144 +1897,248 @@ static inline u32 printk_caller_id(void) 0x80000000 + raw_smp_processor_id(); } @@ -17800,16 +17777,11 @@ index ecd28d4fa..5d44477e4 100644 } -asmlinkage int vprintk_emit(int facility, int level, -- const struct dev_printk_info *dev_info, -- const char *fmt, va_list args) +__printf(4, 0) +static int vprintk_store(int facility, int level, + const struct dev_printk_info *dev_info, + const char *fmt, va_list args) - { -- int printed_len; -- bool in_sched = false; -- unsigned long flags; ++{ + const u32 caller_id = printk_caller_id(); + struct prb_reserved_entry e; + enum 
log_flags lflags = 0; @@ -17824,9 +17796,7 @@ index ecd28d4fa..5d44477e4 100644 + int ret = 0; + u64 ts_nsec; + u64 seq; - -- /* Suppress unimportant messages after panic happens */ -- if (unlikely(suppress_printk)) ++ + /* + * Since the duration of printk() can vary depending on the message + * and state of the ringbuffer, grab the timestamp now so that it is @@ -17836,11 +17806,8 @@ index ecd28d4fa..5d44477e4 100644 + ts_nsec = local_clock(); + + if (!printk_enter_irqsave(&irqflags)) - return 0; - -- if (level == LOGLEVEL_SCHED) { -- level = LOGLEVEL_DEFAULT; -- in_sched = true; ++ return 0; ++ + /* + * The sprintf needs to come first since the syslog prefix might be + * passed in as a parameter. An extra byte must be reserved so that @@ -17883,10 +17850,8 @@ index ecd28d4fa..5d44477e4 100644 + ret = text_len; + goto out; + } - } - -- boot_delay_msec(level); -- printk_delay(); ++ } ++ + /* + * Explicitly initialize the record before every prb_reserve() call. + * prb_reserve_in_last() and prb_reserve() purposely invalidate the @@ -17896,32 +17861,12 @@ index ecd28d4fa..5d44477e4 100644 + if (!prb_reserve(&e, prb, &r)) { + /* truncate the message if it is too long for empty buffer */ + truncate_msg(&reserve_size, &trunc_msg_len); - -- /* This stops the holder of console_sem just where we want him */ -- logbuf_lock_irqsave(flags); -- printed_len = vprintk_store(facility, level, dev_info, fmt, args); -- logbuf_unlock_irqrestore(flags); ++ + prb_rec_init_wr(&r, reserve_size + trunc_msg_len); + if (!prb_reserve(&e, prb, &r)) + goto out; + } - -- /* If called from the scheduler, we can not call up(). */ -- if (!in_sched) { -- /* -- * Disable preemption to avoid being preempted while holding -- * console_sem which would prevent anyone from printing to -- * console -- */ -- preempt_disable(); -- /* -- * Try to acquire and then immediately release the console -- * semaphore. The release will print out buffers and wake up -- * /dev/kmsg and syslog() users. -- */ -- if (console_trylock_spinning()) -- console_unlock(); -- preempt_enable(); ++ + seq = r.info->seq; + + /* fill message */ @@ -17943,8 +17888,8 @@ index ecd28d4fa..5d44477e4 100644 + } else { + prb_final_commit(&e); + final_commit = true; - } - ++ } ++ + ret = text_len + trunc_msg_len; +out: + /* only the kernel may perform synchronous printing */ @@ -17962,20 +17907,49 @@ index ecd28d4fa..5d44477e4 100644 +} + +asmlinkage int vprintk_emit(int facility, int level, -+ const struct dev_printk_info *dev_info, -+ const char *fmt, va_list args) -+{ -+ int printed_len; -+ -+ /* Suppress unimportant messages after panic happens */ -+ if (unlikely(suppress_printk)) -+ return 0; -+ + const struct dev_printk_info *dev_info, + const char *fmt, va_list args) + { + int printed_len; +- bool in_sched = false; +- unsigned long flags; + + /* Suppress unimportant messages after panic happens */ + if (unlikely(suppress_printk)) + return 0; + +- if (level == LOGLEVEL_SCHED) { + if (level == LOGLEVEL_SCHED) -+ level = LOGLEVEL_DEFAULT; -+ -+ printed_len = vprintk_store(facility, level, dev_info, fmt, args); -+ + level = LOGLEVEL_DEFAULT; +- in_sched = true; +- } +- +- boot_delay_msec(level); +- printk_delay(); + +- /* This stops the holder of console_sem just where we want him */ +- logbuf_lock_irqsave(flags); + printed_len = vprintk_store(facility, level, dev_info, fmt, args); +- logbuf_unlock_irqrestore(flags); +- +- /* If called from the scheduler, we can not call up(). 
*/ +- if (!in_sched) { +- /* +- * Disable preemption to avoid being preempted while holding +- * console_sem which would prevent anyone from printing to +- * console +- */ +- preempt_disable(); +- /* +- * Try to acquire and then immediately release the console +- * semaphore. The release will print out buffers and wake up +- * /dev/kmsg and syslog() users. +- */ +- if (console_trylock_spinning()) +- console_unlock(); +- preempt_enable(); +- } + wake_up_klogd(); return printed_len; } @@ -18001,18 +17975,18 @@ index ecd28d4fa..5d44477e4 100644 + return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args); +#endif + return vprintk_default(fmt, args); -+} + } +-EXPORT_SYMBOL_GPL(vprintk_default); + +asmlinkage int vprintk(const char *fmt, va_list args) +{ + return vprintk_func(fmt, args); - } --EXPORT_SYMBOL_GPL(vprintk_default); ++} +EXPORT_SYMBOL(vprintk); /** * printk - print a kernel message -@@ -2127,38 +2180,158 @@ asmlinkage __visible int printk(const char *fmt, ...) +@@ -2127,38 +2174,158 @@ asmlinkage __visible int printk(const char *fmt, ...) } EXPORT_SYMBOL(printk); @@ -18033,7 +18007,10 @@ index ecd28d4fa..5d44477e4 100644 + size_t len; + int error; + u64 seq; -+ + +-#define LOG_LINE_MAX 0 +-#define PREFIX_MAX 0 +-#define printk_time false + if (con->flags & CON_EXTENDED) { + ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL); + if (!ext_text) @@ -18044,22 +18021,19 @@ index ecd28d4fa..5d44477e4 100644 + if (!text || !dropped_text) + goto out; --#define LOG_LINE_MAX 0 --#define PREFIX_MAX 0 --#define printk_time false +-#define prb_read_valid(rb, seq, r) false +-#define prb_first_valid_seq(rb) 0 + if (con->flags & CON_EXTENDED) + write_text = ext_text; + else + write_text = text; --#define prb_read_valid(rb, seq, r) false --#define prb_first_valid_seq(rb) 0 -+ seq = atomic64_read(&con->printk_seq); - -static u64 syslog_seq; -static u64 console_seq; -static u64 exclusive_console_stop_seq; -static unsigned long console_dropped; ++ seq = atomic64_read(&con->printk_seq); ++ + prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX); + + for (;;) { @@ -18068,7 +18042,9 @@ index ecd28d4fa..5d44477e4 100644 + + if (kthread_should_stop()) + break; -+ + +-static size_t record_print_text(const struct printk_record *r, +- bool syslog, bool time) + if (error) + continue; + @@ -18100,9 +18076,7 @@ index ecd28d4fa..5d44477e4 100644 + } + + printk_seq = atomic64_read(&con->printk_seq); - --static size_t record_print_text(const struct printk_record *r, -- bool syslog, bool time) ++ + console_lock(); + console_may_schedule = 0; + @@ -18195,7 +18169,7 @@ index ecd28d4fa..5d44477e4 100644 #endif /* CONFIG_PRINTK */ -@@ -2403,34 +2576,6 @@ int is_console_locked(void) +@@ -2403,34 +2570,6 @@ int is_console_locked(void) } EXPORT_SYMBOL(is_console_locked); @@ -18230,7 +18204,7 @@ index ecd28d4fa..5d44477e4 100644 /** * console_unlock - unlock the console system * -@@ -2447,142 +2592,14 @@ static inline int can_use_console(void) +@@ -2447,142 +2586,14 @@ static inline int can_use_console(void) */ void console_unlock(void) { @@ -18373,7 +18347,7 @@ index ecd28d4fa..5d44477e4 100644 } EXPORT_SYMBOL(console_unlock); -@@ -2632,23 +2649,20 @@ void console_unblank(void) +@@ -2632,23 +2643,20 @@ void console_unblank(void) */ void console_flush_on_panic(enum con_flush_mode mode) { @@ -18385,29 +18359,30 @@ index ecd28d4fa..5d44477e4 100644 - * ensure may_schedule is cleared. 
- */ - console_trylock(); -+ struct console *c; -+ u64 seq; ++ struct console *c; ++ u64 seq; + -+ if (!console_trylock()) -+ return; ++ if (!console_trylock()) ++ return; + console_may_schedule = 0; - if (mode == CONSOLE_REPLAY_ALL) { +- if (mode == CONSOLE_REPLAY_ALL) { - unsigned long flags; -- ++ if (mode == CONSOLE_REPLAY_ALL) { ++ seq = prb_first_valid_seq(prb); ++ for_each_console(c) ++ atomic64_set(&c->printk_seq, seq); ++ } + - logbuf_lock_irqsave(flags); - console_seq = prb_first_valid_seq(prb); - logbuf_unlock_irqrestore(flags); -+ seq = prb_first_valid_seq(prb); -+ for_each_console(c) -+ atomic64_set(&c->printk_seq, seq); - } -+ +- } console_unlock(); } EXPORT_SYMBOL(console_flush_on_panic); -@@ -2784,7 +2798,6 @@ static int try_enable_new_console(struct console *newcon, bool user_specified) +@@ -2784,7 +2792,6 @@ static int try_enable_new_console(struct console *newcon, bool user_specified) */ void register_console(struct console *newcon) { @@ -18415,7 +18390,7 @@ index ecd28d4fa..5d44477e4 100644 struct console *bcon = NULL; int err; -@@ -2808,6 +2821,8 @@ void register_console(struct console *newcon) +@@ -2808,6 +2815,8 @@ void register_console(struct console *newcon) } } @@ -18424,7 +18399,7 @@ index ecd28d4fa..5d44477e4 100644 if (console_drivers && console_drivers->flags & CON_BOOT) bcon = console_drivers; -@@ -2849,8 +2864,10 @@ void register_console(struct console *newcon) +@@ -2849,8 +2858,10 @@ void register_console(struct console *newcon) * the real console are the same physical device, it's annoying to * see the beginning boot messages twice */ @@ -18436,7 +18411,7 @@ index ecd28d4fa..5d44477e4 100644 /* * Put this console in the list - keep the -@@ -2872,26 +2889,12 @@ void register_console(struct console *newcon) +@@ -2872,26 +2883,12 @@ void register_console(struct console *newcon) if (newcon->flags & CON_EXTENDED) nr_ext_console_drivers++; @@ -18469,7 +18444,7 @@ index ecd28d4fa..5d44477e4 100644 console_unlock(); console_sysfs_notify(); -@@ -2965,6 +2968,9 @@ int unregister_console(struct console *console) +@@ -2965,6 +2962,9 @@ int unregister_console(struct console *console) console_unlock(); console_sysfs_notify(); @@ -18479,7 +18454,7 @@ index ecd28d4fa..5d44477e4 100644 if (console->exit) res = console->exit(console); -@@ -3047,6 +3053,15 @@ static int __init printk_late_init(void) +@@ -3047,6 +3047,15 @@ static int __init printk_late_init(void) unregister_console(con); } } @@ -18495,7 +18470,7 @@ index ecd28d4fa..5d44477e4 100644 ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL, console_cpu_notify); WARN_ON(ret < 0); -@@ -3062,7 +3077,6 @@ late_initcall(printk_late_init); +@@ -3062,7 +3071,6 @@ late_initcall(printk_late_init); * Delayed printk version, for scheduler-internal messages: */ #define PRINTK_PENDING_WAKEUP 0x01 @@ -18503,7 +18478,7 @@ index ecd28d4fa..5d44477e4 100644 static DEFINE_PER_CPU(int, printk_pending); -@@ -3070,14 +3084,8 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work) +@@ -3070,14 +3078,8 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work) { int pending = __this_cpu_xchg(printk_pending, 0); @@ -18519,7 +18494,7 @@ index ecd28d4fa..5d44477e4 100644 } static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = -@@ -3096,25 +3104,10 @@ void wake_up_klogd(void) +@@ -3096,25 +3098,10 @@ void wake_up_klogd(void) preempt_enable(); } @@ -18548,7 +18523,7 @@ index ecd28d4fa..5d44477e4 100644 } int printk_deferred(const char *fmt, ...) 
-@@ -3253,8 +3246,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str); +@@ -3253,8 +3240,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str); */ void kmsg_dump(enum kmsg_dump_reason reason) { @@ -18576,7 +18551,7 @@ index ecd28d4fa..5d44477e4 100644 rcu_read_lock(); list_for_each_entry_rcu(dumper, &dump_list, list) { -@@ -3272,25 +3283,18 @@ void kmsg_dump(enum kmsg_dump_reason reason) +@@ -3272,25 +3277,18 @@ void kmsg_dump(enum kmsg_dump_reason reason) continue; /* initialize iterator with data about the stored records */ @@ -18607,7 +18582,7 @@ index ecd28d4fa..5d44477e4 100644 * @syslog: include the "<4>" prefixes * @line: buffer to copy the line to * @size: maximum size of the buffer -@@ -3304,11 +3308,9 @@ void kmsg_dump(enum kmsg_dump_reason reason) +@@ -3304,11 +3302,9 @@ void kmsg_dump(enum kmsg_dump_reason reason) * * A return value of FALSE indicates that there are no more records to * read. @@ -18621,7 +18596,7 @@ index ecd28d4fa..5d44477e4 100644 { struct printk_info info; unsigned int line_count; -@@ -3318,16 +3320,16 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, +@@ -3318,16 +3314,16 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, prb_rec_init_rd(&r, &info, line, size); @@ -18641,7 +18616,7 @@ index ecd28d4fa..5d44477e4 100644 &info, &line_count)) { goto out; } -@@ -3336,48 +3338,18 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, +@@ -3336,48 +3332,18 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, } @@ -18692,7 +18667,7 @@ index ecd28d4fa..5d44477e4 100644 * @syslog: include the "<4>" prefixes * @buf: buffer to copy the line to * @size: maximum size of the buffer -@@ -3394,116 +3366,256 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line); +@@ -3394,116 +3360,256 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line); * A return value of FALSE indicates that there are no more records to * read. */ @@ -19016,27 +18991,13 @@ index ecd28d4fa..5d44477e4 100644 +} +EXPORT_SYMBOL(pr_flush); diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c -index b774685cc..e69de29bb 100644 +index b774685ccf80..218e42566550 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c -@@ -1,425 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0-or-later --/* -- * printk_safe.c - Safe printk for printk-deadlock-prone contexts -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include -- --#include "internal.h" -- +@@ -15,295 +15,9 @@ + + #include "internal.h" + -/* - * printk() could not take logbuf_lock in NMI context. 
Instead, - * it uses an alternative implementation that temporary stores @@ -19065,11 +19026,11 @@ index b774685cc..e69de29bb 100644 -}; - -static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq); --static DEFINE_PER_CPU(int, printk_context); -- + static DEFINE_PER_CPU(int, printk_context); + -static DEFINE_RAW_SPINLOCK(safe_read_lock); - --#ifdef CONFIG_PRINTK_NMI + #ifdef CONFIG_PRINTK_NMI -static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq); -#endif - @@ -19326,37 +19287,25 @@ index b774685cc..e69de29bb 100644 - return printk_safe_log_store(s, fmt, args); -} - --void noinstr printk_nmi_enter(void) --{ -- this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET); --} -- --void noinstr printk_nmi_exit(void) --{ -- this_cpu_sub(printk_context, PRINTK_NMI_CONTEXT_OFFSET); --} -- --/* -- * Marks a code that might produce many messages in NMI context -- * and the risk of losing them is more critical than eventual -- * reordering. + void noinstr printk_nmi_enter(void) + { + this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET); +@@ -318,11 +32,6 @@ void noinstr printk_nmi_exit(void) + * Marks a code that might produce many messages in NMI context + * and the risk of losing them is more critical than eventual + * reordering. - * - * It has effect only when called in NMI context. Then printk() - * will try to store the messages into the main logbuf directly - * and use the per-CPU buffers only as a fallback when the lock - * is not available. -- */ --void printk_nmi_direct_enter(void) --{ -- if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK) -- this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK); --} -- --void printk_nmi_direct_exit(void) --{ -- this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK); --} -- + */ + void printk_nmi_direct_enter(void) + { +@@ -335,27 +44,8 @@ void printk_nmi_direct_exit(void) + this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK); + } + -#else - -static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args) @@ -19364,8 +19313,8 @@ index b774685cc..e69de29bb 100644 - return 0; -} - --#endif /* CONFIG_PRINTK_NMI */ -- + #endif /* CONFIG_PRINTK_NMI */ + -/* - * Lock-less printk(), to avoid deadlocks should the printk() recurse - * into itself. It uses a per-CPU buffer to store the message, just like @@ -19378,42 +19327,33 @@ index b774685cc..e69de29bb 100644 - return printk_safe_log_store(s, fmt, args); -} - --/* Can be preempted by NMI. */ --void printk_safe_enter(void) --{ -- this_cpu_inc(printk_context); --} --EXPORT_SYMBOL_GPL(printk_safe_enter); -- --/* Can be preempted by NMI. */ --void printk_safe_exit(void) --{ -- this_cpu_dec(printk_context); --} --EXPORT_SYMBOL_GPL(printk_safe_exit); -- --__printf(1, 0) int vprintk_func(const char *fmt, va_list args) --{ --#ifdef CONFIG_KGDB_KDB -- /* Allow to pass printk() to kdb but avoid a recursion. */ -- if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0)) -- return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args); --#endif -- -- /* + /* Can be preempted by NMI. */ + void printk_safe_enter(void) + { +@@ -379,47 +69,22 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) + #endif + + /* - * Try to use the main logbuf even in NMI. But avoid calling console -- * drivers that might have their own locks. -- */ ++ * Use the main logbuf even in NMI. But avoid calling console + * drivers that might have their own locks. 
+ */ - if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) && - raw_spin_trylock(&logbuf_lock)) { -- int len; -- -- len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args); ++ if (this_cpu_read(printk_context) & ++ (PRINTK_NMI_DIRECT_CONTEXT_MASK | ++ PRINTK_NMI_CONTEXT_MASK | ++ PRINTK_SAFE_CONTEXT_MASK)) { + int len; + ++ printk_safe_enter_irqsave(flags); + len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args); - raw_spin_unlock(&logbuf_lock); -- defer_console_output(); -- return len; -- } -- ++ printk_safe_exit_irqrestore(flags); + defer_console_output(); + return len; + } + - /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */ - if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK) - return vprintk_nmi(fmt, args); @@ -19422,9 +19362,9 @@ index b774685cc..e69de29bb 100644 - if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) - return vprintk_safe(fmt, args); - -- /* No obstacles. */ -- return vprintk_default(fmt, args); --} + /* No obstacles. */ + return vprintk_default(fmt, args); + } - -void __init printk_safe_init(void) -{ @@ -19446,7 +19386,7 @@ index b774685cc..e69de29bb 100644 - printk_safe_flush(); -} diff --git a/kernel/ptrace.c b/kernel/ptrace.c -index 6d82fba43..8a65ec16c 100644 +index 6d82fba43c97..8a65ec16c512 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -197,7 +197,14 @@ static bool ptrace_freeze_traced(struct task_struct *task) @@ -19503,7 +19443,7 @@ index 6d82fba43..8a65ec16c 100644 } diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig -index b1d7aef10..5e11e30f4 100644 +index b1d7aef10e6a..5e11e30f45eb 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -190,8 +190,8 @@ config RCU_FAST_NO_HZ @@ -19518,7 +19458,7 @@ index b1d7aef10..5e11e30f4 100644 This option boosts the priority of preempted RCU readers that block the current preemptible RCU grace period for too long. diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c -index 4e6a44683..8937a7a2b 100644 +index 4e6a44683248..8937a7a2b33f 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -100,8 +100,10 @@ static struct rcu_state rcu_state = { @@ -19534,7 +19474,7 @@ index 4e6a44683..8937a7a2b 100644 static bool rcu_fanout_exact; module_param(rcu_fanout_exact, bool, 0444); diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c -index 849f0aa99..dd94a602a 100644 +index 849f0aa99333..dd94a602a6d2 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -56,8 +56,10 @@ @@ -19550,7 +19490,7 @@ index 849f0aa99..dd94a602a 100644 #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index c936c0422..7bb89c886 100644 +index 62d14fba4ca6..37a6eaceaad5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -65,7 +65,11 @@ const_debug unsigned int sysctl_sched_features = @@ -19692,7 +19632,7 @@ index c936c0422..7bb89c886 100644 void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); -@@ -2062,6 +2131,82 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) +@@ -2063,6 +2132,82 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) #ifdef CONFIG_SMP @@ -19775,7 +19715,7 @@ index c936c0422..7bb89c886 100644 /* * Per-CPU kthreads are allowed to run on !active && online CPUs, see * __set_cpus_allowed_ptr() and select_fallback_rq(). 
-@@ -2071,7 +2216,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) +@@ -2072,7 +2217,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) if (!cpumask_test_cpu(cpu, p->cpus_ptr)) return false; @@ -19784,7 +19724,7 @@ index c936c0422..7bb89c886 100644 return cpu_online(cpu); return cpu_active(cpu); -@@ -2116,8 +2261,21 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, +@@ -2117,8 +2262,21 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, } struct migration_arg { @@ -19808,7 +19748,7 @@ index c936c0422..7bb89c886 100644 }; /* -@@ -2150,15 +2308,17 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, +@@ -2151,15 +2309,17 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, static int migration_cpu_stop(void *data) { struct migration_arg *arg = data; @@ -19827,7 +19767,7 @@ index c936c0422..7bb89c886 100644 /* * We need to explicitly wake pending tasks before running * __migrate_task() such that we will not miss enforcing cpus_ptr -@@ -2168,21 +2328,121 @@ static int migration_cpu_stop(void *data) +@@ -2169,21 +2329,121 @@ static int migration_cpu_stop(void *data) raw_spin_lock(&p->pi_lock); rq_lock(rq, &rf); @@ -19952,7 +19892,7 @@ index c936c0422..7bb89c886 100644 return 0; } -@@ -2190,18 +2450,39 @@ static int migration_cpu_stop(void *data) +@@ -2191,18 +2451,39 @@ static int migration_cpu_stop(void *data) * sched_class::set_cpus_allowed must do the below, but is not required to * actually call this function. */ @@ -19995,7 +19935,7 @@ index c936c0422..7bb89c886 100644 queued = task_on_rq_queued(p); running = task_current(rq, p); -@@ -2217,7 +2498,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +@@ -2218,7 +2499,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) if (running) put_prev_task(rq, p); @@ -20004,7 +19944,7 @@ index c936c0422..7bb89c886 100644 if (queued) enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); -@@ -2225,6 +2506,222 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +@@ -2226,6 +2507,222 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) set_next_task(rq, p); } @@ -20227,7 +20167,7 @@ index c936c0422..7bb89c886 100644 /* * Change a given task's CPU affinity. Migrate the thread to a * proper CPU and schedule it away if the CPU it's executing on -@@ -2235,7 +2732,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +@@ -2236,7 +2733,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) * call is not atomic; no spinlocks may be held. */ static int __set_cpus_allowed_ptr(struct task_struct *p, @@ -20237,7 +20177,7 @@ index c936c0422..7bb89c886 100644 { const struct cpumask *cpu_valid_mask = cpu_active_mask; unsigned int dest_cpu; -@@ -2246,9 +2744,14 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2247,9 +2745,14 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, rq = task_rq_lock(p, &rf); update_rq_clock(rq); @@ -20254,7 +20194,7 @@ index c936c0422..7bb89c886 100644 */ cpu_valid_mask = cpu_online_mask; } -@@ -2257,13 +2760,22 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2258,13 +2761,22 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, * Must re-check here, to close a race against __kthread_bind(), * sched_setaffinity() is not guaranteed to observe the flag. 
*/ @@ -20280,7 +20220,7 @@ index c936c0422..7bb89c886 100644 /* * Picking a ~random cpu helps in cases where we are changing affinity -@@ -2276,7 +2788,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2277,7 +2789,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, goto out; } @@ -20289,7 +20229,7 @@ index c936c0422..7bb89c886 100644 if (p->flags & PF_KTHREAD) { /* -@@ -2288,23 +2800,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2289,23 +2801,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, p->nr_cpus_allowed != 1); } @@ -20314,7 +20254,7 @@ index c936c0422..7bb89c886 100644 out: task_rq_unlock(rq, p, &rf); -@@ -2313,7 +2810,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2314,7 +2811,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) { @@ -20323,7 +20263,7 @@ index c936c0422..7bb89c886 100644 } EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); -@@ -2354,6 +2851,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) +@@ -2355,6 +2852,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) * Clearly, migrating tasks to offline CPUs is a fairly daft thing. */ WARN_ON_ONCE(!cpu_online(new_cpu)); @@ -20332,7 +20272,7 @@ index c936c0422..7bb89c886 100644 #endif trace_sched_migrate_task(p, new_cpu); -@@ -2486,6 +2985,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, +@@ -2487,6 +2986,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, } #endif /* CONFIG_NUMA_BALANCING */ @@ -20351,7 +20291,7 @@ index c936c0422..7bb89c886 100644 /* * wait_task_inactive - wait for a thread to unschedule. * -@@ -2530,7 +3041,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) +@@ -2531,7 +3042,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * is actually now running somewhere else! 
*/ while (task_running(rq, p)) { @@ -20360,7 +20300,7 @@ index c936c0422..7bb89c886 100644 return 0; cpu_relax(); } -@@ -2545,7 +3056,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) +@@ -2546,7 +3057,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) running = task_running(rq, p); queued = task_on_rq_queued(p); ncsw = 0; @@ -20370,7 +20310,7 @@ index c936c0422..7bb89c886 100644 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ task_rq_unlock(rq, p, &rf); -@@ -2579,7 +3091,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) +@@ -2580,7 +3092,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) ktime_t to = NSEC_PER_SEC / HZ; set_current_state(TASK_UNINTERRUPTIBLE); @@ -20379,7 +20319,7 @@ index c936c0422..7bb89c886 100644 continue; } -@@ -2684,6 +3196,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p) +@@ -2685,6 +3197,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p) } fallthrough; case possible: @@ -20392,7 +20332,7 @@ index c936c0422..7bb89c886 100644 do_set_cpus_allowed(p, cpu_possible_mask); state = fail; break; -@@ -2718,7 +3236,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) +@@ -2719,7 +3237,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) { lockdep_assert_held(&p->pi_lock); @@ -20401,7 +20341,7 @@ index c936c0422..7bb89c886 100644 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); else cpu = cpumask_any(p->cpus_ptr); -@@ -2741,6 +3259,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) +@@ -2742,6 +3260,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) void sched_set_stop_task(int cpu, struct task_struct *stop) { @@ -20409,7 +20349,7 @@ index c936c0422..7bb89c886 100644 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; struct task_struct *old_stop = cpu_rq(cpu)->stop; -@@ -2756,6 +3275,20 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) +@@ -2757,6 +3276,20 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); stop->sched_class = &stop_sched_class; @@ -20430,7 +20370,7 @@ index c936c0422..7bb89c886 100644 } cpu_rq(cpu)->stop = stop; -@@ -2769,15 +3302,23 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) +@@ -2770,15 +3303,23 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) } } @@ -20457,7 +20397,7 @@ index c936c0422..7bb89c886 100644 static void ttwu_stat(struct task_struct *p, int cpu, int wake_flags) -@@ -3198,7 +3739,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) +@@ -3203,7 +3744,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) int cpu, success = 0; preempt_disable(); @@ -20466,7 +20406,7 @@ index c936c0422..7bb89c886 100644 /* * We're waking current, this means 'p->on_rq' and 'task_cpu(p) * == smp_processor_id()'. 
Together this means we can special -@@ -3228,8 +3769,26 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) +@@ -3233,8 +3774,26 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) */ raw_spin_lock_irqsave(&p->pi_lock, flags); smp_mb__after_spinlock(); @@ -20494,7 +20434,7 @@ index c936c0422..7bb89c886 100644 trace_sched_waking(p); -@@ -3418,6 +3977,18 @@ int wake_up_process(struct task_struct *p) +@@ -3423,6 +3982,18 @@ int wake_up_process(struct task_struct *p) } EXPORT_SYMBOL(wake_up_process); @@ -20513,15 +20453,15 @@ index c936c0422..7bb89c886 100644 int wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); -@@ -3471,6 +4042,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) +@@ -3476,6 +4047,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) init_numa_balancing(clone_flags, p); #ifdef CONFIG_SMP p->wake_entry.u_flags = CSD_TYPE_TTWU; + p->migration_pending = NULL; #endif - } - -@@ -3676,6 +4248,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) + #ifdef CONFIG_BPF_SCHED + p->tag = 0; +@@ -3684,6 +4256,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) p->on_cpu = 0; #endif init_task_preempt_count(p); @@ -20531,16 +20471,13 @@ index c936c0422..7bb89c886 100644 #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); RB_CLEAR_NODE(&p->pushable_dl_tasks); -@@ -3846,59 +4421,143 @@ __fire_sched_out_preempt_notifiers(struct task_struct *curr, +@@ -3854,59 +4429,143 @@ __fire_sched_out_preempt_notifiers(struct task_struct *curr, notifier->ops->sched_out(notifier, next); } -static __always_inline void -fire_sched_out_preempt_notifiers(struct task_struct *curr, - struct task_struct *next) --{ -- if (static_branch_unlikely(&preempt_notifier_key)) -- __fire_sched_out_preempt_notifiers(curr, next); +static __always_inline void +fire_sched_out_preempt_notifiers(struct task_struct *curr, + struct task_struct *next) @@ -20645,7 +20582,9 @@ index c936c0422..7bb89c886 100644 +static void balance_push(struct rq *rq); + +static inline void balance_switch(struct rq *rq) -+{ + { +- if (static_branch_unlikely(&preempt_notifier_key)) +- __fire_sched_out_preempt_notifiers(curr, next); + if (likely(!rq->balance_flags)) + return; + @@ -20713,15 +20652,7 @@ index c936c0422..7bb89c886 100644 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) { -@@ -3924,6 +4583,7 @@ static inline void finish_lock_switch(struct rq *rq) - * prev into current: - */ - spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); -+ balance_switch(rq); - raw_spin_rq_unlock_irq(rq); - } - -@@ -3939,6 +4599,22 @@ static inline void finish_lock_switch(struct rq *rq) +@@ -3947,6 +4606,22 @@ static inline void finish_lock_switch(struct rq *rq) # define finish_arch_post_lock_switch() do { } while (0) #endif @@ -20744,7 +20675,7 @@ index c936c0422..7bb89c886 100644 /** * prepare_task_switch - prepare to switch tasks * @rq: the runqueue preparing to switch -@@ -3961,6 +4637,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, +@@ -3969,6 +4644,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, perf_event_task_sched_out(prev, next); rseq_preempt(prev); fire_sched_out_preempt_notifiers(prev, next); @@ -20752,7 +20683,7 @@ index c936c0422..7bb89c886 100644 prepare_task(next); prepare_arch_switch(next); } -@@ -4028,6 +4705,7 @@ static struct rq *finish_task_switch(struct 
task_struct *prev) +@@ -4036,6 +4712,7 @@ static struct rq *finish_task_switch(struct task_struct *prev) finish_lock_switch(rq); finish_arch_post_lock_switch(); kcov_finish_switch(current); @@ -20760,7 +20691,7 @@ index c936c0422..7bb89c886 100644 fire_sched_in_preempt_notifiers(current); /* -@@ -4044,63 +4722,17 @@ static struct rq *finish_task_switch(struct task_struct *prev) +@@ -4052,63 +4729,19 @@ static struct rq *finish_task_switch(struct task_struct *prev) */ if (mm) { membarrier_mm_sync_core_before_usermode(mm); @@ -20770,7 +20701,7 @@ index c936c0422..7bb89c886 100644 if (unlikely(prev_state == TASK_DEAD)) { if (prev->sched_class->task_dead) prev->sched_class->task_dead(prev); -- + - /* - * Remove function-return probe instances associated with this - * task and put them back on the free list. @@ -20820,12 +20751,12 @@ index c936c0422..7bb89c886 100644 -static inline void balance_callback(struct rq *rq) -{ -} -- + -#endif /** * schedule_tail - first thing a freshly forked thread must call. -@@ -4121,7 +4753,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) +@@ -4129,7 +4762,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) */ rq = finish_task_switch(prev); @@ -20833,7 +20764,7 @@ index c936c0422..7bb89c886 100644 preempt_enable(); if (current->set_child_tid) -@@ -5278,7 +5909,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) +@@ -5286,7 +5918,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) * * WARNING: must be called with preemption disabled! */ @@ -20842,7 +20773,7 @@ index c936c0422..7bb89c886 100644 { struct task_struct *prev, *next; unsigned long *switch_count; -@@ -5331,7 +5962,7 @@ static void __sched notrace __schedule(bool preempt) +@@ -5339,7 +5971,7 @@ static void __sched notrace __schedule(bool preempt) * - ptrace_{,un}freeze_traced() can change ->state underneath us. 
*/ prev_state = prev->state; @@ -20851,7 +20782,7 @@ index c936c0422..7bb89c886 100644 if (signal_pending_state(prev_state, prev)) { prev->state = TASK_RUNNING; } else { -@@ -5366,6 +5997,7 @@ static void __sched notrace __schedule(bool preempt) +@@ -5374,6 +6006,7 @@ static void __sched notrace __schedule(bool preempt) next = pick_next_task(rq, prev, &rf); clear_tsk_need_resched(prev); @@ -20859,7 +20790,7 @@ index c936c0422..7bb89c886 100644 clear_preempt_need_resched(); if (likely(prev != next)) { -@@ -5391,6 +6023,7 @@ static void __sched notrace __schedule(bool preempt) +@@ -5399,6 +6032,7 @@ static void __sched notrace __schedule(bool preempt) */ ++*switch_count; @@ -20867,7 +20798,7 @@ index c936c0422..7bb89c886 100644 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); trace_sched_switch(preempt, prev, next); -@@ -5399,10 +6032,11 @@ static void __sched notrace __schedule(bool preempt) +@@ -5407,10 +6041,11 @@ static void __sched notrace __schedule(bool preempt) rq = context_switch(rq, prev, next, &rf); } else { rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); @@ -20882,7 +20813,7 @@ index c936c0422..7bb89c886 100644 } void __noreturn do_task_dead(void) -@@ -5413,7 +6047,7 @@ void __noreturn do_task_dead(void) +@@ -5421,7 +6056,7 @@ void __noreturn do_task_dead(void) /* Tell freezer to ignore us: */ current->flags |= PF_NOFREEZE; @@ -20891,7 +20822,7 @@ index c936c0422..7bb89c886 100644 BUG(); /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ -@@ -5446,9 +6080,6 @@ static inline void sched_submit_work(struct task_struct *tsk) +@@ -5454,9 +6089,6 @@ static inline void sched_submit_work(struct task_struct *tsk) preempt_enable_no_resched(); } @@ -20901,7 +20832,7 @@ index c936c0422..7bb89c886 100644 /* * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. 
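
The hunks above wire migrate_disable_switch() into __schedule() so that a
task which blocks while inside a migrate_disable() section stays pinned to
its runqueue (accounted in rq->nr_pinned) instead of being migrated. A
minimal sketch of how a caller uses the pair this patch introduces; the
function and the per-CPU counter below are illustrative only, not part of
the patch:

	static DEFINE_PER_CPU(int, example_counter);

	/*
	 * Stay on this CPU across a preemptible section. Unlike
	 * preempt_disable(), the task remains preemptible and may even
	 * block on sleeping spinlocks (PREEMPT_RT) while pinned.
	 */
	static int example_read_counter(void)
	{
		int val;

		migrate_disable();	/* pin the task to this CPU */
		val = __this_cpu_read(example_counter);
		/* may sleep here without losing CPU locality */
		migrate_enable();	/* unpin the task again */
		return val;
	}

migrate_enable() may then let a deferred affinity change complete, which
is why the migration_cpu_stop() path above re-checks for pending work
after the pin count drops.
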
-@@ -5474,7 +6105,7 @@ asmlinkage __visible void __sched schedule(void) +@@ -5482,7 +6114,7 @@ asmlinkage __visible void __sched schedule(void) sched_submit_work(tsk); do { preempt_disable(); @@ -20910,7 +20841,7 @@ index c936c0422..7bb89c886 100644 sched_preempt_enable_no_resched(); } while (need_resched()); sched_update_worker(tsk); -@@ -5502,7 +6133,7 @@ void __sched schedule_idle(void) +@@ -5510,7 +6142,7 @@ void __sched schedule_idle(void) */ WARN_ON_ONCE(current->state); do { @@ -20919,7 +20850,7 @@ index c936c0422..7bb89c886 100644 } while (need_resched()); } -@@ -5555,7 +6186,7 @@ static void __sched notrace preempt_schedule_common(void) +@@ -5563,7 +6195,7 @@ static void __sched notrace preempt_schedule_common(void) */ preempt_disable_notrace(); preempt_latency_start(1); @@ -20928,7 +20859,7 @@ index c936c0422..7bb89c886 100644 preempt_latency_stop(1); preempt_enable_no_resched_notrace(); -@@ -5566,6 +6197,30 @@ static void __sched notrace preempt_schedule_common(void) +@@ -5574,6 +6206,30 @@ static void __sched notrace preempt_schedule_common(void) } while (need_resched()); } @@ -20959,10 +20890,11 @@ index c936c0422..7bb89c886 100644 #ifdef CONFIG_PREEMPTION /* * This is the entry point to schedule() from in-kernel preemption -@@ -5580,11 +6235,26 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) +@@ -5587,12 +6243,26 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) + */ if (likely(!preemptible())) return; - +- + if (!preemptible_lazy()) + return; preempt_schedule_common(); @@ -20973,11 +20905,11 @@ index c936c0422..7bb89c886 100644 +#ifdef CONFIG_PREEMPT_RT +void __sched notrace preempt_schedule_lock(void) +{ -+ do { -+ preempt_disable(); -+ __schedule(true, true); -+ sched_preempt_enable_no_resched(); -+ } while (need_resched()); ++ do { ++ preempt_disable(); ++ __schedule(true, true); ++ sched_preempt_enable_no_resched(); ++ } while (need_resched()); +} +NOKPROBE_SYMBOL(preempt_schedule_lock); +EXPORT_SYMBOL(preempt_schedule_lock); @@ -20986,7 +20918,7 @@ index c936c0422..7bb89c886 100644 #ifdef CONFIG_PREEMPT_DYNAMIC DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func); EXPORT_STATIC_CALL(preempt_schedule); -@@ -5612,6 +6282,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) +@@ -5620,6 +6290,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) if (likely(!preemptible())) return; @@ -20996,7 +20928,7 @@ index c936c0422..7bb89c886 100644 do { /* * Because the function tracer can trace preempt_count_sub() -@@ -5634,7 +6307,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) +@@ -5642,7 +6315,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) * an infinite recursion. 
*/ prev_ctx = exception_enter(); @@ -21005,7 +20937,7 @@ index c936c0422..7bb89c886 100644 exception_exit(prev_ctx); preempt_latency_stop(1); -@@ -5852,7 +6525,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void) +@@ -5860,7 +6533,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void) do { preempt_disable(); local_irq_enable(); @@ -21014,7 +20946,7 @@ index c936c0422..7bb89c886 100644 local_irq_disable(); sched_preempt_enable_no_resched(); } while (need_resched()); -@@ -6018,9 +6691,11 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) +@@ -6026,9 +6699,11 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) out_unlock: /* Avoid rq from going away on us: */ preempt_disable(); @@ -21028,7 +20960,7 @@ index c936c0422..7bb89c886 100644 preempt_enable(); } #else -@@ -6263,6 +6938,7 @@ static int __sched_setscheduler(struct task_struct *p, +@@ -6271,6 +6946,7 @@ static int __sched_setscheduler(struct task_struct *p, int oldpolicy = -1, policy = attr->sched_policy; int retval, oldprio, newprio, queued, running; const struct sched_class *prev_class; @@ -21036,7 +20968,7 @@ index c936c0422..7bb89c886 100644 struct rq_flags rf; int reset_on_fork; int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; -@@ -6517,6 +7193,7 @@ static int __sched_setscheduler(struct task_struct *p, +@@ -6525,6 +7201,7 @@ static int __sched_setscheduler(struct task_struct *p, /* Avoid rq from going away on us: */ preempt_disable(); @@ -21044,7 +20976,7 @@ index c936c0422..7bb89c886 100644 task_rq_unlock(rq, p, &rf); if (pi) { -@@ -6525,7 +7202,7 @@ static int __sched_setscheduler(struct task_struct *p, +@@ -6533,7 +7210,7 @@ static int __sched_setscheduler(struct task_struct *p, } /* Run balance callbacks after we've adjusted the PI chain: */ @@ -21053,7 +20985,7 @@ index c936c0422..7bb89c886 100644 preempt_enable(); return 0; -@@ -7020,7 +7697,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) +@@ -7028,7 +7705,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) } #endif again: @@ -21062,7 +20994,7 @@ index c936c0422..7bb89c886 100644 if (!retval) { cpuset_cpus_allowed(p, cpus_allowed); -@@ -7606,7 +8283,7 @@ void __init init_idle(struct task_struct *idle, int cpu) +@@ -7614,7 +8291,7 @@ void __init init_idle(struct task_struct *idle, int cpu) * * And since this is boot we can forgo the serialization. */ @@ -21071,7 +21003,7 @@ index c936c0422..7bb89c886 100644 #endif /* * We're having a chicken and egg problem, even though we are -@@ -7633,7 +8310,9 @@ void __init init_idle(struct task_struct *idle, int cpu) +@@ -7641,7 +8318,9 @@ void __init init_idle(struct task_struct *idle, int cpu) /* Set the preempt count _outside_ the spinlocks! */ init_idle_preempt_count(idle, cpu); @@ -21082,7 +21014,7 @@ index c936c0422..7bb89c886 100644 /* * The idle tasks have their own, simple scheduling class: */ -@@ -7738,6 +8417,7 @@ void sched_setnuma(struct task_struct *p, int nid) +@@ -7751,6 +8430,7 @@ void sched_setnuma(struct task_struct *p, int nid) #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_HOTPLUG_CPU @@ -21090,7 +21022,7 @@ index c936c0422..7bb89c886 100644 /* * Ensure that the idle task is using init_mm right before its CPU goes * offline. 
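
Both the rt_mutex_setprio() and __sched_setscheduler() hunks above move to
the same two-step pattern: the queued balance work is detached while
rq->lock is still held and only executed once the lock has been dropped,
so the push/pull callbacks are free to retake rq->lock themselves.
Condensed from the hunks above (a sketch, with the error paths omitted):

	struct callback_head *head;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/* ... priority/policy update; may queue balance work on rq ... */

	head = splice_balance_callbacks(rq);	/* detach under rq->lock */
	task_rq_unlock(rq, p, &rf);

	balance_callbacks(rq, head);		/* run with rq->lock released */
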
-@@ -7757,119 +8437,126 @@ void idle_task_exit(void) +@@ -7770,119 +8450,126 @@ void idle_task_exit(void) /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ } @@ -21165,7 +21097,7 @@ index c936c0422..7bb89c886 100644 - int dest_cpu; + struct task_struct *push_task = rq->curr; + -+ lockdep_assert_held(&rq->lock); ++ lockdep_assert_held(&rq->__lock); + SCHED_WARN_ON(rq->cpu != smp_processor_id()); /* @@ -21194,9 +21126,9 @@ index c936c0422..7bb89c886 100644 + */ + if (!rq->nr_running && !rq_has_pinned_tasks(rq) && + rcuwait_active(&rq->hotplug_wait)) { -+ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock(&rq->__lock); + rcuwait_wake_up(&rq->hotplug_wait); -+ raw_spin_lock(&rq->lock); ++ raw_spin_lock(&rq->__lock); + } + return; + } @@ -21210,7 +21142,7 @@ index c936c0422..7bb89c886 100644 + * Both preemption and IRQs are still disabled. */ - update_rq_clock(rq); -+ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock(&rq->__lock); + stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, + this_cpu_ptr(&push_work)); + /* @@ -21218,7 +21150,7 @@ index c936c0422..7bb89c886 100644 + * schedule(). The next pick is obviously going to be the stop task + * which is_per_cpu_kthread() and will push this task away. + */ -+ raw_spin_lock(&rq->lock); ++ raw_spin_lock(&rq->__lock); +} - for (;;) { @@ -21307,7 +21239,7 @@ index c936c0422..7bb89c886 100644 #endif /* CONFIG_HOTPLUG_CPU */ void set_rq_online(struct rq *rq) -@@ -7955,6 +8642,8 @@ int sched_cpu_activate(unsigned int cpu) +@@ -7970,6 +8657,8 @@ int sched_cpu_activate(unsigned int cpu) struct rq *rq = cpu_rq(cpu); struct rq_flags rf; @@ -21316,7 +21248,7 @@ index c936c0422..7bb89c886 100644 #ifdef CONFIG_SCHED_SMT /* * When going up, increment the number of cores with SMT present. -@@ -7990,6 +8679,8 @@ int sched_cpu_activate(unsigned int cpu) +@@ -8005,6 +8694,8 @@ int sched_cpu_activate(unsigned int cpu) int sched_cpu_deactivate(unsigned int cpu) { @@ -21325,7 +21257,7 @@ index c936c0422..7bb89c886 100644 int ret; set_cpu_active(cpu, false); -@@ -8002,6 +8693,16 @@ int sched_cpu_deactivate(unsigned int cpu) +@@ -8017,6 +8708,16 @@ int sched_cpu_deactivate(unsigned int cpu) */ synchronize_rcu(); @@ -21342,7 +21274,7 @@ index c936c0422..7bb89c886 100644 #ifdef CONFIG_SCHED_SMT /* * When going down, decrement the number of cores with SMT present. 
-@@ -8017,6 +8718,7 @@ int sched_cpu_deactivate(unsigned int cpu) +@@ -8032,6 +8733,7 @@ int sched_cpu_deactivate(unsigned int cpu) ret = cpuset_cpu_inactive(cpu); if (ret) { @@ -21350,7 +21282,7 @@ index c936c0422..7bb89c886 100644 set_cpu_active(cpu, true); return ret; } -@@ -8041,6 +8743,41 @@ int sched_cpu_starting(unsigned int cpu) +@@ -8056,6 +8758,41 @@ int sched_cpu_starting(unsigned int cpu) } #ifdef CONFIG_HOTPLUG_CPU @@ -21392,7 +21324,7 @@ index c936c0422..7bb89c886 100644 int sched_cpu_dying(unsigned int cpu) { struct rq *rq = cpu_rq(cpu); -@@ -8050,12 +8787,7 @@ int sched_cpu_dying(unsigned int cpu) +@@ -8065,12 +8802,7 @@ int sched_cpu_dying(unsigned int cpu) sched_tick_stop(cpu); rq_lock_irqsave(rq, &rf); @@ -21406,17 +21338,17 @@ index c936c0422..7bb89c886 100644 rq_unlock_irqrestore(rq, &rf); calc_load_migrate(rq); -@@ -8266,6 +8998,9 @@ void __init sched_init(void) +@@ -8281,6 +9013,9 @@ void __init sched_init(void) INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); #endif +#ifdef CONFIG_HOTPLUG_CPU -+ rcuwait_init(&rq->hotplug_wait); ++ rcuwait_init(&rq->hotplug_wait); +#endif #endif /* CONFIG_SMP */ hrtick_rq_init(rq); atomic_set(&rq->nr_iowait, 0); -@@ -8316,7 +9051,7 @@ void __init sched_init(void) +@@ -8331,7 +9066,7 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { @@ -21425,7 +21357,7 @@ index c936c0422..7bb89c886 100644 return (nested == preempt_offset); } -@@ -8413,6 +9148,39 @@ void __cant_sleep(const char *file, int line, int preempt_offset) +@@ -8428,6 +9163,39 @@ void __cant_sleep(const char *file, int line, int preempt_offset) add_taint(TAINT_WARN, LOCKDEP_STILL_OK); } EXPORT_SYMBOL_GPL(__cant_sleep); @@ -21466,7 +21398,7 @@ index c936c0422..7bb89c886 100644 #ifdef CONFIG_MAGIC_SYSRQ diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c -index 8cb06c8c7..ceb03d76c 100644 +index 8cb06c8c7eb1..ceb03d76c0cc 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -120,7 +120,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, @@ -21488,7 +21420,7 @@ index 8cb06c8c7..ceb03d76c 100644 if (later_mask) cpumask_set_cpu(best_cpu, later_mask); diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c -index 0033731a0..11c4df201 100644 +index 0033731a0797..11c4df2010de 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -73,11 +73,11 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p, @@ -21506,7 +21438,7 @@ index 0033731a0..11c4df201 100644 /* * We have to ensure that we have at least one bit diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c -index ca0eef7d3..02a5aa60f 100644 +index ca0eef7d3852..02a5aa60fe7e 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -44,12 +44,13 @@ static void irqtime_account_delta(struct irqtime *irqtime, u64 delta, @@ -21585,10 +21517,10 @@ index ca0eef7d3..02a5aa60f 100644 void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, u64 *ut, u64 *st) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c -index cb487d7d3..6aa18aa2d 100644 +index c4c0d760d252..14252d5be166 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c -@@ -565,7 +565,7 @@ static int push_dl_task(struct rq *rq); +@@ -551,7 +551,7 @@ static int push_dl_task(struct rq *rq); static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) { @@ -21597,7 +21529,7 @@ index cb487d7d3..6aa18aa2d 100644 } static DEFINE_PER_CPU(struct callback_head, 
dl_push_head); -@@ -1931,7 +1931,7 @@ static void task_fork_dl(struct task_struct *p) +@@ -1913,7 +1913,7 @@ static void task_fork_dl(struct task_struct *p) static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && @@ -21606,7 +21538,7 @@ index cb487d7d3..6aa18aa2d 100644 return 1; return 0; } -@@ -2021,8 +2021,8 @@ static int find_later_rq(struct task_struct *task) +@@ -2003,8 +2003,8 @@ static int find_later_rq(struct task_struct *task) return this_cpu; } @@ -21617,7 +21549,7 @@ index cb487d7d3..6aa18aa2d 100644 /* * Last chance: if a CPU being in both later_mask * and current sd span is valid, that becomes our -@@ -2044,7 +2044,7 @@ static int find_later_rq(struct task_struct *task) +@@ -2026,7 +2026,7 @@ static int find_later_rq(struct task_struct *task) if (this_cpu != -1) return this_cpu; @@ -21626,7 +21558,7 @@ index cb487d7d3..6aa18aa2d 100644 if (cpu < nr_cpu_ids) return cpu; -@@ -2109,7 +2109,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) +@@ -2091,7 +2091,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) */ next_task = pick_next_pushable_dl_task(rq); if (unlikely(next_task != task || @@ -21635,7 +21567,7 @@ index cb487d7d3..6aa18aa2d 100644 double_unlock_balance(rq, later_rq); later_rq = NULL; break; -@@ -2153,6 +2153,9 @@ static int push_dl_task(struct rq *rq) +@@ -2135,6 +2135,9 @@ static int push_dl_task(struct rq *rq) return 0; retry: @@ -21645,7 +21577,7 @@ index cb487d7d3..6aa18aa2d 100644 if (WARN_ON(next_task == rq->curr)) return 0; -@@ -2230,7 +2233,7 @@ static void push_dl_tasks(struct rq *rq) +@@ -2212,7 +2215,7 @@ static void push_dl_tasks(struct rq *rq) static void pull_dl_task(struct rq *this_rq) { int this_cpu = this_rq->cpu, cpu; @@ -21654,7 +21586,7 @@ index cb487d7d3..6aa18aa2d 100644 bool resched = false; struct rq *src_rq; u64 dmin = LONG_MAX; -@@ -2260,6 +2263,7 @@ static void pull_dl_task(struct rq *this_rq) +@@ -2242,6 +2245,7 @@ static void pull_dl_task(struct rq *this_rq) continue; /* Might drop this_rq->lock */ @@ -21662,7 +21594,7 @@ index cb487d7d3..6aa18aa2d 100644 double_lock_balance(this_rq, src_rq); /* -@@ -2291,17 +2295,28 @@ static void pull_dl_task(struct rq *this_rq) +@@ -2273,17 +2277,28 @@ static void pull_dl_task(struct rq *this_rq) src_rq->curr->dl.deadline)) goto skip; @@ -21697,7 +21629,7 @@ index cb487d7d3..6aa18aa2d 100644 } if (resched) -@@ -2325,7 +2340,8 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p) +@@ -2307,7 +2322,8 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p) } static void set_cpus_allowed_dl(struct task_struct *p, @@ -21707,7 +21639,7 @@ index cb487d7d3..6aa18aa2d 100644 { struct root_domain *src_rd; struct rq *rq; -@@ -2354,7 +2370,7 @@ static void set_cpus_allowed_dl(struct task_struct *p, +@@ -2336,7 +2352,7 @@ static void set_cpus_allowed_dl(struct task_struct *p, raw_spin_unlock(&src_dl_b->lock); } @@ -21716,7 +21648,7 @@ index cb487d7d3..6aa18aa2d 100644 } /* Assumes rq->lock is held */ -@@ -2550,6 +2566,7 @@ const struct sched_class dl_sched_class +@@ -2532,6 +2548,7 @@ const struct sched_class dl_sched_class .rq_online = rq_online_dl, .rq_offline = rq_offline_dl, .task_woken = task_woken_dl, @@ -21725,19 +21657,19 @@ index cb487d7d3..6aa18aa2d 100644 .task_tick = task_tick_dl, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 20b482688..bbc3b1e7f 100644 +index d3c4b945c019..602bacf33e5d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4453,7 
+4453,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) - ideal_runtime = sched_slice(cfs_rq, curr); - delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; +@@ -4467,7 +4467,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) + #endif + if (delta_exec > ideal_runtime) { - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. -@@ -4477,7 +4477,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) +@@ -4491,7 +4491,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) return; if (delta > ideal_runtime) @@ -21746,7 +21678,7 @@ index 20b482688..bbc3b1e7f 100644 } static void -@@ -4620,7 +4620,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) +@@ -4634,7 +4634,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) * validating it and just reschedule. */ if (queued) { @@ -21755,7 +21687,7 @@ index 20b482688..bbc3b1e7f 100644 return; } /* -@@ -4769,7 +4769,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) +@@ -4783,7 +4783,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) @@ -21764,7 +21696,7 @@ index 20b482688..bbc3b1e7f 100644 } static __always_inline -@@ -5521,7 +5521,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) +@@ -5543,7 +5543,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) if (delta < 0) { if (rq->curr == p) @@ -21773,7 +21705,7 @@ index 20b482688..bbc3b1e7f 100644 return; } hrtick_start(rq, delta); -@@ -7142,7 +7142,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ +@@ -7267,7 +7267,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ return; preempt: @@ -21782,7 +21714,7 @@ index 20b482688..bbc3b1e7f 100644 /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved -@@ -11719,7 +11719,7 @@ static void task_fork_fair(struct task_struct *p) +@@ -11938,7 +11938,7 @@ static void task_fork_fair(struct task_struct *p) * 'current' within the tree based on its new key value. */ swap(curr->vruntime, se->vruntime); @@ -21791,7 +21723,7 @@ index 20b482688..bbc3b1e7f 100644 } se->vruntime -= cfs_rq->min_vruntime; -@@ -11746,7 +11746,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) +@@ -11965,7 +11965,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) */ if (rq->curr == p) { if (p->prio > oldprio) @@ -21801,7 +21733,7 @@ index 20b482688..bbc3b1e7f 100644 check_preempt_curr(rq, p, 0); } diff --git a/kernel/sched/features.h b/kernel/sched/features.h -index 97ed11bd2..0dade2e74 100644 +index fef48f5be2fa..f8a556887472 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -45,11 +45,19 @@ SCHED_FEAT(DOUBLE_TICK, false) @@ -21825,7 +21757,7 @@ index 97ed11bd2..0dade2e74 100644 /* * When doing wakeups, attempt to limit superfluous scans of the LLC domain. 
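
The fair.c hunks above downgrade every resched_curr() on a SCHED_OTHER
preemption point to resched_curr_lazy(), and the features.h hunk adds the
RT-series feature bits, presumably including PREEMPT_LAZY. The helper
itself is added to kernel/sched/core.c earlier in this patch; a condensed
sketch of its intended behaviour (the remote-CPU polling check of the RT
series is simplified here):

	#ifdef CONFIG_PREEMPT_LAZY
	void resched_curr_lazy(struct rq *rq)
	{
		struct task_struct *curr = rq->curr;

		/* Feature disabled: behave exactly like resched_curr(). */
		if (!sched_feat(PREEMPT_LAZY)) {
			resched_curr(rq);
			return;
		}

		if (test_tsk_need_resched(curr) ||
		    test_tsk_need_resched_lazy(curr))
			return;

		/*
		 * Set only the lazy flag: a fair task is rescheduled at
		 * the next real preemption point (return to user space,
		 * or preempt_enable() of a non-lazy section), while RT
		 * and DL tasks keep using resched_curr() and therefore
		 * still preempt immediately.
		 */
		set_tsk_need_resched_lazy(curr);

		if (cpu_of(rq) != smp_processor_id())
			smp_send_reschedule(cpu_of(rq));
	}
	#endif
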
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c -index 5dbf51ebd..89fd828db 100644 +index 0f349d8d076d..9cfb0948ea06 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -270,7 +270,7 @@ static void pull_rt_task(struct rq *this_rq); @@ -21837,7 +21769,7 @@ index 5dbf51ebd..89fd828db 100644 } static inline int rt_overloaded(struct rq *rq) -@@ -1676,7 +1676,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) +@@ -1679,7 +1679,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && @@ -21846,7 +21778,7 @@ index 5dbf51ebd..89fd828db 100644 return 1; return 0; -@@ -1770,8 +1770,8 @@ static int find_lowest_rq(struct task_struct *task) +@@ -1773,8 +1773,8 @@ static int find_lowest_rq(struct task_struct *task) return this_cpu; } @@ -21857,7 +21789,7 @@ index 5dbf51ebd..89fd828db 100644 if (best_cpu < nr_cpu_ids) { rcu_read_unlock(); return best_cpu; -@@ -1788,7 +1788,7 @@ static int find_lowest_rq(struct task_struct *task) +@@ -1791,7 +1791,7 @@ static int find_lowest_rq(struct task_struct *task) if (this_cpu != -1) return this_cpu; @@ -21866,7 +21798,7 @@ index 5dbf51ebd..89fd828db 100644 if (cpu < nr_cpu_ids) return cpu; -@@ -1849,7 +1849,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) +@@ -1852,7 +1852,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) */ struct task_struct *next_task = pick_next_pushable_task(rq); if (unlikely(next_task != task || @@ -21875,7 +21807,7 @@ index 5dbf51ebd..89fd828db 100644 double_unlock_balance(rq, lowest_rq); lowest_rq = NULL; break; -@@ -1873,7 +1873,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) +@@ -1876,7 +1876,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) * running task can migrate over to a CPU that is running a task * of lesser priority. 
*/ @@ -21884,7 +21816,7 @@ index 5dbf51ebd..89fd828db 100644 { struct task_struct *next_task; struct rq *lowest_rq; -@@ -1887,6 +1887,39 @@ static int push_rt_task(struct rq *rq) +@@ -1890,6 +1890,39 @@ static int push_rt_task(struct rq *rq) return 0; retry: @@ -21924,7 +21856,7 @@ index 5dbf51ebd..89fd828db 100644 if (WARN_ON(next_task == rq->curr)) return 0; -@@ -1941,12 +1974,10 @@ static int push_rt_task(struct rq *rq) +@@ -1944,12 +1977,10 @@ static int push_rt_task(struct rq *rq) deactivate_task(rq, next_task, 0); set_task_cpu(next_task, lowest_rq->cpu); activate_task(lowest_rq, next_task, 0); @@ -21938,7 +21870,7 @@ index 5dbf51ebd..89fd828db 100644 out: put_task_struct(next_task); -@@ -1956,7 +1987,7 @@ static int push_rt_task(struct rq *rq) +@@ -1959,7 +1990,7 @@ static int push_rt_task(struct rq *rq) static void push_rt_tasks(struct rq *rq) { /* push_rt_task will return true if it moved an RT */ @@ -21947,17 +21879,19 @@ index 5dbf51ebd..89fd828db 100644 ; } -@@ -2109,7 +2140,8 @@ void rto_push_irq_work_func(struct irq_work *work) +@@ -2112,7 +2143,10 @@ void rto_push_irq_work_func(struct irq_work *work) */ if (has_pushable_tasks(rq)) { raw_spin_rq_lock(rq); - push_rt_tasks(rq); ++ + while (push_rt_task(rq, true)) + ; ++ raw_spin_rq_unlock(rq); } -@@ -2134,7 +2166,7 @@ static void pull_rt_task(struct rq *this_rq) +@@ -2137,7 +2171,7 @@ static void pull_rt_task(struct rq *this_rq) { int this_cpu = this_rq->cpu, cpu; bool resched = false; @@ -21966,7 +21900,7 @@ index 5dbf51ebd..89fd828db 100644 struct rq *src_rq; int rt_overload_count = rt_overloaded(this_rq); -@@ -2181,6 +2213,7 @@ static void pull_rt_task(struct rq *this_rq) +@@ -2184,6 +2218,7 @@ static void pull_rt_task(struct rq *this_rq) * double_lock_balance, and another CPU could * alter this_rq */ @@ -21974,7 +21908,7 @@ index 5dbf51ebd..89fd828db 100644 double_lock_balance(this_rq, src_rq); /* -@@ -2208,11 +2241,15 @@ static void pull_rt_task(struct rq *this_rq) +@@ -2211,11 +2246,15 @@ static void pull_rt_task(struct rq *this_rq) if (p->prio < src_rq->curr->prio) goto skip; @@ -21995,7 +21929,7 @@ index 5dbf51ebd..89fd828db 100644 /* * We continue with the search, just in * case there's an even higher prio task -@@ -2222,6 +2259,13 @@ static void pull_rt_task(struct rq *this_rq) +@@ -2225,6 +2264,13 @@ static void pull_rt_task(struct rq *this_rq) } skip: double_unlock_balance(this_rq, src_rq); @@ -22009,7 +21943,7 @@ index 5dbf51ebd..89fd828db 100644 } if (resched) -@@ -2471,6 +2515,7 @@ const struct sched_class rt_sched_class +@@ -2474,6 +2520,7 @@ const struct sched_class rt_sched_class .rq_offline = rq_offline_rt, .task_woken = task_woken_rt, .switched_from = switched_from_rt, @@ -22018,10 +21952,10 @@ index 5dbf51ebd..89fd828db 100644 .task_tick = task_tick_rt, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index 3bd6c9886..53adda69d 100644 +index 0ab8e2532f2d..fb01ee6e50d0 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h -@@ -1025,6 +1025,7 @@ struct rq { +@@ -1032,6 +1032,7 @@ struct rq { unsigned long cpu_capacity_orig; struct callback_head *balance_callback; @@ -22029,7 +21963,7 @@ index 3bd6c9886..53adda69d 100644 unsigned char nohz_idle_balance; unsigned char idle_balance; -@@ -1055,6 +1056,10 @@ struct rq { +@@ -1062,6 +1063,10 @@ struct rq { /* This is used to determine avg_idle's max value */ u64 max_idle_balance_cost; @@ -22040,19 +21974,20 @@ index 3bd6c9886..53adda69d 100644 #endif /* CONFIG_SMP */ #ifdef CONFIG_IRQ_TIME_ACCOUNTING -@@ -1129,6 +1134,11 @@ struct rq { +@@ -1136,6 
+1141,12 @@ struct rq { unsigned int core_forceidle_seq; #endif +#ifdef CONFIG_SMP -+ unsigned int nr_pinned; ++ unsigned int nr_pinned; +#endif -+ unsigned int push_busy; -+ struct cpu_stop_work push_work; ++ unsigned int push_busy; ++ struct cpu_stop_work push_work; ++ KABI_RESERVE(1) KABI_RESERVE(2) KABI_RESERVE(3) -@@ -1164,6 +1174,17 @@ static inline int cpu_of(struct rq *rq) +@@ -1171,6 +1182,17 @@ static inline int cpu_of(struct rq *rq) #endif } @@ -22061,29 +21996,16 @@ index 3bd6c9886..53adda69d 100644 +static inline bool is_migration_disabled(struct task_struct *p) +{ +#ifdef CONFIG_SMP -+ return p->migration_disabled; ++ return p->migration_disabled; +#else -+ return false; ++ return false; +#endif +} + #ifdef CONFIG_QOS_SCHED enum task_qos_level { QOS_LEVEL_OFFLINE = -1, -@@ -1538,6 +1559,12 @@ struct rq_flags { - */ - unsigned int clock_update_flags; - #endif -+ -+#ifdef CONFIG_SMP -+ unsigned int nr_pinned; -+#endif -+ unsigned int push_busy; -+ struct cpu_stop_work push_work; - }; - - /* -@@ -1558,6 +1585,9 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) +@@ -1565,6 +1587,9 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); rf->clock_update_flags = 0; #endif @@ -22093,7 +22015,7 @@ index 3bd6c9886..53adda69d 100644 } static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) -@@ -1723,6 +1753,9 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p) +@@ -1730,6 +1755,9 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p) #ifdef CONFIG_SMP @@ -22103,7 +22025,7 @@ index 3bd6c9886..53adda69d 100644 static inline void queue_balance_callback(struct rq *rq, struct callback_head *head, -@@ -1730,12 +1763,12 @@ queue_balance_callback(struct rq *rq, +@@ -1737,12 +1765,13 @@ queue_balance_callback(struct rq *rq, { lockdep_assert_rq_held(rq); @@ -22113,12 +22035,12 @@ index 3bd6c9886..53adda69d 100644 head->func = (void (*)(struct callback_head *))func; head->next = rq->balance_callback; -- rq->balance_callback = head; + rq->balance_callback = head; + rq->balance_flags |= BALANCE_WORK; } #define rcu_dereference_check_sched_domain(p) \ -@@ -2060,6 +2093,7 @@ static inline int task_on_rq_migrating(struct task_struct *p) +@@ -2067,6 +2096,7 @@ static inline int task_on_rq_migrating(struct task_struct *p) #define WF_FORK 0x02 /* Child wakeup after fork */ #define WF_MIGRATED 0x04 /* Internal use, task got migrated */ #define WF_ON_CPU 0x08 /* Wakee is on_cpu */ @@ -22126,7 +22048,7 @@ index 3bd6c9886..53adda69d 100644 /* * To aid in avoiding the subversion of "niceness" due to uneven distribution -@@ -2141,10 +2175,13 @@ struct sched_class { +@@ -2148,10 +2178,13 @@ struct sched_class { void (*task_woken)(struct rq *this_rq, struct task_struct *task); void (*set_cpus_allowed)(struct task_struct *p, @@ -22141,7 +22063,7 @@ index 3bd6c9886..53adda69d 100644 #endif void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); -@@ -2234,13 +2271,38 @@ static inline bool sched_fair_runnable(struct rq *rq) +@@ -2241,13 +2274,38 @@ static inline bool sched_fair_runnable(struct rq *rq) extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); extern struct task_struct *pick_next_task_idle(struct rq *rq); @@ -22181,7 +22103,7 @@ index 3bd6c9886..53adda69d 100644 #endif -@@ -2284,6 +2346,15 @@ extern void reweight_task(struct task_struct *p, int prio); +@@ -2291,6 +2349,15 @@ extern void 
reweight_task(struct task_struct *p, int prio); extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); @@ -22197,16 +22119,8 @@ index 3bd6c9886..53adda69d 100644 extern struct rt_bandwidth def_rt_bandwidth; extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); -@@ -2667,7 +2738,6 @@ extern void nohz_balance_exit_idle(struct rq *rq); - static inline void nohz_balance_exit_idle(struct rq *rq) { } - #endif - -- - #ifdef CONFIG_SMP - static inline - void __dl_update(struct dl_bw *dl_b, s64 bw) diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c -index e1c655f92..f230b1ac7 100644 +index e1c655f928c7..f230b1ac7f91 100644 --- a/kernel/sched/swait.c +++ b/kernel/sched/swait.c @@ -64,6 +64,7 @@ void swake_up_all(struct swait_queue_head *q) @@ -22217,20 +22131,8 @@ index e1c655f92..f230b1ac7 100644 raw_spin_lock_irq(&q->lock); list_splice_init(&q->task_list, &tmp); while (!list_empty(&tmp)) { -diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c -index 2678e7590..0c94b0e41 100644 ---- a/kernel/sched/topology.c -+++ b/kernel/sched/topology.c -@@ -529,6 +529,7 @@ static int init_rootdomain(struct root_domain *rd) - rd->rto_cpu = -1; - raw_spin_lock_init(&rd->rto_lock); - init_irq_work(&rd->rto_push_work, rto_push_irq_work_func); -+ atomic_or(IRQ_WORK_HARD_IRQ, &rd->rto_push_work.node.a_flags); - #endif - - init_dl_bw(&rd->dl_bw); diff --git a/kernel/signal.c b/kernel/signal.c -index 6d374d02a..d944e9100 100644 +index d874c96315a6..2b8ba0d38444 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -20,6 +20,7 @@ @@ -22361,7 +22263,7 @@ index 6d374d02a..d944e9100 100644 /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ -@@ -1319,6 +1379,34 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t) +@@ -1320,6 +1380,34 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t) struct k_sigaction *action; int sig = info->si_signo; @@ -22396,7 +22298,7 @@ index 6d374d02a..d944e9100 100644 spin_lock_irqsave(&t->sighand->siglock, flags); action = &t->sighand->action[sig-1]; ignored = action->sa.sa_handler == SIG_IGN; -@@ -1812,7 +1900,8 @@ EXPORT_SYMBOL(kill_pid); +@@ -1813,7 +1901,8 @@ EXPORT_SYMBOL(kill_pid); */ struct sigqueue *sigqueue_alloc(void) { @@ -22406,7 +22308,7 @@ index 6d374d02a..d944e9100 100644 if (q) q->flags |= SIGQUEUE_PREALLOC; -@@ -2198,16 +2287,8 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t +@@ -2199,16 +2288,8 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t if (gstop_done && ptrace_reparented(current)) do_notify_parent_cldstop(current, false, why); @@ -22424,7 +22326,7 @@ index 6d374d02a..d944e9100 100644 cgroup_leave_frozen(true); } else { diff --git a/kernel/smp.c b/kernel/smp.c -index 114776d0d..6d35929a1 100644 +index 114776d0d11e..6d35929a1990 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -480,8 +480,18 @@ void flush_smp_call_function_from_idle(void) @@ -22449,7 +22351,7 @@ index 114776d0d..6d35929a1 100644 local_irq_restore(flags); } diff --git a/kernel/softirq.c b/kernel/softirq.c -index 09229ad82..c9adc5c46 100644 +index 09229ad82209..c9adc5c46248 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -13,6 +13,7 @@ @@ -23020,7 +22922,7 @@ index 09229ad82..c9adc5c46 100644 #ifdef CONFIG_HOTPLUG_CPU diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c -index dd5aeddbe..8bf1fb832 100644 +index d0bf6da49322..7a74b501a3e9 100644 --- a/kernel/stop_machine.c +++ 
b/kernel/stop_machine.c @@ -47,11 +47,27 @@ struct cpu_stopper { @@ -23098,7 +23000,7 @@ index dd5aeddbe..8bf1fb832 100644 "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg); goto repeat; diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 4ef90718c..6eb443234 100644 +index 544ce87ba38a..3db616aecb17 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -2052,6 +2052,36 @@ SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp, @@ -23139,7 +23041,7 @@ index 4ef90718c..6eb443234 100644 * Functions related to boot-time initialization: */ diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index 33750db5b..90f3c8ad5 100644 +index 33750db5b564..90f3c8ad5f47 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -989,7 +989,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) @@ -23152,7 +23054,7 @@ index 33750db5b..90f3c8ad5 100644 pr_warn("NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #%02x!!!\n", (unsigned int) local_softirq_pending()); diff --git a/kernel/time/timer.c b/kernel/time/timer.c -index f7d3a108e..f24e6fed6 100644 +index f7d3a108e27c..f24e6fed6633 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1287,7 +1287,7 @@ static void del_timer_wait_running(struct timer_list *timer) @@ -23179,7 +23081,7 @@ index f7d3a108e..f24e6fed6 100644 ret = try_to_del_timer_sync(timer); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index 4e130e2bb..50a2db7a7 100644 +index 4e130e2bb566..50a2db7a791d 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2592,60 +2592,43 @@ enum print_line_t trace_handle_return(struct trace_seq *s) @@ -23329,7 +23231,7 @@ index 4e130e2bb..50a2db7a7 100644 } EXPORT_SYMBOL_GPL(ftrace_dump); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h -index c0596e250..44943a9be 100644 +index c0596e250c2a..44943a9be317 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -141,25 +141,6 @@ struct kretprobe_trace_entry_head { @@ -23359,10 +23261,10 @@ index c0596e250..44943a9be 100644 struct trace_array; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c -index f4b11f609..a2abc0b40 100644 +index d29a9a5e5743..9ad773198754 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c -@@ -183,6 +183,8 @@ static int trace_define_common_fields(void) +@@ -184,6 +184,8 @@ static int trace_define_common_fields(void) __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); __common_field(int, pid); @@ -23372,7 +23274,7 @@ index f4b11f609..a2abc0b40 100644 return ret; } diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c -index 7042544c5..c711eb334 100644 +index 7042544c5bde..c711eb334811 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -441,6 +441,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) @@ -23422,10 +23324,36 @@ index 7042544c5..c711eb334 100644 } diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index 14d4c072c..9fefb9f05 100644 +index a27605c17f07..f27c35376159 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c -@@ -4934,6 +4934,10 @@ static void unbind_workers(int cpu) +@@ -4818,9 +4818,7 @@ void show_workqueue_state(void) + * drivers that queue work while holding locks + * also taken in their write paths. 
+ */ +- printk_safe_enter(); + show_pwq(pwq); +- printk_safe_exit(); + } + raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); + /* +@@ -4844,7 +4842,6 @@ void show_workqueue_state(void) + * queue work while holding locks also taken in their write + * paths. + */ +- printk_safe_enter(); + pr_info("pool %d:", pool->id); + pr_cont_pool_info(pool); + pr_cont(" hung=%us workers=%d", +@@ -4859,7 +4856,6 @@ void show_workqueue_state(void) + first = false; + } + pr_cont("\n"); +- printk_safe_exit(); + next_pool: + raw_spin_unlock_irqrestore(&pool->lock, flags); + /* +@@ -4951,6 +4947,10 @@ static void unbind_workers(int cpu) pool->flags |= POOL_DISASSOCIATED; raw_spin_unlock_irq(&pool->lock); @@ -23437,10 +23365,10 @@ index 14d4c072c..9fefb9f05 100644 /* diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug -index ec28c6507..e49ab3b66 100644 +index 10e425c30486..68857da74e6f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug -@@ -1366,7 +1366,7 @@ config DEBUG_ATOMIC_SLEEP +@@ -1406,7 +1406,7 @@ config DEBUG_ATOMIC_SLEEP config DEBUG_LOCKING_API_SELFTESTS bool "Locking API boot-time self-tests" @@ -23450,7 +23378,7 @@ index ec28c6507..e49ab3b66 100644 Say Y here if you want the kernel to run a short self-test during bootup. The self-test checks whether common types of locking bugs diff --git a/lib/bug.c b/lib/bug.c -index 4ab398a2d..9c681f29e 100644 +index 4ab398a2de93..9c681f29e61e 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -202,6 +202,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) @@ -23462,7 +23390,7 @@ index 4ab398a2d..9c681f29e 100644 return BUG_TRAP_TYPE_BUG; } diff --git a/lib/cpumask.c b/lib/cpumask.c -index fb22fb266..c3c76b833 100644 +index fb22fb266f93..c3c76b833384 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -261,3 +261,21 @@ int cpumask_any_and_distribute(const struct cpumask *src1p, @@ -23488,7 +23416,7 @@ index fb22fb266..c3c76b833 100644 +} +EXPORT_SYMBOL(cpumask_any_distribute); diff --git a/lib/debugobjects.c b/lib/debugobjects.c -index 9e14ae023..083882a3c 100644 +index 9e14ae02306b..083882a3cf2f 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -557,7 +557,10 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack @@ -23504,7 +23432,7 @@ index 9e14ae023..083882a3c 100644 db = get_bucket((unsigned long) addr); diff --git a/lib/dump_stack.c b/lib/dump_stack.c -index a00ee6eed..f5a33b6f7 100644 +index a00ee6eedc7c..f5a33b6f773f 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -12,6 +12,7 @@ @@ -23524,7 +23452,7 @@ index a00ee6eed..f5a33b6f7 100644 /** diff --git a/lib/irq_poll.c b/lib/irq_poll.c -index 2f17b488d..7557bf7ec 100644 +index 2f17b488d58e..7557bf7ecf1f 100644 --- a/lib/irq_poll.c +++ b/lib/irq_poll.c @@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop) @@ -23568,7 +23496,7 @@ index 2f17b488d..7557bf7ec 100644 return 0; } diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c -index 76c52b0b7..98c376b02 100644 +index 76c52b0b76d3..98c376b02dff 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c @@ -787,6 +787,8 @@ GENERATE_TESTCASE(init_held_rtmutex); @@ -23721,7 +23649,7 @@ index 76c52b0b7..98c376b02 100644 force_read_lock_recursive = 0; diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c -index 8abe1870d..b09a490f5 100644 +index 8abe1870dba4..b09a490f5f70 100644 --- a/lib/nmi_backtrace.c +++ b/lib/nmi_backtrace.c @@ -75,12 +75,6 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask, @@ -23738,7 +23666,7 @@ index 8abe1870d..b09a490f5 100644 put_cpu(); 
} diff --git a/lib/scatterlist.c b/lib/scatterlist.c -index a59778946..907f59045 100644 +index a59778946404..907f59045998 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -892,7 +892,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) @@ -23751,7 +23679,7 @@ index a59778946..907f59045 100644 } else kunmap(miter->page); diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c -index 525222e4f..1c1dbd300 100644 +index 2916606a9333..0c0c42b14370 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -26,6 +26,11 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2) @@ -23767,7 +23695,7 @@ index 525222e4f..1c1dbd300 100644 * It is valid to assume CPU-locality during early bootup: */ diff --git a/lib/test_lockup.c b/lib/test_lockup.c -index 78a630bbd..d27a80502 100644 +index 78a630bbd03d..d27a80502204 100644 --- a/lib/test_lockup.c +++ b/lib/test_lockup.c @@ -485,6 +485,21 @@ static int __init test_lockup_init(void) @@ -23801,7 +23729,7 @@ index 78a630bbd..d27a80502 100644 if ((wait_state != TASK_RUNNING || diff --git a/mm/Kconfig b/mm/Kconfig -index 5e1175da7..54bd48067 100644 +index be7fd4ed2c4f..03a22fc5ae3b 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -404,7 +404,7 @@ config NOMMU_INITIAL_TRIM_EXCESS @@ -23813,16 +23741,18 @@ index 5e1175da7..54bd48067 100644 select COMPACTION select XARRAY_MULTI help -@@ -982,4 +982,7 @@ config MEMORY_RELIABLE - - source "mm/damon/Kconfig" +@@ -943,6 +943,9 @@ config ARCH_HAS_HUGEPD + config MAPPING_DIRTY_HELPERS + bool +config KMAP_LOCAL -+ bool ++ bool + - endmenu + config PIN_MEMORY + bool "Support for pin memory" + depends on MMU && ARM64 diff --git a/mm/highmem.c b/mm/highmem.c -index efe38ab47..16f3ecd4a 100644 +index efe38ab479b5..ad72e587ce54 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -31,10 +31,6 @@ @@ -23877,15 +23807,7 @@ index efe38ab47..16f3ecd4a 100644 { lock_kmap(); flush_all_zero_pkmaps(); -@@ -367,7 +358,6 @@ void kunmap_high(struct page *page) - if (need_wakeup) - wake_up(pkmap_map_wait); - } -- - EXPORT_SYMBOL(kunmap_high); - - #ifdef CONFIG_TRANSPARENT_HUGEPAGE -@@ -428,7 +418,249 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1, +@@ -428,7 +419,250 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1, } EXPORT_SYMBOL(zero_user_segments); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -23901,17 +23823,18 @@ index efe38ab47..16f3ecd4a 100644 + * slot is unused which acts as a guard page + */ +#ifdef CONFIG_DEBUG_HIGHMEM -+# define KM_INCR 2 ++# define KM_INCR 2 +#else -+# define KM_INCR 1 ++# define KM_INCR 1 +#endif + +static inline int kmap_local_idx_push(void) +{ -+ WARN_ON_ONCE(in_irq() && !irqs_disabled()); -+ current->kmap_ctrl.idx += KM_INCR; -+ BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX); -+ return current->kmap_ctrl.idx - 1; ++ WARN_ON_ONCE(in_irq() && !irqs_disabled()); ++ current->kmap_ctrl.idx += KM_INCR; ++ BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX); ++ return current->kmap_ctrl.idx - 1; ++ +} + +static inline int kmap_local_idx(void) @@ -23926,29 +23849,29 @@ index efe38ab47..16f3ecd4a 100644 +} + +#ifndef arch_kmap_local_post_map -+# define arch_kmap_local_post_map(vaddr, pteval) do { } while (0) ++# define arch_kmap_local_post_map(vaddr, pteval) do { } while (0) +#endif + +#ifndef arch_kmap_local_pre_unmap -+# define arch_kmap_local_pre_unmap(vaddr) do { } while (0) ++# define arch_kmap_local_pre_unmap(vaddr) do { } while (0) +#endif + +#ifndef arch_kmap_local_post_unmap -+# define 
arch_kmap_local_post_unmap(vaddr) do { } while (0) ++# define arch_kmap_local_post_unmap(vaddr) do { } while (0) +#endif + +#ifndef arch_kmap_local_map_idx -+#define arch_kmap_local_map_idx(idx, pfn) kmap_local_calc_idx(idx) ++#define arch_kmap_local_map_idx(idx, pfn) kmap_local_calc_idx(idx) +#endif + +#ifndef arch_kmap_local_unmap_idx -+#define arch_kmap_local_unmap_idx(idx, vaddr) kmap_local_calc_idx(idx) ++#define arch_kmap_local_unmap_idx(idx, vaddr) kmap_local_calc_idx(idx) +#endif + +#ifndef arch_kmap_local_high_get +static inline void *arch_kmap_local_high_get(struct page *page) +{ -+ return NULL; ++ return NULL; +} +#endif + @@ -23956,8 +23879,8 @@ index efe38ab47..16f3ecd4a 100644 +static inline bool kmap_high_unmap_local(unsigned long vaddr) +{ +#ifdef ARCH_NEEDS_KMAP_HIGH_GET -+ if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { -+ kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); ++ if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { ++ kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); + return true; + } +#endif @@ -23966,88 +23889,88 @@ index efe38ab47..16f3ecd4a 100644 + +static inline int kmap_local_calc_idx(int idx) +{ -+ return idx + KM_MAX_IDX * smp_processor_id(); ++ return idx + KM_MAX_IDX * smp_processor_id(); +} + +static pte_t *__kmap_pte; + +static pte_t *kmap_get_pte(void) +{ -+ if (!__kmap_pte) -+ __kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); -+ return __kmap_pte; ++ if (!__kmap_pte) ++ __kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); ++ return __kmap_pte; +} + +void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot) +{ -+ pte_t pteval, *kmap_pte = kmap_get_pte(); -+ unsigned long vaddr; -+ int idx; ++ pte_t pteval, *kmap_pte = kmap_get_pte(); ++ unsigned long vaddr; ++ int idx; + -+ /* -+ * Disable migration so resulting virtual address is stable -+ * accross preemption. -+ */ -+ migrate_disable(); -+ preempt_disable(); -+ idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn); -+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -+ BUG_ON(!pte_none(*(kmap_pte - idx))); -+ pteval = pfn_pte(pfn, prot); -+ set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval); -+ arch_kmap_local_post_map(vaddr, pteval); -+ current->kmap_ctrl.pteval[kmap_local_idx()] = pteval; ++ /* ++ * Disable migration so resulting virtual address is stable ++ * accross preemption. 
++ */ ++ migrate_disable(); ++ preempt_disable(); ++ idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn); ++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ++ BUG_ON(!pte_none(*(kmap_pte - idx))); ++ pteval = pfn_pte(pfn, prot); ++ set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval); ++ arch_kmap_local_post_map(vaddr, pteval); ++ current->kmap_ctrl.pteval[kmap_local_idx()] = pteval; + preempt_enable(); + -+ return (void *)vaddr; ++ return (void *)vaddr; +} +EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot); + +void *__kmap_local_page_prot(struct page *page, pgprot_t prot) +{ -+ void *kmap; -+ -+ if (!PageHighMem(page)) -+ return page_address(page); -+ -+ /* Try kmap_high_get() if architecture has it enabled */ -+ kmap = arch_kmap_local_high_get(page); -+ if (kmap) -+ return kmap; -+ -+ return __kmap_local_pfn_prot(page_to_pfn(page), prot); ++ void *kmap; ++ ++ if (!PageHighMem(page)) ++ return page_address(page); ++ ++ /* Try kmap_high_get() if architecture has it enabled */ ++ kmap = arch_kmap_local_high_get(page); ++ if (kmap) ++ return kmap; ++ ++ return __kmap_local_pfn_prot(page_to_pfn(page), prot); +} +EXPORT_SYMBOL(__kmap_local_page_prot); + +void kunmap_local_indexed(void *vaddr) +{ -+ unsigned long addr = (unsigned long) vaddr & PAGE_MASK; -+ pte_t *kmap_pte = kmap_get_pte(); -+ int idx; ++ unsigned long addr = (unsigned long) vaddr & PAGE_MASK; ++ pte_t *kmap_pte = kmap_get_pte(); ++ int idx; ++ ++ if (addr < __fix_to_virt(FIX_KMAP_END) || ++ addr > __fix_to_virt(FIX_KMAP_BEGIN)) { ++ /* ++ * Handle mappings which were obtained by kmap_high_get() ++ * first as the virtual address of such mappings is below ++ * PAGE_OFFSET. Warn for all other addresses which are in ++ * the user space part of the virtual address space. ++ */ ++ if (!kmap_high_unmap_local(addr)) ++ WARN_ON_ONCE(addr < PAGE_OFFSET); ++ return; ++ } + -+ if (addr < __fix_to_virt(FIX_KMAP_END) || -+ addr > __fix_to_virt(FIX_KMAP_BEGIN)) { -+ /* -+ * Handle mappings which were obtained by kmap_high_get() -+ * first as the virtual address of such mappings is below -+ * PAGE_OFFSET. Warn for all other addresses which are in -+ * the user space part of the virtual address space. 
-+ */ -+ if (!kmap_high_unmap_local(addr)) -+ WARN_ON_ONCE(addr < PAGE_OFFSET); -+ return; -+ } -+ -+ preempt_disable(); -+ idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr); -+ WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); -+ -+ arch_kmap_local_pre_unmap(addr); -+ pte_clear(&init_mm, addr, kmap_pte - idx); -+ arch_kmap_local_post_unmap(addr); -+ current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0); ++ preempt_disable(); ++ idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr); ++ WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); ++ ++ arch_kmap_local_pre_unmap(addr); ++ pte_clear(&init_mm, addr, kmap_pte - idx); ++ arch_kmap_local_post_unmap(addr); ++ current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0); + kmap_local_idx_pop(); -+ preempt_enable(); ++ preempt_enable(); + migrate_enable(); +} +EXPORT_SYMBOL(kunmap_local_indexed); @@ -24064,72 +23987,72 @@ index efe38ab47..16f3ecd4a 100644 + */ +void __kmap_local_sched_out(void) +{ -+ struct task_struct *tsk = current; -+ pte_t *kmap_pte = kmap_get_pte(); -+ int i; ++ struct task_struct *tsk = current; ++ pte_t *kmap_pte = kmap_get_pte(); ++ int i; + -+ /* Clear kmaps */ -+ for (i = 0; i < tsk->kmap_ctrl.idx; i++) { -+ pte_t pteval = tsk->kmap_ctrl.pteval[i]; -+ unsigned long addr; -+ int idx; -+ -+ /* With debug all even slots are unmapped and act as guard */ -+ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) { -+ WARN_ON_ONCE(!pte_none(pteval)); -+ continue; -+ } -+ if (WARN_ON_ONCE(pte_none(pteval))) -+ continue; -+ -+ /* -+ * This is a horrible hack for XTENSA to calculate the -+ * coloured PTE index. Uses the PFN encoded into the pteval -+ * and the map index calculation because the actual mapped -+ * virtual address is not stored in task::kmap_ctrl. -+ * For any sane architecture this is optimized out. -+ */ -+ idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); -+ -+ addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -+ arch_kmap_local_pre_unmap(addr); -+ pte_clear(&init_mm, addr, kmap_pte - idx); -+ arch_kmap_local_post_unmap(addr); -+ } ++ /* Clear kmaps */ ++ for (i = 0; i < tsk->kmap_ctrl.idx; i++) { ++ pte_t pteval = tsk->kmap_ctrl.pteval[i]; ++ unsigned long addr; ++ int idx; ++ ++ /* With debug all even slots are unmapped and act as guard */ ++ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) { ++ WARN_ON_ONCE(!pte_none(pteval)); ++ continue; ++ } ++ if (WARN_ON_ONCE(pte_none(pteval))) ++ continue; ++ ++ /* ++ * This is a horrible hack for XTENSA to calculate the ++ * coloured PTE index. Uses the PFN encoded into the pteval ++ * and the map index calculation because the actual mapped ++ * virtual address is not stored in task::kmap_ctrl. ++ * For any sane architecture this is optimized out. 
++ */ ++ idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); ++ ++ addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ++ arch_kmap_local_pre_unmap(addr); ++ pte_clear(&init_mm, addr, kmap_pte - idx); ++ arch_kmap_local_post_unmap(addr); ++ } +} -+ ++ +void __kmap_local_sched_in(void) +{ -+ struct task_struct *tsk = current; -+ pte_t *kmap_pte = kmap_get_pte(); -+ int i; ++ struct task_struct *tsk = current; ++ pte_t *kmap_pte = kmap_get_pte(); ++ int i; + -+ /* Restore kmaps */ -+ for (i = 0; i < tsk->kmap_ctrl.idx; i++) { -+ pte_t pteval = tsk->kmap_ctrl.pteval[i]; -+ unsigned long addr; -+ int idx; ++ /* Restore kmaps */ ++ for (i = 0; i < tsk->kmap_ctrl.idx; i++) { ++ pte_t pteval = tsk->kmap_ctrl.pteval[i]; ++ unsigned long addr; ++ int idx; ++ ++ /* With debug all even slots are unmapped and act as guard */ ++ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) { ++ WARN_ON_ONCE(!pte_none(pteval)); ++ continue; ++ } ++ if (WARN_ON_ONCE(pte_none(pteval))) ++ continue; + -+ /* With debug all even slots are unmapped and act as guard */ -+ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) { -+ WARN_ON_ONCE(!pte_none(pteval)); -+ continue; -+ } -+ if (WARN_ON_ONCE(pte_none(pteval))) -+ continue; -+ -+ /* See comment in __kmap_local_sched_out() */ -+ idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); -+ addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -+ set_pte_at(&init_mm, addr, kmap_pte - idx, pteval); -+ arch_kmap_local_post_map(addr, pteval); -+ } ++ /* See comment in __kmap_local_sched_out() */ ++ idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); ++ addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ++ set_pte_at(&init_mm, addr, kmap_pte - idx, pteval); ++ arch_kmap_local_post_map(addr, pteval); ++ } +} + +void kmap_local_fork(struct task_struct *tsk) +{ -+ if (WARN_ON_ONCE(tsk->kmap_ctrl.idx)) -+ memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl)); ++ if (WARN_ON_ONCE(tsk->kmap_ctrl.idx)) ++ memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl)); +} + +#endif @@ -24137,7 +24060,7 @@ index efe38ab47..16f3ecd4a 100644 #if defined(HASHED_PAGE_VIRTUAL) diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index ac07a0ffb..bb004f8f5 100644 +index b2c4bc4bb591..8c5d1d0c62dd 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -67,6 +67,7 @@ @@ -24152,18 +24075,18 @@ index ac07a0ffb..bb004f8f5 100644 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq); #endif ++ +struct event_lock { -+ local_lock_t l; ++ local_lock_t l; +}; +static DEFINE_PER_CPU(struct event_lock, event_lock) = { -+ .l = INIT_LOCAL_LOCK(l), ++ .l = INIT_LOCAL_LOCK(l), +}; -+ + /* Whether legacy memory+swap accounting is active */ static bool do_memsw_account(void) { -@@ -755,6 +764,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, +@@ -756,6 +765,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); memcg = pn->memcg; @@ -24171,7 +24094,7 @@ index ac07a0ffb..bb004f8f5 100644 /* Update memcg */ __this_cpu_add(memcg->vmstats_percpu->state[idx], val); -@@ -762,6 +772,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, +@@ -763,6 +773,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val); memcg_rstat_updated(memcg); @@ -24179,7 +24102,7 @@ index ac07a0ffb..bb004f8f5 100644 } /** -@@ -2171,6 +2182,7 @@ void unlock_page_memcg(struct page *page) +@@ -2172,6 +2183,7 @@ void unlock_page_memcg(struct page *page) 
EXPORT_SYMBOL(unlock_page_memcg); struct memcg_stock_pcp { @@ -24187,7 +24110,7 @@ index ac07a0ffb..bb004f8f5 100644 struct mem_cgroup *cached; /* this never be root cgroup */ unsigned int nr_pages; -@@ -2222,7 +2234,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +@@ -2223,7 +2235,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) if (nr_pages > MEMCG_CHARGE_BATCH) return ret; @@ -24196,7 +24119,7 @@ index ac07a0ffb..bb004f8f5 100644 stock = this_cpu_ptr(&memcg_stock); if (memcg == stock->cached && stock->nr_pages >= nr_pages) { -@@ -2230,7 +2242,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +@@ -2231,7 +2243,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) ret = true; } @@ -24205,7 +24128,7 @@ index ac07a0ffb..bb004f8f5 100644 return ret; } -@@ -2265,14 +2277,14 @@ static void drain_local_stock(struct work_struct *dummy) +@@ -2266,14 +2278,14 @@ static void drain_local_stock(struct work_struct *dummy) * The only protection from memory hotplug vs. drain_stock races is * that we always operate on local CPU stock here with IRQ disabled */ @@ -24222,7 +24145,7 @@ index ac07a0ffb..bb004f8f5 100644 } /* -@@ -2284,7 +2296,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +@@ -2285,7 +2297,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) struct memcg_stock_pcp *stock; unsigned long flags; @@ -24231,7 +24154,7 @@ index ac07a0ffb..bb004f8f5 100644 stock = this_cpu_ptr(&memcg_stock); if (stock->cached != memcg) { /* reset if necessary */ -@@ -2297,7 +2309,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +@@ -2298,7 +2310,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) if (stock->nr_pages > MEMCG_CHARGE_BATCH) drain_stock(stock); @@ -24240,7 +24163,7 @@ index ac07a0ffb..bb004f8f5 100644 } /* -@@ -2317,7 +2329,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) +@@ -2318,7 +2330,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) * as well as workers from this path always operate on the local * per-cpu data. CPU up doesn't touch memcg_stock at all. 
*/ @@ -24249,7 +24172,7 @@ index ac07a0ffb..bb004f8f5 100644 for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); struct mem_cgroup *memcg; -@@ -2340,7 +2352,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) +@@ -2341,7 +2353,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) schedule_work_on(cpu, &stock->work); } } @@ -24258,7 +24181,7 @@ index ac07a0ffb..bb004f8f5 100644 mutex_unlock(&percpu_charge_mutex); } -@@ -3131,7 +3143,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +@@ -3143,7 +3155,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) unsigned long flags; bool ret = false; @@ -24267,7 +24190,7 @@ index ac07a0ffb..bb004f8f5 100644 stock = this_cpu_ptr(&memcg_stock); if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { -@@ -3139,7 +3151,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +@@ -3151,7 +3163,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) ret = true; } @@ -24276,7 +24199,7 @@ index ac07a0ffb..bb004f8f5 100644 return ret; } -@@ -3195,7 +3207,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +@@ -3207,7 +3219,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) struct memcg_stock_pcp *stock; unsigned long flags; @@ -24285,7 +24208,7 @@ index ac07a0ffb..bb004f8f5 100644 stock = this_cpu_ptr(&memcg_stock); if (stock->cached_objcg != objcg) { /* reset if necessary */ -@@ -3209,7 +3221,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +@@ -3221,7 +3233,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) if (stock->nr_bytes > PAGE_SIZE) drain_obj_stock(stock); @@ -24294,7 +24217,7 @@ index ac07a0ffb..bb004f8f5 100644 } int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) -@@ -6221,12 +6233,12 @@ static int mem_cgroup_move_account(struct page *page, +@@ -6312,12 +6324,12 @@ static int mem_cgroup_move_account(struct page *page, ret = 0; @@ -24309,7 +24232,7 @@ index ac07a0ffb..bb004f8f5 100644 out_unlock: unlock_page(page); out: -@@ -7203,10 +7215,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) +@@ -7294,10 +7306,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) css_get(&memcg->css); commit_charge(page, memcg); @@ -24322,7 +24245,7 @@ index ac07a0ffb..bb004f8f5 100644 /* * Cgroup1's unified memory+swap counter has been charged with the -@@ -7262,11 +7274,11 @@ static void uncharge_batch(const struct uncharge_gather *ug) +@@ -7353,11 +7365,11 @@ static void uncharge_batch(const struct uncharge_gather *ug) memcg_oom_recover(ug->memcg); } @@ -24336,7 +24259,7 @@ index ac07a0ffb..bb004f8f5 100644 /* drop reference from uncharge_page */ css_put(&ug->memcg->css); -@@ -7438,10 +7450,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) +@@ -7529,10 +7541,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) css_get(&memcg->css); commit_charge(newpage, memcg); @@ -24349,7 +24272,7 @@ index ac07a0ffb..bb004f8f5 100644 } DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); -@@ -7571,9 +7583,13 @@ static int __init mem_cgroup_init(void) +@@ -7662,9 +7674,13 @@ static int __init mem_cgroup_init(void) cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, memcg_hotplug_cpu_dead); @@ -24366,7 +24289,7 @@ index 
ac07a0ffb..bb004f8f5 100644 for_each_node(node) { struct mem_cgroup_tree_per_node *rtpn; -@@ -7624,6 +7640,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) +@@ -7715,6 +7731,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) struct mem_cgroup *memcg, *swap_memcg; unsigned int nr_entries; unsigned short oldid; @@ -24374,7 +24297,7 @@ index ac07a0ffb..bb004f8f5 100644 VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); -@@ -7669,9 +7686,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) +@@ -7760,9 +7777,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) * important here to have the interrupts disabled because it is the * only synchronisation we have for updating the per-CPU variables. */ @@ -24389,7 +24312,7 @@ index ac07a0ffb..bb004f8f5 100644 css_put(&memcg->css); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index eea54e228..b62f61eec 100644 +index d58ddd6e7f73..2ae33a303830 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -61,6 +61,7 @@ @@ -24400,7 +24323,7 @@ index eea54e228..b62f61eec 100644 #include #include #include -@@ -389,6 +390,13 @@ EXPORT_SYMBOL(nr_node_ids); +@@ -393,6 +394,13 @@ EXPORT_SYMBOL(nr_node_ids); EXPORT_SYMBOL(nr_online_nodes); #endif @@ -24414,7 +24337,7 @@ index eea54e228..b62f61eec 100644 int page_group_by_mobility_disabled __read_mostly; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT -@@ -1333,7 +1341,7 @@ static inline void prefetch_buddy(struct page *page) +@@ -1337,7 +1345,7 @@ static inline void prefetch_buddy(struct page *page) } /* @@ -24423,57 +24346,57 @@ index eea54e228..b62f61eec 100644 * Assumes all pages on list are in same zone, and of same order. * count is the number of pages to free. * -@@ -1343,15 +1351,56 @@ static inline void prefetch_buddy(struct page *page) +@@ -1347,15 +1355,56 @@ static inline void prefetch_buddy(struct page *page) * And clear the zone's pages_scanned counter, to hold off the "all pages are * pinned" detection logic. */ -static void free_pcppages_bulk(struct zone *zone, int count, - struct per_cpu_pages *pcp) +static void free_pcppages_bulk(struct zone *zone, struct list_head *head, -+ bool zone_retry) ++ bool zone_retry) +{ -+ bool isolated_pageblocks; -+ struct page *page, *tmp; -+ unsigned long flags; ++ bool isolated_pageblocks; ++ struct page *page, *tmp; ++ unsigned long flags; + -+ spin_lock_irqsave(&zone->lock, flags); -+ isolated_pageblocks = has_isolate_pageblock(zone); ++ spin_lock_irqsave(&zone->lock, flags); ++ isolated_pageblocks = has_isolate_pageblock(zone); ++ ++ /* ++ * Use safe version since after __free_one_page(), ++ * page->lru.next will not point to original list. ++ */ ++ list_for_each_entry_safe(page, tmp, head, lru) { ++ int mt = get_pcppage_migratetype(page); + -+ /* -+ * Use safe version since after __free_one_page(), -+ * page->lru.next will not point to original list. -+ */ -+ list_for_each_entry_safe(page, tmp, head, lru) { -+ int mt = get_pcppage_migratetype(page); -+ -+ if (page_zone(page) != zone) { -+ /* -+ * free_unref_page_list() sorts pages by zone. If we end -+ * up with pages from a different NUMA nodes belonging -+ * to the same ZONE index then we need to redo with the -+ * correct ZONE pointer. Skip the page for now, redo it -+ * on the next iteration. -+ */ -+ WARN_ON_ONCE(zone_retry == false); -+ if (zone_retry) -+ continue; -+ } ++ if (page_zone(page) != zone) { ++ /* ++ * free_unref_page_list() sorts pages by zone. 
If we end ++ * up with pages from a different NUMA nodes belonging ++ * to the same ZONE index then we need to redo with the ++ * correct ZONE pointer. Skip the page for now, redo it ++ * on the next iteration. ++ */ ++ WARN_ON_ONCE(zone_retry == false); ++ if (zone_retry) ++ continue; ++ } + + /* MIGRATE_ISOLATE page should not go to pcplists */ -+ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); -+ /* Pageblock could have been isolated meanwhile */ -+ if (unlikely(isolated_pageblocks)) -+ mt = get_pageblock_migratetype(page); -+ ++ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); ++ /* Pageblock could have been isolated meanwhile */ ++ if (unlikely(isolated_pageblocks)) ++ mt = get_pageblock_migratetype(page); ++ + list_del(&page->lru); + __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE); -+ trace_mm_page_pcpu_drain(page, 0, mt); -+ } -+ spin_unlock_irqrestore(&zone->lock, flags); ++ trace_mm_page_pcpu_drain(page, 0, mt); ++ } ++ spin_unlock_irqrestore(&zone->lock, flags); +} + +static void isolate_pcp_pages(int count, struct per_cpu_pages *pcp, -+ struct list_head *dst) ++ struct list_head *dst) { int migratetype = 0; int batch_free = 0; @@ -24485,7 +24408,7 @@ index eea54e228..b62f61eec 100644 /* * Ensure proper count is passed which otherwise would stuck in the -@@ -1388,7 +1437,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, +@@ -1392,7 +1441,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, if (bulkfree_pcp_prepare(page)) continue; @@ -24494,7 +24417,7 @@ index eea54e228..b62f61eec 100644 /* * We are going to put the page back to the global -@@ -1405,26 +1454,6 @@ static void free_pcppages_bulk(struct zone *zone, int count, +@@ -1409,26 +1458,6 @@ static void free_pcppages_bulk(struct zone *zone, int count, } } while (--count && --batch_free && !list_empty(list)); } @@ -24521,7 +24444,7 @@ index eea54e228..b62f61eec 100644 } static void free_one_page(struct zone *zone, -@@ -1526,11 +1555,11 @@ static void __free_pages_ok(struct page *page, unsigned int order, +@@ -1530,11 +1559,11 @@ static void __free_pages_ok(struct page *page, unsigned int order, return; migratetype = get_pfnblock_migratetype(page, pfn); @@ -24535,7 +24458,7 @@ index eea54e228..b62f61eec 100644 } void __free_pages_core(struct page *page, unsigned int order) -@@ -2941,13 +2970,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) +@@ -2945,13 +2974,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { unsigned long flags; int to_drain, batch; @@ -24557,7 +24480,7 @@ index eea54e228..b62f61eec 100644 } #endif -@@ -2963,14 +2997,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) +@@ -2967,14 +3001,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) unsigned long flags; struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; @@ -24583,7 +24506,7 @@ index eea54e228..b62f61eec 100644 } /* -@@ -3018,9 +3059,9 @@ static void drain_local_pages_wq(struct work_struct *work) +@@ -3022,9 +3063,9 @@ static void drain_local_pages_wq(struct work_struct *work) * cpu which is allright but we also have to make sure to not move to * a different one. 
*/ @@ -24595,7 +24518,7 @@ index eea54e228..b62f61eec 100644 } /* -@@ -3190,7 +3231,8 @@ static bool free_unref_page_prepare(struct page *page, unsigned long pfn) +@@ -3194,7 +3235,8 @@ static bool free_unref_page_prepare(struct page *page, unsigned long pfn) return true; } @@ -24605,30 +24528,26 @@ index eea54e228..b62f61eec 100644 { struct zone *zone = page_zone(page); struct per_cpu_pages *pcp; -@@ -3218,8 +3260,11 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn) - pcp = &this_cpu_ptr(zone->pageset)->pcp; +@@ -3223,7 +3265,8 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn) list_add(&page->lru, &pcp->lists[migratetype]); pcp->count++; -- if (pcp->count >= READ_ONCE(pcp->high)) + if (pcp->count >= READ_ONCE(pcp->high)) - free_pcppages_bulk(zone, READ_ONCE(pcp->batch), pcp); -+ if (pcp->count >= READ_ONCE(pcp->high)) { -+ unsigned long batch = READ_ONCE(pcp->batch); -+ -+ isolate_pcp_pages(batch, pcp, dst); -+ } ++ ++ isolate_pcp_pages(READ_ONCE(pcp->batch), pcp, dst); } /* -@@ -3229,6 +3274,8 @@ void free_unref_page(struct page *page) +@@ -3233,6 +3276,8 @@ void free_unref_page(struct page *page) { unsigned long flags; unsigned long pfn = page_to_pfn(page); -+ struct zone *zone = page_zone(page); -+ LIST_HEAD(dst); ++ struct zone *zone = page_zone(page); ++ LIST_HEAD(dst); /* Free dynamic hugetlb page */ if (free_page_to_dhugetlb_pool(page)) -@@ -3237,9 +3284,11 @@ void free_unref_page(struct page *page) +@@ -3241,9 +3286,11 @@ void free_unref_page(struct page *page) if (!free_unref_page_prepare(page, pfn)) return; @@ -24643,19 +24562,19 @@ index eea54e228..b62f61eec 100644 } /* -@@ -3250,6 +3299,11 @@ void free_unref_page_list(struct list_head *list) +@@ -3254,6 +3301,11 @@ void free_unref_page_list(struct list_head *list) struct page *page, *next; unsigned long flags, pfn; int batch_count = 0; -+ struct list_head dsts[__MAX_NR_ZONES]; -+ int i; ++ struct list_head dsts[__MAX_NR_ZONES]; ++ int i; + -+ for (i = 0; i < __MAX_NR_ZONES; i++) -+ INIT_LIST_HEAD(&dsts[i]); ++ for (i = 0; i < __MAX_NR_ZONES; i++) ++ INIT_LIST_HEAD(&dsts[i]); /* Free dynamic hugetlb page list */ free_page_list_to_dhugetlb_pool(list); -@@ -3262,25 +3316,42 @@ void free_unref_page_list(struct list_head *list) +@@ -3266,25 +3318,42 @@ void free_unref_page_list(struct list_head *list) set_page_private(page, pfn); } @@ -24703,7 +24622,7 @@ index eea54e228..b62f61eec 100644 } /* -@@ -3437,7 +3508,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, +@@ -3441,7 +3510,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, struct page *page; unsigned long flags; @@ -24712,7 +24631,7 @@ index eea54e228..b62f61eec 100644 pcp = &this_cpu_ptr(zone->pageset)->pcp; list = &pcp->lists[migratetype]; page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list); -@@ -3445,7 +3516,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, +@@ -3449,7 +3518,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, __count_zid_vm_events(PGALLOC, page_zonenum(page), 1); zone_statistics(preferred_zone, zone); } @@ -24721,7 +24640,7 @@ index eea54e228..b62f61eec 100644 return page; } -@@ -3479,7 +3550,8 @@ struct page *rmqueue(struct zone *preferred_zone, +@@ -3483,7 +3552,8 @@ struct page *rmqueue(struct zone *preferred_zone, * allocate greater than order-1 page units with __GFP_NOFAIL. 
*/ WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); @@ -24731,7 +24650,7 @@ index eea54e228..b62f61eec 100644 do { page = NULL; -@@ -3505,7 +3577,7 @@ struct page *rmqueue(struct zone *preferred_zone, +@@ -3509,7 +3579,7 @@ struct page *rmqueue(struct zone *preferred_zone, __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); zone_statistics(preferred_zone, zone); @@ -24740,7 +24659,7 @@ index eea54e228..b62f61eec 100644 out: /* Separate test+clear to avoid unnecessary atomics */ -@@ -3518,7 +3590,7 @@ struct page *rmqueue(struct zone *preferred_zone, +@@ -3522,7 +3592,7 @@ struct page *rmqueue(struct zone *preferred_zone, return page; failed: @@ -24749,7 +24668,7 @@ index eea54e228..b62f61eec 100644 return NULL; } -@@ -9085,7 +9157,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -9288,7 +9358,7 @@ void zone_pcp_reset(struct zone *zone) struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ @@ -24758,7 +24677,7 @@ index eea54e228..b62f61eec 100644 if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); -@@ -9094,7 +9166,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -9297,7 +9367,7 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } @@ -24768,7 +24687,7 @@ index eea54e228..b62f61eec 100644 #ifdef CONFIG_MEMORY_HOTREMOVE diff --git a/mm/shmem.c b/mm/shmem.c -index ad2d68150..f0b47bec1 100644 +index f7caf1dec81c..0ef372e7d126 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -307,10 +307,10 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop) @@ -24822,7 +24741,7 @@ index ad2d68150..f0b47bec1 100644 } } -@@ -1469,10 +1470,10 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) +@@ -1471,10 +1472,10 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) { struct mempolicy *mpol = NULL; if (sbinfo->mpol) { @@ -24835,11 +24754,11 @@ index ad2d68150..f0b47bec1 100644 } return mpol; } -@@ -3549,9 +3550,10 @@ static int shmem_reconfigure(struct fs_context *fc) +@@ -3575,9 +3576,10 @@ static int shmem_reconfigure(struct fs_context *fc) struct shmem_options *ctx = fc->fs_private; struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); unsigned long inodes; -+ struct mempolicy *mpol = NULL; ++ struct mempolicy *mpol = NULL; const char *err; - spin_lock(&sbinfo->stat_lock); @@ -24847,7 +24766,7 @@ index ad2d68150..f0b47bec1 100644 inodes = sbinfo->max_inodes - sbinfo->free_inodes; if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { -@@ -3597,14 +3599,15 @@ static int shmem_reconfigure(struct fs_context *fc) +@@ -3623,14 +3625,15 @@ static int shmem_reconfigure(struct fs_context *fc) * Preserve previous mempolicy unless mpol remount option was specified. 
*/ if (ctx->mpol) { @@ -24866,7 +24785,7 @@ index ad2d68150..f0b47bec1 100644 return invalfc(fc, "%s", err); } -@@ -3721,7 +3724,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) +@@ -3747,7 +3750,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) sbinfo->mpol = ctx->mpol; ctx->mpol = NULL; @@ -24876,7 +24795,7 @@ index ad2d68150..f0b47bec1 100644 goto failed; spin_lock_init(&sbinfo->shrinklist_lock); diff --git a/mm/slab.c b/mm/slab.c -index ae84578f3..a65a5f169 100644 +index ae84578f3fde..a65a5f169fd5 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -234,7 +234,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent) @@ -25227,7 +25146,7 @@ index ae84578f3..a65a5f169 100644 num_objs = total_slabs * cachep->num; active_slabs = total_slabs - free_slabs; diff --git a/mm/slab.h b/mm/slab.h -index 8414c3451..d937f8673 100644 +index 8414c345127b..d937f8673193 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -523,7 +523,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, @@ -25240,7 +25159,7 @@ index 8414c3451..d937f8673 100644 #ifdef CONFIG_SLAB struct list_head slabs_partial; /* partial list first, better asm code */ diff --git a/mm/slub.c b/mm/slub.c -index ad44734db..d558074fe 100644 +index ad44734dbf72..84a55c8bb3d6 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -458,7 +458,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, @@ -25680,12 +25599,12 @@ index ad44734db..d558074fe 100644 static __initdata struct kmem_cache boot_kmem_cache, boot_kmem_cache_node; int node; -+ int cpu; ++ int cpu; + -+ for_each_possible_cpu(cpu) { -+ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); -+ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); -+ } ++ for_each_possible_cpu(cpu) { ++ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); ++ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); ++ } if (debug_guardpage_minorder()) slub_max_order = 0; @@ -25733,10 +25652,10 @@ index ad44734db..d558074fe 100644 for (i = 0; i < t.count; i++) { diff --git a/mm/vmalloc.c b/mm/vmalloc.c -index dadbea292..dd7da773b 100644 +index d7a68eb0db42..ebe4de5f90d9 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c -@@ -1886,7 +1886,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) +@@ -1887,7 +1887,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) struct vmap_block *vb; struct vmap_area *va; unsigned long vb_idx; @@ -25745,7 +25664,7 @@ index dadbea292..dd7da773b 100644 void *vaddr; node = numa_node_id(); -@@ -1923,11 +1923,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) +@@ -1924,11 +1924,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) return ERR_PTR(err); } @@ -25760,7 +25679,7 @@ index dadbea292..dd7da773b 100644 return vaddr; } -@@ -1992,6 +1993,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) +@@ -1993,6 +1994,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) struct vmap_block *vb; void *vaddr = NULL; unsigned int order; @@ -25768,7 +25687,7 @@ index dadbea292..dd7da773b 100644 BUG_ON(offset_in_page(size)); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); -@@ -2006,7 +2008,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) +@@ -2007,7 +2009,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) order = get_order(size); rcu_read_lock(); @@ -25778,7 +25697,7 @@ index dadbea292..dd7da773b 100644 list_for_each_entry_rcu(vb, &vbq->free, free_list) { unsigned long pages_off; -@@ -2029,7 +2032,7 @@ static 
void *vb_alloc(unsigned long size, gfp_t gfp_mask) +@@ -2030,7 +2033,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) break; } @@ -25788,7 +25707,7 @@ index dadbea292..dd7da773b 100644 /* Allocate new block if nothing was found */ diff --git a/mm/vmstat.c b/mm/vmstat.c -index 05433f663..afb32defb 100644 +index 05433f66376b..afb32defb498 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -321,6 +321,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, @@ -25888,7 +25807,7 @@ index 05433f663..afb32defb 100644 void __dec_zone_page_state(struct page *page, enum zone_stat_item item) diff --git a/mm/workingset.c b/mm/workingset.c -index 4a30e4a81..4c92584ca 100644 +index 4a30e4a813a5..4c92584ca34d 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -431,6 +431,8 @@ static struct list_lru shadow_nodes; @@ -25911,7 +25830,7 @@ index 4a30e4a81..4c92584ca 100644 if (node->count && node->count == node->nr_values) { if (list_empty(&node->private_list)) { diff --git a/mm/z3fold.c b/mm/z3fold.c -index f75c638c6..6fdf4774f 100644 +index f75c638c69db..6fdf4774fdbe 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -623,14 +623,16 @@ static inline void add_to_unbuddied(struct z3fold_pool *pool, @@ -25973,7 +25892,7 @@ index f75c638c6..6fdf4774f 100644 if (!zhdr) { int cpu; diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c -index c18dc8e61..16ce2b05d 100644 +index c18dc8e61d35..16ce2b05df90 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -57,6 +57,7 @@ @@ -26163,20 +26082,8 @@ index c18dc8e61..16ce2b05d 100644 migrate_read_unlock(zspage); unpin_tag(handle); -diff --git a/mm/zswap.c b/mm/zswap.c -index 030254e04..f848f93a1 100644 ---- a/mm/zswap.c -+++ b/mm/zswap.c -@@ -18,6 +18,7 @@ - #include - #include - #include -+#include - #include - #include - #include diff --git a/net/Kconfig b/net/Kconfig -index d6567162c..05b0f041f 100644 +index d6567162c1cf..05b0f041f039 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -282,7 +282,7 @@ config CGROUP_NET_CLASSID @@ -26189,7 +26096,7 @@ index d6567162c..05b0f041f 100644 config BQL bool diff --git a/net/core/dev.c b/net/core/dev.c -index ee0b40568..2ca7feed0 100644 +index 64837bb056ee..dee3d89c80e6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -221,14 +221,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) @@ -26309,7 +26216,7 @@ index ee0b40568..2ca7feed0 100644 } EXPORT_SYMBOL(__napi_schedule); -@@ -10992,6 +11000,7 @@ static int dev_cpu_dead(unsigned int oldcpu) +@@ -10978,6 +10986,7 @@ static int dev_cpu_dead(unsigned int oldcpu) raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); @@ -26317,7 +26224,7 @@ index ee0b40568..2ca7feed0 100644 #ifdef CONFIG_RPS remsd = oldsd->rps_ipi_list; -@@ -11005,7 +11014,7 @@ static int dev_cpu_dead(unsigned int oldcpu) +@@ -10991,7 +11000,7 @@ static int dev_cpu_dead(unsigned int oldcpu) netif_rx_ni(skb); input_queue_head_incr(oldsd); } @@ -26326,7 +26233,7 @@ index ee0b40568..2ca7feed0 100644 netif_rx_ni(skb); input_queue_head_incr(oldsd); } -@@ -11321,7 +11330,7 @@ static int __init net_dev_init(void) +@@ -11307,7 +11316,7 @@ static int __init net_dev_init(void) INIT_WORK(flush, flush_backlog); @@ -26336,7 +26243,7 @@ index ee0b40568..2ca7feed0 100644 #ifdef CONFIG_XFRM_OFFLOAD skb_queue_head_init(&sd->xfrm_backlog); diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c -index 8e582e29a..e51f4854d 100644 +index 8e582e29a41e..e51f4854d8b2 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -42,7 +42,7 @@ @@ -26367,7 +26274,7 @@ 
index 8e582e29a..e51f4854d 100644 return gen_new_estimator(bstats, cpu_bstats, rate_est, lock, running, opt); diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c -index e491b083b..ef432cea2 100644 +index e491b083b348..ef432cea2e10 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -137,7 +137,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats, @@ -26417,10 +26324,10 @@ index e491b083b..ef432cea2 100644 struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b) diff --git a/net/core/sock.c b/net/core/sock.c -index 3f49f1117..3ad09374f 100644 +index 56a927b9b372..28acdfbcdd22 100644 --- a/net/core/sock.c +++ b/net/core/sock.c -@@ -3057,12 +3057,11 @@ void lock_sock_nested(struct sock *sk, int subclass) +@@ -3059,12 +3059,11 @@ void lock_sock_nested(struct sock *sk, int subclass) if (sk->sk_lock.owned) __lock_sock(sk); sk->sk_lock.owned = 1; @@ -26434,7 +26341,7 @@ index 3f49f1117..3ad09374f 100644 } EXPORT_SYMBOL(lock_sock_nested); -@@ -3111,12 +3110,11 @@ bool lock_sock_fast(struct sock *sk) +@@ -3113,12 +3112,11 @@ bool lock_sock_fast(struct sock *sk) __lock_sock(sk); sk->sk_lock.owned = 1; @@ -26449,7 +26356,7 @@ index 3f49f1117..3ad09374f 100644 } EXPORT_SYMBOL(lock_sock_fast); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c -index 6e18aa417..2d538f14e 100644 +index 6e18aa417782..2d538f14edf8 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1264,7 +1264,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, @@ -26462,7 +26369,7 @@ index 6e18aa417..2d538f14e 100644 err = -EOPNOTSUPP; if (sch->flags & TCQ_F_MQROOT) { diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c -index 5d5391adb..8fe206c7b 100644 +index 68f1e89430b3..455f9b21b264 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -578,7 +578,11 @@ struct Qdisc noop_qdisc = { @@ -26494,7 +26401,7 @@ index 5d5391adb..8fe206c7b 100644 sch->ops = ops; sch->flags = ops->static_flags; diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c -index 362487f3a..5c6c31fc7 100644 +index 362487f3a6de..5c6c31fc7890 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -422,7 +422,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt) @@ -26516,10 +26423,10 @@ index 362487f3a..5c6c31fc7 100644 } EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c -index a6a4838d6..7893ec004 100644 +index ac2f1a7330c9..84f421e6b12c 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c -@@ -2672,7 +2672,8 @@ int __net_init xfrm_state_init(struct net *net) +@@ -2673,7 +2673,8 @@ int __net_init xfrm_state_init(struct net *net) net->xfrm.state_num = 0; INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize); spin_lock_init(&net->xfrm.xfrm_state_lock); @@ -26530,5 +26437,5 @@ index a6a4838d6..7893ec004 100644 out_byspi: -- -2.33.0 +2.36.1 diff --git a/0001-modify-openeuler_defconfig-for-rt62.patch b/0001-modify-openeuler_defconfig-for-rt62.patch index 319be60..2156deb 100644 --- a/0001-modify-openeuler_defconfig-for-rt62.patch +++ b/0001-modify-openeuler_defconfig-for-rt62.patch @@ -1,7 +1,7 @@ -From 430ab8c58ebcf6305676db654c0711f9cf0e1d94 Mon Sep 17 00:00:00 2001 -From: ryan -Date: Tue, 29 Nov 2022 15:23:28 +0800 -Subject: [PATCH 2/2] modify-openeuler_defconfig-for-rt62 +From 0026e130f88770f45b00f52ba1374dd90b8be0f0 Mon Sep 17 00:00:00 2001 +From: liyulei +Date: Mon, 6 Feb 2023 18:04:41 +0800 +Subject: [PATCH 2/2] modify openeuler_defconfig for rt62 --- 
arch/arm64/configs/openeuler_defconfig | 5 +++-- @@ -13,10 +13,10 @@ Subject: [PATCH 2/2] modify-openeuler_defconfig-for-rt62 6 files changed, 24 insertions(+), 12 deletions(-) diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig -index a96c3ae85a19..ad870eabf021 100644 +index 30384eacad4f..b0ed4a9b3058 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig -@@ -73,6 +73,7 @@ CONFIG_HIGH_RES_TIMERS=y +@@ -74,6 +74,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set @@ -24,7 +24,7 @@ index a96c3ae85a19..ad870eabf021 100644 # # CPU/Task time and stats accounting -@@ -729,7 +730,7 @@ CONFIG_ACPI_MPAM=y +@@ -733,7 +734,7 @@ CONFIG_ACPI_MPAM=y CONFIG_ACPI_PPTT=y # CONFIG_PMIC_OPREGION is not set CONFIG_IRQ_BYPASS_MANAGER=y @@ -33,7 +33,7 @@ index a96c3ae85a19..ad870eabf021 100644 CONFIG_KVM=y CONFIG_HAVE_KVM_IRQCHIP=y CONFIG_HAVE_KVM_IRQFD=y -@@ -1116,7 +1117,7 @@ CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +@@ -1125,7 +1126,7 @@ CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y # CONFIG_GUP_BENCHMARK is not set # CONFIG_READ_ONLY_THP_FOR_FS is not set CONFIG_ARCH_HAS_PTE_SPECIAL=y @@ -41,7 +41,7 @@ index a96c3ae85a19..ad870eabf021 100644 +# CONFIG_PIN_MEMORY is not set CONFIG_PID_RESERVE=y CONFIG_MEMORY_RELIABLE=y - + # CONFIG_CLEAR_FREELIST_PAGE is not set diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 84520f11667d..aa631771e0dc 100644 --- a/arch/arm64/kernel/fpsimd.c @@ -65,10 +65,10 @@ index 84520f11667d..aa631771e0dc 100644 static bool have_cpu_fpsimd_context(void) diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig -index e5a68009ed20..8a8b42b4cb59 100644 +index 5ada612f1d75..685a49ca1fef 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig -@@ -88,9 +88,10 @@ CONFIG_NO_HZ=y +@@ -89,9 +89,10 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y # end of Timers subsystem @@ -80,7 +80,7 @@ index e5a68009ed20..8a8b42b4cb59 100644 # # CPU/Task time and stats accounting -@@ -205,7 +206,7 @@ CONFIG_HAVE_UID16=y +@@ -206,7 +207,7 @@ CONFIG_HAVE_UID16=y CONFIG_SYSCTL_EXCEPTION_TRACE=y CONFIG_HAVE_PCSPKR_PLATFORM=y CONFIG_BPF=y @@ -89,7 +89,7 @@ index e5a68009ed20..8a8b42b4cb59 100644 CONFIG_UID16=y CONFIG_MULTIUSER=y CONFIG_SGETMASK_SYSCALL=y -@@ -725,7 +726,7 @@ CONFIG_KVM_COMPAT=y +@@ -735,7 +736,7 @@ CONFIG_KVM_COMPAT=y CONFIG_HAVE_KVM_IRQ_BYPASS=y CONFIG_HAVE_KVM_NO_POLL=y CONFIG_KVM_XFER_TO_GUEST_WORK=y @@ -158,5 +158,5 @@ index e95b00f24c75..59bd804e662a 100644 { // if (raw_spin_is_locked(&logbuf_lock)) { -- -2.25.1 +2.36.1 diff --git a/kernel-rt.spec b/kernel-rt.spec index def68c5..c4d116c 100644 --- a/kernel-rt.spec +++ b/kernel-rt.spec @@ -10,9 +10,9 @@ %global upstream_version 5.10 %global upstream_sublevel 0 -%global devel_release 129 +%global devel_release 141 %global maintenance_release .0.0 -%global pkg_release .55 +%global pkg_release .56 %global rt_release .rt62 %define with_debuginfo 1 @@ -71,7 +71,7 @@ Source9002: series.conf Source9998: patches.tar.bz2 %endif -Patch0: 0000-kernel-5.10.0-129.0.0-rt62.patch +Patch0: 0001-apply-preempt-RT-patch.patch Patch1: 0001-modify-openeuler_defconfig-for-rt62.patch #BuildRequires: @@ -886,6 +886,9 @@ fi %endif %changelog +* Mon Feb 06 2023 kylin-liyulei - 5.10.0-141.0.0.56 +- add kernel-rt.spec and rt patches + * Mon Nov 28 2022 kylin-liyulei - 5.10.0-129.0.0.55 - add kernel-rt.spec and rt patches
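-- 
A note on the two patterns that dominate the mm/ hunks in the RT patch
above (this trailer sits after the final hunk, in the position format-patch
tools ignore). The sketch below is illustrative only: it assumes the
v5.10-rt local_lock API, and the event_stat names are invented, modelled
on the event_lock hunk in mm/memcontrol.c; nothing in it is part of the
patch itself.

First, per-CPU critical sections. The hunks in mm/memcontrol.c
(memcg_stock, event_lock), mm/page_alloc.c (pa_lock), mm/vmstat.c and
mm/zsmalloc.c replace open-coded local_irq_save()/preempt_disable()
sections with a named per-CPU local_lock_t. On !PREEMPT_RT the lock
compiles down to the same IRQ/preempt disabling, so nothing changes; on
PREEMPT_RT it becomes a per-CPU sleeping lock, which keeps the section
preemptible and gives lockdep a real lock to track.

/*
 * Minimal sketch of the local_lock pattern, assuming the v5.10-rt API.
 * struct event_stat and event_stat_inc() are hypothetical names.
 */
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct event_stat {
	local_lock_t lock;		/* protects this CPU's fields */
	unsigned long count;
};

static DEFINE_PER_CPU(struct event_stat, event_stat) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void event_stat_inc(void)
{
	unsigned long flags;

	/*
	 * On !PREEMPT_RT this is local_irq_save(); on PREEMPT_RT it
	 * takes a per-CPU sleeping lock, so the section stays
	 * preemptible but still serialises against this CPU's users.
	 */
	local_lock_irqsave(&event_stat.lock, flags);
	__this_cpu_inc(event_stat.count);
	local_unlock_irqrestore(&event_stat.lock, flags);
}

Second, migratable local kmaps. The mm/highmem.c hunks pair
migrate_disable() with the fixmap-based mapping in
__kmap_local_pfn_prot(): preemption is re-enabled once the PTE is
installed, but migration stays disabled until kunmap_local_indexed(), so
a preempted task can never resume on a CPU whose fixmap slots differ.
Because a preempted task can now hold live local kmaps, the saved PTEs
live in task_struct (kmap_ctrl.pteval[]), and the
__kmap_local_sched_out()/__kmap_local_sched_in() hooks tear the slots
down and restore them across context switches.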