edgeos: Bump kernel to 5.15.67 and RT kernel patch to 5.15.65-rt49
BUG=b/245753521
BUG=b/245752740
BUG=b/245752319
TEST=cusky
RELEASE_NOTE=none
Change-Id: I8211bf69854806c7f3c1bd647b4130920d87da65
Reviewed-on: https://cos-review.googlesource.com/c/cos/overlays/board-overlays/+/36967
Tested-by: Cusky Presubmit Bot <presubmit@cos-infra-prod.iam.gserviceaccount.com>
Reviewed-by: Robert Kolchmeyer <rkolchmeyer@google.com>
Reviewed-by: Ian Coolidge <icoolidge@google.com>
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-5_15/lakitu-kernel-5_15-5.15.61-r7.ebuild b/project-edgeos/sys-kernel/lakitu-kernel-5_15/lakitu-kernel-5_15-5.15.61-r7.ebuild
deleted file mode 120000
index 9ec104f..0000000
--- a/project-edgeos/sys-kernel/lakitu-kernel-5_15/lakitu-kernel-5_15-5.15.61-r7.ebuild
+++ /dev/null
@@ -1 +0,0 @@
-lakitu-kernel-5_15-5.15.61.ebuild
\ No newline at end of file
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-5_15/lakitu-kernel-5_15-5.15.67-r8.ebuild b/project-edgeos/sys-kernel/lakitu-kernel-5_15/lakitu-kernel-5_15-5.15.67-r8.ebuild
new file mode 120000
index 0000000..b0f8f51
--- /dev/null
+++ b/project-edgeos/sys-kernel/lakitu-kernel-5_15/lakitu-kernel-5_15-5.15.67-r8.ebuild
@@ -0,0 +1 @@
+lakitu-kernel-5_15-5.15.67.ebuild
\ No newline at end of file
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-5_15/lakitu-kernel-5_15-5.15.61.ebuild b/project-edgeos/sys-kernel/lakitu-kernel-5_15/lakitu-kernel-5_15-5.15.67.ebuild
similarity index 93%
rename from project-edgeos/sys-kernel/lakitu-kernel-5_15/lakitu-kernel-5_15-5.15.61.ebuild
rename to project-edgeos/sys-kernel/lakitu-kernel-5_15/lakitu-kernel-5_15-5.15.67.ebuild
index 8e3083e..0eb903d 100644
--- a/project-edgeos/sys-kernel/lakitu-kernel-5_15/lakitu-kernel-5_15-5.15.61.ebuild
+++ b/project-edgeos/sys-kernel/lakitu-kernel-5_15/lakitu-kernel-5_15-5.15.67.ebuild
@@ -14,8 +14,8 @@
EAPI=6
-CROS_WORKON_COMMIT="9c99910a6cace26c9d6d1c3c20c8bb500e26ac45"
-CROS_WORKON_TREE="aab3206ebb0cd90a433a3408ce79631485425abc"
+CROS_WORKON_COMMIT="0bf6895076f079a4abf8e739c63be92bb2121a20"
+CROS_WORKON_TREE="4659ae0990588190bf5bbfd36c3bfe68f1374249"
CROS_WORKON_REPO="https://cos.googlesource.com"
CROS_WORKON_PROJECT="third_party/kernel"
CROS_WORKON_LOCALNAME="kernel/v5.15"
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/files/0000-patch-5.15.55-rt48.patch b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/files/0000-patch-5.15.65-rt49.patch
similarity index 89%
rename from project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/files/0000-patch-5.15.55-rt48.patch
rename to project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/files/0000-patch-5.15.65-rt49.patch
index de0dcda..26274d4 100644
--- a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/files/0000-patch-5.15.55-rt48.patch
+++ b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/files/0000-patch-5.15.65-rt49.patch
@@ -196,20 +196,6 @@
schedule();
} else {
if (unlikely(!user_mode(regs)))
-diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
-index 842427ff2b3c..b943e2df9540 100644
---- a/arch/arm/kernel/smp.c
-+++ b/arch/arm/kernel/smp.c
-@@ -667,9 +667,7 @@ static void do_handle_IPI(int ipinr)
- break;
-
- case IPI_CPU_BACKTRACE:
-- printk_deferred_enter();
- nmi_cpu_backtrace(get_irq_regs());
-- printk_deferred_exit();
- break;
-
- default:
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index efa402025031..59487ee9fd61 100644
--- a/arch/arm/mm/fault.c
@@ -235,7 +221,7 @@
return 0;
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index 8b6f090e0364..784c90ba371e 100644
+index 9d80c783142f..fffa284eda00 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -88,6 +88,7 @@ config ARM64
@@ -490,10 +476,10 @@
uprobe_notify_resume(regs);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
-index f181527f9d43..0a9a75c236da 100644
+index 4cb265e15361..fd602025e913 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
-@@ -829,7 +829,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+@@ -828,7 +828,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -502,7 +488,7 @@
kvm_pmu_flush_hwstate(vcpu);
-@@ -853,7 +853,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+@@ -852,7 +852,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_timer_sync_user(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
@@ -511,7 +497,7 @@
continue;
}
-@@ -922,7 +922,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+@@ -921,7 +921,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, ret);
@@ -818,20 +804,6 @@
IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
-diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
-index 22ceeeb705ab..d5359701f787 100644
---- a/arch/powerpc/kexec/crash.c
-+++ b/arch/powerpc/kexec/crash.c
-@@ -312,9 +312,6 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
- unsigned int i;
- int (*old_handler)(struct pt_regs *regs);
-
-- /* Avoid hardlocking with irresponsive CPU holding logbuf_lock */
-- printk_deferred_enter();
--
- /*
- * This function is only called after the system
- * has panicked or is otherwise in a critical state.
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index ff581d70f20c..e5c84d55bdfb 100644
--- a/arch/powerpc/kvm/Kconfig
@@ -1019,7 +991,7 @@
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 1d0f16b53393..ecf7aed3ba65 100644
+index 57f5e881791a..e8a4870f8686 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -107,6 +107,7 @@ config X86
@@ -1200,47 +1172,6 @@
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
-diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
-index 722fd712e1cf..82cc3a7be6bd 100644
---- a/arch/x86/kernel/dumpstack_32.c
-+++ b/arch/x86/kernel/dumpstack_32.c
-@@ -141,7 +141,7 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
- */
- if (visit_mask) {
- if (*visit_mask & (1UL << info->type)) {
-- printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
-+ pr_warn_once("WARNING: stack recursion on stack type %d\n", info->type);
- goto unknown;
- }
- *visit_mask |= 1UL << info->type;
-diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
-index 6c5defd6569a..5f725b0ceb29 100644
---- a/arch/x86/kernel/dumpstack_64.c
-+++ b/arch/x86/kernel/dumpstack_64.c
-@@ -207,7 +207,8 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
- if (visit_mask) {
- if (*visit_mask & (1UL << info->type)) {
- if (task == current)
-- printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
-+ pr_warn_once("WARNING: stack recursion on stack type %d\n",
-+ info->type);
- goto unknown;
- }
- *visit_mask |= 1UL << info->type;
-diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
-index 15aefa3f3e18..52af9a89ad47 100644
---- a/arch/x86/kernel/i8259.c
-+++ b/arch/x86/kernel/i8259.c
-@@ -207,8 +207,7 @@ static void mask_and_ack_8259A(struct irq_data *data)
- * lets ACK and report it. [once per IRQ]
- */
- if (!(spurious_irq_mask & irqmask)) {
-- printk_deferred(KERN_DEBUG
-- "spurious 8259A interrupt: IRQ%d.\n", irq);
-+ printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
- spurious_irq_mask |= irqmask;
- }
- atomic_inc(&irq_err_count);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 044902d5a3c4..e5dd6da78713 100644
--- a/arch/x86/kernel/irq_32.c
@@ -1281,73 +1212,11 @@
return NMI_HANDLED;
}
-diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
-index d7c44b257f7f..2d0361cd304f 100644
---- a/arch/x86/kernel/unwind_frame.c
-+++ b/arch/x86/kernel/unwind_frame.c
-@@ -41,9 +41,9 @@ static void unwind_dump(struct unwind_state *state)
-
- dumped_before = true;
-
-- printk_deferred("unwind stack type:%d next_sp:%p mask:0x%lx graph_idx:%d\n",
-- state->stack_info.type, state->stack_info.next_sp,
-- state->stack_mask, state->graph_idx);
-+ printk("unwind stack type:%d next_sp:%p mask:0x%lx graph_idx:%d\n",
-+ state->stack_info.type, state->stack_info.next_sp,
-+ state->stack_mask, state->graph_idx);
-
- for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp;
- sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
-@@ -59,13 +59,11 @@ static void unwind_dump(struct unwind_state *state)
-
- if (zero) {
- if (!prev_zero)
-- printk_deferred("%p: %0*x ...\n",
-- sp, BITS_PER_LONG/4, 0);
-+ printk("%p: %0*x ...\n", sp, BITS_PER_LONG/4, 0);
- continue;
- }
-
-- printk_deferred("%p: %0*lx (%pB)\n",
-- sp, BITS_PER_LONG/4, word, (void *)word);
-+ printk("%p: %0*lx (%pB)\n", sp, BITS_PER_LONG/4, word, (void *)word);
- }
- }
- }
-@@ -342,13 +340,13 @@ bool unwind_next_frame(struct unwind_state *state)
- goto the_end;
-
- if (state->regs) {
-- printk_deferred_once(KERN_WARNING
-+ pr_warn_once(
- "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
- state->regs, state->task->comm,
- state->task->pid, next_bp);
- unwind_dump(state);
- } else {
-- printk_deferred_once(KERN_WARNING
-+ pr_warn_once(
- "WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n",
- state->bp, state->task->comm,
- state->task->pid, next_bp);
-diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
-index a1202536fc57..a26a7c3849f5 100644
---- a/arch/x86/kernel/unwind_orc.c
-+++ b/arch/x86/kernel/unwind_orc.c
-@@ -9,7 +9,7 @@
- #include <asm/orc_lookup.h>
-
- #define orc_warn(fmt, ...) \
-- printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)
-+ pr_warn_once("WARNING: " fmt, ##__VA_ARGS__)
-
- #define orc_warn_current(args...) \
- ({ \
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 8974884ef2ad..ae033707f278 100644
+index f5b7a05530eb..d401a2f42c44 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -8550,6 +8550,14 @@ int kvm_arch_init(void *opaque)
+@@ -8571,6 +8571,14 @@ int kvm_arch_init(void *opaque)
goto out;
}
@@ -1376,10 +1245,10 @@
#endif
diff --git a/block/blk-mq.c b/block/blk-mq.c
-index 95993c4efa49..2f173fea818c 100644
+index 1a28ba9017ed..fa59e8650f54 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -1565,14 +1565,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+@@ -1566,14 +1566,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
@@ -1398,7 +1267,7 @@
kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
-index 70f69f0910c9..58eee8eab4bf 100644
+index 163a1283a866..444183fe847d 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1061,14 +1061,14 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
@@ -1491,19 +1360,6 @@
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
ktime_t ac_time;
#endif
-diff --git a/drivers/char/random.c b/drivers/char/random.c
-index 7bd6eb15d432..68e94c265ada 100644
---- a/drivers/char/random.c
-+++ b/drivers/char/random.c
-@@ -183,7 +183,7 @@ static void __cold process_random_ready_list(void)
-
- #define warn_unseeded_randomness() \
- if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
-- printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
-+ pr_notice("random: %s called from %pS with crng_init=%d\n", \
- __func__, (void *)_RET_IP_, crng_init)
-
-
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index d3f2e5364c27..9c4a99757afd 100644
--- a/drivers/char/tpm/tpm_tis.c
@@ -1925,7 +1781,7 @@
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
-index cfbef70e8ba7..cded25be1f55 100644
+index 8fb065caf30b..c232535ca8f4 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1422,7 +1422,7 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr)
@@ -1950,7 +1806,7 @@
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index b58984ddca13..3f187b4e8f23 100644
+index 19e497a7e747..55db7f2a65e2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2217,8 +2217,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
@@ -2182,10 +2038,10 @@
dev_err(dev, "failed to invoke irq handler\n");
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
-index 6473361525d1..2321d02e9b7a 100644
+index b3abc29aa927..c4843be91417 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
-@@ -132,12 +132,55 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
+@@ -156,12 +156,55 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
up->dl_write(up, value);
}
@@ -2242,7 +2098,7 @@
return true;
}
-@@ -146,7 +189,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
+@@ -170,7 +213,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
if (!(up->ier & UART_IER_THRI))
return false;
up->ier &= ~UART_IER_THRI;
@@ -2252,10 +2108,10 @@
}
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
-index 1ce193daea7f..fad00c0414e3 100644
+index 30b7890645ac..b1883f906f7a 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
-@@ -264,10 +264,8 @@ static void serial8250_backup_timeout(struct timer_list *t)
+@@ -265,10 +265,8 @@ static void serial8250_backup_timeout(struct timer_list *t)
* Must disable interrupts or else we risk racing with the interrupt
* based handler.
*/
@@ -2268,7 +2124,7 @@
iir = serial_in(up, UART_IIR);
-@@ -290,7 +288,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
+@@ -291,7 +289,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
serial8250_tx_chars(up);
if (up->port.irq)
@@ -2277,7 +2133,7 @@
spin_unlock_irqrestore(&up->port.lock, flags);
-@@ -568,6 +566,14 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
+@@ -572,6 +570,14 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -2292,7 +2148,7 @@
static void univ8250_console_write(struct console *co, const char *s,
unsigned int count)
{
-@@ -661,6 +667,7 @@ static int univ8250_console_match(struct console *co, char *name, int idx,
+@@ -665,6 +671,7 @@ static int univ8250_console_match(struct console *co, char *name, int idx,
static struct console univ8250_console = {
.name = "ttyS",
@@ -2301,7 +2157,7 @@
.device = uart_console_device,
.setup = univ8250_console_setup,
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
-index fc65a2293ce9..19a92530040f 100644
+index af74f82ad782..0bab91ea75c6 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -60,9 +60,18 @@ int fsl8250_handle_irq(struct uart_port *port)
@@ -2394,10 +2250,10 @@
static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
-index df9731f73746..363888c2678b 100644
+index a5496bd1b650..ff8f9c465d9e 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -770,7 +770,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
+@@ -749,7 +749,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
serial_out(p, UART_EFR, UART_EFR_ECB);
serial_out(p, UART_LCR, 0);
}
@@ -2406,7 +2262,7 @@
if (p->capabilities & UART_CAP_EFR) {
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(p, UART_EFR, efr);
-@@ -1444,7 +1444,7 @@ static void serial8250_stop_rx(struct uart_port *port)
+@@ -1423,7 +1423,7 @@ static void serial8250_stop_rx(struct uart_port *port)
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
up->port.read_status_mask &= ~UART_LSR_DR;
@@ -2415,7 +2271,7 @@
serial8250_rpm_put(up);
}
-@@ -1474,7 +1474,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p)
+@@ -1453,7 +1453,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p)
serial8250_clear_and_reinit_fifos(p);
p->ier |= UART_IER_RLSI | UART_IER_RDI;
@@ -2424,7 +2280,7 @@
}
}
EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
-@@ -1710,7 +1710,7 @@ static void serial8250_disable_ms(struct uart_port *port)
+@@ -1689,7 +1689,7 @@ static void serial8250_disable_ms(struct uart_port *port)
mctrl_gpio_disable_ms(up->gpios);
up->ier &= ~UART_IER_MSI;
@@ -2433,7 +2289,7 @@
}
static void serial8250_enable_ms(struct uart_port *port)
-@@ -1726,7 +1726,7 @@ static void serial8250_enable_ms(struct uart_port *port)
+@@ -1705,7 +1705,7 @@ static void serial8250_enable_ms(struct uart_port *port)
up->ier |= UART_IER_MSI;
serial8250_rpm_get(up);
@@ -2442,7 +2298,7 @@
serial8250_rpm_put(up);
}
-@@ -2145,14 +2145,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
+@@ -2124,14 +2124,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
struct uart_8250_port *up = up_to_u8250p(port);
serial8250_rpm_get(up);
@@ -2458,7 +2314,7 @@
wait_for_xmitr(up, BOTH_EMPTY);
/*
-@@ -2165,7 +2158,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
+@@ -2144,7 +2137,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
* and restore the IER
*/
wait_for_xmitr(up, BOTH_EMPTY);
@@ -2467,7 +2323,7 @@
serial8250_rpm_put(up);
}
-@@ -2468,7 +2461,7 @@ void serial8250_do_shutdown(struct uart_port *port)
+@@ -2447,7 +2440,7 @@ void serial8250_do_shutdown(struct uart_port *port)
*/
spin_lock_irqsave(&port->lock, flags);
up->ier = 0;
@@ -2476,7 +2332,7 @@
spin_unlock_irqrestore(&port->lock, flags);
synchronize_irq(port->irq);
-@@ -2850,7 +2843,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+@@ -2829,7 +2822,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
if (up->capabilities & UART_CAP_RTOIE)
up->ier |= UART_IER_RTOIE;
@@ -2485,7 +2341,7 @@
if (up->capabilities & UART_CAP_EFR) {
unsigned char efr = 0;
-@@ -3316,7 +3309,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults);
+@@ -3297,7 +3290,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults);
#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -2494,7 +2350,7 @@
{
struct uart_8250_port *up = up_to_u8250p(port);
-@@ -3324,6 +3317,18 @@ static void serial8250_console_putchar(struct uart_port *port, int ch)
+@@ -3305,6 +3298,18 @@ static void serial8250_console_putchar(struct uart_port *port, int ch)
serial_port_out(port, UART_TX, ch);
}
@@ -2513,7 +2369,7 @@
/*
* Restore serial console when h/w power-off detected
*/
-@@ -3345,6 +3350,32 @@ static void serial8250_console_restore(struct uart_8250_port *up)
+@@ -3326,6 +3331,32 @@ static void serial8250_console_restore(struct uart_8250_port *up)
serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
}
@@ -2546,7 +2402,7 @@
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
-@@ -3361,24 +3392,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+@@ -3342,24 +3373,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
struct uart_port *port = &up->port;
unsigned long flags;
unsigned int ier;
@@ -2573,7 +2429,7 @@
/* check scratch reg to see if port powered off during system sleep */
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
-@@ -3392,7 +3411,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+@@ -3373,7 +3392,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
mdelay(port->rs485.delay_rts_before_send);
}
@@ -2583,7 +2439,7 @@
/*
* Finally, wait for transmitter to become empty
-@@ -3405,8 +3426,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+@@ -3386,8 +3407,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
if (em485->tx_stopped)
up->rs485_stop_tx(up);
}
@@ -2593,7 +2449,7 @@
/*
* The receive handling will happen properly because the
-@@ -3418,8 +3438,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+@@ -3399,8 +3419,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
if (up->msr_saved_flags)
serial8250_modem_status(up);
@@ -2603,7 +2459,7 @@
}
static unsigned int probe_baud(struct uart_port *port)
-@@ -3439,6 +3458,7 @@ static unsigned int probe_baud(struct uart_port *port)
+@@ -3420,6 +3439,7 @@ static unsigned int probe_baud(struct uart_port *port)
int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
{
@@ -2611,7 +2467,7 @@
int baud = 9600;
int bits = 8;
int parity = 'n';
-@@ -3448,6 +3468,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
+@@ -3429,6 +3449,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
if (!port->iobase && !port->membase)
return -ENODEV;
@@ -2621,10 +2477,10 @@
uart_parse_options(options, &baud, &parity, &bits, &flow);
else if (probe)
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
-index 0e908061b5d7..6d04cbe736a0 100644
+index 300a8bbb4b80..c7c6558508b5 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
-@@ -2309,18 +2309,24 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+@@ -2328,18 +2328,24 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_amba_port *uap = amba_ports[co->index];
unsigned int old_cr = 0, new_cr;
@@ -2653,7 +2509,7 @@
/*
* First save the CR then disable the interrupts
-@@ -2346,8 +2352,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+@@ -2365,8 +2371,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
pl011_write(old_cr, uap, REG_CR);
if (locked)
@@ -2909,10 +2765,10 @@
if (!o->nodeid) {
/*
diff --git a/fs/namei.c b/fs/namei.c
-index 2ea15d043412..383f9fd2daaa 100644
+index 1fd854d4cd2c..71c0b93a4800 100644
--- a/fs/namei.c
+++ b/fs/namei.c
-@@ -1633,7 +1633,7 @@ static struct dentry *__lookup_slow(const struct qstr *name,
+@@ -1635,7 +1635,7 @@ static struct dentry *__lookup_slow(const struct qstr *name,
{
struct dentry *dentry, *old;
struct inode *inode = dir->d_inode;
@@ -2921,7 +2777,7 @@
/* Don't go there if it's already dead */
if (unlikely(IS_DEADDIR(inode)))
-@@ -3244,7 +3244,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
+@@ -3246,7 +3246,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
@@ -2931,7 +2787,7 @@
if (unlikely(IS_DEADDIR(dir_inode)))
return ERR_PTR(-ENOENT);
diff --git a/fs/namespace.c b/fs/namespace.c
-index dc31ad6b370f..41950313de7a 100644
+index d946298691ed..9e6cbea0c879 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -344,8 +344,24 @@ int __mnt_want_write(struct vfsmount *m)
@@ -3006,7 +2862,7 @@
status = -EBUSY;
spin_lock(&dentry->d_lock);
diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 1f394095eb88..fade2c7c705b 100644
+index 300d53ee7040..6ab25d4d4037 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -96,6 +96,7 @@
@@ -3017,7 +2873,7 @@
#include <linux/cn_proc.h>
#include <trace/events/oom.h>
#include "internal.h"
-@@ -2043,7 +2044,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
+@@ -2071,7 +2072,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
child = d_hash_and_lookup(dir, &qname);
if (!child) {
@@ -3027,7 +2883,7 @@
if (IS_ERR(child))
goto end_instantiate;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
-index 5d66faecd4ef..619d8e114646 100644
+index 013fc5931bc3..279faa9b8ce3 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -678,7 +678,7 @@ static bool proc_sys_fill_cache(struct file *file,
@@ -3315,10 +3171,10 @@
atomic_long_t hugetlb_usage;
#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index f8d46dc62d65..07b45ebbcb8f 100644
+index 3b97438afe3e..e506a579e473 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -1916,7 +1916,6 @@ enum netdev_ml_priv_type {
+@@ -1930,7 +1930,6 @@ enum netdev_ml_priv_type {
* @sfp_bus: attached &struct sfp_bus structure.
*
* @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
@@ -3326,7 +3182,7 @@
*
* @proto_down: protocol port state information can be sent to the
* switch driver and used to set the phys state of the
-@@ -2250,7 +2249,6 @@ struct net_device {
+@@ -2264,7 +2263,6 @@ struct net_device {
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
@@ -3334,7 +3190,7 @@
bool proto_down;
unsigned wol_enabled:1;
unsigned threaded:1;
-@@ -2360,13 +2358,11 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
+@@ -2374,13 +2372,11 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
#define netdev_lockdep_set_classes(dev) \
{ \
static struct lock_class_key qdisc_tx_busylock_key; \
@@ -3499,7 +3355,7 @@
#endif /* CONFIG_SMP */
diff --git a/include/linux/printk.h b/include/linux/printk.h
-index 9497f6b98339..f1b9cd8d11d6 100644
+index 9497f6b98339..eddfc5de6ee7 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -47,6 +47,12 @@ static inline const char *printk_skip_headers(const char *buffer)
@@ -3515,50 +3371,28 @@
/* printk's without a loglevel use this.. */
#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
-@@ -155,20 +161,7 @@ int vprintk(const char *fmt, va_list args);
+@@ -155,6 +161,8 @@ int vprintk(const char *fmt, va_list args);
asmlinkage __printf(1, 2) __cold
int _printk(const char *fmt, ...);
--/*
-- * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
-- */
--__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);
--
--extern void __printk_safe_enter(void);
--extern void __printk_safe_exit(void);
--/*
-- * The printk_deferred_enter/exit macros are available only as a hack for
-- * some code paths that need to defer all printk console printing. Interrupts
-- * must be disabled for the deferred duration.
-- */
--#define printk_deferred_enter __printk_safe_enter
--#define printk_deferred_exit __printk_safe_exit
+bool pr_flush(int timeout_ms, bool reset_on_progress);
-
++
/*
- * Please don't use printk_ratelimit(), because it shares ratelimiting state
-@@ -210,18 +203,10 @@ int _printk(const char *s, ...)
+ * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
+ */
+@@ -224,6 +232,11 @@ static inline void printk_deferred_exit(void)
+ {
+ }
+
++static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
++{
++ return true;
++}
++
+ static inline int printk_ratelimit(void)
{
return 0;
- }
--static inline __printf(1, 2) __cold
--int _printk_deferred(const char *s, ...)
--{
-- return 0;
--}
--
--static inline void printk_deferred_enter(void)
--{
--}
-
--static inline void printk_deferred_exit(void)
-+static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
- {
-+ return true;
- }
-
- static inline int printk_ratelimit(void)
-@@ -284,17 +269,30 @@ static inline void printk_trigger_flush(void)
+@@ -284,17 +297,30 @@ static inline void printk_trigger_flush(void)
extern int __printk_cpu_trylock(void);
extern void __printk_wait_on_cpu_lock(void);
extern void __printk_cpu_unlock(void);
@@ -3593,7 +3427,7 @@
for (;;) { \
local_irq_save(flags); \
if (__printk_cpu_trylock()) \
-@@ -304,22 +302,30 @@ extern void __printk_cpu_unlock(void);
+@@ -304,22 +330,30 @@ extern void __printk_cpu_unlock(void);
}
/**
@@ -3634,29 +3468,6 @@
extern int kptr_restrict;
-@@ -448,8 +454,6 @@ struct pi_entry {
- * See the vsnprintf() documentation for format string extensions over C99.
- */
- #define printk(fmt, ...) printk_index_wrap(_printk, fmt, ##__VA_ARGS__)
--#define printk_deferred(fmt, ...) \
-- printk_index_wrap(_printk_deferred, fmt, ##__VA_ARGS__)
-
- /**
- * pr_emerg - Print an emergency-level message
-@@ -587,13 +591,9 @@ struct pi_entry {
- #ifdef CONFIG_PRINTK
- #define printk_once(fmt, ...) \
- DO_ONCE_LITE(printk, fmt, ##__VA_ARGS__)
--#define printk_deferred_once(fmt, ...) \
-- DO_ONCE_LITE(printk_deferred, fmt, ##__VA_ARGS__)
- #else
- #define printk_once(fmt, ...) \
- no_printk(fmt, ##__VA_ARGS__)
--#define printk_deferred_once(fmt, ...) \
-- no_printk(fmt, ##__VA_ARGS__)
- #endif
-
- #define pr_emerg_once(fmt, ...) \
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
index f0e535f199be..002266693e50 100644
--- a/include/linux/ratelimit_types.h
@@ -3716,7 +3527,7 @@
extern void rt_mutex_unlock(struct rt_mutex *lock);
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index ad7ff332a0ac..20efdf15c2b9 100644
+index dcba347cbffa..23fb0ca4993b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -118,12 +118,8 @@ struct task_group;
@@ -3967,31 +3778,6 @@
/**
* mmget() - Pin the address space associated with a &struct mm_struct.
* @mm: The address space to pin.
-diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
-index e5af028c08b4..994c25640e15 100644
---- a/include/linux/sched/rt.h
-+++ b/include/linux/sched/rt.h
-@@ -39,20 +39,12 @@ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
- }
- extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
- extern void rt_mutex_adjust_pi(struct task_struct *p);
--static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
--{
-- return tsk->pi_blocked_on != NULL;
--}
- #else
- static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
- {
- return NULL;
- }
- # define rt_mutex_adjust_pi(p) do { } while (0)
--static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
--{
-- return false;
--}
- #endif
-
- extern void normalize_rt_tasks(void);
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 5db211f43b29..aa011f668705 100644
--- a/include/linux/serial_8250.h
@@ -4023,7 +3809,7 @@
int serial8250_console_exit(struct uart_port *port);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index e213acaa91ec..d8bc89ee46e3 100644
+index ae598ed86b50..922e1b6e5b05 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -300,6 +300,7 @@ struct sk_buff_head {
@@ -4034,7 +3820,7 @@
};
struct sk_buff;
-@@ -1945,6 +1946,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
+@@ -1990,6 +1991,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
__skb_queue_head_init(list);
}
@@ -4074,36 +3860,6 @@
# error "please don't include this file directly"
#endif
-diff --git a/include/linux/suspend.h b/include/linux/suspend.h
-index 4bcd65679cee..4cd3bc5d3891 100644
---- a/include/linux/suspend.h
-+++ b/include/linux/suspend.h
-@@ -541,23 +541,17 @@ static inline void unlock_system_sleep(void) {}
- #ifdef CONFIG_PM_SLEEP_DEBUG
- extern bool pm_print_times_enabled;
- extern bool pm_debug_messages_on;
--extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...);
-+extern __printf(1, 2) void pm_pr_dbg(const char *fmt, ...);
- #else
- #define pm_print_times_enabled (false)
- #define pm_debug_messages_on (false)
-
- #include <linux/printk.h>
-
--#define __pm_pr_dbg(defer, fmt, ...) \
-+#define pm_pr_dbg(fmt, ...) \
- no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
- #endif
-
--#define pm_pr_dbg(fmt, ...) \
-- __pm_pr_dbg(false, fmt, ##__VA_ARGS__)
--
--#define pm_deferred_pr_dbg(fmt, ...) \
-- __pm_pr_dbg(true, fmt, ##__VA_ARGS__)
--
- #ifdef CONFIG_PM_AUTOSLEEP
-
- /* kernel/power/autosleep.c */
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 0999f6317978..7af834b7c114 100644
--- a/include/linux/thread_info.h
@@ -4702,10 +4458,10 @@
SLOB replaces the stock allocator with a drastically simpler
allocator. SLOB is generally more space efficient but
diff --git a/init/main.c b/init/main.c
-index cf79b5a766cb..500a40b705e9 100644
+index 649d9e4201a8..ee92d608ffc4 100644
--- a/init/main.c
+++ b/init/main.c
-@@ -1605,6 +1605,7 @@ static noinline void __init kernel_init_freeable(void)
+@@ -1606,6 +1606,7 @@ static noinline void __init kernel_init_freeable(void)
rcu_init_tasks_generic();
do_pre_smp_initcalls();
@@ -4886,7 +4642,7 @@
}
}
diff --git a/kernel/exit.c b/kernel/exit.c
-index 91a43e57a32e..1d099609568d 100644
+index aefe7445508d..1d18299609d6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -64,6 +64,7 @@
@@ -5389,7 +5145,7 @@
return -ENOMEM;
per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
-index 2ef90d15699f..2ab883d856b5 100644
+index ed3f24a81549..ddb3efce860b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1250,10 +1250,10 @@ void kprobe_busy_end(void)
@@ -5486,10 +5242,10 @@
kfree(create);
return task;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
-index a30702b847ba..3aa2a6df1f87 100644
+index 120bbdacd58b..9f52368deb9a 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -5475,6 +5475,7 @@ static noinstr void check_flags(unsigned long flags)
+@@ -5476,6 +5476,7 @@ static noinstr void check_flags(unsigned long flags)
}
}
@@ -5497,7 +5253,7 @@
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -5489,6 +5490,7 @@ static noinstr void check_flags(unsigned long flags)
+@@ -5490,6 +5491,7 @@ static noinstr void check_flags(unsigned long flags)
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
@@ -5750,155 +5506,21 @@
}
/*
-diff --git a/kernel/power/main.c b/kernel/power/main.c
-index 7e646079fbeb..8b153aa90ecc 100644
---- a/kernel/power/main.c
-+++ b/kernel/power/main.c
-@@ -546,14 +546,13 @@ static int __init pm_debug_messages_setup(char *str)
- __setup("pm_debug_messages", pm_debug_messages_setup);
-
- /**
-- * __pm_pr_dbg - Print a suspend debug message to the kernel log.
-- * @defer: Whether or not to use printk_deferred() to print the message.
-+ * pm_pr_dbg - Print a suspend debug message to the kernel log.
- * @fmt: Message format.
- *
- * The message will be emitted if enabled through the pm_debug_messages
- * sysfs attribute.
- */
--void __pm_pr_dbg(bool defer, const char *fmt, ...)
-+void pm_pr_dbg(const char *fmt, ...)
- {
- struct va_format vaf;
- va_list args;
-@@ -566,10 +565,7 @@ void __pm_pr_dbg(bool defer, const char *fmt, ...)
- vaf.fmt = fmt;
- vaf.va = &args;
-
-- if (defer)
-- printk_deferred(KERN_DEBUG "PM: %pV", &vaf);
-- else
-- printk(KERN_DEBUG "PM: %pV", &vaf);
-+ printk(KERN_DEBUG "PM: %pV", &vaf);
-
- va_end(args);
- }
-diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
-index d118739874c0..bc6b856a0ff4 100644
---- a/kernel/printk/Makefile
-+++ b/kernel/printk/Makefile
-@@ -1,6 +1,5 @@
- # SPDX-License-Identifier: GPL-2.0-only
- obj-y = printk.o
--obj-$(CONFIG_PRINTK) += printk_safe.o
- obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
- obj-$(CONFIG_PRINTK) += printk_ringbuffer.o
- obj-$(CONFIG_PRINTK_INDEX) += index.o
-diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
-index 9f3ed2fdb721..de8ab059dd96 100644
---- a/kernel/printk/internal.h
-+++ b/kernel/printk/internal.h
-@@ -2,7 +2,6 @@
- /*
- * internal.h - printk internal definitions
- */
--#include <linux/percpu.h>
-
- #ifdef CONFIG_PRINTK
-
-@@ -12,41 +11,6 @@ enum printk_info_flags {
- LOG_CONT = 8, /* text is a fragment of a continuation line */
- };
-
--__printf(4, 0)
--int vprintk_store(int facility, int level,
-- const struct dev_printk_info *dev_info,
-- const char *fmt, va_list args);
--
--__printf(1, 0) int vprintk_default(const char *fmt, va_list args);
--__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
--
--bool printk_percpu_data_ready(void);
--
--#define printk_safe_enter_irqsave(flags) \
-- do { \
-- local_irq_save(flags); \
-- __printk_safe_enter(); \
-- } while (0)
--
--#define printk_safe_exit_irqrestore(flags) \
-- do { \
-- __printk_safe_exit(); \
-- local_irq_restore(flags); \
-- } while (0)
--
--void defer_console_output(void);
--
- u16 printk_parse_prefix(const char *text, int *level,
- enum printk_info_flags *flags);
--#else
--
--/*
-- * In !PRINTK builds we still export console_sem
-- * semaphore and some of console functions (console_unlock()/etc.), so
-- * printk-safe must preserve the existing local IRQ guarantees.
-- */
--#define printk_safe_enter_irqsave(flags) local_irq_save(flags)
--#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
--
--static inline bool printk_percpu_data_ready(void) { return false; }
- #endif /* CONFIG_PRINTK */
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-index 8d856b7c2e5a..ac2c44792ec4 100644
+index 8d856b7c2e5a..7f27cfee283e 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -44,6 +44,10 @@
+@@ -44,6 +44,9 @@
#include <linux/irq_work.h>
#include <linux/ctype.h>
#include <linux/uio.h>
-+#include <linux/kdb.h>
+#include <linux/kgdb.h>
+#include <linux/kthread.h>
+#include <linux/clocksource.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
-@@ -227,19 +231,7 @@ static int nr_ext_console_drivers;
-
- static int __down_trylock_console_sem(unsigned long ip)
- {
-- int lock_failed;
-- unsigned long flags;
--
-- /*
-- * Here and in __up_console_sem() we need to be in safe mode,
-- * because spindump/WARN/etc from under console ->lock will
-- * deadlock in printk()->down_trylock_console_sem() otherwise.
-- */
-- printk_safe_enter_irqsave(flags);
-- lock_failed = down_trylock(&console_sem);
-- printk_safe_exit_irqrestore(flags);
--
-- if (lock_failed)
-+ if (down_trylock(&console_sem))
- return 1;
- mutex_acquire(&console_lock_dep_map, 0, 1, ip);
- return 0;
-@@ -248,13 +240,9 @@ static int __down_trylock_console_sem(unsigned long ip)
-
- static void __up_console_sem(unsigned long ip)
- {
-- unsigned long flags;
--
- mutex_release(&console_lock_dep_map, ip);
-
-- printk_safe_enter_irqsave(flags);
- up(&console_sem);
-- printk_safe_exit_irqrestore(flags);
- }
- #define up_console_sem() __up_console_sem(_RET_IP_)
-
-@@ -268,11 +256,6 @@ static void __up_console_sem(unsigned long ip)
+@@ -268,11 +271,6 @@ static void __up_console_sem(unsigned long ip)
*/
static int console_locked, console_suspended;
@@ -5910,7 +5532,7 @@
/*
* Array of consoles built from command line options (console=)
*/
-@@ -352,10 +335,13 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
+@@ -352,10 +350,13 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
* non-prinatable characters are escaped in the "\xff" notation.
*/
@@ -5925,7 +5547,7 @@
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -363,17 +349,6 @@ static u64 syslog_seq;
+@@ -363,17 +364,6 @@ static u64 syslog_seq;
static size_t syslog_partial;
static bool syslog_time;
@@ -5943,7 +5565,7 @@
/*
* The next printk record to read after the last 'clear' command. There are
* two copies (updated with seqcount_latch) so that reads can locklessly
-@@ -391,9 +366,6 @@ static struct latched_seq clear_seq = {
+@@ -391,9 +381,6 @@ static struct latched_seq clear_seq = {
#define PREFIX_MAX 32
#endif
@@ -5953,13 +5575,7 @@
/* the maximum size allowed to be reserved for a record */
#define LOG_LINE_MAX (CONSOLE_LOG_MAX - PREFIX_MAX)
-@@ -432,12 +404,12 @@ static struct printk_ringbuffer *prb = &printk_rb_static;
- */
- static bool __printk_percpu_data_ready __read_mostly;
-
--bool printk_percpu_data_ready(void)
-+static bool printk_percpu_data_ready(void)
- {
+@@ -437,7 +424,7 @@ bool printk_percpu_data_ready(void)
return __printk_percpu_data_ready;
}
@@ -5968,7 +5584,7 @@
static void latched_seq_write(struct latched_seq *ls, u64 val)
{
raw_write_seqcount_latch(&ls->latch);
-@@ -1771,188 +1743,152 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
+@@ -1771,188 +1758,152 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}
@@ -6271,7 +5887,7 @@
}
/*
-@@ -2025,20 +1961,6 @@ static u8 *__printk_recursion_counter(void)
+@@ -2025,20 +1976,6 @@ static u8 *__printk_recursion_counter(void)
local_irq_restore(flags); \
} while (0)
@@ -6292,17 +5908,7 @@
static inline u32 printk_caller_id(void)
{
return in_task() ? task_pid_nr(current) :
-@@ -2119,13 +2041,14 @@ static u16 printk_sprint(char *text, u16 size, int facility,
- }
-
- __printf(4, 0)
--int vprintk_store(int facility, int level,
-- const struct dev_printk_info *dev_info,
-- const char *fmt, va_list args)
-+static int vprintk_store(int facility, int level,
-+ const struct dev_printk_info *dev_info,
-+ const char *fmt, va_list args)
- {
+@@ -2126,6 +2063,7 @@ int vprintk_store(int facility, int level,
const u32 caller_id = printk_caller_id();
struct prb_reserved_entry e;
enum printk_info_flags flags = 0;
@@ -6310,7 +5916,7 @@
struct printk_record r;
unsigned long irqflags;
u16 trunc_msg_len = 0;
-@@ -2136,6 +2059,7 @@ int vprintk_store(int facility, int level,
+@@ -2136,6 +2074,7 @@ int vprintk_store(int facility, int level,
u16 text_len;
int ret = 0;
u64 ts_nsec;
@@ -6318,7 +5924,7 @@
/*
* Since the duration of printk() can vary depending on the message
-@@ -2174,6 +2098,7 @@ int vprintk_store(int facility, int level,
+@@ -2174,6 +2113,7 @@ int vprintk_store(int facility, int level,
if (flags & LOG_CONT) {
prb_rec_init_wr(&r, reserve_size);
if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
@@ -6326,7 +5932,7 @@
text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
facility, &flags, fmt, args);
r.info->text_len += text_len;
-@@ -2181,6 +2106,7 @@ int vprintk_store(int facility, int level,
+@@ -2181,6 +2121,7 @@ int vprintk_store(int facility, int level,
if (flags & LOG_NEWLINE) {
r.info->flags |= LOG_NEWLINE;
prb_final_commit(&e);
@@ -6334,7 +5940,7 @@
} else {
prb_commit(&e);
}
-@@ -2204,6 +2130,7 @@ int vprintk_store(int facility, int level,
+@@ -2204,6 +2145,7 @@ int vprintk_store(int facility, int level,
if (!prb_reserve(&e, prb, &r))
goto out;
}
@@ -6342,7 +5948,7 @@
/* fill message */
text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
-@@ -2219,13 +2146,25 @@ int vprintk_store(int facility, int level,
+@@ -2219,13 +2161,25 @@ int vprintk_store(int facility, int level,
memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
/* A message without a trailing newline can be continued. */
@@ -6370,7 +5976,7 @@
printk_exit_irqrestore(recursion_ptr, irqflags);
return ret;
}
-@@ -2235,50 +2174,43 @@ asmlinkage int vprintk_emit(int facility, int level,
+@@ -2235,40 +2189,16 @@ asmlinkage int vprintk_emit(int facility, int level,
const char *fmt, va_list args)
{
int printed_len;
@@ -6412,36 +6018,7 @@
wake_up_klogd();
return printed_len;
}
- EXPORT_SYMBOL(vprintk_emit);
-
--int vprintk_default(const char *fmt, va_list args)
-+__printf(1, 0)
-+static int vprintk_default(const char *fmt, va_list args)
- {
- return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
- }
--EXPORT_SYMBOL_GPL(vprintk_default);
-+
-+__printf(1, 0)
-+static int vprintk_func(const char *fmt, va_list args)
-+{
-+#ifdef CONFIG_KGDB_KDB
-+ /* Allow to pass printk() to kdb but avoid a recursion. */
-+ if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
-+ return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
-+#endif
-+ return vprintk_default(fmt, args);
-+}
-+
-+asmlinkage int vprintk(const char *fmt, va_list args)
-+{
-+ return vprintk_func(fmt, args);
-+}
-+EXPORT_SYMBOL(vprintk);
-
- asmlinkage __visible int _printk(const char *fmt, ...)
- {
-@@ -2293,37 +2225,162 @@ asmlinkage __visible int _printk(const char *fmt, ...)
+@@ -2293,37 +2223,162 @@ asmlinkage __visible int _printk(const char *fmt, ...)
}
EXPORT_SYMBOL(_printk);
@@ -6555,7 +6132,9 @@
+ con->write(con, write_text, len);
+ if (len)
+ printk_delay(r.info->level);
-+
+
+-static size_t record_print_text(const struct printk_record *r,
+- bool syslog, bool time)
+ latched_seq_write(&con->printk_seq, seq);
+
+ console_unlock();
@@ -6570,9 +6149,7 @@
+ con->name, con->index);
+ return ret;
+}
-
--static size_t record_print_text(const struct printk_record *r,
-- bool syslog, bool time)
++
+/* Must be called within console_lock(). */
+static void start_printk_kthread(struct console *con)
{
@@ -6627,7 +6204,7 @@
#endif /* CONFIG_PRINTK */
-@@ -2580,34 +2637,6 @@ int is_console_locked(void)
+@@ -2580,34 +2635,6 @@ int is_console_locked(void)
}
EXPORT_SYMBOL(is_console_locked);
@@ -6662,7 +6239,7 @@
/**
* console_unlock - unlock the console system
*
-@@ -2624,140 +2653,13 @@ static inline int can_use_console(void)
+@@ -2624,140 +2651,13 @@ static inline int can_use_console(void)
*/
void console_unlock(void)
{
@@ -6803,7 +6380,7 @@
}
EXPORT_SYMBOL(console_unlock);
-@@ -2807,18 +2709,20 @@ void console_unblank(void)
+@@ -2807,18 +2707,20 @@ void console_unblank(void)
*/
void console_flush_on_panic(enum con_flush_mode mode)
{
@@ -6835,7 +6412,7 @@
console_unlock();
}
-@@ -2954,6 +2858,7 @@ static int try_enable_new_console(struct console *newcon, bool user_specified)
+@@ -2954,6 +2856,7 @@ static int try_enable_new_console(struct console *newcon, bool user_specified)
void register_console(struct console *newcon)
{
struct console *bcon = NULL;
@@ -6843,7 +6420,7 @@
int err;
for_each_console(bcon) {
-@@ -2976,6 +2881,8 @@ void register_console(struct console *newcon)
+@@ -2976,6 +2879,8 @@ void register_console(struct console *newcon)
}
}
@@ -6852,7 +6429,7 @@
if (console_drivers && console_drivers->flags & CON_BOOT)
bcon = console_drivers;
-@@ -3017,8 +2924,10 @@ void register_console(struct console *newcon)
+@@ -3017,8 +2922,10 @@ void register_console(struct console *newcon)
* the real console are the same physical device, it's annoying to
* see the beginning boot messages twice
*/
@@ -6864,7 +6441,7 @@
/*
* Put this console in the list - keep the
-@@ -3040,27 +2949,21 @@ void register_console(struct console *newcon)
+@@ -3040,27 +2947,21 @@ void register_console(struct console *newcon)
if (newcon->flags & CON_EXTENDED)
nr_ext_console_drivers++;
@@ -6906,7 +6483,7 @@
console_unlock();
console_sysfs_notify();
-@@ -3134,6 +3037,9 @@ int unregister_console(struct console *console)
+@@ -3134,6 +3035,9 @@ int unregister_console(struct console *console)
console_unlock();
console_sysfs_notify();
@@ -6916,7 +6493,7 @@
if (console->exit)
res = console->exit(console);
-@@ -3216,6 +3122,15 @@ static int __init printk_late_init(void)
+@@ -3216,6 +3120,15 @@ static int __init printk_late_init(void)
unregister_console(con);
}
}
@@ -6932,7 +6509,7 @@
ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
console_cpu_notify);
WARN_ON(ret < 0);
-@@ -3239,14 +3154,8 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
+@@ -3239,14 +3152,8 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
int pending = this_cpu_xchg(printk_pending, 0);
@@ -6948,38 +6525,16 @@
}
static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
-@@ -3293,29 +3202,7 @@ void defer_console_output(void)
+@@ -3293,7 +3200,7 @@ void defer_console_output(void)
void printk_trigger_flush(void)
{
- defer_console_output();
--}
--
--int vprintk_deferred(const char *fmt, va_list args)
--{
-- int r;
--
-- r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
-- defer_console_output();
--
-- return r;
--}
--
--int _printk_deferred(const char *fmt, ...)
--{
-- va_list args;
-- int r;
--
-- va_start(args, fmt);
-- r = vprintk_deferred(fmt, args);
-- va_end(args);
--
-- return r;
+ wake_up_klogd();
}
- /*
-@@ -3444,6 +3331,24 @@ void kmsg_dump(enum kmsg_dump_reason reason)
+ int vprintk_deferred(const char *fmt, va_list args)
+@@ -3444,6 +3351,24 @@ void kmsg_dump(enum kmsg_dump_reason reason)
{
struct kmsg_dumper *dumper;
@@ -7004,7 +6559,7 @@
rcu_read_lock();
list_for_each_entry_rcu(dumper, &dump_list, list) {
enum kmsg_dump_reason max_reason = dumper->max_reason;
-@@ -3626,6 +3531,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+@@ -3626,6 +3551,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
#ifdef CONFIG_SMP
static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
@@ -7012,7 +6567,7 @@
/**
* __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
-@@ -3705,6 +3611,9 @@ EXPORT_SYMBOL(__printk_cpu_trylock);
+@@ -3705,6 +3631,9 @@ EXPORT_SYMBOL(__printk_cpu_trylock);
*/
void __printk_cpu_unlock(void)
{
@@ -7022,7 +6577,7 @@
if (atomic_read(&printk_cpulock_nested)) {
atomic_dec(&printk_cpulock_nested);
return;
-@@ -3715,6 +3624,12 @@ void __printk_cpu_unlock(void)
+@@ -3715,6 +3644,12 @@ void __printk_cpu_unlock(void)
* LMM(__printk_cpu_unlock:A)
*/
@@ -7035,7 +6590,7 @@
/*
* Guarantee loads and stores from this CPU when it was the
* lock owner are visible to the next lock owner. This pairs
-@@ -3735,6 +3650,98 @@ void __printk_cpu_unlock(void)
+@@ -3735,6 +3670,98 @@ void __printk_cpu_unlock(void)
*/
atomic_set_release(&printk_cpulock_owner,
-1); /* LMM(__printk_cpu_unlock:B) */
@@ -7134,64 +6689,6 @@
+}
+EXPORT_SYMBOL(pr_flush);
+#endif /* CONFIG_PRINTK */
-diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
-deleted file mode 100644
-index ef0f9a2044da..000000000000
---- a/kernel/printk/printk_safe.c
-+++ /dev/null
-@@ -1,52 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0-or-later
--/*
-- * printk_safe.c - Safe printk for printk-deadlock-prone contexts
-- */
--
--#include <linux/preempt.h>
--#include <linux/kdb.h>
--#include <linux/smp.h>
--#include <linux/cpumask.h>
--#include <linux/printk.h>
--#include <linux/kprobes.h>
--
--#include "internal.h"
--
--static DEFINE_PER_CPU(int, printk_context);
--
--/* Can be preempted by NMI. */
--void __printk_safe_enter(void)
--{
-- this_cpu_inc(printk_context);
--}
--
--/* Can be preempted by NMI. */
--void __printk_safe_exit(void)
--{
-- this_cpu_dec(printk_context);
--}
--
--asmlinkage int vprintk(const char *fmt, va_list args)
--{
--#ifdef CONFIG_KGDB_KDB
-- /* Allow to pass printk() to kdb but avoid a recursion. */
-- if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
-- return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
--#endif
--
-- /*
-- * Use the main logbuf even in NMI. But avoid calling console
-- * drivers that might have their own locks.
-- */
-- if (this_cpu_read(printk_context) || in_nmi()) {
-- int len;
--
-- len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
-- defer_console_output();
-- return len;
-- }
--
-- /* No obstacles. */
-- return vprintk_default(fmt, args);
--}
--EXPORT_SYMBOL(vprintk);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0cf547531ddf..0df2de214daa 100644
--- a/kernel/ptrace.c
@@ -7328,7 +6825,7 @@
div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
bl = max(rdp->blimit, pending >> div);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index b89ca5c83143..73e82dae64cd 100644
+index 85be684687b0..1a7ac75b1afc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -75,7 +75,11 @@ __read_mostly int sysctl_resched_latency_warn_once = 1;
@@ -7415,19 +6912,7 @@
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);
-@@ -2945,9 +2993,8 @@ void force_compatible_cpus_allowed_ptr(struct task_struct *p)
-
- out_set_mask:
- if (printk_ratelimit()) {
-- printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
-- task_pid_nr(p), p->comm,
-- cpumask_pr_args(override_mask));
-+ printk("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
-+ task_pid_nr(p), p->comm, cpumask_pr_args(override_mask));
- }
-
- WARN_ON(set_cpus_allowed_ptr(p, override_mask));
-@@ -3203,7 +3250,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
+@@ -3203,7 +3251,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -7436,7 +6921,7 @@
return 0;
cpu_relax();
}
-@@ -3218,7 +3265,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
+@@ -3218,7 +3266,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
@@ -7445,7 +6930,7 @@
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &rf);
-@@ -3252,7 +3299,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
+@@ -3252,7 +3300,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
ktime_t to = NSEC_PER_SEC / HZ;
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -7454,18 +6939,7 @@
continue;
}
-@@ -3377,8 +3424,8 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
- * leave kernel.
- */
- if (p->mm && printk_ratelimit()) {
-- printk_deferred("process %d (%s) no longer affine to cpu%d\n",
-- task_pid_nr(p), p->comm, cpu);
-+ printk("process %d (%s) no longer affine to cpu%d\n",
-+ task_pid_nr(p), p->comm, cpu);
- }
- }
-
-@@ -4386,6 +4433,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -4395,6 +4443,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -7475,7 +6949,7 @@
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -4880,20 +4930,18 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+@@ -4890,20 +4941,18 @@ static struct rq *finish_task_switch(struct task_struct *prev)
*/
if (mm) {
membarrier_mm_sync_core_before_usermode(mm);
@@ -7501,7 +6975,7 @@
put_task_struct_rcu_user(prev);
}
-@@ -6294,6 +6342,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+@@ -6304,6 +6353,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -7509,22 +6983,7 @@
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
rq->last_seen_need_resched_ns = 0;
-@@ -6379,8 +6428,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
- preempt_enable_no_resched();
- }
-
-- if (tsk_is_pi_blocked(tsk))
-- return;
-+ /*
-+ * spinlock and rwlock must not flush block requests. This will
-+ * deadlock if the callback attempts to acquire a lock which is
-+ * already acquired.
-+ */
-+ SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
-
- /*
- * If we are going to sleep and we have plugged IO queued,
-@@ -6511,6 +6564,30 @@ static void __sched notrace preempt_schedule_common(void)
+@@ -6525,6 +6575,30 @@ static void __sched notrace preempt_schedule_common(void)
} while (need_resched());
}
@@ -7555,7 +7014,7 @@
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
-@@ -6524,7 +6601,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
+@@ -6538,7 +6612,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
*/
if (likely(!preemptible()))
return;
@@ -7565,7 +7024,7 @@
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -6557,6 +6635,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+@@ -6571,6 +6646,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
if (likely(!preemptible()))
return;
@@ -7575,7 +7034,7 @@
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -8709,7 +8790,9 @@ void __init init_idle(struct task_struct *idle, int cpu)
+@@ -8723,7 +8801,9 @@ void __init init_idle(struct task_struct *idle, int cpu)
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -7586,23 +7045,23 @@
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -9503,14 +9586,8 @@ void __init sched_init(void)
+@@ -9524,14 +9604,8 @@ void __init sched_init(void)
}
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-static inline int preempt_count_equals(int preempt_offset)
-{
- int nested = preempt_count() + rcu_preempt_depth();
-
+-
- return (nested == preempt_offset);
-}
--
+
-void __might_sleep(const char *file, int line, int preempt_offset)
+void __might_sleep(const char *file, int line)
{
unsigned int state = get_current_state();
/*
-@@ -9524,11 +9601,32 @@ void __might_sleep(const char *file, int line, int preempt_offset)
+@@ -9545,11 +9619,32 @@ void __might_sleep(const char *file, int line, int preempt_offset)
(void *)current->task_state_change,
(void *)current->task_state_change);
@@ -7637,7 +7096,7 @@
{
/* Ratelimiting timestamp: */
static unsigned long prev_jiffy;
-@@ -9538,7 +9636,7 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
+@@ -9559,7 +9654,7 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
/* WARN_ON_ONCE() by default, no rate limit required: */
rcu_sleep_check();
@@ -7646,7 +7105,7 @@
!is_idle_task(current) && !current->non_block_count) ||
system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
oops_in_progress)
-@@ -9551,29 +9649,33 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
+@@ -9572,29 +9667,33 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
/* Save this before calling printk(), since that will clobber it: */
preempt_disable_ip = get_preempt_disable_ip(current);
@@ -7694,36 +7153,11 @@
void __cant_sleep(const char *file, int line, int preempt_offset)
{
-diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index fffcb1aa77b7..2799117917c7 100644
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -800,7 +800,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
- * entity.
- */
- if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
-- printk_deferred_once("sched: DL replenish lagged too much\n");
-+ printk_once("sched: DL replenish lagged too much\n");
- dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
- dl_se->runtime = pi_of(dl_se)->dl_runtime;
- }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index fcbacc35d2b9..99e1ccd10439 100644
+index a853e4e9e3c3..0e13c859ab0f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4247,10 +4247,7 @@ static inline void check_schedstat_required(void)
- trace_sched_stat_iowait_enabled() ||
- trace_sched_stat_blocked_enabled() ||
- trace_sched_stat_runtime_enabled()) {
-- printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
-- "stat_blocked and stat_runtime require the "
-- "kernel parameter schedstats=enable or "
-- "kernel.sched_schedstats=1\n");
-+ printk_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
- }
- #endif
- }
-@@ -4458,7 +4455,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4458,7 +4458,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -7732,7 +7166,7 @@
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -4482,7 +4479,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4482,7 +4482,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
return;
if (delta > ideal_runtime)
@@ -7741,7 +7175,7 @@
}
static void
-@@ -4625,7 +4622,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
+@@ -4625,7 +4625,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
* validating it and just reschedule.
*/
if (queued) {
@@ -7750,7 +7184,7 @@
return;
}
/*
-@@ -4765,7 +4762,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
+@@ -4765,7 +4765,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -7759,7 +7193,7 @@
}
static __always_inline
-@@ -5528,7 +5525,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
+@@ -5528,7 +5528,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
if (delta < 0) {
if (task_current(rq, p))
@@ -7768,7 +7202,7 @@
return;
}
hrtick_start(rq, delta);
-@@ -7220,7 +7217,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7232,7 +7232,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;
preempt:
@@ -7777,7 +7211,7 @@
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -11123,7 +11120,7 @@ static void task_fork_fair(struct task_struct *p)
+@@ -11210,7 +11210,7 @@ static void task_fork_fair(struct task_struct *p)
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -7786,7 +7220,7 @@
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -11150,7 +11147,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
+@@ -11237,7 +11237,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
*/
if (task_current(rq, p)) {
if (p->prio > oldprio)
@@ -7796,7 +7230,7 @@
check_preempt_curr(rq, p, 0);
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
-index 7f8dace0964c..d5cee51819bf 100644
+index c4947c1b5edb..e13090e33f3c 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -46,11 +46,19 @@ SCHED_FEAT(DOUBLE_TICK, false)
@@ -7819,57 +7253,11 @@
/*
* When doing wakeups, attempt to limit superfluous scans of the LLC domain.
-diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
-index cad2a1b34ed0..fca1bfa2763f 100644
---- a/kernel/sched/psi.c
-+++ b/kernel/sched/psi.c
-@@ -717,11 +717,10 @@ static void psi_group_change(struct psi_group *group, int cpu,
- if (groupc->tasks[t]) {
- groupc->tasks[t]--;
- } else if (!psi_bug) {
-- printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u %u] clear=%x set=%x\n",
-- cpu, t, groupc->tasks[0],
-- groupc->tasks[1], groupc->tasks[2],
-- groupc->tasks[3], groupc->tasks[4],
-- clear, set);
-+ pr_err("psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
-+ cpu, t, groupc->tasks[0],
-+ groupc->tasks[1], groupc->tasks[2],
-+ groupc->tasks[3], clear, set);
- psi_bug = 1;
- }
- }
-@@ -787,9 +786,9 @@ static void psi_flags_change(struct task_struct *task, int clear, int set)
- if (((task->psi_flags & set) ||
- (task->psi_flags & clear) != clear) &&
- !psi_bug) {
-- printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
-- task->pid, task->comm, task_cpu(task),
-- task->psi_flags, clear, set);
-+ pr_err("psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
-+ task->pid, task->comm, task_cpu(task),
-+ task->psi_flags, clear, set);
- psi_bug = 1;
- }
-
-diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
-index 8007d087a57f..6ba8c7bdcdae 100644
---- a/kernel/sched/rt.c
-+++ b/kernel/sched/rt.c
-@@ -983,7 +983,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
- */
- if (likely(rt_b->rt_runtime)) {
- rt_rq->rt_throttled = 1;
-- printk_deferred_once("sched: RT throttling activated\n");
-+ printk_once("sched: RT throttling activated\n");
- } else {
- /*
- * In case we did anyway, make it go away,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index fe8be2f8a47d..38f9078fcaaa 100644
+index e49902898253..249319fc6d03 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -2323,6 +2323,15 @@ extern void reweight_task(struct task_struct *p, int prio);
+@@ -2321,6 +2321,15 @@ extern void reweight_task(struct task_struct *p, int prio);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -7911,7 +7299,7 @@
rd->visit_gen = 0;
diff --git a/kernel/signal.c b/kernel/signal.c
-index d831f0aec56e..24fee2a3788a 100644
+index c7dbb19219b9..0bbd89fbf240 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1324,6 +1324,34 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
@@ -7967,7 +7355,7 @@
cgroup_leave_frozen(true);
} else {
diff --git a/kernel/smp.c b/kernel/smp.c
-index b68d63e965db..d00f1dda09c6 100644
+index 82825345432c..9d3c8c56d904 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -690,10 +690,20 @@ void flush_smp_call_function_from_idle(void)
@@ -7993,159 +7381,11 @@
local_irq_restore(flags);
}
-diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
-index 003ccf338d20..00fc43605c6b 100644
---- a/kernel/time/clockevents.c
-+++ b/kernel/time/clockevents.c
-@@ -203,8 +203,7 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
- {
- /* Nothing to do if we already reached the limit */
- if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
-- printk_deferred(KERN_WARNING
-- "CE: Reprogramming failure. Giving up\n");
-+ pr_warn("CE: Reprogramming failure. Giving up\n");
- dev->next_event = KTIME_MAX;
- return -ETIME;
- }
-@@ -217,10 +216,8 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
- if (dev->min_delta_ns > MIN_DELTA_LIMIT)
- dev->min_delta_ns = MIN_DELTA_LIMIT;
-
-- printk_deferred(KERN_WARNING
-- "CE: %s increased min_delta_ns to %llu nsec\n",
-- dev->name ? dev->name : "?",
-- (unsigned long long) dev->min_delta_ns);
-+ pr_warn("CE: %s increased min_delta_ns to %llu nsec\n",
-+ dev->name ? dev->name : "?", (unsigned long long) dev->min_delta_ns);
- return 0;
- }
-
-diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
-index 406dccb79c2b..829d7797811f 100644
---- a/kernel/time/ntp.c
-+++ b/kernel/time/ntp.c
-@@ -939,9 +939,7 @@ static long hardpps_update_freq(struct pps_normtime freq_norm)
- time_status |= STA_PPSERROR;
- pps_errcnt++;
- pps_dec_freq_interval();
-- printk_deferred(KERN_ERR
-- "hardpps: PPSERROR: interval too long - %lld s\n",
-- freq_norm.sec);
-+ pr_err("hardpps: PPSERROR: interval too long - %lld s\n", freq_norm.sec);
- return 0;
- }
-
-@@ -954,8 +952,7 @@ static long hardpps_update_freq(struct pps_normtime freq_norm)
- delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);
- pps_freq = ftemp;
- if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
-- printk_deferred(KERN_WARNING
-- "hardpps: PPSWANDER: change=%ld\n", delta);
-+ pr_warn("hardpps: PPSWANDER: change=%ld\n", delta);
- time_status |= STA_PPSWANDER;
- pps_stbcnt++;
- pps_dec_freq_interval();
-@@ -999,9 +996,8 @@ static void hardpps_update_phase(long error)
- * the time offset is updated.
- */
- if (jitter > (pps_jitter << PPS_POPCORN)) {
-- printk_deferred(KERN_WARNING
-- "hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
-- jitter, (pps_jitter << PPS_POPCORN));
-+ pr_warn("hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
-+ jitter, (pps_jitter << PPS_POPCORN));
- time_status |= STA_PPSJITTER;
- pps_jitcnt++;
- } else if (time_status & STA_PPSTIME) {
-@@ -1058,7 +1054,7 @@ void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_t
- time_status |= STA_PPSJITTER;
- /* restart the frequency calibration interval */
- pps_fbase = *raw_ts;
-- printk_deferred(KERN_ERR "hardpps: PPSJITTER: bad pulse\n");
-+ pr_err("hardpps: PPSJITTER: bad pulse\n");
- return;
- }
-
-diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
-index 871c912860ed..f18cad9a14df 100644
---- a/kernel/time/timekeeping.c
-+++ b/kernel/time/timekeeping.c
-@@ -204,22 +204,23 @@ static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
- const char *name = tk->tkr_mono.clock->name;
-
- if (offset > max_cycles) {
-- printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
-- offset, name, max_cycles);
-- printk_deferred(" timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
-+ printk("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
-+ offset, name, max_cycles);
-+ printk(" timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
- } else {
- if (offset > (max_cycles >> 1)) {
-- printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
-- offset, name, max_cycles >> 1);
-- printk_deferred(" timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
-+ printk("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
-+ offset, name, max_cycles >> 1);
-+ printk(" timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
- }
- }
-
- if (tk->underflow_seen) {
- if (jiffies - tk->last_warning > WARNING_FREQ) {
-- printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
-- printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
-- printk_deferred(" Your kernel is probably still fine.\n");
-+ printk("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n",
-+ name);
-+ printk(" Please report this, consider using a different clocksource, if possible.\n");
-+ printk(" Your kernel is probably still fine.\n");
- tk->last_warning = jiffies;
- }
- tk->underflow_seen = 0;
-@@ -227,9 +228,10 @@ static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
-
- if (tk->overflow_seen) {
- if (jiffies - tk->last_warning > WARNING_FREQ) {
-- printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
-- printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
-- printk_deferred(" Your kernel is probably still fine.\n");
-+ printk("WARNING: Overflow in clocksource '%s' observed, time update capped.\n",
-+ name);
-+ printk(" Please report this, consider using a different clocksource, if possible.\n");
-+ printk(" Your kernel is probably still fine.\n");
- tk->last_warning = jiffies;
- }
- tk->overflow_seen = 0;
-@@ -1669,9 +1671,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
- const struct timespec64 *delta)
- {
- if (!timespec64_valid_strict(delta)) {
-- printk_deferred(KERN_WARNING
-- "__timekeeping_inject_sleeptime: Invalid "
-- "sleep delta value!\n");
-+ pr_warn("%s: Invalid sleep delta value!\n", __func__);
- return;
- }
- tk_xtime_add(tk, delta);
-diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
-index b73e8850e58d..149cc4b08d8e 100644
---- a/kernel/time/timekeeping_debug.c
-+++ b/kernel/time/timekeeping_debug.c
-@@ -49,7 +49,7 @@ void tk_debug_account_sleep_time(const struct timespec64 *t)
- int bin = min(fls(t->tv_sec), NUM_BINS-1);
-
- sleep_time_bin[bin]++;
-- pm_deferred_pr_dbg("Timekeeping suspended for %lld.%03lu seconds\n",
-+ pm_pr_dbg("Timekeeping suspended for %lld.%03lu seconds\n",
- (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);
- }
-
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 518ce39a878d..53ea832567d7 100644
+index d93f9c59f50e..d50921bd3acb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2636,7 +2636,13 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
+@@ -2614,7 +2614,13 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
trace_flags |= TRACE_FLAG_NEED_RESCHED;
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
@@ -8160,7 +7400,7 @@
(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
}
-@@ -4217,15 +4223,17 @@ unsigned long trace_total_entries(struct trace_array *tr)
+@@ -4195,15 +4201,17 @@ unsigned long trace_total_entries(struct trace_array *tr)
static void print_lat_help_header(struct seq_file *m)
{
@@ -8187,7 +7427,7 @@
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
-@@ -4259,14 +4267,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
+@@ -4237,14 +4245,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
print_event_info(buf, m);
@@ -8213,10 +7453,10 @@
void
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index c072e8b9849c..0098d7713f91 100644
+index c84c94334a60..8a7d2af099dd 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
-@@ -192,6 +192,7 @@ static int trace_define_common_fields(void)
+@@ -193,6 +193,7 @@ static int trace_define_common_fields(void)
/* Holds both preempt_count and migrate_disable */
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
@@ -8269,36 +7509,6 @@
if (entry->preempt_count & 0xf0)
trace_seq_printf(s, "%x", entry->preempt_count >> 4);
else
-diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index 3f4d27668576..fa66b2ac3198 100644
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -4845,9 +4845,7 @@ void show_workqueue_state(void)
- * drivers that queue work while holding locks
- * also taken in their write paths.
- */
-- printk_deferred_enter();
- show_pwq(pwq);
-- printk_deferred_exit();
- }
- raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
- /*
-@@ -4871,7 +4869,6 @@ void show_workqueue_state(void)
- * queue work while holding locks also taken in their write
- * paths.
- */
-- printk_deferred_enter();
- pr_info("pool %d:", pool->id);
- pr_cont_pool_info(pool);
- pr_cont(" hung=%us workers=%d",
-@@ -4886,7 +4883,6 @@ void show_workqueue_state(void)
- first = false;
- }
- pr_cont("\n");
-- printk_deferred_exit();
- next_pool:
- raw_spin_unlock_irqrestore(&pool->lock, flags);
- /*
diff --git a/lib/bug.c b/lib/bug.c
index 45a0584f6541..03a87df69ed2 100644
--- a/lib/bug.c
@@ -8957,21 +8167,6 @@
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
-diff --git a/lib/ratelimit.c b/lib/ratelimit.c
-index e01a93f46f83..524cf65dce53 100644
---- a/lib/ratelimit.c
-+++ b/lib/ratelimit.c
-@@ -47,9 +47,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
- if (time_is_before_jiffies(rs->begin + rs->interval)) {
- if (rs->missed) {
- if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
-- printk_deferred(KERN_WARNING
-- "%s: %d callbacks suppressed\n",
-- func, rs->missed);
-+ pr_warn("%s: %d callbacks suppressed\n", func, rs->missed);
- rs->missed = 0;
- }
- }
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index abb3432ed744..d5e82e4a57ad 100644
--- a/lib/scatterlist.c
@@ -9017,11 +8212,11 @@
kunmap(miter->page);
diff --git a/localversion-rt b/localversion-rt
new file mode 100644
-index 000000000000..24707986c321
+index 000000000000..4b7dca68a5b4
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt48
++-rt49
diff --git a/mm/Kconfig b/mm/Kconfig
index c048dea7e342..88778414465b 100644
--- a/mm/Kconfig
@@ -9036,10 +8231,10 @@
select XARRAY_MULTI
help
diff --git a/mm/memory.c b/mm/memory.c
-index 26d115ded4ab..307be06c9484 100644
+index a4d0f744a458..6fd7479ebdac 100644
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -5287,7 +5287,7 @@ void __might_fault(const char *file, int line)
+@@ -5291,7 +5291,7 @@ void __might_fault(const char *file, int line)
return;
if (pagefault_disabled())
return;
@@ -9049,7 +8244,7 @@
if (current->mm)
		might_lock_read(&current->mm->mmap_lock);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index a0b7afae59e9..ac537f5caa9d 100644
+index 61d7967897ce..9fec8c8b5578 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3149,9 +3149,9 @@ static void drain_local_pages_wq(struct work_struct *work)
@@ -9329,7 +8524,7 @@
config BQL
bool
diff --git a/net/core/dev.c b/net/core/dev.c
-index 6111506a4105..38d6ffad4750 100644
+index be51644e95da..ad0d386bc275 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -225,14 +225,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
@@ -9377,7 +8572,7 @@
if (unlikely(contended))
spin_lock(&q->busylock);
-@@ -4662,6 +4668,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+@@ -4663,6 +4669,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
rps_unlock(sd);
local_irq_restore(flags);
@@ -9385,7 +8580,7 @@
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -4902,7 +4909,7 @@ static int netif_rx_internal(struct sk_buff *skb)
+@@ -4903,7 +4910,7 @@ static int netif_rx_internal(struct sk_buff *skb)
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -9394,7 +8589,7 @@
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -4912,14 +4919,14 @@ static int netif_rx_internal(struct sk_buff *skb)
+@@ -4913,14 +4920,14 @@ static int netif_rx_internal(struct sk_buff *skb)
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
@@ -9412,7 +8607,7 @@
}
return ret;
}
-@@ -4958,11 +4965,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -4959,11 +4966,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
@@ -9426,7 +8621,7 @@
trace_netif_rx_ni_exit(err);
return err;
-@@ -6405,12 +6410,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+@@ -6407,12 +6412,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -9441,7 +8636,7 @@
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -6488,6 +6495,7 @@ void __napi_schedule(struct napi_struct *n)
+@@ -6490,6 +6497,7 @@ void __napi_schedule(struct napi_struct *n)
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -9449,7 +8644,7 @@
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -11318,6 +11326,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
+@@ -11320,6 +11328,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
@@ -9457,7 +8652,7 @@
#ifdef CONFIG_RPS
remsd = oldsd->rps_ipi_list;
-@@ -11331,7 +11340,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
+@@ -11333,7 +11342,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -9466,7 +8661,7 @@
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
-@@ -11646,7 +11655,7 @@ static int __init net_dev_init(void)
+@@ -11648,7 +11657,7 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
@@ -10402,7 +9597,7 @@
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
-index 30c29a9a2efd..dd27a062e913 100644
+index 250d87d993cb..0433ffa8c72f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -304,8 +304,8 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/lakitu-kernel-rt-5_15-5.15.55-r6.ebuild b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/lakitu-kernel-rt-5_15-5.15.55-r6.ebuild
deleted file mode 120000
index e4d6be5..0000000
--- a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/lakitu-kernel-rt-5_15-5.15.55-r6.ebuild
+++ /dev/null
@@ -1 +0,0 @@
-lakitu-kernel-rt-5_15-5.15.55.ebuild
\ No newline at end of file
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/lakitu-kernel-rt-5_15-5.15.65-r7.ebuild b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/lakitu-kernel-rt-5_15-5.15.65-r7.ebuild
new file mode 120000
index 0000000..f5af8c3
--- /dev/null
+++ b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/lakitu-kernel-rt-5_15-5.15.65-r7.ebuild
@@ -0,0 +1 @@
+lakitu-kernel-rt-5_15-5.15.65.ebuild
\ No newline at end of file
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/lakitu-kernel-rt-5_15-5.15.55.ebuild b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/lakitu-kernel-rt-5_15-5.15.65.ebuild
similarity index 93%
rename from project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/lakitu-kernel-rt-5_15-5.15.55.ebuild
rename to project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/lakitu-kernel-rt-5_15-5.15.65.ebuild
index 747b00e..3c3c096 100644
--- a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/lakitu-kernel-rt-5_15-5.15.55.ebuild
+++ b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_15/lakitu-kernel-rt-5_15-5.15.65.ebuild
@@ -14,8 +14,8 @@
EAPI=6
-CROS_WORKON_COMMIT="e4e743b14b3af6c301b0c813f9ee8d516af5b4cf"
-CROS_WORKON_TREE="e4e743b14b3af6c301b0c813f9ee8d516af5b4cf"
+CROS_WORKON_COMMIT="dee7a914f9a1503899051b06a7ed0110618e2011"
+CROS_WORKON_TREE="dee7a914f9a1503899051b06a7ed0110618e2011"
CROS_WORKON_REPO="https://cos.googlesource.com"
CROS_WORKON_PROJECT="third_party/kernel"
CROS_WORKON_LOCALNAME="kernel/v5.15"