edgeos: Sync OSS repo.

Sync the open source repo until we can move the internal repo
downstream of this one.

BUG=b/180686339
TEST=builds.
RELEASE_NOTES=none

Change-Id: I76eca6e0845b1e05fa4b4427d6de5d4d078ade21
Reviewed-on: https://cos-review.googlesource.com/c/cos/overlays/board-overlays/+/21835
Tested-by: Cusky Presubmit Bot <presubmit@cos-infra-prod.iam.gserviceaccount.com>
Reviewed-by: Roy Yang <royyang@google.com>
diff --git a/project-edgeos/sys-apps/loadpin-trigger/files/Makefile b/project-edgeos/sys-apps/loadpin-trigger/files/Makefile
new file mode 100644
index 0000000..1e191bb
--- /dev/null
+++ b/project-edgeos/sys-apps/loadpin-trigger/files/Makefile
@@ -0,0 +1,21 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+obj-m := loadpin_trigger.o
+
+modules:
+	$(MAKE) CC=$(CC) LD=$(LD) -C $(KERNEL_SOURCES) M=$(CURDIR) ARCH=$(ARCH) \
+		KBUILD_OUTPUT=$(KBUILD_OUTPUT) $@
+
+module: modules
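The Makefile above is a thin wrapper over the kernel's out-of-tree module build: the modules target re-invokes make inside KERNEL_SOURCES with M= pointing back at this directory. As a rough sketch of a manual invocation (the kernel source and build-output paths below are placeholders; in the ebuild they are supplied by pkg_setup() via BUILD_PARAMS):

    make KERNEL_SOURCES=/usr/src/linux KBUILD_OUTPUT=/usr/src/linux/build \
         ARCH=x86_64 CC=gcc LD=ld modules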
diff --git a/project-edgeos/sys-apps/loadpin-trigger/files/loadpin_trigger.c b/project-edgeos/sys-apps/loadpin-trigger/files/loadpin_trigger.c
new file mode 100644
index 0000000..dc91cc8
--- /dev/null
+++ b/project-edgeos/sys-apps/loadpin-trigger/files/loadpin_trigger.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Distributed under the terms of the GNU General Public License v2.
+ *
+ * Module to trigger LoadPin. The module calls kernel_read_file_from_path()
+ * to read a dummy file from the rootfs into the kernel, which triggers
+ * LoadPin.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
+#include <linux/kernel_read_file.h>
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ke Wu");
+MODULE_DESCRIPTION("A Linux module to trigger loadpin.");
+MODULE_VERSION("1");
+
+#define MAX_DATA_SIZE 1024
+
+static const char *root_fs_dummy_path = "/etc/loadpin_trigger";
+
+static int __init loadpin_trigger_init(void) {
+  void *data;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
+  size_t size;
+#else
+  loff_t size;
+#endif
+  int rc;
+
+  data = NULL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
+  rc = kernel_read_file_from_path(root_fs_dummy_path, 0, &data,
+                                  MAX_DATA_SIZE, &size,
+                                  READING_UNKNOWN);
+#else
+  rc = kernel_read_file_from_path(root_fs_dummy_path, &data, &size,
+                                  0, READING_UNKNOWN);
+#endif
+  if (rc < 0) {
+    pr_err("Unable to read file: %s (%d)", root_fs_dummy_path, rc);
+    return -EIO;
+  }
+  vfree(data);
+  return 0;
+}
+
+module_init(loadpin_trigger_init);
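A quick way to exercise the module by hand on a test image (illustrative only; how the module gets loaded at boot is handled by the ebuild/OS configuration, which is not shown here):

    # Loading the module reads /etc/loadpin_trigger, which should pin the
    # rootfs when LoadPin is enabled.
    insmod loadpin_trigger.ko
    # A failed read of the trigger file is reported via pr_err.
    dmesg | grep -i loadpin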
diff --git a/project-edgeos/sys-apps/loadpin-trigger/files/loadpin_trigger_dummy b/project-edgeos/sys-apps/loadpin-trigger/files/loadpin_trigger_dummy
new file mode 100644
index 0000000..9759233
--- /dev/null
+++ b/project-edgeos/sys-apps/loadpin-trigger/files/loadpin_trigger_dummy
@@ -0,0 +1 @@
+A dummy file to trigger loadpin.
diff --git a/project-edgeos/sys-apps/loadpin-trigger/loadpin-trigger-1r-r3.ebuild b/project-edgeos/sys-apps/loadpin-trigger/loadpin-trigger-1r-r3.ebuild
new file mode 120000
index 0000000..e51bc52
--- /dev/null
+++ b/project-edgeos/sys-apps/loadpin-trigger/loadpin-trigger-1r-r3.ebuild
@@ -0,0 +1 @@
+loadpin-trigger-1r.ebuild
\ No newline at end of file
diff --git a/project-edgeos/sys-apps/loadpin-trigger/loadpin-trigger-1r.ebuild b/project-edgeos/sys-apps/loadpin-trigger/loadpin-trigger-1r.ebuild
new file mode 100644
index 0000000..afa4a82
--- /dev/null
+++ b/project-edgeos/sys-apps/loadpin-trigger/loadpin-trigger-1r.ebuild
@@ -0,0 +1,53 @@
+# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Distributed under the terms of the GNU General Public License v2.
+
+# This package installs the kernel module loadpin-trigger.ko and configures
+# the OS to load it on boot. The module calls kernel_read_file_from_path to
+# read a dummy file into the kernel, which triggers LoadPin. We used to rely
+# on module loading itself to pin the rootfs, but the recently added kernel
+# parameter loadpin.exclude=kernel-module makes LoadPin ignore module
+# loading.
+
+EAPI=6
+inherit cos-linux-mod
+
+DESCRIPTION="Kernel module to trigger loadpin on boot"
+
+LICENSE="GPL-2"
+SLOT="rt"
+KEYWORDS="*"
+
+DEPEND="virtual/linux-sources"
+
+RDEPEND="
+	${DEPEND}
+"
+
+S="${WORKDIR}"
+
+# Prevent the kernel module signature from being stripped.
+STRIP_MASK+=" *.ko"
+
+MODULE_NAME="loadpin_trigger"
+
+pkg_setup() {
+	MODULE_NAMES="${MODULE_NAME}(::)"
+	# We cannot rely on auto detection because we have two kernels installed.
+	KERNEL_DIR="$(find "${ROOT}/usr/src" -maxdepth 1 -name "lakitu-kernel-rt-[0-9]*")"
+	KBUILD_OUTPUT="${KERNEL_DIR}/build"
+	linux-mod_pkg_setup
+	BUILD_PARAMS="KERNEL_SOURCES=${KV_DIR} KBUILD_OUTPUT=${KBUILD_OUTPUT}"
+}
+
+src_prepare() {
+	cp "${FILESDIR}"/* .
+	cos-linux-mod_src_prepare
+}
+
+src_compile() {
+	cos-linux-mod_src_compile
+}
+
+src_install() {
+	cos-linux-mod_src_install
+}
diff --git a/project-edgeos/sys-apps/loadpin-trigger/loadpin-trigger-1v-r2.ebuild b/project-edgeos/sys-apps/loadpin-trigger/loadpin-trigger-1v-r2.ebuild
new file mode 120000
index 0000000..6ad30c7
--- /dev/null
+++ b/project-edgeos/sys-apps/loadpin-trigger/loadpin-trigger-1v-r2.ebuild
@@ -0,0 +1 @@
+loadpin-trigger-1v.ebuild
\ No newline at end of file
diff --git a/project-edgeos/sys-apps/loadpin-trigger/loadpin-trigger-1v.ebuild b/project-edgeos/sys-apps/loadpin-trigger/loadpin-trigger-1v.ebuild
new file mode 100644
index 0000000..62ce793
--- /dev/null
+++ b/project-edgeos/sys-apps/loadpin-trigger/loadpin-trigger-1v.ebuild
@@ -0,0 +1,60 @@
+# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Distributed under the terms of the GNU General Public License v2.
+
+# This package installs the kernel module loadpin-trigger.ko and configures
+# the OS to load it on boot. The module calls kernel_read_file_from_path to
+# read a dummy file into the kernel, which triggers LoadPin. We used to rely
+# on module loading itself to pin the rootfs, but the recently added kernel
+# parameter loadpin.exclude=kernel-module makes LoadPin ignore module
+# loading.
+
+EAPI=6
+inherit cos-linux-mod
+
+DESCRIPTION="Kernel module to trigger loadpin on boot"
+
+LICENSE="GPL-2"
+SLOT="vanilla"
+KEYWORDS="*"
+
+DEPEND="virtual/linux-sources"
+
+RDEPEND="
+	${DEPEND}
+"
+
+S="${WORKDIR}"
+
+# Prevent the kernel module signature from being stripped.
+STRIP_MASK+=" *.ko"
+
+MODULE_NAME="loadpin_trigger"
+
+pkg_setup() {
+	MODULE_NAMES="${MODULE_NAME}(::)"
+	# We cannot rely on auto detection because we have two kernels installed.
+	KERNEL_DIR="$(find "${ROOT}/usr/src" -maxdepth 1 -name "lakitu-kernel-[0-9]*")"
+	KBUILD_OUTPUT="${KERNEL_DIR}/build"
+	linux-mod_pkg_setup
+	BUILD_PARAMS="KERNEL_SOURCES=${KV_DIR} KBUILD_OUTPUT=${KBUILD_OUTPUT}"
+}
+
+src_prepare() {
+	cp "${FILESDIR}"/* .
+	cos-linux-mod_src_prepare
+}
+
+src_compile() {
+	cos-linux-mod_src_compile
+}
+
+src_install() {
+	cos-linux-mod_src_install
+
+	# Install a dummy file to /etc. The file will be read by
+	# loadpin-trigger so that rootfs will be pinned.
+	if [[ ! -e /etc/loadpin_trigger ]]; then
+		insinto /etc
+		newins "${FILESDIR}"/loadpin_trigger_dummy loadpin_trigger
+	fi
+}
diff --git a/project-edgeos/sys-apps/systemd/files/249-foreignroutingpolicy.patch b/project-edgeos/sys-apps/systemd/files/249-foreignroutingpolicy.patch
new file mode 100644
index 0000000..f1b3ea8
--- /dev/null
+++ b/project-edgeos/sys-apps/systemd/files/249-foreignroutingpolicy.patch
@@ -0,0 +1,115 @@
+commit d94dfe7053d49fa62c4bfc07b7f3fc2227c10aff
+Author: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date:   Sun Apr 11 21:33:51 2021 +0900
+
+    network: introduce ManageForeignRoutingPolicyRules= boolean setting in networkd.conf
+    
+    The commit 0b81225e5791f660506f7db0ab88078cf296b771 makes that networkd
+    remove all foreign rules except those with "proto kernel".
+    
+    But, in some situation, people may want to manage routing policy rules
+    with other tools, e.g. 'ip' command. To support such the situation,
+    this introduce ManageForeignRoutingPolicyRules= boolean setting.
+    
+    Closes #19106.
+
+diff --git a/man/networkd.conf.xml b/man/networkd.conf.xml
+index dcce2095ed..23422c6c94 100644
+--- a/man/networkd.conf.xml
++++ b/man/networkd.conf.xml
+@@ -62,6 +62,15 @@
+         If <varname>SpeedMeter=no</varname>, the value is ignored. Defaults to 10sec.</para></listitem>
+       </varlistentry>
+ 
++      <varlistentry>
++        <term><varname>ManageForeignRoutingPolicyRules=</varname></term>
++        <listitem><para>A boolean. When true, <command>systemd-networkd</command> will remove rules
++        that are not configured in .network files (except for rules with protocol
++        <literal>kernel</literal>). When false, it will not remove any foreign rules, keeping them even
++        if they are not configured in a .network file. Defaults to yes.
++        </para></listitem>
++      </varlistentry>
++
+       <varlistentry>
+         <term><varname>ManageForeignRoutes=</varname></term>
+         <listitem><para>A boolean. When true, <command>systemd-networkd</command> will store any routes
+diff --git a/src/network/networkd-gperf.gperf b/src/network/networkd-gperf.gperf
+index b2a2f55790..74d509896a 100644
+--- a/src/network/networkd-gperf.gperf
++++ b/src/network/networkd-gperf.gperf
+@@ -20,9 +20,10 @@ struct ConfigPerfItem;
+ %struct-type
+ %includes
+ %%
+-Network.SpeedMeter,            config_parse_bool,                      0,          offsetof(Manager, use_speed_meter)
+-Network.SpeedMeterIntervalSec, config_parse_sec,                       0,          offsetof(Manager, speed_meter_interval_usec)
+-Network.ManageForeignRoutes,   config_parse_bool,                      0,          offsetof(Manager, manage_foreign_routes)
+-Network.RouteTable,            config_parse_route_table_names,         0,          0
+-DHCP.DUIDType,                 config_parse_duid_type,                 0,          offsetof(Manager, duid)
+-DHCP.DUIDRawData,              config_parse_duid_rawdata,              0,          offsetof(Manager, duid)
++Network.SpeedMeter,                      config_parse_bool,                      0,          offsetof(Manager, use_speed_meter)
++Network.SpeedMeterIntervalSec,           config_parse_sec,                       0,          offsetof(Manager, speed_meter_interval_usec)
++Network.ManageForeignRoutingPolicyRules, config_parse_bool,                      0,          offsetof(Manager, manage_foreign_rules)
++Network.ManageForeignRoutes,             config_parse_bool,                      0,          offsetof(Manager, manage_foreign_routes)
++Network.RouteTable,                      config_parse_route_table_names,         0,          0
++DHCP.DUIDType,                           config_parse_duid_type,                 0,          offsetof(Manager, duid)
++DHCP.DUIDRawData,                        config_parse_duid_rawdata,              0,          offsetof(Manager, duid)
+diff --git a/src/network/networkd-manager.c b/src/network/networkd-manager.c
+index a8db2cc44b..20957ecd89 100644
+--- a/src/network/networkd-manager.c
++++ b/src/network/networkd-manager.c
+@@ -380,6 +380,7 @@ int manager_new(Manager **ret) {
+         *m = (Manager) {
+                 .speed_meter_interval_usec = SPEED_METER_DEFAULT_TIME_INTERVAL,
+                 .manage_foreign_routes = true,
++                .manage_foreign_rules = true,
+                 .ethtool_fd = -1,
+         };
+ 
+@@ -655,6 +656,9 @@ static int manager_enumerate_rules(Manager *m) {
+         assert(m);
+         assert(m->rtnl);
+ 
++        if (!m->manage_foreign_rules)
++                return 0;
++
+         r = sd_rtnl_message_new_routing_policy_rule(m->rtnl, &req, RTM_GETRULE, 0);
+         if (r < 0)
+                 return r;
+diff --git a/src/network/networkd-manager.h b/src/network/networkd-manager.h
+index 7f630fccc7..0fae7a5c2e 100644
+--- a/src/network/networkd-manager.h
++++ b/src/network/networkd-manager.h
+@@ -32,6 +32,7 @@ struct Manager {
+         bool dirty;
+         bool restarting;
+         bool manage_foreign_routes;
++        bool manage_foreign_rules;
+ 
+         Set *dirty_links;
+ 
+diff --git a/src/network/networkd-routing-policy-rule.c b/src/network/networkd-routing-policy-rule.c
+index a7fddfd58f..03bdd4e640 100644
+--- a/src/network/networkd-routing-policy-rule.c
++++ b/src/network/networkd-routing-policy-rule.c
+@@ -977,6 +977,8 @@ int manager_rtnl_process_rule(sd_netlink *rtnl, sd_netlink_message *message, Man
+         case RTM_NEWRULE:
+                 if (rule)
+                         log_routing_policy_rule_debug(tmp, tmp->family, "Received remembered", NULL, m);
++                else if (!m->manage_foreign_routes)
++                        log_routing_policy_rule_debug(tmp, tmp->family, "Ignoring received foreign", NULL, m);
+                 else {
+                         log_routing_policy_rule_debug(tmp, tmp->family, "Remembering foreign", NULL, m);
+                         r = routing_policy_rule_consume_foreign(m, TAKE_PTR(tmp));
+diff --git a/src/network/networkd.conf b/src/network/networkd.conf
+index 0eac327314..4e4e8b8d07 100644
+--- a/src/network/networkd.conf
++++ b/src/network/networkd.conf
+@@ -15,6 +15,7 @@
+ [Network]
+ #SpeedMeter=no
+ #SpeedMeterIntervalSec=10sec
++#ManageForeignRoutingPolicyRules=yes
+ #ManageForeignRoutes=yes
+ #RouteTable=
+ 
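The new setting lives in the [Network] section of networkd.conf. A hypothetical way to opt out of foreign-rule management on a single machine (the drop-in file name below is made up for illustration):

    mkdir -p /etc/systemd/networkd.conf.d
    cat > /etc/systemd/networkd.conf.d/10-foreign-rules.conf <<'EOF'
    [Network]
    ManageForeignRoutingPolicyRules=no
    EOF
    systemctl restart systemd-networkd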
diff --git a/project-edgeos/sys-apps/systemd/systemd-248-r3.ebuild b/project-edgeos/sys-apps/systemd/systemd-248-r5.ebuild
similarity index 100%
rename from project-edgeos/sys-apps/systemd/systemd-248-r3.ebuild
rename to project-edgeos/sys-apps/systemd/systemd-248-r5.ebuild
diff --git a/project-edgeos/sys-apps/systemd/systemd-248.ebuild b/project-edgeos/sys-apps/systemd/systemd-248.ebuild
index c905849..7fc31e2 100644
--- a/project-edgeos/sys-apps/systemd/systemd-248.ebuild
+++ b/project-edgeos/sys-apps/systemd/systemd-248.ebuild
@@ -211,6 +211,9 @@
 			"${FILESDIR}"/239-change-paths-for-udev-rules-init-reboot.patch
 			# Boot into multi-user.target instead of graphical.target.
 			"${FILESDIR}"/239-default-target.patch
+			# Support the ManageForeignRoutingPolicyRules= option.
+			"${FILESDIR}"/249-foreignroutingpolicy.patch
+			"${FILESDIR}"/248-do-not-use-strdupa-on-a-patch.patch
 	)
 	default
 }
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-5_10/files/common/no_nfs.config b/project-edgeos/sys-kernel/lakitu-kernel-5_10/files/common/no_nfs.config
new file mode 100644
index 0000000..4ae4e70
--- /dev/null
+++ b/project-edgeos/sys-kernel/lakitu-kernel-5_10/files/common/no_nfs.config
@@ -0,0 +1,31 @@
+CONFIG_NFS_FS=n
+CONFIG_NFS_V3=n
+CONFIG_NFS_V3_ACL=n
+CONFIG_NFS_V4=n
+CONFIG_NFS_V4_1=n
+CONFIG_NFS_V4_2=n
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN=n
+CONFIG_NFS_V4_SECURITY_LABEL=n
+CONFIG_NFS_FSCACHE=n
+CONFIG_NFS_USE_KERNEL_DNS=n
+CONFIG_NFS_DEBUG=n
+CONFIG_NFS_DISABLE_UDP_SUPPORT=n
+CONFIG_NFSD=n
+CONFIG_NFSD_V2_ACL=n
+CONFIG_NFSD_V3=n
+CONFIG_NFSD_V3_ACL=n
+CONFIG_NFSD_V4=n
+CONFIG_NFSD_V4_SECURITY_LABEL=n
+CONFIG_NFS_ACL_SUPPORT=n
+CONFIG_NFS_COMMON=n
+CONFIG_SUNRPC=n
+CONFIG_SUNRPC_GSS=n
+CONFIG_SUNRPC_BACKCHANNEL=n
+CONFIG_SUNRPC_DEBUG=n
+CONFIG_PNFS_FILE_LAYOUT=n
+CONFIG_PNFS_BLOCK=n
+CONFIG_PNFS_FLEXFILE_LAYOUT=n
+CONFIG_GRACE_PERIOD=n
+CONFIG_LOCKD=n
+CONFIG_LOCKD_V4=n
+
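How this fragment is merged into the final kernel config is left to the kernel ebuild machinery and is not part of this change. As a generic illustration only, the upstream helper scripts/kconfig/merge_config.sh can fold such a fragment into an existing .config (paths are placeholders):

    # Run from the kernel source tree; -m merges fragments without running
    # "make olddefconfig" afterwards.
    ./scripts/kconfig/merge_config.sh -m .config \
        /path/to/files/common/no_nfs.config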
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-5_10/lakitu-kernel-5_10-5.10.49-r4.ebuild b/project-edgeos/sys-kernel/lakitu-kernel-5_10/lakitu-kernel-5_10-5.10.49-r4.ebuild
deleted file mode 120000
index d9d62d1..0000000
--- a/project-edgeos/sys-kernel/lakitu-kernel-5_10/lakitu-kernel-5_10-5.10.49-r4.ebuild
+++ /dev/null
@@ -1 +0,0 @@
-lakitu-kernel-5_10-5.10.49.ebuild
\ No newline at end of file
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-5_10/lakitu-kernel-5_10-5.10.58-r7.ebuild b/project-edgeos/sys-kernel/lakitu-kernel-5_10/lakitu-kernel-5_10-5.10.58-r7.ebuild
new file mode 120000
index 0000000..2b18bfd
--- /dev/null
+++ b/project-edgeos/sys-kernel/lakitu-kernel-5_10/lakitu-kernel-5_10-5.10.58-r7.ebuild
@@ -0,0 +1 @@
+lakitu-kernel-5_10-5.10.58.ebuild
\ No newline at end of file
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-5_10/lakitu-kernel-5_10-5.10.49.ebuild b/project-edgeos/sys-kernel/lakitu-kernel-5_10/lakitu-kernel-5_10-5.10.58.ebuild
similarity index 93%
rename from project-edgeos/sys-kernel/lakitu-kernel-5_10/lakitu-kernel-5_10-5.10.49.ebuild
rename to project-edgeos/sys-kernel/lakitu-kernel-5_10/lakitu-kernel-5_10-5.10.58.ebuild
index c3ed77b..9fb2e02 100644
--- a/project-edgeos/sys-kernel/lakitu-kernel-5_10/lakitu-kernel-5_10-5.10.49.ebuild
+++ b/project-edgeos/sys-kernel/lakitu-kernel-5_10/lakitu-kernel-5_10-5.10.58.ebuild
@@ -13,8 +13,8 @@
 
 EAPI=6
 
-CROS_WORKON_COMMIT="e27cedcf6607268099cee58624c297ffef166e72"
-CROS_WORKON_TREE="2ea0aa63317ad878956c5e09295b581c742f67a6"
+CROS_WORKON_COMMIT="44374465de137e8fdd16b14fdae56edd9726733e"
+CROS_WORKON_TREE="53d8b0aa233120407593306d2459195b9fa637a1"
 CROS_WORKON_REPO="https://cos.googlesource.com"
 CROS_WORKON_PROJECT="third_party/kernel"
 CROS_WORKON_LOCALNAME="kernel/v5.10"
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0000-patch-5.10.35-rt39.patch b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0000-patch-5.10.59-rt51.patch
similarity index 96%
rename from project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0000-patch-5.10.35-rt39.patch
rename to project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0000-patch-5.10.59-rt51.patch
index 0a1f84b..6043731 100644
--- a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0000-patch-5.10.35-rt39.patch
+++ b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0000-patch-5.10.59-rt51.patch
@@ -225,10 +225,10 @@
  		read-side critical sections.  It also permits
  		spinlocks blocking while in RCU read-side critical
 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 26bfe7ae711b..9ee9d99cd811 100644
+index f103667d3727..9dd83d919db1 100644
 --- a/Documentation/admin-guide/kernel-parameters.txt
 +++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -4085,6 +4085,10 @@
+@@ -4091,6 +4091,10 @@
  			value, meaning that RCU_SOFTIRQ is used by default.
  			Specify rcutree.use_softirq=0 to use rcuc kthreads.
  
@@ -239,7 +239,7 @@
  	rcutree.rcu_fanout_exact= [KNL]
  			Disable autobalancing of the rcu_node combining
  			tree.  This is used by rcutorture, and might
-@@ -4463,6 +4467,13 @@
+@@ -4469,6 +4473,13 @@
  			only normal grace-period primitives.  No effect
  			on CONFIG_TINY_RCU kernels.
  
@@ -655,7 +655,7 @@
  	  The address space of ARM processors is only 4 Gigabytes large
  	  and it has to accommodate user address space, kernel address
 diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
-index fc56fc3e1931..c279a8a463a2 100644
+index 9575b404019c..707068f852c2 100644
 --- a/arch/arm/include/asm/fixmap.h
 +++ b/arch/arm/include/asm/fixmap.h
 @@ -7,14 +7,14 @@
@@ -840,10 +840,10 @@
  #endif /* __KERNEL__ */
  #endif /* __ASM_ARM_THREAD_INFO_H */
 diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
-index be8050b0c3df..884e40a525ce 100644
+index 70993af22d80..024c65c3a0f2 100644
 --- a/arch/arm/kernel/asm-offsets.c
 +++ b/arch/arm/kernel/asm-offsets.c
-@@ -42,6 +42,7 @@ int main(void)
+@@ -43,6 +43,7 @@ int main(void)
    BLANK();
    DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
    DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
@@ -935,10 +935,10 @@
  		} else {
  			if (unlikely(!user_mode(regs)))
 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
-index 48099c6e1e4a..609b7d3104ea 100644
+index 8aa7fa949c23..3693706ba0a1 100644
 --- a/arch/arm/kernel/smp.c
 +++ b/arch/arm/kernel/smp.c
-@@ -672,9 +672,7 @@ static void do_handle_IPI(int ipinr)
+@@ -671,9 +671,7 @@ static void do_handle_IPI(int ipinr)
  		break;
  
  	case IPI_CPU_BACKTRACE:
@@ -1205,7 +1205,7 @@
  #define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
  
 diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
-index 80e946b2abee..994f997b1572 100644
+index e83f0982b99c..7a5770d825b9 100644
 --- a/arch/arm64/include/asm/preempt.h
 +++ b/arch/arm64/include/asm/preempt.h
 @@ -70,17 +70,43 @@ static inline bool __preempt_count_dec_and_test(void)
@@ -1317,26 +1317,26 @@
  #ifdef CONFIG_ARM64_SW_TTBR0_PAN
    DEFINE(TSK_TI_TTBR0,		offsetof(struct task_struct, thread_info.ttbr0));
 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
-index 2da82c139e1c..32c907a71ca4 100644
+index fe83d6d67ec3..cd2fc5556c8d 100644
 --- a/arch/arm64/kernel/entry.S
 +++ b/arch/arm64/kernel/entry.S
-@@ -651,9 +651,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+@@ -515,9 +515,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
  	mrs	x0, daif
  	orr	x24, x24, x0
  alternative_else_nop_endif
 -	cbnz	x24, 1f				// preempt count != 0 || NMI return path
 -	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
 +
-+	cbz	x24, 1f					// (need_resched + count) == 0
-+	cbnz	w24, 2f					// count != 0
++	cbz     x24, 1f                                 // (need_resched + count) == 0
++	cbnz    w24, 2f                                 // count != 0
 +
-+	ldr	w24, [tsk, #TSK_TI_PREEMPT_LAZY]	// get preempt lazy count
-+	cbnz	w24, 2f					// preempt lazy count != 0
++	ldr     w24, [tsk, #TSK_TI_PREEMPT_LAZY]        // get preempt lazy count
++	cbnz    w24, 2f                                 // preempt lazy count != 0
 +
-+	ldr	x0, [tsk, #TSK_TI_FLAGS]		// get flags
-+	tbz	x0, #TIF_NEED_RESCHED_LAZY, 2f		// needs rescheduling?
++	ldr     x0, [tsk, #TSK_TI_FLAGS]                // get flags
++	tbz     x0, #TIF_NEED_RESCHED_LAZY, 2f          // needs rescheduling?
  1:
-+	bl	arm64_preempt_schedule_irq		// irq en/disable is done inside
++	bl      arm64_preempt_schedule_irq              // irq en/disable is done inside
 +2:
  #endif
  
@@ -1388,10 +1388,10 @@
  
  /*
 diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
-index 50852992752b..aafe59f680e8 100644
+index e62005317ce2..fe94a3e1f849 100644
 --- a/arch/arm64/kernel/signal.c
 +++ b/arch/arm64/kernel/signal.c
-@@ -918,7 +918,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
+@@ -919,7 +919,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
  		/* Check valid user FS if needed */
  		addr_limit_user_check();
  
@@ -1401,10 +1401,10 @@
  			local_daif_restore(DAIF_PROCCTX_NOIRQ);
  
 diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
-index a1c2c955474e..df1d0d1511d1 100644
+index 5e5dd99e8cee..a94211fb63c8 100644
 --- a/arch/arm64/kvm/arm.c
 +++ b/arch/arm64/kvm/arm.c
-@@ -706,7 +706,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+@@ -708,7 +708,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
  		 * involves poking the GIC, which must be done in a
  		 * non-preemptible context.
  		 */
@@ -1413,7 +1413,7 @@
  
  		kvm_pmu_flush_hwstate(vcpu);
  
-@@ -755,7 +755,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+@@ -757,7 +757,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
  				kvm_timer_sync_user(vcpu);
  			kvm_vgic_sync_hwstate(vcpu);
  			local_irq_enable();
@@ -1422,7 +1422,7 @@
  			continue;
  		}
  
-@@ -827,7 +827,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+@@ -829,7 +829,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
  		/* Exit types that need handling before we can be preempted */
  		handle_exit_early(vcpu, ret);
  
@@ -1851,10 +1851,10 @@
  
  static void highmem_setup(void)
 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
-index 2000bb2b0220..6b762bebff33 100644
+index 1a63f592034e..336ebd552990 100644
 --- a/arch/mips/Kconfig
 +++ b/arch/mips/Kconfig
-@@ -2719,6 +2719,7 @@ config WAR_MIPS34K_MISSED_ITLB
+@@ -2722,6 +2722,7 @@ config WAR_MIPS34K_MISSED_ITLB
  config HIGHMEM
  	bool "High Memory Support"
  	depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
@@ -1885,7 +1885,7 @@
  	__end_of_fixed_addresses
  };
 diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
-index f1f788b57166..19edf8e69971 100644
+index 9f021cf51aa7..1716181ea66d 100644
 --- a/arch/mips/include/asm/highmem.h
 +++ b/arch/mips/include/asm/highmem.h
 @@ -24,7 +24,7 @@
@@ -2257,7 +2257,7 @@
 -}
 -EXPORT_SYMBOL(kunmap_atomic_high);
 diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
-index 8348feaaf46e..bf9b2310fc93 100644
+index 5e88c351e6a4..f3fa02b8838a 100644
 --- a/arch/openrisc/mm/init.c
 +++ b/arch/openrisc/mm/init.c
 @@ -33,7 +33,6 @@
@@ -2312,7 +2312,7 @@
 -
 -#endif
 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
-index 31ed8083571f..71529672b738 100644
+index 5afa0ebd78ca..1d3e6f351bf2 100644
 --- a/arch/powerpc/Kconfig
 +++ b/arch/powerpc/Kconfig
 @@ -146,6 +146,7 @@ config PPC
@@ -2361,7 +2361,7 @@
  #ifdef __BIG_ENDIAN
  #define BITOFF_CAL(size, off)	((sizeof(u32) - size - off) * BITS_PER_BYTE)
 diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
-index 6bfc87915d5d..8d03c16a3663 100644
+index 591b2f4deed5..947b5b9c4424 100644
 --- a/arch/powerpc/include/asm/fixmap.h
 +++ b/arch/powerpc/include/asm/fixmap.h
 @@ -20,7 +20,7 @@
@@ -2372,8 +2372,8 @@
 +#include <asm/kmap_size.h>
  #endif
  
- #ifdef CONFIG_KASAN
-@@ -55,7 +55,7 @@ enum fixed_addresses {
+ #ifdef CONFIG_PPC64
+@@ -61,7 +61,7 @@ enum fixed_addresses {
  	FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
  #ifdef CONFIG_HIGHMEM
  	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
@@ -3222,10 +3222,10 @@
  		printf("%s", buf);
  	}
 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
-index 4a2a12be04c9..6f1fdcd3b5db 100644
+index 896b68e541b2..54a4e77f2d2a 100644
 --- a/arch/s390/Kconfig
 +++ b/arch/s390/Kconfig
-@@ -181,6 +181,7 @@ config S390
+@@ -183,6 +183,7 @@ config S390
  	select HAVE_RSEQ
  	select HAVE_SYSCALL_TRACEPOINTS
  	select HAVE_VIRT_CPU_ACCOUNTING
@@ -3838,7 +3838,7 @@
  
  static struct kmsg_dumper kmsg_dumper = {
 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 3a5ecb1039bf..a2c12ad69173 100644
+index f3c8a8110f60..78f90306ca39 100644
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
 @@ -15,6 +15,7 @@ config X86_32
@@ -4184,7 +4184,7 @@
  
  void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
 diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
-index 38f4936045ab..41d3be7da969 100644
+index 8b9bfaad6e66..d31b0886592a 100644
 --- a/arch/x86/include/asm/fpu/api.h
 +++ b/arch/x86/include/asm/fpu/api.h
 @@ -28,6 +28,7 @@ extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
@@ -4325,7 +4325,7 @@
  #include <asm/nospec-branch.h>
  
 diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
-index 69485ca13665..471dec2d78e1 100644
+index a334dd0d7c42..afe37a8c6c24 100644
 --- a/arch/x86/include/asm/preempt.h
 +++ b/arch/x86/include/asm/preempt.h
 @@ -89,20 +89,54 @@ static __always_inline void __preempt_count_sub(int val)
@@ -4644,10 +4644,10 @@
  }
 +#endif
 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 0d8383b82bca..b53e8e693ee5 100644
+index 6ab42cdcb8a4..2fd39b56e417 100644
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -7908,6 +7908,14 @@ int kvm_arch_init(void *opaque)
+@@ -7948,6 +7948,14 @@ int kvm_arch_init(void *opaque)
  		goto out;
  	}
  
@@ -4997,7 +4997,7 @@
  	kmap_waitqueues_init();
  }
 diff --git a/block/blk-mq.c b/block/blk-mq.c
-index 2a1eff60c797..b293f74ea8ca 100644
+index a368eb6dc647..c98ba39f8004 100644
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
 @@ -41,7 +41,7 @@
@@ -5171,7 +5171,7 @@
  }
  EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
  
-@@ -1604,14 +1585,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+@@ -1605,14 +1586,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
  		return;
  
  	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
@@ -5189,7 +5189,7 @@
  	}
  
  	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
-@@ -3909,7 +3890,7 @@ static int __init blk_mq_init(void)
+@@ -3949,7 +3930,7 @@ static int __init blk_mq_init(void)
  	int i;
  
  	for_each_possible_cpu(i)
@@ -5346,7 +5346,7 @@
  	ktime_t ac_time;
  #endif
 diff --git a/drivers/char/random.c b/drivers/char/random.c
-index f462b9d2f5a5..a5370228c17f 100644
+index 340ad21491e2..209220eca36f 100644
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
 @@ -1252,28 +1252,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
@@ -5472,7 +5472,7 @@
  	if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
  		context_tasklet((unsigned long)&ctx->context);
 diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
-index 4b7ee3fa9224..abb18c958e3b 100644
+index 847f33ffc4ae..ae79c3300129 100644
 --- a/drivers/firmware/efi/efi.c
 +++ b/drivers/firmware/efi/efi.c
 @@ -66,7 +66,7 @@ struct mm_struct efi_mm = {
@@ -5545,10 +5545,10 @@
  	if (intel_vgpu_active(dev_priv))
  		return;
 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
-index bd3046e5a934..1850f13e9d19 100644
+index 0c083af5a59d..2abf043d3d9d 100644
 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
 +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
-@@ -1081,7 +1081,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
+@@ -1080,7 +1080,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
  		struct i915_ggtt *ggtt = cache_to_ggtt(cache);
  
  		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
@@ -5557,7 +5557,7 @@
  
  		if (drm_mm_node_allocated(&cache->node)) {
  			ggtt->vm.clear_range(&ggtt->vm,
-@@ -1147,7 +1147,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
+@@ -1146,7 +1146,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
  
  	if (cache->vaddr) {
  		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
@@ -5566,7 +5566,7 @@
  	} else {
  		struct i915_vma *vma;
  		int err;
-@@ -1195,8 +1195,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
+@@ -1194,8 +1194,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
  		offset += page << PAGE_SHIFT;
  	}
  
@@ -5931,10 +5931,10 @@
  	}
  
 diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
-index 2bc364412e8b..9350d238ba54 100644
+index 544a9e4df2a8..5ee5171d46ef 100644
 --- a/drivers/gpu/drm/qxl/qxl_object.c
 +++ b/drivers/gpu/drm/qxl/qxl_object.c
-@@ -172,8 +172,8 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
+@@ -173,8 +173,8 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
  	return 0;
  }
  
@@ -5945,7 +5945,7 @@
  {
  	unsigned long offset;
  	void *rptr;
-@@ -188,7 +188,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
+@@ -189,7 +189,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
  		goto fallback;
  
  	offset = bo->tbo.mem.start << PAGE_SHIFT;
@@ -5954,7 +5954,7 @@
  fallback:
  	if (bo->kptr) {
  		rptr = bo->kptr + (page_offset * PAGE_SIZE);
-@@ -214,14 +214,14 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
+@@ -215,14 +215,14 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
  	ttm_bo_kunmap(&bo->kmap);
  }
  
@@ -5973,10 +5973,10 @@
   fallback:
  	qxl_bo_kunmap(bo);
 diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
-index 6b434e5ef795..02f1e0374228 100644
+index 5762ea40d047..6ae89b1b36f4 100644
 --- a/drivers/gpu/drm/qxl/qxl_object.h
 +++ b/drivers/gpu/drm/qxl/qxl_object.h
-@@ -88,8 +88,8 @@ extern int qxl_bo_create(struct qxl_device *qdev,
+@@ -89,8 +89,8 @@ extern int qxl_bo_create(struct qxl_device *qdev,
  			 struct qxl_bo **bo_ptr);
  extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
  extern void qxl_bo_kunmap(struct qxl_bo *bo);
@@ -5988,10 +5988,10 @@
  extern void qxl_bo_unref(struct qxl_bo **bo);
  extern int qxl_bo_pin(struct qxl_bo *bo);
 diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
-index 4fae3e393da1..9f37b51e61c6 100644
+index b2a475a0ca4a..b665a33b449b 100644
 --- a/drivers/gpu/drm/qxl/qxl_release.c
 +++ b/drivers/gpu/drm/qxl/qxl_release.c
-@@ -408,7 +408,7 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+@@ -414,7 +414,7 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
  	union qxl_release_info *info;
  	struct qxl_bo *bo = release->release_bo;
  
@@ -6000,7 +6000,7 @@
  	if (!ptr)
  		return NULL;
  	info = ptr + (release->release_offset & ~PAGE_MASK);
-@@ -423,7 +423,7 @@ void qxl_release_unmap(struct qxl_device *qdev,
+@@ -429,7 +429,7 @@ void qxl_release_unmap(struct qxl_device *qdev,
  	void *ptr;
  
  	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
@@ -6010,10 +6010,10 @@
  
  void qxl_release_fence_buffer_objects(struct qxl_release *release)
 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
-index e0ae911ef427..781edf550436 100644
+index 71bdafac9210..95ce311f434b 100644
 --- a/drivers/gpu/drm/radeon/radeon_display.c
 +++ b/drivers/gpu/drm/radeon/radeon_display.c
-@@ -1822,6 +1822,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+@@ -1823,6 +1823,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
  	struct radeon_device *rdev = dev->dev_private;
  
  	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -6021,7 +6021,7 @@
  
  	/* Get optional system timestamp before query. */
  	if (stime)
-@@ -1914,6 +1915,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+@@ -1915,6 +1916,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
  		*etime = ktime_get();
  
  	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
@@ -7002,10 +7002,10 @@
  	/* Find first taken slot. */
  	for (slot = 0; slot < ATH_BCBUF; slot++) {
 diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
-index 03ed5cb1c4b2..7370cdc1abdb 100644
+index 44e15f0e3a2e..dc23b21263ce 100644
 --- a/drivers/pci/controller/pci-hyperv.c
 +++ b/drivers/pci/controller/pci-hyperv.c
-@@ -1458,7 +1458,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+@@ -1457,7 +1457,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
  	 * Prevents hv_pci_onchannelcallback() from running concurrently
  	 * in the tasklet.
  	 */
@@ -7112,10 +7112,10 @@
  	/* peek cache of free slot */
  	if (pool->left != FC_XID_UNKNOWN) {
 diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
-index 52bb21205bb6..5cbcaafbb4aa 100644
+index 34aa2714f3c9..42cd2baa7663 100644
 --- a/drivers/tty/serial/8250/8250.h
 +++ b/drivers/tty/serial/8250/8250.h
-@@ -130,12 +130,55 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
+@@ -131,12 +131,55 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
  	up->dl_write(up, value);
  }
  
@@ -7172,7 +7172,7 @@
  	return true;
  }
  
-@@ -144,7 +187,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
+@@ -145,7 +188,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
  	if (!(up->ier & UART_IER_THRI))
  		return false;
  	up->ier &= ~UART_IER_THRI;
@@ -7280,10 +7280,10 @@
  		if (ier & UART_IER_MSI)
  			value |= UART_MCR_MDCE | UART_MCR_FCM;
 diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
-index f7d3023f860f..8133713dcf5e 100644
+index fb65dc601b23..5bc734c70da4 100644
 --- a/drivers/tty/serial/8250/8250_mtk.c
 +++ b/drivers/tty/serial/8250/8250_mtk.c
-@@ -213,12 +213,37 @@ static void mtk8250_shutdown(struct uart_port *port)
+@@ -218,12 +218,37 @@ static void mtk8250_shutdown(struct uart_port *port)
  
  static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask)
  {
@@ -7324,10 +7324,10 @@
  
  static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
 diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
-index b0af13074cd3..b05f8c34b291 100644
+index 3de0a16e055a..c060bd1b07b4 100644
 --- a/drivers/tty/serial/8250/8250_port.c
 +++ b/drivers/tty/serial/8250/8250_port.c
-@@ -757,7 +757,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
+@@ -761,7 +761,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
  			serial_out(p, UART_EFR, UART_EFR_ECB);
  			serial_out(p, UART_LCR, 0);
  		}
@@ -7336,7 +7336,7 @@
  		if (p->capabilities & UART_CAP_EFR) {
  			serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
  			serial_out(p, UART_EFR, efr);
-@@ -1429,7 +1429,7 @@ static void serial8250_stop_rx(struct uart_port *port)
+@@ -1435,7 +1435,7 @@ static void serial8250_stop_rx(struct uart_port *port)
  
  	up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
  	up->port.read_status_mask &= ~UART_LSR_DR;
@@ -7345,7 +7345,7 @@
  
  	serial8250_rpm_put(up);
  }
-@@ -1459,7 +1459,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p)
+@@ -1465,7 +1465,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p)
  		serial8250_clear_and_reinit_fifos(p);
  
  		p->ier |= UART_IER_RLSI | UART_IER_RDI;
@@ -7354,7 +7354,7 @@
  	}
  }
  EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
-@@ -1687,7 +1687,7 @@ static void serial8250_disable_ms(struct uart_port *port)
+@@ -1693,7 +1693,7 @@ static void serial8250_disable_ms(struct uart_port *port)
  	mctrl_gpio_disable_ms(up->gpios);
  
  	up->ier &= ~UART_IER_MSI;
@@ -7363,7 +7363,7 @@
  }
  
  static void serial8250_enable_ms(struct uart_port *port)
-@@ -1703,7 +1703,7 @@ static void serial8250_enable_ms(struct uart_port *port)
+@@ -1709,7 +1709,7 @@ static void serial8250_enable_ms(struct uart_port *port)
  	up->ier |= UART_IER_MSI;
  
  	serial8250_rpm_get(up);
@@ -7372,7 +7372,7 @@
  	serial8250_rpm_put(up);
  }
  
-@@ -2118,14 +2118,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
+@@ -2136,14 +2136,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
  	struct uart_8250_port *up = up_to_u8250p(port);
  
  	serial8250_rpm_get(up);
@@ -7388,7 +7388,7 @@
  
  	wait_for_xmitr(up, BOTH_EMPTY);
  	/*
-@@ -2138,7 +2131,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
+@@ -2156,7 +2149,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
  	 *	and restore the IER
  	 */
  	wait_for_xmitr(up, BOTH_EMPTY);
@@ -7397,7 +7397,7 @@
  	serial8250_rpm_put(up);
  }
  
-@@ -2441,7 +2434,7 @@ void serial8250_do_shutdown(struct uart_port *port)
+@@ -2459,7 +2452,7 @@ void serial8250_do_shutdown(struct uart_port *port)
  	 */
  	spin_lock_irqsave(&port->lock, flags);
  	up->ier = 0;
@@ -7406,7 +7406,7 @@
  	spin_unlock_irqrestore(&port->lock, flags);
  
  	synchronize_irq(port->irq);
-@@ -2771,7 +2764,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+@@ -2802,7 +2795,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
  	if (up->capabilities & UART_CAP_RTOIE)
  		up->ier |= UART_IER_RTOIE;
  
@@ -7415,7 +7415,7 @@
  
  	if (up->capabilities & UART_CAP_EFR) {
  		unsigned char efr = 0;
-@@ -3237,7 +3230,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults);
+@@ -3268,7 +3261,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults);
  
  #ifdef CONFIG_SERIAL_8250_CONSOLE
  
@@ -7424,7 +7424,7 @@
  {
  	struct uart_8250_port *up = up_to_u8250p(port);
  
-@@ -3245,6 +3238,18 @@ static void serial8250_console_putchar(struct uart_port *port, int ch)
+@@ -3276,6 +3269,18 @@ static void serial8250_console_putchar(struct uart_port *port, int ch)
  	serial_port_out(port, UART_TX, ch);
  }
  
@@ -7443,7 +7443,7 @@
  /*
   *	Restore serial console when h/w power-off detected
   */
-@@ -3266,6 +3271,32 @@ static void serial8250_console_restore(struct uart_8250_port *up)
+@@ -3297,6 +3302,32 @@ static void serial8250_console_restore(struct uart_8250_port *up)
  	serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
  }
  
@@ -7476,7 +7476,7 @@
  /*
   *	Print a string to the serial port trying not to disturb
   *	any possible real use of the port...
-@@ -3282,24 +3313,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+@@ -3313,24 +3344,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
  	struct uart_port *port = &up->port;
  	unsigned long flags;
  	unsigned int ier;
@@ -7503,7 +7503,7 @@
  
  	/* check scratch reg to see if port powered off during system sleep */
  	if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
-@@ -3313,7 +3332,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+@@ -3344,7 +3363,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
  		mdelay(port->rs485.delay_rts_before_send);
  	}
  
@@ -7513,7 +7513,7 @@
  
  	/*
  	 *	Finally, wait for transmitter to become empty
-@@ -3326,8 +3347,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+@@ -3357,8 +3378,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
  		if (em485->tx_stopped)
  			up->rs485_stop_tx(up);
  	}
@@ -7523,7 +7523,7 @@
  
  	/*
  	 *	The receive handling will happen properly because the
-@@ -3339,8 +3359,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+@@ -3370,8 +3390,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
  	if (up->msr_saved_flags)
  		serial8250_modem_status(up);
  
@@ -7533,7 +7533,7 @@
  }
  
  static unsigned int probe_baud(struct uart_port *port)
-@@ -3360,6 +3379,7 @@ static unsigned int probe_baud(struct uart_port *port)
+@@ -3391,6 +3410,7 @@ static unsigned int probe_baud(struct uart_port *port)
  
  int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
  {
@@ -7541,7 +7541,7 @@
  	int baud = 9600;
  	int bits = 8;
  	int parity = 'n';
-@@ -3369,6 +3389,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
+@@ -3400,6 +3420,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
  	if (!port->iobase && !port->membase)
  		return -ENODEV;
  
@@ -7594,10 +7594,10 @@
  	clk_disable(uap->clk);
  }
 diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
-index 76b94d0ff586..80371598efea 100644
+index 84e8158088cd..342005ed5ebf 100644
 --- a/drivers/tty/serial/omap-serial.c
 +++ b/drivers/tty/serial/omap-serial.c
-@@ -1301,13 +1301,10 @@ serial_omap_console_write(struct console *co, const char *s,
+@@ -1311,13 +1311,10 @@ serial_omap_console_write(struct console *co, const char *s,
  
  	pm_runtime_get_sync(up->dev);
  
@@ -7614,7 +7614,7 @@
  
  	/*
  	 * First save the IER then disable the interrupts
-@@ -1336,8 +1333,7 @@ serial_omap_console_write(struct console *co, const char *s,
+@@ -1346,8 +1343,7 @@ serial_omap_console_write(struct console *co, const char *s,
  	pm_runtime_mark_last_busy(up->dev);
  	pm_runtime_put_autosuspend(up->dev);
  	if (locked)
@@ -7625,10 +7625,10 @@
  
  static int __init
 diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
-index 04f75a44f243..60cbce1995a5 100644
+index dae9a57d7ec0..9a6a0ec4d1fb 100644
 --- a/fs/afs/dir_silly.c
 +++ b/fs/afs/dir_silly.c
-@@ -236,7 +236,7 @@ int afs_silly_iput(struct dentry *dentry, struct inode *inode)
+@@ -239,7 +239,7 @@ int afs_silly_iput(struct dentry *dentry, struct inode *inode)
  	struct dentry *alias;
  	int ret;
  
@@ -7810,7 +7810,7 @@
  	if (unlikely(IS_DEADDIR(dir_inode)))
  		return ERR_PTR(-ENOENT);
 diff --git a/fs/namespace.c b/fs/namespace.c
-index c7fbb50a5aaa..5e261e50625a 100644
+index 175312428cdf..e8a1e90b4d34 100644
 --- a/fs/namespace.c
 +++ b/fs/namespace.c
 @@ -14,6 +14,7 @@
@@ -7880,10 +7880,10 @@
  	status = -EBUSY;
  	spin_lock(&dentry->d_lock);
 diff --git a/fs/proc/array.c b/fs/proc/array.c
-index 65ec2029fa80..7052441be967 100644
+index 18a4588c35be..decaa7768044 100644
 --- a/fs/proc/array.c
 +++ b/fs/proc/array.c
-@@ -382,9 +382,9 @@ static inline void task_context_switch_counts(struct seq_file *m,
+@@ -384,9 +384,9 @@ static inline void task_context_switch_counts(struct seq_file *m,
  static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
  {
  	seq_printf(m, "Cpus_allowed:\t%*pb\n",
@@ -7896,7 +7896,7 @@
  
  static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)
 diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 55ce0ee9c5c7..a66f399476fc 100644
+index 5d52aea8d7e7..d89526cfedf2 100644
 --- a/fs/proc/base.c
 +++ b/fs/proc/base.c
 @@ -96,6 +96,7 @@
@@ -8020,7 +8020,7 @@
 -
 -#endif
 diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
-index d683f5e6d791..71c1535db56a 100644
+index b4d43a4af5f7..ac255e889462 100644
 --- a/include/asm-generic/preempt.h
 +++ b/include/asm-generic/preempt.h
 @@ -79,6 +79,9 @@ static __always_inline bool should_resched(int preempt_offset)
@@ -8116,10 +8116,10 @@
 +
  #endif /* _LINUX_CONSOLE_H */
 diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
-index bc56287a1ed1..0042ef362511 100644
+index 8fb893ed205e..5c0a0883b91a 100644
 --- a/include/linux/cpuhotplug.h
 +++ b/include/linux/cpuhotplug.h
-@@ -152,6 +152,7 @@ enum cpuhp_state {
+@@ -153,6 +153,7 @@ enum cpuhp_state {
  	CPUHP_AP_ONLINE,
  	CPUHP_TEARDOWN_CPU,
  	CPUHP_AP_ONLINE_IDLE,
@@ -8174,7 +8174,7 @@
  extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
  extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
 diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
-index 2915f56ad421..5a9e3e3769ce 100644
+index edb5c186b0b7..3f49e65169c6 100644
 --- a/include/linux/debug_locks.h
 +++ b/include/linux/debug_locks.h
 @@ -3,8 +3,7 @@
@@ -9421,7 +9421,7 @@
 +
 +#endif
 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index 3433ecc9c1f7..02649396954b 100644
+index 4eb38918da8f..2a83d965b00e 100644
 --- a/include/linux/mm_types.h
 +++ b/include/linux/mm_types.h
 @@ -12,6 +12,7 @@
@@ -9432,7 +9432,7 @@
  #include <linux/page-flags-layout.h>
  #include <linux/workqueue.h>
  #include <linux/seqlock.h>
-@@ -557,6 +558,9 @@ struct mm_struct {
+@@ -570,6 +571,9 @@ struct mm_struct {
  		bool tlb_flush_batched;
  #endif
  		struct uprobes_state uprobes_state;
@@ -9643,10 +9643,10 @@
 +
 +#endif
 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
-index d63cb862d58e..1630690ba709 100644
+index 5491ad5f48a9..cd9e5b3f1831 100644
 --- a/include/linux/nfs_xdr.h
 +++ b/include/linux/nfs_xdr.h
-@@ -1670,7 +1670,7 @@ struct nfs_unlinkdata {
+@@ -1675,7 +1675,7 @@ struct nfs_unlinkdata {
  	struct nfs_removeargs args;
  	struct nfs_removeres res;
  	struct dentry *dentry;
@@ -10151,7 +10151,7 @@
 +
 +#endif
 diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
-index c5adba5e79e7..f251ba473f77 100644
+index 7d12c76e8fa4..d6d279e03541 100644
 --- a/include/linux/rcupdate.h
 +++ b/include/linux/rcupdate.h
 @@ -52,6 +52,11 @@ void __rcu_read_unlock(void);
@@ -10589,7 +10589,7 @@
   * lock for reading
   */
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 76cd21fa5501..e688e9307a21 100644
+index 2660ee4b08ad..8944be44aaf4 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -34,6 +34,7 @@
@@ -10633,7 +10633,7 @@
  /*
   * set_special_state() should be used for those states when the blocking task
   * can not use the regular condition based wait-loop. In that case we must
-@@ -637,6 +640,13 @@ struct wake_q_node {
+@@ -645,6 +648,13 @@ struct wake_q_node {
  	struct wake_q_node *next;
  };
  
@@ -10647,7 +10647,7 @@
  struct task_struct {
  #ifdef CONFIG_THREAD_INFO_IN_TASK
  	/*
-@@ -647,6 +657,8 @@ struct task_struct {
+@@ -655,6 +665,8 @@ struct task_struct {
  #endif
  	/* -1 unrunnable, 0 runnable, >0 stopped: */
  	volatile long			state;
@@ -10656,7 +10656,7 @@
  
  	/*
  	 * This begins the randomizable portion of task_struct. Only
-@@ -722,6 +734,11 @@ struct task_struct {
+@@ -730,6 +742,11 @@ struct task_struct {
  	int				nr_cpus_allowed;
  	const cpumask_t			*cpus_ptr;
  	cpumask_t			cpus_mask;
@@ -10668,7 +10668,7 @@
  
  #ifdef CONFIG_PREEMPT_RCU
  	int				rcu_read_lock_nesting;
-@@ -968,11 +985,16 @@ struct task_struct {
+@@ -976,11 +993,16 @@ struct task_struct {
  	/* Signal handlers: */
  	struct signal_struct		*signal;
  	struct sighand_struct __rcu		*sighand;
@@ -10685,7 +10685,7 @@
  	unsigned long			sas_ss_sp;
  	size_t				sas_ss_size;
  	unsigned int			sas_ss_flags;
-@@ -999,6 +1021,7 @@ struct task_struct {
+@@ -1007,6 +1029,7 @@ struct task_struct {
  	raw_spinlock_t			pi_lock;
  
  	struct wake_q_node		wake_q;
@@ -10693,7 +10693,7 @@
  
  #ifdef CONFIG_RT_MUTEXES
  	/* PI waiters blocked on a rt_mutex held by this task: */
-@@ -1026,6 +1049,9 @@ struct task_struct {
+@@ -1034,6 +1057,9 @@ struct task_struct {
  	int				softirq_context;
  	int				irq_config;
  #endif
@@ -10703,7 +10703,7 @@
  
  #ifdef CONFIG_LOCKDEP
  # define MAX_LOCK_DEPTH			48UL
-@@ -1311,6 +1337,7 @@ struct task_struct {
+@@ -1319,6 +1345,7 @@ struct task_struct {
  	unsigned int			sequential_io;
  	unsigned int			sequential_io_avg;
  #endif
@@ -10711,7 +10711,7 @@
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  	unsigned long			task_state_change;
  #endif
-@@ -1755,6 +1782,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr);
+@@ -1763,6 +1790,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr);
  
  extern int wake_up_state(struct task_struct *tsk, unsigned int state);
  extern int wake_up_process(struct task_struct *tsk);
@@ -10719,7 +10719,7 @@
  extern void wake_up_new_task(struct task_struct *tsk);
  
  #ifdef CONFIG_SMP
-@@ -1845,6 +1873,89 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
+@@ -1853,6 +1881,89 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
  	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
  }
  
@@ -10950,7 +10950,7 @@
  /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
  static inline int valid_signal(unsigned long sig)
 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index a828cf99c521..2e4f80cd41df 100644
+index 2d01b2bbb746..4277cbc5c6f5 100644
 --- a/include/linux/skbuff.h
 +++ b/include/linux/skbuff.h
 @@ -295,6 +295,7 @@ struct sk_buff_head {
@@ -10975,7 +10975,7 @@
  		struct lock_class_key *class)
  {
 diff --git a/include/linux/smp.h b/include/linux/smp.h
-index 9f13966d3d92..c1f6aaade44a 100644
+index 04f44e0aa2e0..cae66815f9e2 100644
 --- a/include/linux/smp.h
 +++ b/include/linux/smp.h
 @@ -239,6 +239,9 @@ static inline int get_boot_cpu_id(void)
@@ -11859,7 +11859,7 @@
  
  #endif /* _LINUX_KERNEL_VTIME_H */
 diff --git a/include/linux/wait.h b/include/linux/wait.h
-index 27fb99cfeb02..93b42387b4c6 100644
+index f8b0704968a1..0b5ab4e537f6 100644
 --- a/include/linux/wait.h
 +++ b/include/linux/wait.h
 @@ -10,6 +10,7 @@
@@ -11958,7 +11958,7 @@
 +
 +#endif
 diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
-index b59d73d529ba..e816b6a3ef2b 100644
+index 22e1bc72b979..9b376b87bd54 100644
 --- a/include/net/netns/xfrm.h
 +++ b/include/net/netns/xfrm.h
 @@ -73,7 +73,7 @@ struct netns_xfrm {
@@ -11967,11 +11967,11 @@
  	spinlock_t		xfrm_state_lock;
 -	seqcount_t		xfrm_state_hash_generation;
 +	seqcount_spinlock_t	xfrm_state_hash_generation;
+ 	seqcount_spinlock_t	xfrm_policy_hash_generation;
  
  	spinlock_t xfrm_policy_lock;
- 	struct mutex xfrm_cfg_mutex;
 diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
-index 3648164faa06..6a0434d2c279 100644
+index f8631ad3c868..9854155a3e88 100644
 --- a/include/net/sch_generic.h
 +++ b/include/net/sch_generic.h
 @@ -10,6 +10,7 @@
@@ -11982,7 +11982,7 @@
  #include <linux/refcount.h>
  #include <linux/workqueue.h>
  #include <linux/mutex.h>
-@@ -100,7 +101,7 @@ struct Qdisc {
+@@ -101,7 +102,7 @@ struct Qdisc {
  	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
  	struct qdisc_skb_head	q;
  	struct gnet_stats_basic_packed bstats;
@@ -11991,7 +11991,7 @@
  	struct gnet_stats_queue	qstats;
  	unsigned long		state;
  	struct Qdisc            *next_sched;
-@@ -141,7 +142,11 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
+@@ -142,7 +143,11 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
  {
  	if (qdisc->flags & TCQ_F_NOLOCK)
  		return spin_is_locked(&qdisc->seqlock);
@@ -12003,7 +12003,7 @@
  }
  
  static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
-@@ -165,17 +170,35 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+@@ -203,17 +208,35 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
  	} else if (qdisc_is_running(qdisc)) {
  		return false;
  	}
@@ -12036,10 +12036,10 @@
 +#else
  	write_seqcount_end(&qdisc->running);
 +#endif
- 	if (qdisc->flags & TCQ_F_NOLOCK)
+ 	if (qdisc->flags & TCQ_F_NOLOCK) {
  		spin_unlock(&qdisc->seqlock);
- }
-@@ -550,7 +573,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
+ 
+@@ -595,7 +618,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
  	return qdisc_lock(root);
  }
  
@@ -12444,10 +12444,10 @@
  		/* if @may_sleep, play nice and yield if necessary */
  		if (may_sleep && (need_resched() ||
 diff --git a/kernel/cpu.c b/kernel/cpu.c
-index 2b8d7a5db383..4e11e91010e1 100644
+index 67c22941b5f2..016f2d0686b6 100644
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -1606,7 +1606,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
+@@ -1655,7 +1655,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
  		.name			= "ap:online",
  	},
  	/*
@@ -12456,7 +12456,7 @@
  	 * this itself.
  	 */
  	[CPUHP_TEARDOWN_CPU] = {
-@@ -1615,6 +1615,13 @@ static struct cpuhp_step cpuhp_hp_states[] = {
+@@ -1664,6 +1664,13 @@ static struct cpuhp_step cpuhp_hp_states[] = {
  		.teardown.single	= takedown_cpu,
  		.cant_stop		= true,
  	},
@@ -12567,7 +12567,7 @@
  	spin_unlock(&sighand->siglock);
  
 diff --git a/kernel/fork.c b/kernel/fork.c
-index 7c044d377926..1e97f271ac59 100644
+index 096945ef49ad..27f042d75850 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
 @@ -42,6 +42,7 @@
@@ -12645,10 +12645,10 @@
  	p->utime = p->stime = p->gtime = 0;
  #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 diff --git a/kernel/futex.c b/kernel/futex.c
-index 7cf1987cfdb4..90785b5a5b78 100644
+index 98a6e1b80bfe..b2b275bc1958 100644
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1499,6 +1499,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
+@@ -1498,6 +1498,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
  	struct task_struct *new_owner;
  	bool postunlock = false;
  	DEFINE_WAKE_Q(wake_q);
@@ -12656,7 +12656,7 @@
  	int ret = 0;
  
  	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-@@ -1548,14 +1549,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
+@@ -1547,14 +1548,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
  		 * not fail.
  		 */
  		pi_state_update_owner(pi_state, new_owner);
@@ -12674,7 +12674,7 @@
  
  	return ret;
  }
-@@ -2156,6 +2158,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+@@ -2155,6 +2157,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
  				 */
  				requeue_pi_wake_futex(this, &key2, hb2);
  				continue;
@@ -12691,7 +12691,7 @@
  			} else if (ret) {
  				/*
  				 * rt_mutex_start_proxy_lock() detected a
-@@ -2848,7 +2860,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2847,7 +2859,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
  		goto no_block;
  	}
  
@@ -12700,7 +12700,7 @@
  
  	/*
  	 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -3173,7 +3185,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -3172,7 +3184,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
  {
  	struct hrtimer_sleeper timeout, *to;
  	struct rt_mutex_waiter rt_waiter;
@@ -12709,7 +12709,7 @@
  	union futex_key key2 = FUTEX_KEY_INIT;
  	struct futex_q q = futex_q_init;
  	int res, ret;
-@@ -3194,7 +3206,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -3193,7 +3205,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
  	 * The waiter is allocated on our stack, manipulated by the requeue
  	 * code while we sleep on uaddr.
  	 */
@@ -12718,7 +12718,7 @@
  
  	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
  	if (unlikely(ret != 0))
-@@ -3225,20 +3237,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -3224,20 +3236,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
  	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
  	futex_wait_queue_me(hb, &q, to);
  
@@ -12785,7 +12785,7 @@
  
  	/* Check if the requeue code acquired the second futex for us. */
  	if (!q.rt_waiter) {
-@@ -3247,14 +3294,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -3246,14 +3293,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
  		 * did a lock-steal - fix up the PI-state in that case.
  		 */
  		if (q.pi_state && (q.pi_state->owner != current)) {
@@ -12803,7 +12803,7 @@
  			/*
  			 * Adjust the return value. It's either -EFAULT or
  			 * success (1) but the caller expects 0 for success.
-@@ -3273,7 +3321,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -3272,7 +3320,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
  		pi_mutex = &q.pi_state->pi_mutex;
  		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
  
@@ -13063,7 +13063,7 @@
  	NULL
  };
 diff --git a/kernel/kthread.c b/kernel/kthread.c
-index 5edf7e19ab26..cdfaf64263b3 100644
+index 9825cf89c614..3a7ecfbe0f50 100644
 --- a/kernel/kthread.c
 +++ b/kernel/kthread.c
 @@ -243,6 +243,7 @@ EXPORT_SYMBOL_GPL(kthread_parkme);
@@ -13150,10 +13150,10 @@
  obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
  obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
 diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
-index 38d7c03e694c..4d515978dae9 100644
+index 8ae9d7abebc0..0008bb2235a7 100644
 --- a/kernel/locking/lockdep.c
 +++ b/kernel/locking/lockdep.c
-@@ -5292,6 +5292,7 @@ static noinstr void check_flags(unsigned long flags)
+@@ -5408,6 +5408,7 @@ static noinstr void check_flags(unsigned long flags)
  		}
  	}
  
@@ -13161,7 +13161,7 @@
  	/*
  	 * We dont accurately track softirq state in e.g.
  	 * hardirq contexts (such as on 4KSTACKS), so only
-@@ -5306,6 +5307,7 @@ static noinstr void check_flags(unsigned long flags)
+@@ -5422,6 +5423,7 @@ static noinstr void check_flags(unsigned long flags)
  			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
  		}
  	}
@@ -16006,7 +16006,7 @@
 -static inline bool printk_percpu_data_ready(void) { return false; }
 -#endif /* CONFIG_PRINTK */
 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-index d0df95346ab3..f56fd2e34cc7 100644
+index d0df95346ab3..53d90278494b 100644
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
 @@ -44,6 +44,9 @@
@@ -17976,7 +17976,7 @@
   * @syslog: include the "<4>" prefixes
   * @buf: buffer to copy the line to
   * @size: maximum size of the buffer
-@@ -3365,116 +3331,254 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+@@ -3365,116 +3331,256 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
   * A return value of FALSE indicates that there are no more records to
   * read.
   */
@@ -18259,7 +18259,9 @@
 -	logbuf_unlock_irqrestore(flags);
 -}
 -EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
-+	may_sleep = (preemptible() && !in_softirq());
++	may_sleep = (preemptible() &&
++		     !in_softirq() &&
++		     system_state >= SYSTEM_RUNNING);
  
 -#endif
 +	seq = prb_next_seq(prb);
@@ -18726,13 +18728,13 @@
 -	printk_safe_flush();
 -}
 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
-index 79de1294f8eb..2f09a6dbe140 100644
+index eb4d04cb3aaf..ec2d9fd37a11 100644
 --- a/kernel/ptrace.c
 +++ b/kernel/ptrace.c
-@@ -180,7 +180,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
- 
+@@ -196,7 +196,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
  	spin_lock_irq(&task->sighand->siglock);
- 	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+ 	if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
+ 	    !__fatal_signal_pending(task)) {
 -		task->state = __TASK_TRACED;
 +		unsigned long flags;
 +
@@ -18745,7 +18747,7 @@
  		ret = true;
  	}
  	spin_unlock_irq(&task->sighand->siglock);
-@@ -190,8 +197,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
+@@ -206,8 +213,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
  
  static void ptrace_unfreeze_traced(struct task_struct *task)
  {
@@ -18756,7 +18758,7 @@
  
  	WARN_ON(!task->ptrace || task->parent != current);
  
-@@ -200,12 +207,19 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
+@@ -216,12 +223,19 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
  	 * Recheck state under the lock to close this race.
  	 */
  	spin_lock_irq(&task->sighand->siglock);
@@ -18947,7 +18949,7 @@
  }
  
 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
-index 5dc36c6e80fd..782a3152bafc 100644
+index 8c3ba0185082..88da7a9689f5 100644
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
 @@ -100,8 +100,10 @@ static struct rcu_state rcu_state = {
@@ -18963,7 +18965,7 @@
  static bool rcu_fanout_exact;
  module_param(rcu_fanout_exact, bool, 0444);
 diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
-index 39334d2d2b37..b95ae86c40a7 100644
+index 849f0aa99333..dd94a602a6d2 100644
 --- a/kernel/rcu/update.c
 +++ b/kernel/rcu/update.c
 @@ -56,8 +56,10 @@
@@ -18979,7 +18981,7 @@
  
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 3a150445e0cb..f6b931d82443 100644
+index 84c105902027..b815467e959b 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
 @@ -64,7 +64,11 @@ const_debug unsigned int sysctl_sched_features =
@@ -19121,7 +19123,7 @@
  void resched_cpu(int cpu)
  {
  	struct rq *rq = cpu_rq(cpu);
-@@ -1694,6 +1763,82 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+@@ -1692,6 +1761,82 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
  
  #ifdef CONFIG_SMP
  
@@ -19204,7 +19206,7 @@
  /*
   * Per-CPU kthreads are allowed to run on !active && online CPUs, see
   * __set_cpus_allowed_ptr() and select_fallback_rq().
-@@ -1703,7 +1848,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
+@@ -1701,7 +1846,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
  	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
  		return false;
  
@@ -19213,7 +19215,7 @@
  		return cpu_online(cpu);
  
  	return cpu_active(cpu);
-@@ -1748,8 +1893,16 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
+@@ -1746,8 +1891,21 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
  }
  
  struct migration_arg {
@@ -19224,22 +19226,25 @@
 +	struct set_affinity_pending	*pending;
 +};
 +
++/*
++ * @refs: number of wait_for_completion()
++ * @stop_pending: is @stop_work in use
++ */
 +struct set_affinity_pending {
 +	refcount_t		refs;
++	unsigned int		stop_pending;
 +	struct completion	done;
 +	struct cpu_stop_work	stop_work;
 +	struct migration_arg	arg;
  };
  
  /*
-@@ -1781,16 +1934,19 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
-  */
+@@ -1780,15 +1938,17 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
  static int migration_cpu_stop(void *data)
  {
-+	struct set_affinity_pending *pending;
  	struct migration_arg *arg = data;
++	struct set_affinity_pending *pending = arg->pending;
  	struct task_struct *p = arg->task;
-+	int dest_cpu = arg->dest_cpu;
  	struct rq *rq = this_rq();
 +	bool complete = false;
  	struct rq_flags rf;
@@ -19253,12 +19258,11 @@
  	/*
  	 * We need to explicitly wake pending tasks before running
  	 * __migrate_task() such that we will not miss enforcing cpus_ptr
-@@ -1800,21 +1956,137 @@ static int migration_cpu_stop(void *data)
+@@ -1798,21 +1958,121 @@ static int migration_cpu_stop(void *data)
  
  	raw_spin_lock(&p->pi_lock);
  	rq_lock(rq, &rf);
 +
-+	pending = p->migration_pending;
  	/*
  	 * If task_rq(p) != rq, it cannot be migrated here, because we're
  	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
@@ -19269,33 +19273,27 @@
 +			goto out;
 +
 +		if (pending) {
-+			p->migration_pending = NULL;
++			if (p->migration_pending == pending)
++				p->migration_pending = NULL;
 +			complete = true;
-+		}
 +
-+		/* migrate_enable() --  we must not race against SCA */
-+		if (dest_cpu < 0) {
-+			/*
-+			 * When this was migrate_enable() but we no longer
-+			 * have a @pending, a concurrent SCA 'fixed' things
-+			 * and we should be valid again. Nothing to do.
-+			 */
-+			if (!pending) {
-+				WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask));
++			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
 +				goto out;
-+			}
-+
-+			dest_cpu = cpumask_any_distribute(&p->cpus_mask);
 +		}
 +
  		if (task_on_rq_queued(p))
--			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
-+			rq = __migrate_task(rq, &rf, p, dest_cpu);
+ 			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
  		else
--			p->wake_cpu = arg->dest_cpu;
-+			p->wake_cpu = dest_cpu;
+ 			p->wake_cpu = arg->dest_cpu;
 +
-+	} else if (dest_cpu < 0 || pending) {
++		/*
++		 * XXX __migrate_task() can fail, at which point we might end
++		 * up running on a dodgy CPU, AFAICT this can only happen
++		 * during CPU hotplug, at which point we'll get pushed out
++		 * anyway, so it's probably not a big deal.
++		 */
++
++	} else if (pending) {
 +		/*
 +		 * This happens when we get migrated between migrate_enable()'s
 +		 * preempt_enable() and scheduling the stopper task. At that
@@ -19310,27 +19308,19 @@
 +		 * ->pi_lock, so the allowed mask is stable - if it got
 +		 * somewhere allowed, we're done.
 +		 */
-+		if (pending && cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
-+			p->migration_pending = NULL;
++		if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
++			if (p->migration_pending == pending)
++				p->migration_pending = NULL;
 +			complete = true;
 +			goto out;
 +		}
 +
 +		/*
-+		 * When this was migrate_enable() but we no longer have an
-+		 * @pending, a concurrent SCA 'fixed' things and we should be
-+		 * valid again. Nothing to do.
-+		 */
-+		if (!pending) {
-+			WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask));
-+			goto out;
-+		}
-+
-+		/*
 +		 * When migrate_enable() hits a rq mis-match we can't reliably
 +		 * determine is_migration_disabled() and so have to chase after
 +		 * it.
 +		 */
++		WARN_ON_ONCE(!pending->stop_pending);
 +		task_rq_unlock(rq, p, &rf);
 +		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
 +				    &pending->arg, &pending->stop_work);
@@ -19339,15 +19329,12 @@
 -	rq_unlock(rq, &rf);
 -	raw_spin_unlock(&p->pi_lock);
 +out:
++	if (pending)
++		pending->stop_pending = false;
 +	task_rq_unlock(rq, p, &rf);
 +
 +	if (complete)
 +		complete_all(&pending->done);
-+
-+	/* For pending->{arg,stop_work} */
-+	pending = arg->pending;
-+	if (pending && refcount_dec_and_test(&pending->refs))
-+		wake_up_var(&pending->refs);
  
 -	local_irq_enable();
 +	return 0;
@@ -19396,7 +19383,7 @@
  	return 0;
  }
  
-@@ -1822,18 +2094,39 @@ static int migration_cpu_stop(void *data)
+@@ -1820,18 +2080,39 @@ static int migration_cpu_stop(void *data)
   * sched_class::set_cpus_allowed must do the below, but is not required to
   * actually call this function.
   */
@@ -19439,7 +19426,7 @@
  
  	queued = task_on_rq_queued(p);
  	running = task_current(rq, p);
-@@ -1849,7 +2142,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+@@ -1847,7 +2128,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  	if (running)
  		put_prev_task(rq, p);
  
@@ -19448,7 +19435,7 @@
  
  	if (queued)
  		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
-@@ -1857,6 +2150,208 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+@@ -1855,6 +2136,222 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  		set_next_task(rq, p);
  }
  
@@ -19532,11 +19519,7 @@
 +			    int dest_cpu, unsigned int flags)
 +{
 +	struct set_affinity_pending my_pending = { }, *pending = NULL;
-+	struct migration_arg arg = {
-+		.task = p,
-+		.dest_cpu = dest_cpu,
-+	};
-+	bool complete = false;
++	bool stop_pending, complete = false;
 +
 +	/* Can the task run on the task's current CPU? If so, we're done */
 +	if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
@@ -19548,12 +19531,16 @@
 +			push_task = get_task_struct(p);
 +		}
 +
++		/*
++		 * If there are pending waiters, but no pending stop_work,
++		 * then complete now.
++		 */
 +		pending = p->migration_pending;
-+		if (pending) {
-+			refcount_inc(&pending->refs);
++		if (pending && !pending->stop_pending) {
 +			p->migration_pending = NULL;
 +			complete = true;
 +		}
++
 +		task_rq_unlock(rq, p, rf);
 +
 +		if (push_task) {
@@ -19562,7 +19549,7 @@
 +		}
 +
 +		if (complete)
-+			goto do_complete;
++			complete_all(&pending->done);
 +
 +		return 0;
 +	}
@@ -19573,10 +19560,25 @@
 +			/* Install the request */
 +			refcount_set(&my_pending.refs, 1);
 +			init_completion(&my_pending.done);
++			my_pending.arg = (struct migration_arg) {
++				.task = p,
++				.dest_cpu = dest_cpu,
++				.pending = &my_pending,
++			};
++
 +			p->migration_pending = &my_pending;
 +		} else {
 +			pending = p->migration_pending;
 +			refcount_inc(&pending->refs);
++			/*
++			 * Affinity has changed, but we've already installed a
++			 * pending. migration_cpu_stop() *must* see this, else
++			 * we risk a completion of the pending despite having a
++			 * task on a disallowed CPU.
++			 *
++			 * Serialized by p->pi_lock, so this is safe.
++			 */
++			pending->arg.dest_cpu = dest_cpu;
 +		}
 +	}
 +	pending = p->migration_pending;
@@ -19597,45 +19599,41 @@
 +		return -EINVAL;
 +	}
 +
-+	if (flags & SCA_MIGRATE_ENABLE) {
-+
-+		refcount_inc(&pending->refs); /* pending->{arg,stop_work} */
-+		p->migration_flags &= ~MDF_PUSH;
-+		task_rq_unlock(rq, p, rf);
-+
-+		pending->arg = (struct migration_arg) {
-+			.task = p,
-+			.dest_cpu = -1,
-+			.pending = pending,
-+		};
-+
-+		stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
-+				    &pending->arg, &pending->stop_work);
-+
-+		return 0;
-+	}
-+
 +	if (task_running(rq, p) || p->state == TASK_WAKING) {
 +		/*
-+		 * Lessen races (and headaches) by delegating
-+		 * is_migration_disabled(p) checks to the stopper, which will
-+		 * run on the same CPU as said p.
++		 * MIGRATE_ENABLE gets here because 'p == current', but for
++		 * anything else we cannot do is_migration_disabled(), punt
++		 * and have the stopper function handle it all race-free.
 +		 */
-+		task_rq_unlock(rq, p, rf);
-+		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++		stop_pending = pending->stop_pending;
++		if (!stop_pending)
++			pending->stop_pending = true;
 +
++		if (flags & SCA_MIGRATE_ENABLE)
++			p->migration_flags &= ~MDF_PUSH;
++
++		task_rq_unlock(rq, p, rf);
++
++		if (!stop_pending) {
++			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
++					    &pending->arg, &pending->stop_work);
++		}
++
++		if (flags & SCA_MIGRATE_ENABLE)
++			return 0;
 +	} else {
 +
 +		if (!is_migration_disabled(p)) {
 +			if (task_on_rq_queued(p))
 +				rq = move_queued_task(rq, rf, p, dest_cpu);
 +
-+			p->migration_pending = NULL;
-+			complete = true;
++			if (!pending->stop_pending) {
++				p->migration_pending = NULL;
++				complete = true;
++			}
 +		}
 +		task_rq_unlock(rq, p, rf);
 +
-+do_complete:
 +		if (complete)
 +			complete_all(&pending->done);
 +	}
@@ -19643,7 +19641,7 @@
 +	wait_for_completion(&pending->done);
 +
 +	if (refcount_dec_and_test(&pending->refs))
-+		wake_up_var(&pending->refs);
++		wake_up_var(&pending->refs); /* No UaF, just an address */
 +
 +	/*
 +	 * Block the original owner of &pending until all subsequent callers
@@ -19651,13 +19649,16 @@
 +	 */
 +	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
 +
++	/* ARGH */
++	WARN_ON_ONCE(my_pending.stop_pending);
++
 +	return 0;
 +}
 +
  /*
   * Change a given task's CPU affinity. Migrate the thread to a
   * proper CPU and schedule it away if the CPU it's executing on
-@@ -1867,7 +2362,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+@@ -1865,7 +2362,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
   * call is not atomic; no spinlocks may be held.
   */
  static int __set_cpus_allowed_ptr(struct task_struct *p,
@@ -19667,7 +19668,7 @@
  {
  	const struct cpumask *cpu_valid_mask = cpu_active_mask;
  	unsigned int dest_cpu;
-@@ -1878,9 +2374,14 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+@@ -1876,9 +2374,14 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
  	rq = task_rq_lock(p, &rf);
  	update_rq_clock(rq);
  
@@ -19684,7 +19685,7 @@
  		 */
  		cpu_valid_mask = cpu_online_mask;
  	}
-@@ -1889,13 +2390,22 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+@@ -1887,13 +2390,22 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
  	 * Must re-check here, to close a race against __kthread_bind(),
  	 * sched_setaffinity() is not guaranteed to observe the flag.
  	 */
@@ -19710,7 +19711,7 @@
  
  	/*
  	 * Picking a ~random cpu helps in cases where we are changing affinity
-@@ -1908,7 +2418,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+@@ -1906,7 +2418,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
  		goto out;
  	}
  
@@ -19719,7 +19720,7 @@
  
  	if (p->flags & PF_KTHREAD) {
  		/*
-@@ -1920,23 +2430,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+@@ -1918,23 +2430,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
  			p->nr_cpus_allowed != 1);
  	}
  
@@ -19744,7 +19745,7 @@
  out:
  	task_rq_unlock(rq, p, &rf);
  
-@@ -1945,7 +2440,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+@@ -1943,7 +2440,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
  
  int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
  {
@@ -19753,7 +19754,7 @@
  }
  EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
  
-@@ -1986,6 +2481,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+@@ -1984,6 +2481,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
  	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
  	 */
  	WARN_ON_ONCE(!cpu_online(new_cpu));
@@ -19762,7 +19763,7 @@
  #endif
  
  	trace_sched_migrate_task(p, new_cpu);
-@@ -2118,6 +2615,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
+@@ -2116,6 +2615,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
  }
  #endif /* CONFIG_NUMA_BALANCING */
  
@@ -19781,7 +19782,7 @@
  /*
   * wait_task_inactive - wait for a thread to unschedule.
   *
-@@ -2162,7 +2671,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+@@ -2160,7 +2671,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  		 * is actually now running somewhere else!
  		 */
  		while (task_running(rq, p)) {
@@ -19790,7 +19791,7 @@
  				return 0;
  			cpu_relax();
  		}
-@@ -2177,7 +2686,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+@@ -2175,7 +2686,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  		running = task_running(rq, p);
  		queued = task_on_rq_queued(p);
  		ncsw = 0;
@@ -19800,7 +19801,7 @@
  			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
  		task_rq_unlock(rq, p, &rf);
  
-@@ -2316,6 +2826,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
+@@ -2314,6 +2826,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
  			}
  			fallthrough;
  		case possible:
@@ -19813,7 +19814,7 @@
  			do_set_cpus_allowed(p, cpu_possible_mask);
  			state = fail;
  			break;
-@@ -2350,7 +2866,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
+@@ -2348,7 +2866,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
  {
  	lockdep_assert_held(&p->pi_lock);
  
@@ -19822,7 +19823,7 @@
  		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
  	else
  		cpu = cpumask_any(p->cpus_ptr);
-@@ -2373,6 +2889,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
+@@ -2371,6 +2889,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
  
  void sched_set_stop_task(int cpu, struct task_struct *stop)
  {
@@ -19830,7 +19831,7 @@
  	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
  	struct task_struct *old_stop = cpu_rq(cpu)->stop;
  
-@@ -2388,6 +2905,20 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
+@@ -2386,6 +2905,20 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
  		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
  
  		stop->sched_class = &stop_sched_class;
@@ -19851,7 +19852,7 @@
  	}
  
  	cpu_rq(cpu)->stop = stop;
-@@ -2401,15 +2932,23 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
+@@ -2399,15 +2932,23 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
  	}
  }
  
@@ -19878,7 +19879,7 @@
  
  static void
  ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
-@@ -2827,7 +3366,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+@@ -2825,7 +3366,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
  	int cpu, success = 0;
  
  	preempt_disable();
@@ -19887,7 +19888,7 @@
  		/*
  		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
  		 * == smp_processor_id()'. Together this means we can special
-@@ -2857,8 +3396,26 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+@@ -2855,8 +3396,26 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
  	 */
  	raw_spin_lock_irqsave(&p->pi_lock, flags);
  	smp_mb__after_spinlock();
@@ -19915,7 +19916,7 @@
  
  	trace_sched_waking(p);
  
-@@ -3047,6 +3604,18 @@ int wake_up_process(struct task_struct *p)
+@@ -3045,6 +3604,18 @@ int wake_up_process(struct task_struct *p)
  }
  EXPORT_SYMBOL(wake_up_process);
  
@@ -19934,7 +19935,7 @@
  int wake_up_state(struct task_struct *p, unsigned int state)
  {
  	return try_to_wake_up(p, state, 0);
-@@ -3100,6 +3669,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -3098,6 +3669,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
  	init_numa_balancing(clone_flags, p);
  #ifdef CONFIG_SMP
  	p->wake_entry.u_flags = CSD_TYPE_TTWU;
@@ -19942,7 +19943,7 @@
  #endif
  }
  
-@@ -3293,6 +3863,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -3291,6 +3863,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
  	p->on_cpu = 0;
  #endif
  	init_task_preempt_count(p);
@@ -19952,7 +19953,7 @@
  #ifdef CONFIG_SMP
  	plist_node_init(&p->pushable_tasks, MAX_PRIO);
  	RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3444,49 +4017,133 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
+@@ -3442,49 +4017,133 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
  
  #else /* !CONFIG_PREEMPT_NOTIFIERS */
  
@@ -20117,7 +20118,7 @@
  static inline void
  prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
  {
-@@ -3512,6 +4169,7 @@ static inline void finish_lock_switch(struct rq *rq)
+@@ -3510,6 +4169,7 @@ static inline void finish_lock_switch(struct rq *rq)
  	 * prev into current:
  	 */
  	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
@@ -20125,7 +20126,7 @@
  	raw_spin_unlock_irq(&rq->lock);
  }
  
-@@ -3527,6 +4185,22 @@ static inline void finish_lock_switch(struct rq *rq)
+@@ -3525,6 +4185,22 @@ static inline void finish_lock_switch(struct rq *rq)
  # define finish_arch_post_lock_switch()	do { } while (0)
  #endif
  
@@ -20148,7 +20149,7 @@
  /**
   * prepare_task_switch - prepare to switch tasks
   * @rq: the runqueue preparing to switch
-@@ -3549,6 +4223,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
+@@ -3547,6 +4223,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  	perf_event_task_sched_out(prev, next);
  	rseq_preempt(prev);
  	fire_sched_out_preempt_notifiers(prev, next);
@@ -20156,7 +20157,7 @@
  	prepare_task(next);
  	prepare_arch_switch(next);
  }
-@@ -3615,6 +4290,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+@@ -3613,6 +4290,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
  	finish_lock_switch(rq);
  	finish_arch_post_lock_switch();
  	kcov_finish_switch(current);
@@ -20164,7 +20165,7 @@
  
  	fire_sched_in_preempt_notifiers(current);
  	/*
-@@ -3629,23 +4305,18 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+@@ -3627,23 +4305,18 @@ static struct rq *finish_task_switch(struct task_struct *prev)
  	 *   provided by mmdrop(),
  	 * - a sync_core for SYNC_CORE.
  	 */
@@ -20193,7 +20194,7 @@
  		put_task_struct_rcu_user(prev);
  	}
  
-@@ -3653,43 +4324,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+@@ -3651,43 +4324,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
  	return rq;
  }
  
@@ -20237,7 +20238,7 @@
  /**
   * schedule_tail - first thing a freshly forked thread must call.
   * @prev: the thread we just switched away from.
-@@ -3709,7 +4343,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
+@@ -3707,7 +4343,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
  	 */
  
  	rq = finish_task_switch(prev);
@@ -20245,7 +20246,7 @@
  	preempt_enable();
  
  	if (current->set_child_tid)
-@@ -4404,7 +5037,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+@@ -4402,7 +5037,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
   *
   * WARNING: must be called with preemption disabled!
   */
@@ -20254,7 +20255,7 @@
  {
  	struct task_struct *prev, *next;
  	unsigned long *switch_count;
-@@ -4457,7 +5090,7 @@ static void __sched notrace __schedule(bool preempt)
+@@ -4455,7 +5090,7 @@ static void __sched notrace __schedule(bool preempt)
  	 *  - ptrace_{,un}freeze_traced() can change ->state underneath us.
  	 */
  	prev_state = prev->state;
@@ -20263,7 +20264,7 @@
  		if (signal_pending_state(prev_state, prev)) {
  			prev->state = TASK_RUNNING;
  		} else {
-@@ -4492,6 +5125,7 @@ static void __sched notrace __schedule(bool preempt)
+@@ -4490,6 +5125,7 @@ static void __sched notrace __schedule(bool preempt)
  
  	next = pick_next_task(rq, prev, &rf);
  	clear_tsk_need_resched(prev);
@@ -20271,7 +20272,7 @@
  	clear_preempt_need_resched();
  
  	if (likely(prev != next)) {
-@@ -4517,6 +5151,7 @@ static void __sched notrace __schedule(bool preempt)
+@@ -4515,6 +5151,7 @@ static void __sched notrace __schedule(bool preempt)
  		 */
  		++*switch_count;
  
@@ -20279,7 +20280,7 @@
  		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
  
  		trace_sched_switch(preempt, prev, next);
-@@ -4525,10 +5160,11 @@ static void __sched notrace __schedule(bool preempt)
+@@ -4523,10 +5160,11 @@ static void __sched notrace __schedule(bool preempt)
  		rq = context_switch(rq, prev, next, &rf);
  	} else {
  		rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
@@ -20294,7 +20295,7 @@
  }
  
  void __noreturn do_task_dead(void)
-@@ -4539,7 +5175,7 @@ void __noreturn do_task_dead(void)
+@@ -4537,7 +5175,7 @@ void __noreturn do_task_dead(void)
  	/* Tell freezer to ignore us: */
  	current->flags |= PF_NOFREEZE;
  
@@ -20303,7 +20304,7 @@
  	BUG();
  
  	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
-@@ -4572,9 +5208,6 @@ static inline void sched_submit_work(struct task_struct *tsk)
+@@ -4570,9 +5208,6 @@ static inline void sched_submit_work(struct task_struct *tsk)
  		preempt_enable_no_resched();
  	}
  
@@ -20313,7 +20314,7 @@
  	/*
  	 * If we are going to sleep and we have plugged IO queued,
  	 * make sure to submit it to avoid deadlocks.
-@@ -4600,7 +5233,7 @@ asmlinkage __visible void __sched schedule(void)
+@@ -4598,7 +5233,7 @@ asmlinkage __visible void __sched schedule(void)
  	sched_submit_work(tsk);
  	do {
  		preempt_disable();
@@ -20322,7 +20323,7 @@
  		sched_preempt_enable_no_resched();
  	} while (need_resched());
  	sched_update_worker(tsk);
-@@ -4628,7 +5261,7 @@ void __sched schedule_idle(void)
+@@ -4626,7 +5261,7 @@ void __sched schedule_idle(void)
  	 */
  	WARN_ON_ONCE(current->state);
  	do {
@@ -20331,7 +20332,7 @@
  	} while (need_resched());
  }
  
-@@ -4681,7 +5314,7 @@ static void __sched notrace preempt_schedule_common(void)
+@@ -4679,7 +5314,7 @@ static void __sched notrace preempt_schedule_common(void)
  		 */
  		preempt_disable_notrace();
  		preempt_latency_start(1);
@@ -20340,7 +20341,7 @@
  		preempt_latency_stop(1);
  		preempt_enable_no_resched_notrace();
  
-@@ -4692,6 +5325,30 @@ static void __sched notrace preempt_schedule_common(void)
+@@ -4690,6 +5325,30 @@ static void __sched notrace preempt_schedule_common(void)
  	} while (need_resched());
  }
  
@@ -20371,7 +20372,7 @@
  #ifdef CONFIG_PREEMPTION
  /*
   * This is the entry point to schedule() from in-kernel preemption
-@@ -4705,12 +5362,26 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
+@@ -4703,12 +5362,26 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
  	 */
  	if (likely(!preemptible()))
  		return;
@@ -20399,7 +20400,7 @@
  /**
   * preempt_schedule_notrace - preempt_schedule called by tracing
   *
-@@ -4732,6 +5403,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+@@ -4730,6 +5403,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
  	if (likely(!preemptible()))
  		return;
  
@@ -20409,7 +20410,7 @@
  	do {
  		/*
  		 * Because the function tracer can trace preempt_count_sub()
-@@ -4754,7 +5428,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+@@ -4752,7 +5428,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
  		 * an infinite recursion.
  		 */
  		prev_ctx = exception_enter();
@@ -20418,7 +20419,7 @@
  		exception_exit(prev_ctx);
  
  		preempt_latency_stop(1);
-@@ -4783,7 +5457,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
+@@ -4781,7 +5457,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
  	do {
  		preempt_disable();
  		local_irq_enable();
@@ -20427,7 +20428,7 @@
  		local_irq_disable();
  		sched_preempt_enable_no_resched();
  	} while (need_resched());
-@@ -4940,9 +5614,11 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
+@@ -4947,9 +5623,11 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
  out_unlock:
  	/* Avoid rq from going away on us: */
  	preempt_disable();
@@ -20441,15 +20442,15 @@
  	preempt_enable();
  }
  #else
-@@ -5216,6 +5892,7 @@ static int __sched_setscheduler(struct task_struct *p,
- 	int retval, oldprio, oldpolicy = -1, queued, running;
- 	int new_effective_prio, policy = attr->sched_policy;
+@@ -5192,6 +5870,7 @@ static int __sched_setscheduler(struct task_struct *p,
+ 	int oldpolicy = -1, policy = attr->sched_policy;
+ 	int retval, oldprio, newprio, queued, running;
  	const struct sched_class *prev_class;
 +	struct callback_head *head;
  	struct rq_flags rf;
  	int reset_on_fork;
  	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
-@@ -5454,6 +6131,7 @@ static int __sched_setscheduler(struct task_struct *p,
+@@ -5434,6 +6113,7 @@ static int __sched_setscheduler(struct task_struct *p,
  
  	/* Avoid rq from going away on us: */
  	preempt_disable();
@@ -20457,7 +20458,7 @@
  	task_rq_unlock(rq, p, &rf);
  
  	if (pi) {
-@@ -5462,7 +6140,7 @@ static int __sched_setscheduler(struct task_struct *p,
+@@ -5442,7 +6122,7 @@ static int __sched_setscheduler(struct task_struct *p,
  	}
  
  	/* Run balance callbacks after we've adjusted the PI chain: */
@@ -20466,7 +20467,7 @@
  	preempt_enable();
  
  	return 0;
-@@ -5957,7 +6635,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+@@ -5937,7 +6617,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
  	}
  #endif
  again:
@@ -20475,7 +20476,7 @@
  
  	if (!retval) {
  		cpuset_cpus_allowed(p, cpus_allowed);
-@@ -6536,7 +7214,7 @@ void init_idle(struct task_struct *idle, int cpu)
+@@ -6516,7 +7196,7 @@ void __init init_idle(struct task_struct *idle, int cpu)
  	 *
  	 * And since this is boot we can forgo the serialization.
  	 */
@@ -20484,7 +20485,7 @@
  #endif
  	/*
  	 * We're having a chicken and egg problem, even though we are
-@@ -6563,7 +7241,9 @@ void init_idle(struct task_struct *idle, int cpu)
+@@ -6543,7 +7223,9 @@ void __init init_idle(struct task_struct *idle, int cpu)
  
  	/* Set the preempt count _outside_ the spinlocks! */
  	init_idle_preempt_count(idle, cpu);
@@ -20495,7 +20496,7 @@
  	/*
  	 * The idle tasks have their own, simple scheduling class:
  	 */
-@@ -6668,6 +7348,7 @@ void sched_setnuma(struct task_struct *p, int nid)
+@@ -6648,6 +7330,7 @@ void sched_setnuma(struct task_struct *p, int nid)
  #endif /* CONFIG_NUMA_BALANCING */
  
  #ifdef CONFIG_HOTPLUG_CPU
@@ -20503,7 +20504,7 @@
  /*
   * Ensure that the idle task is using init_mm right before its CPU goes
   * offline.
-@@ -6687,119 +7368,126 @@ void idle_task_exit(void)
+@@ -6667,119 +7350,126 @@ void idle_task_exit(void)
  	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
  }
  
@@ -20720,7 +20721,7 @@
  #endif /* CONFIG_HOTPLUG_CPU */
  
  void set_rq_online(struct rq *rq)
-@@ -6885,6 +7573,8 @@ int sched_cpu_activate(unsigned int cpu)
+@@ -6865,6 +7555,8 @@ int sched_cpu_activate(unsigned int cpu)
  	struct rq *rq = cpu_rq(cpu);
  	struct rq_flags rf;
  
@@ -20729,7 +20730,7 @@
  #ifdef CONFIG_SCHED_SMT
  	/*
  	 * When going up, increment the number of cores with SMT present.
-@@ -6920,6 +7610,8 @@ int sched_cpu_activate(unsigned int cpu)
+@@ -6900,6 +7592,8 @@ int sched_cpu_activate(unsigned int cpu)
  
  int sched_cpu_deactivate(unsigned int cpu)
  {
@@ -20738,7 +20739,7 @@
  	int ret;
  
  	set_cpu_active(cpu, false);
-@@ -6932,6 +7624,16 @@ int sched_cpu_deactivate(unsigned int cpu)
+@@ -6912,6 +7606,16 @@ int sched_cpu_deactivate(unsigned int cpu)
  	 */
  	synchronize_rcu();
  
@@ -20755,7 +20756,7 @@
  #ifdef CONFIG_SCHED_SMT
  	/*
  	 * When going down, decrement the number of cores with SMT present.
-@@ -6945,6 +7647,7 @@ int sched_cpu_deactivate(unsigned int cpu)
+@@ -6925,6 +7629,7 @@ int sched_cpu_deactivate(unsigned int cpu)
  
  	ret = cpuset_cpu_inactive(cpu);
  	if (ret) {
@@ -20763,7 +20764,7 @@
  		set_cpu_active(cpu, true);
  		return ret;
  	}
-@@ -6968,6 +7671,41 @@ int sched_cpu_starting(unsigned int cpu)
+@@ -6948,6 +7653,41 @@ int sched_cpu_starting(unsigned int cpu)
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
@@ -20805,7 +20806,7 @@
  int sched_cpu_dying(unsigned int cpu)
  {
  	struct rq *rq = cpu_rq(cpu);
-@@ -6977,12 +7715,7 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -6957,12 +7697,7 @@ int sched_cpu_dying(unsigned int cpu)
  	sched_tick_stop(cpu);
  
  	rq_lock_irqsave(rq, &rf);
@@ -20819,7 +20820,7 @@
  	rq_unlock_irqrestore(rq, &rf);
  
  	calc_load_migrate(rq);
-@@ -7189,6 +7922,9 @@ void __init sched_init(void)
+@@ -7169,6 +7904,9 @@ void __init sched_init(void)
  
  		rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func);
  #endif
@@ -20829,7 +20830,7 @@
  #endif /* CONFIG_SMP */
  		hrtick_rq_init(rq);
  		atomic_set(&rq->nr_iowait, 0);
-@@ -7229,7 +7965,7 @@ void __init sched_init(void)
+@@ -7209,7 +7947,7 @@ void __init sched_init(void)
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  static inline int preempt_count_equals(int preempt_offset)
  {
@@ -20838,7 +20839,7 @@
  
  	return (nested == preempt_offset);
  }
-@@ -7326,6 +8062,39 @@ void __cant_sleep(const char *file, int line, int preempt_offset)
+@@ -7306,6 +8044,39 @@ void __cant_sleep(const char *file, int line, int preempt_offset)
  	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
  }
  EXPORT_SYMBOL_GPL(__cant_sleep);
@@ -20998,7 +20999,7 @@
  void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
  		    u64 *ut, u64 *st)
 diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index 8d06d1f4e2f7..56faef8c9238 100644
+index 6b98c1fe6e7f..15b312e98e5b 100644
 --- a/kernel/sched/deadline.c
 +++ b/kernel/sched/deadline.c
 @@ -565,7 +565,7 @@ static int push_dl_task(struct rq *rq);
@@ -21129,7 +21130,7 @@
  }
  
  /* Assumes rq->lock is held */
-@@ -2529,6 +2545,7 @@ const struct sched_class dl_sched_class
+@@ -2531,6 +2547,7 @@ const struct sched_class dl_sched_class
  	.rq_online              = rq_online_dl,
  	.rq_offline             = rq_offline_dl,
  	.task_woken		= task_woken_dl,
@@ -21138,10 +21139,10 @@
  
  	.task_tick		= task_tick_dl,
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 348605306027..e7d6ae7882c1 100644
+index 262b02d75007..6dcc229427b8 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -4372,7 +4372,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4397,7 +4397,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
  	ideal_runtime = sched_slice(cfs_rq, curr);
  	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
  	if (delta_exec > ideal_runtime) {
@@ -21150,7 +21151,7 @@
  		/*
  		 * The current task ran long enough, ensure it doesn't get
  		 * re-elected due to buddy favours.
-@@ -4396,7 +4396,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4421,7 +4421,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
  		return;
  
  	if (delta > ideal_runtime)
@@ -21159,7 +21160,7 @@
  }
  
  static void
-@@ -4539,7 +4539,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
+@@ -4564,7 +4564,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
  	 * validating it and just reschedule.
  	 */
  	if (queued) {
@@ -21168,7 +21169,7 @@
  		return;
  	}
  	/*
-@@ -4676,7 +4676,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
+@@ -4701,7 +4701,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
  	 * hierarchy can be throttled
  	 */
  	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -21177,7 +21178,7 @@
  }
  
  static __always_inline
-@@ -5411,7 +5411,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
+@@ -5436,7 +5436,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
  
  		if (delta < 0) {
  			if (rq->curr == p)
@@ -21186,7 +21187,7 @@
  			return;
  		}
  		hrtick_start(rq, delta);
-@@ -6992,7 +6992,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7017,7 +7017,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
  	return;
  
  preempt:
@@ -21195,7 +21196,7 @@
  	/*
  	 * Only set the backward buddy when the current task is still
  	 * on the rq. This can happen when a wakeup gets interleaved
-@@ -10749,7 +10749,7 @@ static void task_fork_fair(struct task_struct *p)
+@@ -10777,7 +10777,7 @@ static void task_fork_fair(struct task_struct *p)
  		 * 'current' within the tree based on its new key value.
  		 */
  		swap(curr->vruntime, se->vruntime);
@@ -21204,7 +21205,7 @@
  	}
  
  	se->vruntime -= cfs_rq->min_vruntime;
-@@ -10776,7 +10776,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
+@@ -10804,7 +10804,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
  	 */
  	if (rq->curr == p) {
  		if (p->prio > oldprio)
@@ -21214,7 +21215,7 @@
  		check_preempt_curr(rq, p, 0);
  }
 diff --git a/kernel/sched/features.h b/kernel/sched/features.h
-index 68d369cba9e4..5a2e27297126 100644
+index f1bf5e12d889..bc2466af142e 100644
 --- a/kernel/sched/features.h
 +++ b/kernel/sched/features.h
 @@ -45,11 +45,19 @@ SCHED_FEAT(DOUBLE_TICK, false)
@@ -21238,7 +21239,7 @@
  /*
   * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
-index 49ec096a8aa1..1ed7e3dfee9e 100644
+index b5cf418e2e3f..d906118ba451 100644
 --- a/kernel/sched/rt.c
 +++ b/kernel/sched/rt.c
 @@ -265,7 +265,7 @@ static void pull_rt_task(struct rq *this_rq);
@@ -21422,7 +21423,7 @@
  	}
  
  	if (resched)
-@@ -2449,6 +2493,7 @@ const struct sched_class rt_sched_class
+@@ -2456,6 +2500,7 @@ const struct sched_class rt_sched_class
  	.rq_offline             = rq_offline_rt,
  	.task_woken		= task_woken_rt,
  	.switched_from		= switched_from_rt,
@@ -21431,10 +21432,10 @@
  
  	.task_tick		= task_tick_rt,
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index fac1b121d113..a6dc180ae5ef 100644
+index 39112ac7ab34..c3b137a755fa 100644
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -967,6 +967,7 @@ struct rq {
+@@ -974,6 +974,7 @@ struct rq {
  	unsigned long		cpu_capacity_orig;
  
  	struct callback_head	*balance_callback;
@@ -21442,7 +21443,7 @@
  
  	unsigned char		nohz_idle_balance;
  	unsigned char		idle_balance;
-@@ -997,6 +998,10 @@ struct rq {
+@@ -1004,6 +1005,10 @@ struct rq {
  
  	/* This is used to determine avg_idle's max value */
  	u64			max_idle_balance_cost;
@@ -21453,7 +21454,7 @@
  #endif /* CONFIG_SMP */
  
  #ifdef CONFIG_IRQ_TIME_ACCOUNTING
-@@ -1043,6 +1048,12 @@ struct rq {
+@@ -1050,6 +1055,12 @@ struct rq {
  	/* Must be inspected within a rcu lock section */
  	struct cpuidle_state	*idle_state;
  #endif
@@ -21466,7 +21467,7 @@
  };
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -1070,6 +1081,16 @@ static inline int cpu_of(struct rq *rq)
+@@ -1077,6 +1088,16 @@ static inline int cpu_of(struct rq *rq)
  #endif
  }
  
@@ -21483,7 +21484,7 @@
  
  #ifdef CONFIG_SCHED_SMT
  extern void __update_idle_core(struct rq *rq);
-@@ -1216,6 +1237,9 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
+@@ -1223,6 +1244,9 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
  	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
  	rf->clock_update_flags = 0;
  #endif
@@ -21493,7 +21494,7 @@
  }
  
  static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
-@@ -1377,6 +1401,9 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
+@@ -1384,6 +1408,9 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
  
  #ifdef CONFIG_SMP
  
@@ -21503,7 +21504,7 @@
  static inline void
  queue_balance_callback(struct rq *rq,
  		       struct callback_head *head,
-@@ -1384,12 +1411,13 @@ queue_balance_callback(struct rq *rq,
+@@ -1391,12 +1418,13 @@ queue_balance_callback(struct rq *rq,
  {
  	lockdep_assert_held(&rq->lock);
  
@@ -21518,7 +21519,7 @@
  }
  
  #define rcu_dereference_check_sched_domain(p) \
-@@ -1716,6 +1744,7 @@ static inline int task_on_rq_migrating(struct task_struct *p)
+@@ -1723,6 +1751,7 @@ static inline int task_on_rq_migrating(struct task_struct *p)
  #define WF_FORK			0x02		/* Child wakeup after fork */
  #define WF_MIGRATED		0x04		/* Internal use, task got migrated */
  #define WF_ON_CPU		0x08		/* Wakee is on_cpu */
@@ -21526,7 +21527,7 @@
  
  /*
   * To aid in avoiding the subversion of "niceness" due to uneven distribution
-@@ -1797,10 +1826,13 @@ struct sched_class {
+@@ -1804,10 +1833,13 @@ struct sched_class {
  	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
  
  	void (*set_cpus_allowed)(struct task_struct *p,
@@ -21541,7 +21542,7 @@
  #endif
  
  	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
-@@ -1884,13 +1916,35 @@ static inline bool sched_fair_runnable(struct rq *rq)
+@@ -1891,13 +1923,35 @@ static inline bool sched_fair_runnable(struct rq *rq)
  extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
  extern struct task_struct *pick_next_task_idle(struct rq *rq);
  
@@ -21578,7 +21579,7 @@
  
  #endif
  
-@@ -1934,6 +1988,15 @@ extern void reweight_task(struct task_struct *p, int prio);
+@@ -1941,6 +1995,15 @@ extern void reweight_task(struct task_struct *p, int prio);
  extern void resched_curr(struct rq *rq);
  extern void resched_cpu(int cpu);
  
@@ -21813,7 +21814,7 @@
  		cgroup_leave_frozen(true);
  	} else {
 diff --git a/kernel/smp.c b/kernel/smp.c
-index 25240fb2df94..23778281aaa7 100644
+index f73a597c8e4c..244462d9b61e 100644
 --- a/kernel/smp.c
 +++ b/kernel/smp.c
 @@ -450,8 +450,18 @@ void flush_smp_call_function_from_idle(void)
@@ -22537,21 +22538,10 @@
  			pr_warn("NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #%02x!!!\n",
  				(unsigned int) local_softirq_pending());
 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index c3ad64fb9d8b..b6477db234e6 100644
+index a3ec21be3b14..af3daf03c917 100644
 --- a/kernel/time/timer.c
 +++ b/kernel/time/timer.c
-@@ -1263,8 +1263,10 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
- static void timer_sync_wait_running(struct timer_base *base)
- {
- 	if (atomic_read(&base->timer_waiters)) {
-+		raw_spin_unlock_irq(&base->lock);
- 		spin_unlock(&base->expiry_lock);
- 		spin_lock(&base->expiry_lock);
-+		raw_spin_lock_irq(&base->lock);
- 	}
- }
- 
-@@ -1283,7 +1285,7 @@ static void del_timer_wait_running(struct timer_list *timer)
+@@ -1287,7 +1287,7 @@ static void del_timer_wait_running(struct timer_list *timer)
  	u32 tf;
  
  	tf = READ_ONCE(timer->flags);
@@ -22560,7 +22550,7 @@
  		struct timer_base *base = get_timer_base(tf);
  
  		/*
-@@ -1367,6 +1369,13 @@ int del_timer_sync(struct timer_list *timer)
+@@ -1371,6 +1371,13 @@ int del_timer_sync(struct timer_list *timer)
  	 */
  	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
  
@@ -22574,24 +22564,7 @@
  	do {
  		ret = try_to_del_timer_sync(timer);
  
-@@ -1448,14 +1457,14 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
- 		if (timer->flags & TIMER_IRQSAFE) {
- 			raw_spin_unlock(&base->lock);
- 			call_timer_fn(timer, fn, baseclk);
--			base->running_timer = NULL;
- 			raw_spin_lock(&base->lock);
-+			base->running_timer = NULL;
- 		} else {
- 			raw_spin_unlock_irq(&base->lock);
- 			call_timer_fn(timer, fn, baseclk);
-+			raw_spin_lock_irq(&base->lock);
- 			base->running_timer = NULL;
- 			timer_sync_wait_running(base);
--			raw_spin_lock_irq(&base->lock);
- 		}
- 	}
- }
-@@ -1757,6 +1766,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
+@@ -1760,6 +1767,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
  {
  	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
  
@@ -22668,7 +22641,7 @@
  		}
  	}
 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 8bfa4e78d895..b3cc666223ce 100644
+index e4f154119e52..3602857934be 100644
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
 @@ -176,7 +176,7 @@ static union trace_eval_map_item *trace_eval_maps;
@@ -22818,7 +22791,7 @@
  
  	ret = 1;
   out:
-@@ -2584,36 +2578,52 @@ enum print_line_t trace_handle_return(struct trace_seq *s)
+@@ -2592,36 +2586,52 @@ enum print_line_t trace_handle_return(struct trace_seq *s)
  }
  EXPORT_SYMBOL_GPL(trace_handle_return);
  
@@ -22891,7 +22864,7 @@
  }
  
  DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
-@@ -2733,7 +2743,7 @@ struct ring_buffer_event *
+@@ -2741,7 +2751,7 @@ struct ring_buffer_event *
  trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
  			  struct trace_event_file *trace_file,
  			  int type, unsigned long len,
@@ -22900,16 +22873,16 @@
  {
  	struct ring_buffer_event *entry;
  	int val;
-@@ -2746,7 +2756,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
+@@ -2754,7 +2764,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
  		/* Try to use the per cpu buffer first */
  		val = this_cpu_inc_return(trace_buffered_event_cnt);
- 		if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
+ 		if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
 -			trace_event_setup(entry, type, flags, pc);
 +			trace_event_setup(entry, type, trace_ctx);
  			entry->array[0] = len;
  			return entry;
  		}
-@@ -2754,7 +2764,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
+@@ -2762,7 +2772,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
  	}
  
  	entry = __trace_buffer_lock_reserve(*current_rb,
@@ -22918,7 +22891,7 @@
  	/*
  	 * If tracing is off, but we have triggers enabled
  	 * we still need to look at the event data. Use the temp_buffer
-@@ -2763,8 +2773,8 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
+@@ -2771,8 +2781,8 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
  	 */
  	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
  		*current_rb = temp_buffer;
@@ -22929,7 +22902,7 @@
  	}
  	return entry;
  }
-@@ -2850,7 +2860,7 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
+@@ -2858,7 +2868,7 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
  		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
  	event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
  				    fbuffer->event, fbuffer->entry,
@@ -22938,7 +22911,7 @@
  }
  EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
  
-@@ -2866,7 +2876,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
+@@ -2874,7 +2884,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
  void trace_buffer_unlock_commit_regs(struct trace_array *tr,
  				     struct trace_buffer *buffer,
  				     struct ring_buffer_event *event,
@@ -22947,7 +22920,7 @@
  				     struct pt_regs *regs)
  {
  	__buffer_unlock_commit(buffer, event);
-@@ -2877,8 +2887,8 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+@@ -2885,8 +2895,8 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
  	 * and mmiotrace, but that's ok if they lose a function or
  	 * two. They are not that meaningful.
  	 */
@@ -22958,7 +22931,7 @@
  }
  
  /*
-@@ -2892,9 +2902,8 @@ trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
+@@ -2900,9 +2910,8 @@ trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
  }
  
  void
@@ -22970,7 +22943,7 @@
  {
  	struct trace_event_call *call = &event_function;
  	struct trace_buffer *buffer = tr->array_buffer.buffer;
-@@ -2902,7 +2911,7 @@ trace_function(struct trace_array *tr,
+@@ -2910,7 +2919,7 @@ trace_function(struct trace_array *tr,
  	struct ftrace_entry *entry;
  
  	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -22979,7 +22952,7 @@
  	if (!event)
  		return;
  	entry	= ring_buffer_event_data(event);
-@@ -2936,8 +2945,8 @@ static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
+@@ -2944,8 +2953,8 @@ static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
  static DEFINE_PER_CPU(int, ftrace_stack_reserve);
  
  static void __ftrace_trace_stack(struct trace_buffer *buffer,
@@ -22990,7 +22963,7 @@
  {
  	struct trace_event_call *call = &event_kernel_stack;
  	struct ring_buffer_event *event;
-@@ -2985,7 +2994,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
+@@ -2993,7 +3002,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
  	size = nr_entries * sizeof(unsigned long);
  	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
  				    (sizeof(*entry) - sizeof(entry->caller)) + size,
@@ -22999,7 +22972,7 @@
  	if (!event)
  		goto out;
  	entry = ring_buffer_event_data(event);
-@@ -3006,22 +3015,22 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
+@@ -3014,22 +3023,22 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
  
  static inline void ftrace_trace_stack(struct trace_array *tr,
  				      struct trace_buffer *buffer,
@@ -23028,7 +23001,7 @@
  		return;
  	}
  
-@@ -3035,7 +3044,7 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+@@ -3043,7 +3052,7 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
  		return;
  
  	rcu_irq_enter_irqson();
@@ -23037,7 +23010,7 @@
  	rcu_irq_exit_irqson();
  }
  
-@@ -3045,19 +3054,15 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+@@ -3053,19 +3062,15 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
   */
  void trace_dump_stack(int skip)
  {
@@ -23058,7 +23031,7 @@
  }
  EXPORT_SYMBOL_GPL(trace_dump_stack);
  
-@@ -3066,7 +3071,7 @@ static DEFINE_PER_CPU(int, user_stack_count);
+@@ -3074,7 +3079,7 @@ static DEFINE_PER_CPU(int, user_stack_count);
  
  static void
  ftrace_trace_userstack(struct trace_array *tr,
@@ -23067,7 +23040,7 @@
  {
  	struct trace_event_call *call = &event_user_stack;
  	struct ring_buffer_event *event;
-@@ -3093,7 +3098,7 @@ ftrace_trace_userstack(struct trace_array *tr,
+@@ -3101,7 +3106,7 @@ ftrace_trace_userstack(struct trace_array *tr,
  	__this_cpu_inc(user_stack_count);
  
  	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
@@ -23076,7 +23049,7 @@
  	if (!event)
  		goto out_drop_count;
  	entry	= ring_buffer_event_data(event);
-@@ -3113,7 +3118,7 @@ ftrace_trace_userstack(struct trace_array *tr,
+@@ -3121,7 +3126,7 @@ ftrace_trace_userstack(struct trace_array *tr,
  #else /* CONFIG_USER_STACKTRACE_SUPPORT */
  static void ftrace_trace_userstack(struct trace_array *tr,
  				   struct trace_buffer *buffer,
@@ -23085,7 +23058,7 @@
  {
  }
  #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
-@@ -3243,9 +3248,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
+@@ -3251,9 +3256,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
  	struct trace_buffer *buffer;
  	struct trace_array *tr = &global_trace;
  	struct bprint_entry *entry;
@@ -23097,7 +23070,7 @@
  
  	if (unlikely(tracing_selftest_running || tracing_disabled))
  		return 0;
-@@ -3253,7 +3258,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
+@@ -3261,7 +3266,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
  	/* Don't pollute graph traces with trace_vprintk internals */
  	pause_graph_tracing();
  
@@ -23106,7 +23079,7 @@
  	preempt_disable_notrace();
  
  	tbuffer = get_trace_buf();
-@@ -3267,12 +3272,11 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
+@@ -3275,12 +3280,11 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
  	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
  		goto out_put;
  
@@ -23120,7 +23093,7 @@
  	if (!event)
  		goto out;
  	entry = ring_buffer_event_data(event);
-@@ -3282,7 +3286,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
+@@ -3290,7 +3294,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
  	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
  	if (!call_filter_check_discard(call, entry, buffer, event)) {
  		__buffer_unlock_commit(buffer, event);
@@ -23129,7 +23102,7 @@
  	}
  
  out:
-@@ -3305,9 +3309,9 @@ __trace_array_vprintk(struct trace_buffer *buffer,
+@@ -3313,9 +3317,9 @@ __trace_array_vprintk(struct trace_buffer *buffer,
  {
  	struct trace_event_call *call = &event_print;
  	struct ring_buffer_event *event;
@@ -23141,7 +23114,7 @@
  	char *tbuffer;
  
  	if (tracing_disabled || tracing_selftest_running)
-@@ -3316,7 +3320,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
+@@ -3324,7 +3328,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
  	/* Don't pollute graph traces with trace_vprintk internals */
  	pause_graph_tracing();
  
@@ -23150,7 +23123,7 @@
  	preempt_disable_notrace();
  
  
-@@ -3328,11 +3332,10 @@ __trace_array_vprintk(struct trace_buffer *buffer,
+@@ -3336,11 +3340,10 @@ __trace_array_vprintk(struct trace_buffer *buffer,
  
  	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
  
@@ -23163,7 +23136,7 @@
  	if (!event)
  		goto out;
  	entry = ring_buffer_event_data(event);
-@@ -3341,7 +3344,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
+@@ -3349,7 +3352,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
  	memcpy(&entry->buf, tbuffer, len + 1);
  	if (!call_filter_check_discard(call, entry, buffer, event)) {
  		__buffer_unlock_commit(buffer, event);
@@ -23172,7 +23145,7 @@
  	}
  
  out:
-@@ -3813,14 +3816,17 @@ unsigned long trace_total_entries(struct trace_array *tr)
+@@ -3815,14 +3818,17 @@ unsigned long trace_total_entries(struct trace_array *tr)
  
  static void print_lat_help_header(struct seq_file *m)
  {
@@ -23198,7 +23171,7 @@
  }
  
  static void print_event_info(struct array_buffer *buf, struct seq_file *m)
-@@ -3854,13 +3860,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
+@@ -3856,13 +3862,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
  
  	print_event_info(buf, m);
  
@@ -23222,7 +23195,7 @@
  }
  
  void
-@@ -6654,7 +6663,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
+@@ -6655,7 +6664,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
  	enum event_trigger_type tt = ETT_NONE;
  	struct trace_buffer *buffer;
  	struct print_entry *entry;
@@ -23230,7 +23203,7 @@
  	ssize_t written;
  	int size;
  	int len;
-@@ -6674,7 +6682,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
+@@ -6675,7 +6683,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
  
  	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
  
@@ -23238,7 +23211,7 @@
  	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
  
  	/* If less than "<faulted>", then make sure we can still add that */
-@@ -6683,7 +6690,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
+@@ -6684,7 +6691,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
  
  	buffer = tr->array_buffer.buffer;
  	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
@@ -23247,7 +23220,7 @@
  	if (unlikely(!event))
  		/* Ring buffer disabled, return as if not open for write */
  		return -EBADF;
-@@ -6735,7 +6742,6 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+@@ -6736,7 +6743,6 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
  	struct ring_buffer_event *event;
  	struct trace_buffer *buffer;
  	struct raw_data_entry *entry;
@@ -23255,7 +23228,7 @@
  	ssize_t written;
  	int size;
  	int len;
-@@ -6757,14 +6763,13 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+@@ -6758,14 +6764,13 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
  
  	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
  
@@ -23271,7 +23244,7 @@
  	if (!event)
  		/* Ring buffer disabled, return as if not open for write */
  		return -EBADF;
-@@ -9315,7 +9320,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+@@ -9318,7 +9323,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
  	tracing_off();
  
  	local_irq_save(flags);
@@ -23279,7 +23252,7 @@
  
  	/* Simulate the iterator */
  	trace_init_global_iter(&iter);
-@@ -9395,7 +9399,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+@@ -9398,7 +9402,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
  		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
  	}
  	atomic_dec(&dump_running);
@@ -24505,10 +24478,10 @@
  
  /* uprobe handler */
 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index 1e2ca744dadb..c9a0c961d6e0 100644
+index 51d19fc71e61..bcb501a1affb 100644
 --- a/kernel/workqueue.c
 +++ b/kernel/workqueue.c
-@@ -4912,6 +4912,10 @@ static void unbind_workers(int cpu)
+@@ -4919,6 +4919,10 @@ static void unbind_workers(int cpu)
  		pool->flags |= POOL_DISASSOCIATED;
  
  		raw_spin_unlock_irq(&pool->lock);
@@ -24520,10 +24493,10 @@
  
  		/*
 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
-index dcf4a9028e16..1751f130e783 100644
+index ffccc13d685b..b9bded0e249b 100644
 --- a/lib/Kconfig.debug
 +++ b/lib/Kconfig.debug
-@@ -1330,7 +1330,7 @@ config DEBUG_ATOMIC_SLEEP
+@@ -1329,7 +1329,7 @@ config DEBUG_ATOMIC_SLEEP
  
  config DEBUG_LOCKING_API_SELFTESTS
  	bool "Locking API boot-time self-tests"
@@ -24533,10 +24506,10 @@
  	  Say Y here if you want the kernel to run a short self-test during
  	  bootup. The self-test checks whether common types of locking bugs
 diff --git a/lib/bug.c b/lib/bug.c
-index 7103440c0ee1..baf61c307a6a 100644
+index 4ab398a2de93..9c681f29e61e 100644
 --- a/lib/bug.c
 +++ b/lib/bug.c
-@@ -205,6 +205,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+@@ -202,6 +202,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
  	else
  		pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
  			(void *)bugaddr);
@@ -24651,10 +24624,10 @@
  	return 0;
  }
 diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
-index a899b3f0e2e5..a2baedfff9ee 100644
+index 76c52b0b76d3..98c376b02dff 100644
 --- a/lib/locking-selftest.c
 +++ b/lib/locking-selftest.c
-@@ -786,6 +786,8 @@ GENERATE_TESTCASE(init_held_rtmutex);
+@@ -787,6 +787,8 @@ GENERATE_TESTCASE(init_held_rtmutex);
  #include "locking-selftest-spin-hardirq.h"
  GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
  
@@ -24663,7 +24636,7 @@
  #include "locking-selftest-rlock-hardirq.h"
  GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
  
-@@ -801,9 +803,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
+@@ -802,9 +804,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
  #include "locking-selftest-wlock-softirq.h"
  GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
  
@@ -24676,7 +24649,7 @@
  /*
   * Enabling hardirqs with a softirq-safe lock held:
   */
-@@ -836,6 +841,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
+@@ -837,6 +842,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
  #undef E1
  #undef E2
  
@@ -24685,7 +24658,7 @@
  /*
   * Enabling irqs with an irq-safe lock held:
   */
-@@ -859,6 +866,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
+@@ -860,6 +867,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
  #include "locking-selftest-spin-hardirq.h"
  GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
  
@@ -24694,7 +24667,7 @@
  #include "locking-selftest-rlock-hardirq.h"
  GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
  
-@@ -874,6 +883,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
+@@ -875,6 +884,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
  #include "locking-selftest-wlock-softirq.h"
  GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
  
@@ -24703,7 +24676,7 @@
  #undef E1
  #undef E2
  
-@@ -905,6 +916,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+@@ -906,6 +917,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
  #include "locking-selftest-spin-hardirq.h"
  GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
  
@@ -24712,7 +24685,7 @@
  #include "locking-selftest-rlock-hardirq.h"
  GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
  
-@@ -920,6 +933,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
+@@ -921,6 +934,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
  #include "locking-selftest-wlock-softirq.h"
  GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
  
@@ -24721,7 +24694,7 @@
  #undef E1
  #undef E2
  #undef E3
-@@ -953,6 +968,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+@@ -954,6 +969,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
  #include "locking-selftest-spin-hardirq.h"
  GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
  
@@ -24730,7 +24703,7 @@
  #include "locking-selftest-rlock-hardirq.h"
  GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
  
-@@ -968,10 +985,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
+@@ -969,10 +986,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
  #include "locking-selftest-wlock-softirq.h"
  GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
  
@@ -24745,7 +24718,7 @@
  /*
   * read-lock / write-lock irq inversion.
   *
-@@ -1161,6 +1182,11 @@ GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_R3W1)
+@@ -1162,6 +1183,11 @@ GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_R3W1)
  #undef E1
  #undef E2
  #undef E3
@@ -24757,7 +24730,7 @@
  /*
   * read-lock / write-lock recursion that is actually safe.
   */
-@@ -1207,6 +1233,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
+@@ -1208,6 +1234,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
  #undef E2
  #undef E3
  
@@ -24766,7 +24739,7 @@
  /*
   * read-lock / write-lock recursion that is unsafe.
   */
-@@ -2455,6 +2483,7 @@ void locking_selftest(void)
+@@ -2456,6 +2484,7 @@ void locking_selftest(void)
  
  	printk("  --------------------------------------------------------------------------\n");
  
@@ -24774,7 +24747,7 @@
  	/*
  	 * irq-context testcases:
  	 */
-@@ -2469,6 +2498,28 @@ void locking_selftest(void)
+@@ -2470,6 +2499,28 @@ void locking_selftest(void)
  	DO_TESTCASE_6x2x2RW("irq read-recursion #2", irq_read_recursion2);
  	DO_TESTCASE_6x2x2RW("irq read-recursion #3", irq_read_recursion3);
  
@@ -24885,11 +24858,11 @@
  	if ((wait_state != TASK_RUNNING ||
 diff --git a/localversion-rt b/localversion-rt
 new file mode 100644
-index 000000000000..5498386d0d0c
+index 000000000000..75493460c41f
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt39
++-rt51
 diff --git a/mm/Kconfig b/mm/Kconfig
 index 390165ffbb0f..c8cbcb5118b0 100644
 --- a/mm/Kconfig
@@ -25221,7 +25194,7 @@
  #if defined(HASHED_PAGE_VIRTUAL)
  
 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index d72d2b90474a..4f9cd45aaf50 100644
+index 92bf987d0a41..d1a1a973b03c 100644
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
 @@ -63,6 +63,7 @@
@@ -25341,7 +25314,7 @@
  	mutex_unlock(&percpu_charge_mutex);
  }
  
-@@ -3137,7 +3148,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+@@ -3145,7 +3156,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
  	unsigned long flags;
  	bool ret = false;
  
@@ -25350,7 +25323,7 @@
  
  	stock = this_cpu_ptr(&memcg_stock);
  	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
-@@ -3145,7 +3156,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+@@ -3153,7 +3164,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
  		ret = true;
  	}
  
@@ -25359,7 +25332,7 @@
  
  	return ret;
  }
-@@ -3204,7 +3215,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+@@ -3220,7 +3231,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
  	struct memcg_stock_pcp *stock;
  	unsigned long flags;
  
@@ -25368,7 +25341,7 @@
  
  	stock = this_cpu_ptr(&memcg_stock);
  	if (stock->cached_objcg != objcg) { /* reset if necessary */
-@@ -3218,7 +3229,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+@@ -3234,7 +3245,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
  	if (stock->nr_bytes > PAGE_SIZE)
  		drain_obj_stock(stock);
  
@@ -25377,7 +25350,7 @@
  }
  
  int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
-@@ -5723,12 +5734,12 @@ static int mem_cgroup_move_account(struct page *page,
+@@ -5739,12 +5750,12 @@ static int mem_cgroup_move_account(struct page *page,
  
  	ret = 0;
  
@@ -25392,7 +25365,7 @@
  out_unlock:
  	unlock_page(page);
  out:
-@@ -6798,10 +6809,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
+@@ -6814,10 +6825,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
  	css_get(&memcg->css);
  	commit_charge(page, memcg);
  
@@ -25405,7 +25378,7 @@
  
  	/*
  	 * Cgroup1's unified memory+swap counter has been charged with the
-@@ -6857,11 +6868,11 @@ static void uncharge_batch(const struct uncharge_gather *ug)
+@@ -6873,11 +6884,11 @@ static void uncharge_batch(const struct uncharge_gather *ug)
  		memcg_oom_recover(ug->memcg);
  	}
  
@@ -25419,7 +25392,7 @@
  
  	/* drop reference from uncharge_page */
  	css_put(&ug->memcg->css);
-@@ -7015,10 +7026,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
+@@ -7031,10 +7042,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
  	css_get(&memcg->css);
  	commit_charge(newpage, memcg);
  
@@ -25432,7 +25405,7 @@
  }
  
  DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -7138,9 +7149,13 @@ static int __init mem_cgroup_init(void)
+@@ -7154,9 +7165,13 @@ static int __init mem_cgroup_init(void)
  	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
  				  memcg_hotplug_cpu_dead);
  
@@ -25449,7 +25422,7 @@
  
  	for_each_node(node) {
  		struct mem_cgroup_tree_per_node *rtpn;
-@@ -7189,6 +7204,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+@@ -7205,6 +7220,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
  	struct mem_cgroup *memcg, *swap_memcg;
  	unsigned int nr_entries;
  	unsigned short oldid;
@@ -25457,7 +25430,7 @@
  
  	VM_BUG_ON_PAGE(PageLRU(page), page);
  	VM_BUG_ON_PAGE(page_count(page), page);
-@@ -7234,9 +7250,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+@@ -7250,9 +7266,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
  	 * important here to have the interrupts disabled because it is the
  	 * only synchronisation we have for updating the per-CPU variables.
  	 */
@@ -25472,7 +25445,7 @@
  	css_put(&memcg->css);
  }
 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 7ffa706e5c30..287f3afc3cf1 100644
+index 0166558d3d64..accab201af7c 100644
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
 @@ -61,6 +61,7 @@
@@ -25825,7 +25798,7 @@
  	return NULL;
  }
  
-@@ -8733,7 +8803,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8754,7 +8824,7 @@ void zone_pcp_reset(struct zone *zone)
  	struct per_cpu_pageset *pset;
  
  	/* avoid races with drain_pages()  */
@@ -25834,7 +25807,7 @@
  	if (zone->pageset != &boot_pageset) {
  		for_each_online_cpu(cpu) {
  			pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8742,7 +8812,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8763,7 +8833,7 @@ void zone_pcp_reset(struct zone *zone)
  		free_percpu(zone->pageset);
  		zone->pageset = &boot_pageset;
  	}
@@ -25844,7 +25817,7 @@
  
  #ifdef CONFIG_MEMORY_HOTREMOVE
 diff --git a/mm/shmem.c b/mm/shmem.c
-index 537c137698f8..1c473d6123bc 100644
+index ae8adca3b56d..01d4fd96fb15 100644
 --- a/mm/shmem.c
 +++ b/mm/shmem.c
 @@ -278,10 +278,10 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
@@ -25911,7 +25884,7 @@
  	}
  	return mpol;
  }
-@@ -3592,9 +3593,10 @@ static int shmem_reconfigure(struct fs_context *fc)
+@@ -3588,9 +3589,10 @@ static int shmem_reconfigure(struct fs_context *fc)
  	struct shmem_options *ctx = fc->fs_private;
  	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
  	unsigned long inodes;
@@ -25923,7 +25896,7 @@
  	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
  	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
  		if (!sbinfo->max_blocks) {
-@@ -3639,14 +3641,15 @@ static int shmem_reconfigure(struct fs_context *fc)
+@@ -3635,14 +3637,15 @@ static int shmem_reconfigure(struct fs_context *fc)
  	 * Preserve previous mempolicy unless mpol remount option was specified.
  	 */
  	if (ctx->mpol) {
@@ -25942,7 +25915,7 @@
  	return invalfc(fc, "%s", err);
  }
  
-@@ -3763,7 +3766,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
+@@ -3759,7 +3762,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
  	sbinfo->mpol = ctx->mpol;
  	ctx->mpol = NULL;
  
@@ -25952,7 +25925,7 @@
  		goto failed;
  	spin_lock_init(&sbinfo->shrinklist_lock);
 diff --git a/mm/slab.c b/mm/slab.c
-index b1113561b98b..a28b54325d9e 100644
+index b2cc2cf7d8a3..677c0651ef66 100644
 --- a/mm/slab.c
 +++ b/mm/slab.c
 @@ -233,7 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
@@ -26087,7 +26060,7 @@
  
  		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
  			node, total_slabs - free_slabs, total_slabs,
-@@ -2106,7 +2106,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
+@@ -2105,7 +2105,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
  {
  #ifdef CONFIG_SMP
  	check_irq_off();
@@ -26096,7 +26069,7 @@
  #endif
  }
  
-@@ -2114,7 +2114,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
+@@ -2113,7 +2113,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
  {
  #ifdef CONFIG_SMP
  	check_irq_off();
@@ -26105,7 +26078,7 @@
  #endif
  }
  
-@@ -2154,9 +2154,9 @@ static void do_drain(void *arg)
+@@ -2153,9 +2153,9 @@ static void do_drain(void *arg)
  	check_irq_off();
  	ac = cpu_cache_get(cachep);
  	n = get_node(cachep, node);
@@ -26117,7 +26090,7 @@
  	ac->avail = 0;
  	slabs_destroy(cachep, &list);
  }
-@@ -2174,9 +2174,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
+@@ -2173,9 +2173,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
  			drain_alien_cache(cachep, n->alien);
  
  	for_each_kmem_cache_node(cachep, node, n) {
@@ -26129,7 +26102,7 @@
  
  		slabs_destroy(cachep, &list);
  	}
-@@ -2198,10 +2198,10 @@ static int drain_freelist(struct kmem_cache *cache,
+@@ -2197,10 +2197,10 @@ static int drain_freelist(struct kmem_cache *cache,
  	nr_freed = 0;
  	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
  
@@ -26142,7 +26115,7 @@
  			goto out;
  		}
  
-@@ -2214,7 +2214,7 @@ static int drain_freelist(struct kmem_cache *cache,
+@@ -2213,7 +2213,7 @@ static int drain_freelist(struct kmem_cache *cache,
  		 * to the cache.
  		 */
  		n->free_objects -= cache->num;
@@ -26151,7 +26124,7 @@
  		slab_destroy(cache, page);
  		nr_freed++;
  	}
-@@ -2650,7 +2650,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
+@@ -2649,7 +2649,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
  	INIT_LIST_HEAD(&page->slab_list);
  	n = get_node(cachep, page_to_nid(page));
  
@@ -26160,7 +26133,7 @@
  	n->total_slabs++;
  	if (!page->active) {
  		list_add_tail(&page->slab_list, &n->slabs_free);
-@@ -2660,7 +2660,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
+@@ -2659,7 +2659,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
  
  	STATS_INC_GROWN(cachep);
  	n->free_objects += cachep->num - page->active;
@@ -26169,7 +26142,7 @@
  
  	fixup_objfreelist_debug(cachep, &list);
  }
-@@ -2826,7 +2826,7 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
+@@ -2825,7 +2825,7 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
  {
  	struct page *page;
  
@@ -26178,7 +26151,7 @@
  	page = list_first_entry_or_null(&n->slabs_partial, struct page,
  					slab_list);
  	if (!page) {
-@@ -2853,10 +2853,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
+@@ -2852,10 +2852,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
  	if (!gfp_pfmemalloc_allowed(flags))
  		return NULL;
  
@@ -26191,7 +26164,7 @@
  		return NULL;
  	}
  
-@@ -2865,7 +2865,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
+@@ -2864,7 +2864,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
  
  	fixup_slab_list(cachep, n, page, &list);
  
@@ -26200,7 +26173,7 @@
  	fixup_objfreelist_debug(cachep, &list);
  
  	return obj;
-@@ -2924,7 +2924,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
+@@ -2923,7 +2923,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
  	if (!n->free_objects && (!shared || !shared->avail))
  		goto direct_grow;
  
@@ -26209,7 +26182,7 @@
  	shared = READ_ONCE(n->shared);
  
  	/* See if we can refill from the shared array */
-@@ -2948,7 +2948,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
+@@ -2947,7 +2947,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
  must_grow:
  	n->free_objects -= ac->avail;
  alloc_done:
@@ -26218,7 +26191,7 @@
  	fixup_objfreelist_debug(cachep, &list);
  
  direct_grow:
-@@ -3173,7 +3173,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+@@ -3172,7 +3172,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
  	BUG_ON(!n);
  
  	check_irq_off();
@@ -26227,7 +26200,7 @@
  	page = get_first_slab(n, false);
  	if (!page)
  		goto must_grow;
-@@ -3191,12 +3191,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+@@ -3190,12 +3190,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
  
  	fixup_slab_list(cachep, n, page, &list);
  
@@ -26242,7 +26215,7 @@
  	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
  	if (page) {
  		/* This slab isn't counted yet so don't update free_objects */
-@@ -3374,7 +3374,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
+@@ -3373,7 +3373,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
  
  	check_irq_off();
  	n = get_node(cachep, node);
@@ -26251,7 +26224,7 @@
  	if (n->shared) {
  		struct array_cache *shared_array = n->shared;
  		int max = shared_array->limit - shared_array->avail;
-@@ -3403,7 +3403,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
+@@ -3402,7 +3402,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
  		STATS_SET_FREEABLE(cachep, i);
  	}
  #endif
@@ -26260,7 +26233,7 @@
  	ac->avail -= batchcount;
  	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
  	slabs_destroy(cachep, &list);
-@@ -3832,9 +3832,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+@@ -3831,9 +3831,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
  
  		node = cpu_to_mem(cpu);
  		n = get_node(cachep, node);
@@ -26272,7 +26245,7 @@
  		slabs_destroy(cachep, &list);
  	}
  	free_percpu(prev);
-@@ -3929,9 +3929,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
+@@ -3928,9 +3928,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
  		return;
  	}
  
@@ -26284,7 +26257,7 @@
  
  	slabs_destroy(cachep, &list);
  }
-@@ -4015,7 +4015,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
+@@ -4014,7 +4014,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
  
  	for_each_kmem_cache_node(cachep, node, n) {
  		check_irq_on();
@@ -26293,7 +26266,7 @@
  
  		total_slabs += n->total_slabs;
  		free_slabs += n->free_slabs;
-@@ -4024,7 +4024,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
+@@ -4023,7 +4023,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
  		if (n->shared)
  			shared_avail += n->shared->avail;
  
@@ -26303,10 +26276,10 @@
  	num_objs = total_slabs * cachep->num;
  	active_slabs = total_slabs - free_slabs;
 diff --git a/mm/slab.h b/mm/slab.h
-index f9977d6613d6..c9a43b787609 100644
+index 944e8b2040ae..78e668fd51a0 100644
 --- a/mm/slab.h
 +++ b/mm/slab.h
-@@ -546,7 +546,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
+@@ -543,7 +543,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
   * The slab lists for all objects.
   */
  struct kmem_cache_node {
@@ -26316,10 +26289,10 @@
  #ifdef CONFIG_SLAB
  	struct list_head slabs_partial;	/* partial list first, better asm code */
 diff --git a/mm/slub.c b/mm/slub.c
-index fbc415c34009..b6f4b45f3849 100644
+index f5fc44208bdc..da20831cff01 100644
 --- a/mm/slub.c
 +++ b/mm/slub.c
-@@ -434,7 +434,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+@@ -435,7 +435,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
  
  #ifdef CONFIG_SLUB_DEBUG
  static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
@@ -26328,7 +26301,7 @@
  
  /*
   * Determine a map of object in use on a page.
-@@ -450,7 +450,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
+@@ -451,7 +451,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
  
  	VM_BUG_ON(!irqs_disabled());
  
@@ -26337,7 +26310,7 @@
  
  	bitmap_zero(object_map, page->objects);
  
-@@ -463,7 +463,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
+@@ -464,7 +464,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
  static void put_map(unsigned long *map) __releases(&object_map_lock)
  {
  	VM_BUG_ON(map != object_map);
@@ -26346,7 +26319,7 @@
  }
  
  static inline unsigned int size_from_object(struct kmem_cache *s)
-@@ -1213,7 +1213,7 @@ static noinline int free_debug_processing(
+@@ -1214,7 +1214,7 @@ static noinline int free_debug_processing(
  	unsigned long flags;
  	int ret = 0;
  
@@ -26355,7 +26328,7 @@
  	slab_lock(page);
  
  	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1248,7 +1248,7 @@ static noinline int free_debug_processing(
+@@ -1249,7 +1249,7 @@ static noinline int free_debug_processing(
  			 bulk_cnt, cnt);
  
  	slab_unlock(page);
@@ -26364,7 +26337,7 @@
  	if (!ret)
  		slab_fix(s, "Object at 0x%p not freed", object);
  	return ret;
-@@ -1496,6 +1496,12 @@ static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
+@@ -1494,6 +1494,12 @@ static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
  }
  #endif /* CONFIG_SLUB_DEBUG */
  
@@ -26377,7 +26350,7 @@
  /*
   * Hooks for other subsystems that check memory allocations. In a typical
   * production configuration these hooks all should produce no code at all.
-@@ -1739,10 +1745,18 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+@@ -1737,10 +1743,18 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
  	void *start, *p, *next;
  	int idx;
  	bool shuffle;
@@ -26396,7 +26369,7 @@
  		local_irq_enable();
  
  	flags |= s->allocflags;
-@@ -1801,7 +1815,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+@@ -1799,7 +1813,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
  	page->frozen = 1;
  
  out:
@@ -26405,7 +26378,7 @@
  		local_irq_disable();
  	if (!page)
  		return NULL;
-@@ -1844,6 +1858,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
+@@ -1842,6 +1856,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
  	__free_pages(page, order);
  }
  
@@ -26422,7 +26395,7 @@
  static void rcu_free_slab(struct rcu_head *h)
  {
  	struct page *page = container_of(h, struct page, rcu_head);
-@@ -1855,6 +1879,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
+@@ -1853,6 +1877,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
  {
  	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
  		call_rcu(&page->rcu_head, rcu_free_slab);
@@ -26435,7 +26408,7 @@
  	} else
  		__free_slab(s, page);
  }
-@@ -1962,7 +1992,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+@@ -1960,7 +1990,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
  	if (!n || !n->nr_partial)
  		return NULL;
  
@@ -26444,7 +26417,7 @@
  	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
  		void *t;
  
-@@ -1987,7 +2017,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+@@ -1985,7 +2015,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
  			break;
  
  	}
@@ -26453,7 +26426,7 @@
  	return object;
  }
  
-@@ -2241,7 +2271,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+@@ -2239,7 +2269,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
  			 * that acquire_slab() will see a slab page that
  			 * is frozen
  			 */
@@ -26462,7 +26435,7 @@
  		}
  	} else {
  		m = M_FULL;
-@@ -2253,7 +2283,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+@@ -2251,7 +2281,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
  			 * slabs from diagnostic functions will not see
  			 * any frozen slabs.
  			 */
@@ -26471,7 +26444,7 @@
  		}
  #endif
  	}
-@@ -2278,7 +2308,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+@@ -2276,7 +2306,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
  		goto redo;
  
  	if (lock)
@@ -26480,7 +26453,7 @@
  
  	if (m == M_PARTIAL)
  		stat(s, tail);
-@@ -2317,10 +2347,10 @@ static void unfreeze_partials(struct kmem_cache *s,
+@@ -2315,10 +2345,10 @@ static void unfreeze_partials(struct kmem_cache *s,
  		n2 = get_node(s, page_to_nid(page));
  		if (n != n2) {
  			if (n)
@@ -26493,7 +26466,7 @@
  		}
  
  		do {
-@@ -2349,7 +2379,7 @@ static void unfreeze_partials(struct kmem_cache *s,
+@@ -2347,7 +2377,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  	}
  
  	if (n)
@@ -26502,7 +26475,7 @@
  
  	while (discard_page) {
  		page = discard_page;
-@@ -2386,14 +2416,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+@@ -2384,14 +2414,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
  			pobjects = oldpage->pobjects;
  			pages = oldpage->pages;
  			if (drain && pobjects > slub_cpu_partial(s)) {
@@ -26524,7 +26497,7 @@
  				oldpage = NULL;
  				pobjects = 0;
  				pages = 0;
-@@ -2461,7 +2498,19 @@ static bool has_cpu_slab(int cpu, void *info)
+@@ -2459,7 +2496,19 @@ static bool has_cpu_slab(int cpu, void *info)
  
  static void flush_all(struct kmem_cache *s)
  {
@@ -26544,7 +26517,7 @@
  }
  
  /*
-@@ -2516,10 +2565,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
+@@ -2514,10 +2563,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
  	unsigned long x = 0;
  	struct page *page;
  
@@ -26557,7 +26530,7 @@
  	return x;
  }
  #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2658,8 +2707,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+@@ -2656,8 +2705,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
   * already disabled (which is the case for bulk allocation).
   */
  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -26569,7 +26542,7 @@
  	void *freelist;
  	struct page *page;
  
-@@ -2727,6 +2778,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2725,6 +2776,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  	VM_BUG_ON(!c->page->frozen);
  	c->freelist = get_freepointer(s, freelist);
  	c->tid = next_tid(c->tid);
@@ -26583,7 +26556,7 @@
  	return freelist;
  
  new_slab:
-@@ -2742,7 +2800,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2740,7 +2798,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  
  	if (unlikely(!freelist)) {
  		slab_out_of_memory(s, gfpflags, node);
@@ -26592,7 +26565,7 @@
  	}
  
  	page = c->page;
-@@ -2755,7 +2813,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2753,7 +2811,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  		goto new_slab;	/* Slab failed checks. Next slab needed */
  
  	deactivate_slab(s, page, get_freepointer(s, freelist), c);
@@ -26601,7 +26574,7 @@
  }
  
  /*
-@@ -2767,6 +2825,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2765,6 +2823,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  {
  	void *p;
  	unsigned long flags;
@@ -26609,7 +26582,7 @@
  
  	local_irq_save(flags);
  #ifdef CONFIG_PREEMPTION
-@@ -2778,8 +2837,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2776,8 +2835,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  	c = this_cpu_ptr(s->cpu_slab);
  #endif
  
@@ -26620,7 +26593,7 @@
  	return p;
  }
  
-@@ -2813,6 +2873,10 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
+@@ -2811,6 +2871,10 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
  	unsigned long tid;
  	struct obj_cgroup *objcg = NULL;
  
@@ -26631,7 +26604,7 @@
  	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
  	if (!s)
  		return NULL;
-@@ -2978,7 +3042,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+@@ -2976,7 +3040,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
  
  	do {
  		if (unlikely(n)) {
@@ -26640,7 +26613,7 @@
  			n = NULL;
  		}
  		prior = page->freelist;
-@@ -3010,7 +3074,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+@@ -3008,7 +3072,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
  				 * Otherwise the list_lock will synchronize with
  				 * other processors updating the list of slabs.
  				 */
@@ -26649,7 +26622,7 @@
  
  			}
  		}
-@@ -3052,7 +3116,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+@@ -3050,7 +3114,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
  		add_partial(n, page, DEACTIVATE_TO_TAIL);
  		stat(s, FREE_ADD_PARTIAL);
  	}
@@ -26658,7 +26631,7 @@
  	return;
  
  slab_empty:
-@@ -3067,7 +3131,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+@@ -3065,7 +3129,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
  		remove_full(s, n, page);
  	}
  
@@ -26667,7 +26640,7 @@
  	stat(s, FREE_SLAB);
  	discard_slab(s, page);
  }
-@@ -3275,9 +3339,14 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+@@ -3273,9 +3337,14 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
  			  void **p)
  {
  	struct kmem_cache_cpu *c;
@@ -26682,7 +26655,7 @@
  	/* memcg and kmem_cache debug support */
  	s = slab_pre_alloc_hook(s, &objcg, size, flags);
  	if (unlikely(!s))
-@@ -3308,7 +3377,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+@@ -3306,7 +3375,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
  			 * of re-populating per CPU c->freelist
  			 */
  			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -26691,7 +26664,7 @@
  			if (unlikely(!p[i]))
  				goto error;
  
-@@ -3323,6 +3392,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+@@ -3321,6 +3390,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
  	}
  	c->tid = next_tid(c->tid);
  	local_irq_enable();
@@ -26699,7 +26672,7 @@
  
  	/* Clear memory outside IRQ disabled fastpath loop */
  	if (unlikely(slab_want_init_on_alloc(flags, s))) {
-@@ -3337,6 +3407,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+@@ -3335,6 +3405,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
  	return i;
  error:
  	local_irq_enable();
@@ -26707,7 +26680,7 @@
  	slab_post_alloc_hook(s, objcg, flags, i, p);
  	__kmem_cache_free_bulk(s, i, p);
  	return 0;
-@@ -3472,7 +3543,7 @@ static void
+@@ -3470,7 +3541,7 @@ static void
  init_kmem_cache_node(struct kmem_cache_node *n)
  {
  	n->nr_partial = 0;
@@ -26716,7 +26689,7 @@
  	INIT_LIST_HEAD(&n->partial);
  #ifdef CONFIG_SLUB_DEBUG
  	atomic_long_set(&n->nr_slabs, 0);
-@@ -3873,7 +3944,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+@@ -3865,7 +3936,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
  	struct page *page, *h;
  
  	BUG_ON(irqs_disabled());
@@ -26725,7 +26698,7 @@
  	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
  		if (!page->inuse) {
  			remove_partial(n, page);
-@@ -3883,7 +3954,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+@@ -3875,7 +3946,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
  			  "Objects remaining in %s on __kmem_cache_shutdown()");
  		}
  	}
@@ -26734,7 +26707,7 @@
  
  	list_for_each_entry_safe(page, h, &discard, slab_list)
  		discard_slab(s, page);
-@@ -4154,7 +4225,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
+@@ -4146,7 +4217,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
  		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
  			INIT_LIST_HEAD(promote + i);
  
@@ -26743,7 +26716,7 @@
  
  		/*
  		 * Build lists of slabs to discard or promote.
-@@ -4185,7 +4256,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
+@@ -4177,7 +4248,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
  		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
  			list_splice(promote + i, &n->partial);
  
@@ -26752,7 +26725,7 @@
  
  		/* Release empty slabs */
  		list_for_each_entry_safe(page, t, &discard, slab_list)
-@@ -4360,6 +4431,12 @@ void __init kmem_cache_init(void)
+@@ -4352,6 +4423,12 @@ void __init kmem_cache_init(void)
  {
  	static __initdata struct kmem_cache boot_kmem_cache,
  		boot_kmem_cache_node;
@@ -26765,7 +26738,7 @@
  
  	if (debug_guardpage_minorder())
  		slub_max_order = 0;
-@@ -4547,7 +4624,7 @@ static int validate_slab_node(struct kmem_cache *s,
+@@ -4539,7 +4616,7 @@ static int validate_slab_node(struct kmem_cache *s,
  	struct page *page;
  	unsigned long flags;
  
@@ -26774,7 +26747,7 @@
  
  	list_for_each_entry(page, &n->partial, slab_list) {
  		validate_slab(s, page);
-@@ -4569,7 +4646,7 @@ static int validate_slab_node(struct kmem_cache *s,
+@@ -4561,7 +4638,7 @@ static int validate_slab_node(struct kmem_cache *s,
  		       s->name, count, atomic_long_read(&n->nr_slabs));
  
  out:
@@ -26783,7 +26756,7 @@
  	return count;
  }
  
-@@ -4620,6 +4697,9 @@ static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
+@@ -4612,6 +4689,9 @@ static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
  	struct location *l;
  	int order;
  
@@ -26793,7 +26766,7 @@
  	order = get_order(sizeof(struct location) * max);
  
  	l = (void *)__get_free_pages(flags, order);
-@@ -4748,12 +4828,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
+@@ -4740,12 +4820,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
  		if (!atomic_long_read(&n->nr_slabs))
  			continue;
  
@@ -26987,7 +26960,7 @@
  	if (node->count && node->count == node->nr_values) {
  		if (list_empty(&node->private_list)) {
 diff --git a/mm/z3fold.c b/mm/z3fold.c
-index 8ae944eeb8e2..36d810cac99d 100644
+index 912ac9a64a15..f3d875fcaeb7 100644
 --- a/mm/z3fold.c
 +++ b/mm/z3fold.c
 @@ -623,14 +623,16 @@ static inline void add_to_unbuddied(struct z3fold_pool *pool,
@@ -27369,7 +27342,7 @@
  config BQL
  	bool
 diff --git a/net/core/dev.c b/net/core/dev.c
-index 64f4c7ec729d..73341e6c2e5d 100644
+index b9d19fbb1589..b8df85c2e647 100644
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
 @@ -221,14 +221,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
@@ -27405,7 +27378,7 @@
  }
  EXPORT_SYMBOL(__dev_kfree_skb_irq);
  
-@@ -3777,7 +3779,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+@@ -3778,7 +3780,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
  	 * This permits qdisc->running owner to get the lock more
  	 * often and dequeue packets faster.
  	 */
@@ -27417,7 +27390,7 @@
  	if (unlikely(contended))
  		spin_lock(&q->busylock);
  
-@@ -4572,6 +4578,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+@@ -4573,6 +4579,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
  	rps_unlock(sd);
  
  	local_irq_restore(flags);
@@ -27425,7 +27398,7 @@
  
  	atomic_long_inc(&skb->dev->rx_dropped);
  	kfree_skb(skb);
-@@ -4787,7 +4794,7 @@ static int netif_rx_internal(struct sk_buff *skb)
+@@ -4788,7 +4795,7 @@ static int netif_rx_internal(struct sk_buff *skb)
  		struct rps_dev_flow voidflow, *rflow = &voidflow;
  		int cpu;
  
@@ -27434,7 +27407,7 @@
  		rcu_read_lock();
  
  		cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -4797,14 +4804,14 @@ static int netif_rx_internal(struct sk_buff *skb)
+@@ -4798,14 +4805,14 @@ static int netif_rx_internal(struct sk_buff *skb)
  		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  
  		rcu_read_unlock();
@@ -27452,7 +27425,7 @@
  	}
  	return ret;
  }
-@@ -4843,11 +4850,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -4844,11 +4851,9 @@ int netif_rx_ni(struct sk_buff *skb)
  
  	trace_netif_rx_ni_entry(skb);
  
@@ -27466,7 +27439,7 @@
  	trace_netif_rx_ni_exit(err);
  
  	return err;
-@@ -6291,12 +6296,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+@@ -6324,12 +6329,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
  		sd->rps_ipi_list = NULL;
  
  		local_irq_enable();
@@ -27481,7 +27454,7 @@
  }
  
  static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -6374,6 +6381,7 @@ void __napi_schedule(struct napi_struct *n)
+@@ -6407,6 +6414,7 @@ void __napi_schedule(struct napi_struct *n)
  	local_irq_save(flags);
  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
  	local_irq_restore(flags);
@@ -27489,7 +27462,7 @@
  }
  EXPORT_SYMBOL(__napi_schedule);
  
-@@ -10905,6 +10913,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
+@@ -10951,6 +10959,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
  
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_enable();
@@ -27497,7 +27470,7 @@
  
  #ifdef CONFIG_RPS
  	remsd = oldsd->rps_ipi_list;
-@@ -10918,7 +10927,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
+@@ -10964,7 +10973,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
  		netif_rx_ni(skb);
  		input_queue_head_incr(oldsd);
  	}
@@ -27506,7 +27479,7 @@
  		netif_rx_ni(skb);
  		input_queue_head_incr(oldsd);
  	}
-@@ -11234,7 +11243,7 @@ static int __init net_dev_init(void)
+@@ -11280,7 +11289,7 @@ static int __init net_dev_init(void)
  
  		INIT_WORK(flush, flush_backlog);
  
@@ -27597,10 +27570,10 @@
  			 struct gnet_stats_basic_cpu __percpu *cpu,
  			 struct gnet_stats_basic_packed *b)
 diff --git a/net/core/sock.c b/net/core/sock.c
-index c75c1e723a84..5dcdc71839e6 100644
+index d638c5361ed2..a56dd73b4c76 100644
 --- a/net/core/sock.c
 +++ b/net/core/sock.c
-@@ -3031,12 +3031,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
+@@ -3039,12 +3039,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
  	if (sk->sk_lock.owned)
  		__lock_sock(sk);
  	sk->sk_lock.owned = 1;
@@ -27614,7 +27587,7 @@
  }
  EXPORT_SYMBOL(lock_sock_nested);
  
-@@ -3085,12 +3084,11 @@ bool lock_sock_fast(struct sock *sk)
+@@ -3093,12 +3092,11 @@ bool lock_sock_fast(struct sock *sk)
  
  	__lock_sock(sk);
  	sk->sk_lock.owned = 1;
@@ -27721,10 +27694,10 @@
  		err = -EOPNOTSUPP;
  		if (sch->flags & TCQ_F_MQROOT) {
 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
-index 49eae93d1489..512a39d6edec 100644
+index 05aa2571a409..1cd1cbf3afb0 100644
 --- a/net/sched/sch_generic.c
 +++ b/net/sched/sch_generic.c
-@@ -553,7 +553,11 @@ struct Qdisc noop_qdisc = {
+@@ -578,7 +578,11 @@ struct Qdisc noop_qdisc = {
  	.ops		=	&noop_qdisc_ops,
  	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
  	.dev_queue	=	&noop_netdev_queue,
@@ -27736,8 +27709,8 @@
  	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
  	.gso_skb = {
  		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
-@@ -845,9 +849,15 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
- 	lockdep_set_class(&sch->busylock,
+@@ -889,9 +893,15 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ 	lockdep_set_class(&sch->seqlock,
  			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
  
 +#ifdef CONFIG_PREEMPT_RT
@@ -27775,10 +27748,10 @@
  }
  EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);
 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
-index 77499abd9f99..7a2840d53654 100644
+index c158e70e8ae1..ca93d225df35 100644
 --- a/net/xfrm/xfrm_state.c
 +++ b/net/xfrm/xfrm_state.c
-@@ -2663,7 +2663,8 @@ int __net_init xfrm_state_init(struct net *net)
+@@ -2673,7 +2673,8 @@ int __net_init xfrm_state_init(struct net *net)
  	net->xfrm.state_num = 0;
  	INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
  	spin_lock_init(&net->xfrm.xfrm_state_lock);
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0001-dm-init-convert-dm-to-dm-mod.create.patch b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0001-dm-init-convert-dm-to-dm-mod.create.patch
index 2eeebd5..5b35161 100644
--- a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0001-dm-init-convert-dm-to-dm-mod.create.patch
+++ b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0001-dm-init-convert-dm-to-dm-mod.create.patch
@@ -1,4 +1,4 @@
-From 0c7710e888368388bfdf5e1b87ed08b7c97f38a2 Mon Sep 17 00:00:00 2001
+From 83b7c75e49afcd8723a90361b91c81cb804164a4 Mon Sep 17 00:00:00 2001
 From: Helen Koike <helen.koike@collabora.com>
 Date: Tue, 23 Apr 2019 15:58:36 -0300
 Subject: [PATCH 1/2] dm: init: convert dm= to dm-mod.create=
@@ -291,5 +291,5 @@
 +
 +__setup("dm=", dm_chrome_shim);
 -- 
-2.31.1.751.gd2f1c929bd-goog
+2.32.0.605.g8dce9f2422-goog
 
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0002-md-dm-verity-fixes-chromeos-needs.patch b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0002-md-dm-verity-fixes-chromeos-needs.patch
index 80f9255..de6ec5c 100644
--- a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0002-md-dm-verity-fixes-chromeos-needs.patch
+++ b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0002-md-dm-verity-fixes-chromeos-needs.patch
@@ -1,4 +1,4 @@
-From 6e4bf82dc120c5f9339ecf93a86a52c2af16fd3a Mon Sep 17 00:00:00 2001
+From a2a6ce49574434646ed6bf1a0e711e386e186418 Mon Sep 17 00:00:00 2001
 From: Paul Taysom <taysom@chromium.org>
 Date: Wed, 16 Jan 2013 14:55:05 -0800
 Subject: [PATCH 2/2] md: dm-verity fixes chromeos needs
@@ -528,5 +528,5 @@
  						     struct dm_verity_io *io)
  {
 -- 
-2.31.1.751.gd2f1c929bd-goog
+2.32.0.605.g8dce9f2422-goog
 
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0003-seq-file-disallow-extremely-large-seq-buffer-allocations.patch b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0003-seq-file-disallow-extremely-large-seq-buffer-allocations.patch
deleted file mode 100644
index eedc4bf..0000000
--- a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/files/0003-seq-file-disallow-extremely-large-seq-buffer-allocations.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 8cae8cd89f05f6de223d63e6d15e31c8ba9cf53b Mon Sep 17 00:00:00 2001
-From: Eric Sandeen <sandeen@redhat.com>
-Date: Tue, 13 Jul 2021 17:49:23 +0200
-Subject: [PATCH] seq_file: disallow extremely large seq buffer allocations
-
-There is no reasonable need for a buffer larger than this, and it avoids
-int overflow pitfalls.
-
-Fixes: 058504edd026 ("fs/seq_file: fallback to vmalloc allocation")
-Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
-Reported-by: Qualys Security Advisory <qsa@qualys.com>
-Signed-off-by: Eric Sandeen <sandeen@redhat.com>
-Cc: stable@kernel.org
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
----
- fs/seq_file.c | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/fs/seq_file.c b/fs/seq_file.c
-index b117b212ef2887..4a2cda04d3e293 100644
---- a/fs/seq_file.c
-+++ b/fs/seq_file.c
-@@ -32,6 +32,9 @@ static void seq_set_overflow(struct seq_file *m)
- 
- static void *seq_buf_alloc(unsigned long size)
- {
-+	if (unlikely(size > MAX_RW_COUNT))
-+		return NULL;
-+
- 	return kvmalloc(size, GFP_KERNEL_ACCOUNT);
- }
- 
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/lakitu-kernel-rt-5_10-5.10.28-r4.ebuild b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/lakitu-kernel-rt-5_10-5.10.28-r4.ebuild
deleted file mode 120000
index 931ff96..0000000
--- a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/lakitu-kernel-rt-5_10-5.10.28-r4.ebuild
+++ /dev/null
@@ -1 +0,0 @@
-lakitu-kernel-rt-5_10-5.10.28.ebuild
\ No newline at end of file
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/lakitu-kernel-rt-5_10-5.10.59-r7.ebuild b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/lakitu-kernel-rt-5_10-5.10.59-r7.ebuild
new file mode 120000
index 0000000..b3fba9e
--- /dev/null
+++ b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/lakitu-kernel-rt-5_10-5.10.59-r7.ebuild
@@ -0,0 +1 @@
+lakitu-kernel-rt-5_10-5.10.59.ebuild
\ No newline at end of file
diff --git a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/lakitu-kernel-rt-5_10-5.10.28.ebuild b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/lakitu-kernel-rt-5_10-5.10.59.ebuild
similarity index 93%
rename from project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/lakitu-kernel-rt-5_10-5.10.28.ebuild
rename to project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/lakitu-kernel-rt-5_10-5.10.59.ebuild
index 825cb7d..52c5098 100644
--- a/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/lakitu-kernel-rt-5_10-5.10.28.ebuild
+++ b/project-edgeos/sys-kernel/lakitu-kernel-rt-5_10/lakitu-kernel-rt-5_10-5.10.59.ebuild
@@ -13,8 +13,8 @@
 
 EAPI=6
 
-CROS_WORKON_COMMIT="f53a3a4808625f876aebc5a0bfb354480bbf0c21"
-CROS_WORKON_TREE="f53a3a4808625f876aebc5a0bfb354480bbf0c21"
+CROS_WORKON_COMMIT="5805e5eec901e830c7741d4916270d0b9cfd6743"
+CROS_WORKON_TREE="5805e5eec901e830c7741d4916270d0b9cfd6743"
 CROS_WORKON_REPO="https://cos.googlesource.com"
 CROS_WORKON_PROJECT="third_party/kernel"
 CROS_WORKON_LOCALNAME="kernel/v5.10"