Revert "lakitu: kernel-4.4: Backport fixes for L1TF vulnerability"
This reverts commit 0007ae6f9f50f1d5a7dd176c83f1994e2c55486d.
Reason for revert: Moved to kernel tree.
Original change's description:
> lakitu: kernel-4.4: Backport fixes for L1TF vulnerability
>
> Temporarily holding them in overlay-lakitu until they land in the kernel
> tree.
>
> TEST=trybots
> RELEASE_NOTE=None
> BUG=None
>
> Change-Id: Id3e1704d86a31f037efc612711e18c0a0b6eac50
> Reviewed-on: https://chromium-review.googlesource.com/1174979
> Tested-by: Aditya Kali <adityakali@google.com>
> Reviewed-by: Pradeep Sawlani <sawlani@google.com>
> Commit-Queue: Aditya Kali <adityakali@google.com>
BUG=None
TEST=trybots
Change-Id: Ieb0b4c5d9bfe3d00491cb1e1a5d28d5df69c2070
Reviewed-on: https://chromium-review.googlesource.com/1175961
Reviewed-by: Aditya Kali <adityakali@google.com>
Commit-Queue: Aditya Kali <adityakali@google.com>
Tested-by: Aditya Kali <adityakali@google.com>
diff --git a/overlay-lakitu/profiles/base/package.use b/overlay-lakitu/profiles/base/package.use
index 5441695..9ef7396 100644
--- a/overlay-lakitu/profiles/base/package.use
+++ b/overlay-lakitu/profiles/base/package.use
@@ -49,7 +49,3 @@
# ('ssh-in-browser') does not handle HPN-capable server very well, so disable
# HPN support in OpenSSH (b/64450408).
net-misc/openssh -hpn
-
-# Apply kernel patches from FILESDIR.
-# TODO: Remove once they are merged in the kernel tree.
-sys-kernel/lakitu-kernel-4_4 apply_patches
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0001-x86-cpu-Factor-out-application-of-forced-CPU-caps.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0001-x86-cpu-Factor-out-application-of-forced-CPU-caps.patch
deleted file mode 100644
index 4199205..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0001-x86-cpu-Factor-out-application-of-forced-CPU-caps.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From 0f2505616e2b6c68e7dc61f6f768ec761dcc8d54 Mon Sep 17 00:00:00 2001
-From: Andy Lutomirski <luto@kernel.org>
-Date: Wed, 18 Jan 2017 11:15:38 -0800
-Subject: [PATCH 01/22] x86/cpu: Factor out application of forced CPU caps
-
-commit 8bf1ebca215c262e48c15a4a15f175991776f57f upstream.
-
-There are multiple call sites that apply forced CPU caps. Factor
-them into a helper.
-
-Signed-off-by: Andy Lutomirski <luto@kernel.org>
-Reviewed-by: Borislav Petkov <bp@suse.de>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Brian Gerst <brgerst@gmail.com>
-Cc: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: Fenghua Yu <fenghua.yu@intel.com>
-Cc: H. Peter Anvin <hpa@zytor.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Matthew Whitehead <tedheadster@gmail.com>
-Cc: Oleg Nesterov <oleg@redhat.com>
-Cc: One Thousand Gnomes <gnomes@lxorguk.ukuu.org.uk>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
-Link: http://lkml.kernel.org/r/623ff7555488122143e4417de09b18be2085ad06.1484705016.git.luto@kernel.org
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/x86/kernel/cpu/common.c | 22 +++++++++++++---------
- 1 file changed, 13 insertions(+), 9 deletions(-)
-
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 134e8c0d0559..8b3815b740d6 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -665,6 +665,16 @@ void cpu_detect(struct cpuinfo_x86 *c)
- }
- }
-
-+static void apply_forced_caps(struct cpuinfo_x86 *c)
-+{
-+ int i;
-+
-+ for (i = 0; i < NCAPINTS; i++) {
-+ c->x86_capability[i] &= ~cpu_caps_cleared[i];
-+ c->x86_capability[i] |= cpu_caps_set[i];
-+ }
-+}
-+
- void get_cpu_cap(struct cpuinfo_x86 *c)
- {
- u32 tfms, xlvl;
-@@ -956,11 +966,8 @@ static void identify_cpu(struct cpuinfo_x86 *c)
- if (this_cpu->c_identify)
- this_cpu->c_identify(c);
-
-- /* Clear/Set all flags overriden by options, after probe */
-- for (i = 0; i < NCAPINTS; i++) {
-- c->x86_capability[i] &= ~cpu_caps_cleared[i];
-- c->x86_capability[i] |= cpu_caps_set[i];
-- }
-+ /* Clear/Set all flags overridden by options, after probe */
-+ apply_forced_caps(c);
-
- #ifdef CONFIG_X86_64
- c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
-@@ -1024,10 +1031,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
- * Clear/Set all flags overriden by options, need do it
- * before following smp all cpus cap AND.
- */
-- for (i = 0; i < NCAPINTS; i++) {
-- c->x86_capability[i] &= ~cpu_caps_cleared[i];
-- c->x86_capability[i] |= cpu_caps_set[i];
-- }
-+ apply_forced_caps(c);
-
- /*
- * On SMP, boot_cpu_data holds the common feature set between
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0002-x86-cpufeatures-Make-CPU-bugs-sticky.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0002-x86-cpufeatures-Make-CPU-bugs-sticky.patch
deleted file mode 100644
index bc21046..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0002-x86-cpufeatures-Make-CPU-bugs-sticky.patch
+++ /dev/null
@@ -1,102 +0,0 @@
-From 96d1fdc33f85ac935bdc9dc546b7d743539ae070 Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 4 Dec 2017 15:07:32 +0100
-Subject: [PATCH 02/22] x86/cpufeatures: Make CPU bugs sticky
-
-commit 6cbd2171e89b13377261d15e64384df60ecb530e upstream.
-
-There is currently no way to force CPU bug bits like CPU feature bits. That
-makes it impossible to set a bug bit once at boot and have it stick for all
-upcoming CPUs.
-
-Extend the force set/clear arrays to handle bug bits as well.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
-Cc: Andy Lutomirski <luto@kernel.org>
-Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Borislav Petkov <bpetkov@suse.de>
-Cc: Brian Gerst <brgerst@gmail.com>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: David Laight <David.Laight@aculab.com>
-Cc: Denys Vlasenko <dvlasenk@redhat.com>
-Cc: Eduardo Valentin <eduval@amazon.com>
-Cc: Greg KH <gregkh@linuxfoundation.org>
-Cc: H. Peter Anvin <hpa@zytor.com>
-Cc: Josh Poimboeuf <jpoimboe@redhat.com>
-Cc: Juergen Gross <jgross@suse.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Will Deacon <will.deacon@arm.com>
-Cc: aliguori@amazon.com
-Cc: daniel.gruss@iaik.tugraz.at
-Cc: hughd@google.com
-Cc: keescook@google.com
-Link: https://lkml.kernel.org/r/20171204150606.992156574@linutronix.de
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/x86/include/asm/cpufeature.h | 2 ++
- arch/x86/include/asm/processor.h | 4 ++--
- arch/x86/kernel/cpu/common.c | 6 +++---
- 3 files changed, 7 insertions(+), 5 deletions(-)
-
-diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
-index f6605712ca90..34c4106230f1 100644
---- a/arch/x86/include/asm/cpufeature.h
-+++ b/arch/x86/include/asm/cpufeature.h
-@@ -359,6 +359,8 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
- set_bit(bit, (unsigned long *)cpu_caps_set); \
- } while (0)
-
-+#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
-+
- #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
- #define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
- #define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
-diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 91bb056d2cb0..0f8e88e82039 100644
---- a/arch/x86/include/asm/processor.h
-+++ b/arch/x86/include/asm/processor.h
-@@ -156,8 +156,8 @@ extern struct cpuinfo_x86 boot_cpu_data;
- extern struct cpuinfo_x86 new_cpu_data;
-
- extern struct tss_struct doublefault_tss;
--extern __u32 cpu_caps_cleared[NCAPINTS];
--extern __u32 cpu_caps_set[NCAPINTS];
-+extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
-+extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS];
-
- #ifdef CONFIG_SMP
- DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 8b3815b740d6..0199dd6368a3 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -433,8 +433,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
- return NULL; /* Not found */
- }
-
--__u32 cpu_caps_cleared[NCAPINTS];
--__u32 cpu_caps_set[NCAPINTS];
-+__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
-+__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
-
- void load_percpu_segment(int cpu)
- {
-@@ -669,7 +669,7 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
- {
- int i;
-
-- for (i = 0; i < NCAPINTS; i++) {
-+ for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
- c->x86_capability[i] &= ~cpu_caps_cleared[i];
- c->x86_capability[i] |= cpu_caps_set[i];
- }
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0003-x86-speculation-l1tf-Increase-32bit-PAE-__PHYSICAL_P.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0003-x86-speculation-l1tf-Increase-32bit-PAE-__PHYSICAL_P.patch
deleted file mode 100644
index b31c4e3..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0003-x86-speculation-l1tf-Increase-32bit-PAE-__PHYSICAL_P.patch
+++ /dev/null
@@ -1,85 +0,0 @@
-From d3033bd2ee11e13fe9790071a995f1092e74959a Mon Sep 17 00:00:00 2001
-From: Andi Kleen <ak@linux.intel.com>
-Date: Wed, 13 Jun 2018 15:48:21 -0700
-Subject: [PATCH 03/22] x86/speculation/l1tf: Increase 32bit PAE
- __PHYSICAL_PAGE_SHIFT
-
-commit 50896e180c6aa3a9c61a26ced99e15d602666a4c upstream
-
-L1 Terminal Fault (L1TF) is a speculation related vulnerability. The CPU
-speculates on PTE entries which do not have the PRESENT bit set, if the
-content of the resulting physical address is available in the L1D cache.
-
-The OS side mitigation makes sure that a !PRESENT PTE entry points to a
-physical address outside the actually existing and cachable memory
-space. This is achieved by inverting the upper bits of the PTE. Due to the
-address space limitations this only works for 64bit and 32bit PAE kernels,
-but not for 32bit non PAE.
-
-This mitigation applies to both host and guest kernels, but in case of a
-64bit host (hypervisor) and a 32bit PAE guest, inverting the upper bits of
-the PAE address space (44bit) is not enough if the host has more than 43
-bits of populated memory address space, because the speculation treats the
-PTE content as a physical host address bypassing EPT.
-
-The host (hypervisor) protects itself against the guest by flushing L1D as
-needed, but pages inside the guest are not protected against attacks from
-other processes inside the same guest.
-
-For the guest the inverted PTE mask has to match the host to provide the
-full protection for all pages the host could possibly map into the
-guest. The hosts populated address space is not known to the guest, so the
-mask must cover the possible maximal host address space, i.e. 52 bit.
-
-On 32bit PAE the maximum PTE mask is currently set to 44 bit because that
-is the limit imposed by 32bit unsigned long PFNs in the VMs. This limits
-the mask to be below what the host could possibly use for physical pages.
-
-The L1TF PROT_NONE protection code uses the PTE masks to determine which
-bits to invert to make sure the higher bits are set for unmapped entries to
-prevent L1TF speculation attacks against EPT inside guests.
-
-In order to invert all bits that could be used by the host, increase
-__PHYSICAL_PAGE_SHIFT to 52 to match 64bit.
-
-The real limit for a 32bit PAE kernel is still 44 bits because all Linux
-PTEs are created from unsigned long PFNs, so they cannot be higher than 44
-bits on a 32bit kernel. So these extra PFN bits should be never set. The
-only users of this macro are using it to look at PTEs, so it's safe.
-
-[ tglx: Massaged changelog ]
-
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
-Acked-by: Michal Hocko <mhocko@suse.com>
-Acked-by: Dave Hansen <dave.hansen@intel.com>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/page_32_types.h | 9 +++++++--
- 1 file changed, 7 insertions(+), 2 deletions(-)
-
-diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
-index 3a52ee0e726d..bfceb5cc6347 100644
---- a/arch/x86/include/asm/page_32_types.h
-+++ b/arch/x86/include/asm/page_32_types.h
-@@ -27,8 +27,13 @@
- #define N_EXCEPTION_STACKS 1
-
- #ifdef CONFIG_X86_PAE
--/* 44=32+12, the limit we can fit into an unsigned long pfn */
--#define __PHYSICAL_MASK_SHIFT 44
-+/*
-+ * This is beyond the 44 bit limit imposed by the 32bit long pfns,
-+ * but we need the full mask to make sure inverted PROT_NONE
-+ * entries have all the host bits set in a guest.
-+ * The real limit is still 44 bits.
-+ */
-+#define __PHYSICAL_MASK_SHIFT 52
- #define __VIRTUAL_MASK_SHIFT 32
-
- #else /* !CONFIG_X86_PAE */
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0004-x86-mm-Move-swap-offset-type-up-in-PTE-to-work-aroun.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0004-x86-mm-Move-swap-offset-type-up-in-PTE-to-work-aroun.patch
deleted file mode 100644
index cebd686..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0004-x86-mm-Move-swap-offset-type-up-in-PTE-to-work-aroun.patch
+++ /dev/null
@@ -1,103 +0,0 @@
-From d81c5cb84ff94bc3ed67cabde3b0f3013534c72a Mon Sep 17 00:00:00 2001
-From: Dave Hansen <dave.hansen@linux.intel.com>
-Date: Thu, 7 Jul 2016 17:19:11 -0700
-Subject: [PATCH 04/22] x86/mm: Move swap offset/type up in PTE to work around
- erratum
-
-commit 00839ee3b299303c6a5e26a0a2485427a3afcbbf upstream
-
-This erratum can result in Accessed/Dirty getting set by the hardware
-when we do not expect them to be (on !Present PTEs).
-
-Instead of trying to fix them up after this happens, we just
-allow the bits to get set and try to ignore them. We do this by
-shifting the layout of the bits we use for swap offset/type in
-our 64-bit PTEs.
-
-It looks like this:
-
- bitnrs: | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0|
- names: | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P|
- before: | OFFSET (9-63) |0|X|X| TYPE(1-5) |0|
- after: | OFFSET (14-63) | TYPE (9-13) |0|X|X|X| X| X|X|X|0|
-
-Note that D was already a don't care (X) even before. We just
-move TYPE up and turn its old spot (which could be hit by the
-A bit) into all don't cares.
-
-We take 5 bits away from the offset, but that still leaves us
-with 50 bits which lets us index into a 62-bit swapfile (4 EiB).
-I think that's probably fine for the moment. We could
-theoretically reclaim 5 of the bits (1, 2, 3, 4, 7) but it
-doesn't gain us anything.
-
-Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: Andrew Morton <akpm@linux-foundation.org>
-Cc: Andy Lutomirski <luto@kernel.org>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Brian Gerst <brgerst@gmail.com>
-Cc: Dave Hansen <dave@sr71.net>
-Cc: Denys Vlasenko <dvlasenk@redhat.com>
-Cc: H. Peter Anvin <hpa@zytor.com>
-Cc: Josh Poimboeuf <jpoimboe@redhat.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Luis R. Rodriguez <mcgrof@suse.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Toshi Kani <toshi.kani@hp.com>
-Cc: dave.hansen@intel.com
-Cc: linux-mm@kvack.org
-Cc: mhocko@suse.com
-Link: http://lkml.kernel.org/r/20160708001911.9A3FD2B6@viggo.jf.intel.com
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/pgtable_64.h | 26 ++++++++++++++++++++------
- 1 file changed, 20 insertions(+), 6 deletions(-)
-
-diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index c810226e741a..225405b690b8 100644
---- a/arch/x86/include/asm/pgtable_64.h
-+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -163,18 +163,32 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
- #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
- #define pte_unmap(pte) ((void)(pte))/* NOP */
-
--/* Encode and de-code a swap entry */
-+/*
-+ * Encode and de-code a swap entry
-+ *
-+ * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number
-+ * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
-+ * | OFFSET (14->63) | TYPE (10-13) |0|X|X|X| X| X|X|X|0| <- swp entry
-+ *
-+ * G (8) is aliased and used as a PROT_NONE indicator for
-+ * !present ptes. We need to start storing swap entries above
-+ * there. We also need to avoid using A and D because of an
-+ * erratum where they can be incorrectly set by hardware on
-+ * non-present PTEs.
-+ */
-+#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
- #define SWP_TYPE_BITS 5
--#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
-+/* Place the offset above the type: */
-+#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS + 1)
-
- #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
-
--#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
-+#define __swp_type(x) (((x).val >> (SWP_TYPE_FIRST_BIT)) \
- & ((1U << SWP_TYPE_BITS) - 1))
--#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT)
-+#define __swp_offset(x) ((x).val >> SWP_OFFSET_FIRST_BIT)
- #define __swp_entry(type, offset) ((swp_entry_t) { \
-- ((type) << (_PAGE_BIT_PRESENT + 1)) \
-- | ((offset) << SWP_OFFSET_SHIFT) })
-+ ((type) << (SWP_TYPE_FIRST_BIT)) \
-+ | ((offset) << SWP_OFFSET_FIRST_BIT) })
- #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
- #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
-
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0005-x86-mm-Fix-swap-entry-comment-and-macro.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0005-x86-mm-Fix-swap-entry-comment-and-macro.patch
deleted file mode 100644
index b9868c3..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0005-x86-mm-Fix-swap-entry-comment-and-macro.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From f053a8cbb62fd6937a1570c4959b3eb0339c4c95 Mon Sep 17 00:00:00 2001
-From: Dave Hansen <dave.hansen@linux.intel.com>
-Date: Wed, 10 Aug 2016 10:23:25 -0700
-Subject: [PATCH 05/22] x86/mm: Fix swap entry comment and macro
-
-commit ace7fab7a6cdd363a615ec537f2aa94dbc761ee2 upstream
-
-A recent patch changed the format of a swap PTE.
-
-The comment explaining the format of the swap PTE is wrong about
-the bits used for the swap type field. Amusingly, the ASCII art
-and the patch description are correct, but the comment itself
-is wrong.
-
-As I was looking at this, I also noticed that the
-SWP_OFFSET_FIRST_BIT has an off-by-one error. This does not
-really hurt anything. It just wasted a bit of space in the PTE,
-giving us 2^59 bytes of addressable space in our swapfiles
-instead of 2^60. But, it doesn't match with the comments, and it
-wastes a bit of space, so fix it.
-
-Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: Andrew Morton <akpm@linux-foundation.org>
-Cc: Andy Lutomirski <luto@kernel.org>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Brian Gerst <brgerst@gmail.com>
-Cc: Dave Hansen <dave@sr71.net>
-Cc: Denys Vlasenko <dvlasenk@redhat.com>
-Cc: H. Peter Anvin <hpa@zytor.com>
-Cc: Josh Poimboeuf <jpoimboe@redhat.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Luis R. Rodriguez <mcgrof@suse.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Toshi Kani <toshi.kani@hp.com>
-Fixes: 00839ee3b299 ("x86/mm: Move swap offset/type up in PTE to work around erratum")
-Link: http://lkml.kernel.org/r/20160810172325.E56AD7DA@viggo.jf.intel.com
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/pgtable_64.h | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index 225405b690b8..ce97c8c6a310 100644
---- a/arch/x86/include/asm/pgtable_64.h
-+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -168,7 +168,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
- *
- * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number
- * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
-- * | OFFSET (14->63) | TYPE (10-13) |0|X|X|X| X| X|X|X|0| <- swp entry
-+ * | OFFSET (14->63) | TYPE (9-13) |0|X|X|X| X| X|X|X|0| <- swp entry
- *
- * G (8) is aliased and used as a PROT_NONE indicator for
- * !present ptes. We need to start storing swap entries above
-@@ -179,7 +179,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
- #define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
- #define SWP_TYPE_BITS 5
- /* Place the offset above the type: */
--#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS + 1)
-+#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
-
- #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
-
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0006-mm-x86-move-_PAGE_SWP_SOFT_DIRTY-from-bit-7-to-bit-1.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0006-mm-x86-move-_PAGE_SWP_SOFT_DIRTY-from-bit-7-to-bit-1.patch
deleted file mode 100644
index 7867588..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0006-mm-x86-move-_PAGE_SWP_SOFT_DIRTY-from-bit-7-to-bit-1.patch
+++ /dev/null
@@ -1,108 +0,0 @@
-From f54c74459aef86727994f85257ce02cc373d900f Mon Sep 17 00:00:00 2001
-From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
-Date: Fri, 8 Sep 2017 16:10:46 -0700
-Subject: [PATCH 06/22] mm: x86: move _PAGE_SWP_SOFT_DIRTY from bit 7 to bit 1
-
-commit eee4818baac0f2b37848fdf90e4b16430dc536ac upstream
-
-_PAGE_PSE is used to distinguish between a truly non-present
-(_PAGE_PRESENT=0) PMD, and a PMD which is undergoing a THP split and
-should be treated as present.
-
-But _PAGE_SWP_SOFT_DIRTY currently uses the _PAGE_PSE bit, which would
-cause confusion between one of those PMDs undergoing a THP split, and a
-soft-dirty PMD. Dropping _PAGE_PSE check in pmd_present() does not work
-well, because it can hurt optimization of tlb handling in thp split.
-
-Thus, we need to move the bit.
-
-In the current kernel, bits 1-4 are not used in non-present format since
-commit 00839ee3b299 ("x86/mm: Move swap offset/type up in PTE to work
-around erratum"). So let's move _PAGE_SWP_SOFT_DIRTY to bit 1. Bit 7
-is used as reserved (always clear), so please don't use it for other
-purpose.
-
-[dwmw2: Pulled in to 4.9 backport to support L1TF changes]
-
-Link: http://lkml.kernel.org/r/20170717193955.20207-3-zi.yan@sent.com
-Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
-Signed-off-by: Zi Yan <zi.yan@cs.rutgers.edu>
-Acked-by: Dave Hansen <dave.hansen@intel.com>
-Cc: "H. Peter Anvin" <hpa@zytor.com>
-Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
-Cc: David Nellans <dnellans@nvidia.com>
-Cc: Ingo Molnar <mingo@elte.hu>
-Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
-Cc: Mel Gorman <mgorman@techsingularity.net>
-Cc: Minchan Kim <minchan@kernel.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Vlastimil Babka <vbabka@suse.cz>
-Cc: Andrea Arcangeli <aarcange@redhat.com>
-Cc: Michal Hocko <mhocko@kernel.org>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-(cherry picked from commit 71924d820ee64d7fa0eef5a65fbb62c05388ce61)
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/pgtable_64.h | 12 +++++++++---
- arch/x86/include/asm/pgtable_types.h | 10 +++++-----
- 2 files changed, 14 insertions(+), 8 deletions(-)
-
-diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index ce97c8c6a310..008e1a58f96c 100644
---- a/arch/x86/include/asm/pgtable_64.h
-+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -166,15 +166,21 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
- /*
- * Encode and de-code a swap entry
- *
-- * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number
-- * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
-- * | OFFSET (14->63) | TYPE (9-13) |0|X|X|X| X| X|X|X|0| <- swp entry
-+ * | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number
-+ * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
-+ * | OFFSET (14->63) | TYPE (9-13) |0|0|X|X| X| X|X|SD|0| <- swp entry
- *
- * G (8) is aliased and used as a PROT_NONE indicator for
- * !present ptes. We need to start storing swap entries above
- * there. We also need to avoid using A and D because of an
- * erratum where they can be incorrectly set by hardware on
- * non-present PTEs.
-+ *
-+ * SD (1) in swp entry is used to store soft dirty bit, which helps us
-+ * remember soft dirty over page migration
-+ *
-+ * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
-+ * but also L and G.
- */
- #define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
- #define SWP_TYPE_BITS 5
-diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
-index 8dba273da25a..7572ce32055e 100644
---- a/arch/x86/include/asm/pgtable_types.h
-+++ b/arch/x86/include/asm/pgtable_types.h
-@@ -70,15 +70,15 @@
- /*
- * Tracking soft dirty bit when a page goes to a swap is tricky.
- * We need a bit which can be stored in pte _and_ not conflict
-- * with swap entry format. On x86 bits 6 and 7 are *not* involved
-- * into swap entry computation, but bit 6 is used for nonlinear
-- * file mapping, so we borrow bit 7 for soft dirty tracking.
-+ * with swap entry format. On x86 bits 1-4 are *not* involved
-+ * into swap entry computation, but bit 7 is used for thp migration,
-+ * so we borrow bit 1 for soft dirty tracking.
- *
- * Please note that this bit must be treated as swap dirty page
-- * mark if and only if the PTE has present bit clear!
-+ * mark if and only if the PTE/PMD has present bit clear!
- */
- #ifdef CONFIG_MEM_SOFT_DIRTY
--#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
-+#define _PAGE_SWP_SOFT_DIRTY _PAGE_RW
- #else
- #define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
- #endif
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0007-x86-speculation-l1tf-Change-order-of-offset-type-in-.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0007-x86-speculation-l1tf-Change-order-of-offset-type-in-.patch
deleted file mode 100644
index e9fb8b9..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0007-x86-speculation-l1tf-Change-order-of-offset-type-in-.patch
+++ /dev/null
@@ -1,110 +0,0 @@
-From aaf54fbfbb66ff618fb41197efd9a3043d2cb160 Mon Sep 17 00:00:00 2001
-From: Linus Torvalds <torvalds@linux-foundation.org>
-Date: Wed, 13 Jun 2018 15:48:22 -0700
-Subject: [PATCH 07/22] x86/speculation/l1tf: Change order of offset/type in
- swap entry
-
-commit bcd11afa7adad8d720e7ba5ef58bdcd9775cf45f upstream
-
-If pages are swapped out, the swap entry is stored in the corresponding
-PTE, which has the Present bit cleared. CPUs vulnerable to L1TF speculate
-on PTE entries which have the present bit set and would treat the swap
-entry as physical address (PFN). To mitigate that the upper bits of the PTE
-must be set so the PTE points to non existent memory.
-
-The swap entry stores the type and the offset of a swapped out page in the
-PTE. type is stored in bit 9-13 and offset in bit 14-63. The hardware
-ignores the bits beyond the physical address space limit, so to make the
-mitigation effective it's required to start 'offset' at the lowest possible
-bit so that even large swap offsets do not reach into the physical address
-space limit bits.
-
-Move offset to bit 9-58 and type to bit 59-63 which are the bits that
-hardware generally doesn't care about.
-
-That, in turn, means that if you are on a desktop chip with only 40 bits of
-physical addressing, now that the offset starts at bit 9, there needs to be
-30 bits of offset actually *in use* until bit 39 ends up being set, which
-means when inverted it will again point into existing memory.
-
-So that's 4 terabyte of swap space (because the offset is counted in pages,
-so 30 bits of offset is 42 bits of actual coverage). With bigger physical
-addressing, that obviously grows further, until the limit of the offset is
-hit (at 50 bits of offset - 62 bits of actual swap file coverage).
-
-This is a preparatory change for the actual swap entry inversion to protect
-against L1TF.
-
-[ AK: Updated description and minor tweaks. Split into two parts ]
-[ tglx: Massaged changelog ]
-
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Andi Kleen <ak@linux.intel.com>
-Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
-Acked-by: Michal Hocko <mhocko@suse.com>
-Acked-by: Vlastimil Babka <vbabka@suse.cz>
-Acked-by: Dave Hansen <dave.hansen@intel.com>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-(cherry picked from commit abff97dd7244663ecca48919c1c5793a7596ba4c)
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/pgtable_64.h | 31 ++++++++++++++++++++-----------
- 1 file changed, 20 insertions(+), 11 deletions(-)
-
-diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index 008e1a58f96c..a72c2ab24006 100644
---- a/arch/x86/include/asm/pgtable_64.h
-+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -168,7 +168,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
- *
- * | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number
- * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
-- * | OFFSET (14->63) | TYPE (9-13) |0|0|X|X| X| X|X|SD|0| <- swp entry
-+ * | TYPE (59-63) | OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
- *
- * G (8) is aliased and used as a PROT_NONE indicator for
- * !present ptes. We need to start storing swap entries above
-@@ -182,19 +182,28 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
- * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
- * but also L and G.
- */
--#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
--#define SWP_TYPE_BITS 5
--/* Place the offset above the type: */
--#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
-+#define SWP_TYPE_BITS 5
-+
-+#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
-+
-+/* We always extract/encode the offset by shifting it all the way up, and then down again */
-+#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)
-
- #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
-
--#define __swp_type(x) (((x).val >> (SWP_TYPE_FIRST_BIT)) \
-- & ((1U << SWP_TYPE_BITS) - 1))
--#define __swp_offset(x) ((x).val >> SWP_OFFSET_FIRST_BIT)
--#define __swp_entry(type, offset) ((swp_entry_t) { \
-- ((type) << (SWP_TYPE_FIRST_BIT)) \
-- | ((offset) << SWP_OFFSET_FIRST_BIT) })
-+/* Extract the high bits for type */
-+#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
-+
-+/* Shift up (to get rid of type), then down to get value */
-+#define __swp_offset(x) ((x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
-+
-+/*
-+ * Shift the offset up "too far" by TYPE bits, then down again
-+ */
-+#define __swp_entry(type, offset) ((swp_entry_t) { \
-+ ((unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
-+ | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
-+
- #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
- #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
-
---
-2.18.0.597.ga71716f1ad-goog
-
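For reference, the shift-up/shift-down encoding described in the deleted patch above can be modelled in a few lines of user-space C. This is only an illustrative sketch: the constant names mirror the kernel macros, but the program itself is hypothetical and not taken from any of these patches.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_BITS          5
#define SWP_OFFSET_FIRST_BIT   9   /* _PAGE_BIT_PROTNONE + 1 on x86-64 */
#define SWP_OFFSET_SHIFT       (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

/* Pack the type into bits 59-63 and the offset into bits 9-58. */
static uint64_t swp_entry(uint64_t type, uint64_t offset)
{
        /* Shift the offset up by TYPE_BITS too many, then back down. */
        return (offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
               (type << (64 - SWP_TYPE_BITS));
}

static uint64_t swp_type(uint64_t val)   { return val >> (64 - SWP_TYPE_BITS); }
static uint64_t swp_offset(uint64_t val) { return val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT; }

int main(void)
{
        uint64_t e = swp_entry(3, 0x12345);

        assert(swp_type(e) == 3);
        assert(swp_offset(e) == 0x12345);
        printf("entry=%#llx type=%llu offset=%#llx\n",
               (unsigned long long)e,
               (unsigned long long)swp_type(e),
               (unsigned long long)swp_offset(e));
        return 0;
}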
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0008-x86-speculation-l1tf-Protect-swap-entries-against-L1.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0008-x86-speculation-l1tf-Protect-swap-entries-against-L1.patch
deleted file mode 100644
index db7869f..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0008-x86-speculation-l1tf-Protect-swap-entries-against-L1.patch
+++ /dev/null
@@ -1,88 +0,0 @@
-From 77e6ac066549627fd2b9f9520e57426b4c6883db Mon Sep 17 00:00:00 2001
-From: Linus Torvalds <torvalds@linux-foundation.org>
-Date: Wed, 13 Jun 2018 15:48:23 -0700
-Subject: [PATCH 08/22] x86/speculation/l1tf: Protect swap entries against L1TF
-
-commit 2f22b4cd45b67b3496f4aa4c7180a1271c6452f6 upstream
-
-With L1 terminal fault the CPU speculates into unmapped PTEs, and resulting
-side effects allow reading the memory the PTE is pointing to, if its
-values are still in the L1 cache.
-
-For swapped out pages Linux uses unmapped PTEs and stores a swap entry into
-them.
-
-To protect against L1TF it must be ensured that the swap entry is not
-pointing to valid memory, which requires setting higher bits (between bit
-36 and bit 45) that are inside the CPUs physical address space, but outside
-any real memory.
-
-To do this invert the offset to make sure the higher bits are always set,
-as long as the swap file is not too big.
-
-Note there is no workaround for 32bit !PAE, or on systems which have more
-than MAX_PA/2 worth of memory. The latter case is very unlikely to happen on
-real systems.
-
-[AK: updated description and minor tweaks by. Split out from the original
- patch ]
-
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Andi Kleen <ak@linux.intel.com>
-Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
-Acked-by: Michal Hocko <mhocko@suse.com>
-Acked-by: Vlastimil Babka <vbabka@suse.cz>
-Acked-by: Dave Hansen <dave.hansen@intel.com>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-(cherry picked from commit 5269f817b330384eb67b073038c11bc5f3a9c685)
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/pgtable_64.h | 11 ++++++++---
- 1 file changed, 8 insertions(+), 3 deletions(-)
-
-diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index a72c2ab24006..67f2fe43a593 100644
---- a/arch/x86/include/asm/pgtable_64.h
-+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -168,7 +168,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
- *
- * | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number
- * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
-- * | TYPE (59-63) | OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
-+ * | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
- *
- * G (8) is aliased and used as a PROT_NONE indicator for
- * !present ptes. We need to start storing swap entries above
-@@ -181,6 +181,9 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
- *
- * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
- * but also L and G.
-+ *
-+ * The offset is inverted by a binary not operation to make the high
-+ * physical bits set.
- */
- #define SWP_TYPE_BITS 5
-
-@@ -195,13 +198,15 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
- #define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
-
- /* Shift up (to get rid of type), then down to get value */
--#define __swp_offset(x) ((x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
-+#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
-
- /*
- * Shift the offset up "too far" by TYPE bits, then down again
-+ * The offset is inverted by a binary not operation to make the high
-+ * physical bits set.
- */
- #define __swp_entry(type, offset) ((swp_entry_t) { \
-- ((unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
-+ (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
- | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
-
- #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
---
-2.18.0.597.ga71716f1ad-goog
-
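The inversion described above only changes the encode/decode helpers from the previous sketch. The sketch below (again hypothetical user-space C, not kernel code) shows that a small swap offset now leaves the high PTE bits set, while decoding still recovers the original value.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_BITS        5
#define SWP_OFFSET_FIRST_BIT 9
#define SWP_OFFSET_SHIFT     (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

static uint64_t swp_entry(uint64_t type, uint64_t offset)
{
        /* Store the offset inverted so the high bits end up set in the PTE. */
        return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
               (type << (64 - SWP_TYPE_BITS));
}

static uint64_t swp_offset(uint64_t val)
{
        /* Undo the inversion when decoding. */
        return ~val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT;
}

int main(void)
{
        uint64_t e = swp_entry(1, 42);

        assert(swp_offset(e) == 42);
        /* Bits 9-58 are now mostly 1s, i.e. they point above real memory. */
        printf("pte=%#llx offset=%llu\n",
               (unsigned long long)e, (unsigned long long)swp_offset(e));
        return 0;
}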
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0009-x86-speculation-l1tf-Protect-PROT_NONE-PTEs-against-.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0009-x86-speculation-l1tf-Protect-PROT_NONE-PTEs-against-.patch
deleted file mode 100644
index a4f4fe0..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0009-x86-speculation-l1tf-Protect-PROT_NONE-PTEs-against-.patch
+++ /dev/null
@@ -1,259 +0,0 @@
-From 48df879ce642c1203c22f5aa86f05096a011da4a Mon Sep 17 00:00:00 2001
-From: Andi Kleen <ak@linux.intel.com>
-Date: Wed, 13 Jun 2018 15:48:24 -0700
-Subject: [PATCH 09/22] x86/speculation/l1tf: Protect PROT_NONE PTEs against
- speculation
-
-commit 6b28baca9b1f0d4a42b865da7a05b1c81424bd5c upstream
-
-When PTEs are set to PROT_NONE the kernel just clears the Present bit and
-preserves the PFN, which creates an attack surface for L1TF speculation
-attacks.
-
-This is important inside guests, because L1TF speculation bypasses physical
-page remapping. While the host has its own mitigations preventing leaking
-data from other VMs into the guest, this would still risk leaking the wrong
-page inside the current guest.
-
-This uses the same technique as Linus' swap entry patch: while an entry is
-in PROTNONE state, invert the complete PFN part of it. This ensures
-that the highest bit will point to non-existing memory.
-
-The invert is done by pte/pmd_modify and pfn/pmd/pud_pte for PROTNONE and
-pte/pmd/pud_pfn undo it.
-
-This assumes that no code path touches the PFN part of a PTE directly
-without using these primitives.
-
-This doesn't handle the case that MMIO is on the top of the CPU physical
-memory. If such an MMIO region was exposed by an unprivileged driver for
-mmap it would be possible to attack some real memory. However this
-situation is all rather unlikely.
-
-For 32bit non PAE the inversion is not done because there are really not
-enough bits to protect anything.
-
-Q: Why does the guest need to be protected when the HyperVisor already has
- L1TF mitigations?
-
-A: Here's an example:
-
- Physical pages 1 2 get mapped into a guest as
- GPA 1 -> PA 2
- GPA 2 -> PA 1
- through EPT.
-
- The L1TF speculation ignores the EPT remapping.
-
- Now the guest kernel maps GPA 1 to process A and GPA 2 to process B, and
- they belong to different users and should be isolated.
-
- A sets the GPA 1 PA 2 PTE to PROT_NONE to bypass the EPT remapping and
- gets read access to the underlying physical page. Which in this case
- points to PA 2, so it can read process B's data, if it happened to be in
- L1, so isolation inside the guest is broken.
-
- There's nothing the hypervisor can do about this. This mitigation has to
- be done in the guest itself.
-
-[ tglx: Massaged changelog ]
-[ dwmw2: backported to 4.9 ]
-
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
-Acked-by: Michal Hocko <mhocko@suse.com>
-Acked-by: Vlastimil Babka <vbabka@suse.cz>
-Acked-by: Dave Hansen <dave.hansen@intel.com>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-(cherry picked from commit eb1ac7f552ddbe3c7c775447cd5a9b75e2e8bf79)
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/pgtable-2level.h | 17 ++++++++++++
- arch/x86/include/asm/pgtable-3level.h | 2 ++
- arch/x86/include/asm/pgtable-invert.h | 32 ++++++++++++++++++++++
- arch/x86/include/asm/pgtable.h | 38 +++++++++++++++++++--------
- arch/x86/include/asm/pgtable_64.h | 2 ++
- 5 files changed, 80 insertions(+), 11 deletions(-)
- create mode 100644 arch/x86/include/asm/pgtable-invert.h
-
-diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
-index fd74a11959de..89c50332a71e 100644
---- a/arch/x86/include/asm/pgtable-2level.h
-+++ b/arch/x86/include/asm/pgtable-2level.h
-@@ -77,4 +77,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi
- #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
- #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
-
-+/* No inverted PFNs on 2 level page tables */
-+
-+static inline u64 protnone_mask(u64 val)
-+{
-+ return 0;
-+}
-+
-+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
-+{
-+ return val;
-+}
-+
-+static inline bool __pte_needs_invert(u64 val)
-+{
-+ return false;
-+}
-+
- #endif /* _ASM_X86_PGTABLE_2LEVEL_H */
-diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
-index cdaa58c9b39e..0c89891c7b44 100644
---- a/arch/x86/include/asm/pgtable-3level.h
-+++ b/arch/x86/include/asm/pgtable-3level.h
-@@ -184,4 +184,6 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
- #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
- #define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
-
-+#include <asm/pgtable-invert.h>
-+
- #endif /* _ASM_X86_PGTABLE_3LEVEL_H */
-diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h
-new file mode 100644
-index 000000000000..177564187fc0
---- /dev/null
-+++ b/arch/x86/include/asm/pgtable-invert.h
-@@ -0,0 +1,32 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+#ifndef _ASM_PGTABLE_INVERT_H
-+#define _ASM_PGTABLE_INVERT_H 1
-+
-+#ifndef __ASSEMBLY__
-+
-+static inline bool __pte_needs_invert(u64 val)
-+{
-+ return (val & (_PAGE_PRESENT|_PAGE_PROTNONE)) == _PAGE_PROTNONE;
-+}
-+
-+/* Get a mask to xor with the page table entry to get the correct pfn. */
-+static inline u64 protnone_mask(u64 val)
-+{
-+ return __pte_needs_invert(val) ? ~0ull : 0;
-+}
-+
-+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
-+{
-+ /*
-+ * When a PTE transitions from NONE to !NONE or vice-versa
-+ * invert the PFN part to stop speculation.
-+ * pte_pfn undoes this when needed.
-+ */
-+ if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
-+ val = (val & ~mask) | (~val & mask);
-+ return val;
-+}
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#endif
-diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index 84c62d950023..2ed1556d99b1 100644
---- a/arch/x86/include/asm/pgtable.h
-+++ b/arch/x86/include/asm/pgtable.h
-@@ -148,19 +148,29 @@ static inline int pte_special(pte_t pte)
- return pte_flags(pte) & _PAGE_SPECIAL;
- }
-
-+/* Entries that were set to PROT_NONE are inverted */
-+
-+static inline u64 protnone_mask(u64 val);
-+
- static inline unsigned long pte_pfn(pte_t pte)
- {
-- return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
-+ unsigned long pfn = pte_val(pte);
-+ pfn ^= protnone_mask(pfn);
-+ return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
- }
-
- static inline unsigned long pmd_pfn(pmd_t pmd)
- {
-- return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
-+ unsigned long pfn = pmd_val(pmd);
-+ pfn ^= protnone_mask(pfn);
-+ return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
- }
-
- static inline unsigned long pud_pfn(pud_t pud)
- {
-- return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
-+ unsigned long pfn = pud_val(pud);
-+ pfn ^= protnone_mask(pfn);
-+ return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
- }
-
- #define pte_page(pte) pfn_to_page(pte_pfn(pte))
-@@ -359,19 +369,25 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
-
- static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
- {
-- return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
-- massage_pgprot(pgprot));
-+ phys_addr_t pfn = page_nr << PAGE_SHIFT;
-+ pfn ^= protnone_mask(pgprot_val(pgprot));
-+ pfn &= PTE_PFN_MASK;
-+ return __pte(pfn | massage_pgprot(pgprot));
- }
-
- static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
- {
-- return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
-- massage_pgprot(pgprot));
-+ phys_addr_t pfn = page_nr << PAGE_SHIFT;
-+ pfn ^= protnone_mask(pgprot_val(pgprot));
-+ pfn &= PHYSICAL_PMD_PAGE_MASK;
-+ return __pmd(pfn | massage_pgprot(pgprot));
- }
-
-+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
-+
- static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
- {
-- pteval_t val = pte_val(pte);
-+ pteval_t val = pte_val(pte), oldval = val;
-
- /*
- * Chop off the NX bit (if present), and add the NX portion of
-@@ -379,17 +395,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
- */
- val &= _PAGE_CHG_MASK;
- val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
--
-+ val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
- return __pte(val);
- }
-
- static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
- {
-- pmdval_t val = pmd_val(pmd);
-+ pmdval_t val = pmd_val(pmd), oldval = val;
-
- val &= _HPAGE_CHG_MASK;
- val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
--
-+ val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
- return __pmd(val);
- }
-
-diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index 67f2fe43a593..221a32ed1372 100644
---- a/arch/x86/include/asm/pgtable_64.h
-+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -235,6 +235,8 @@ extern void cleanup_highmap(void);
- extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
- extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
-
-+#include <asm/pgtable-invert.h>
-+
- #endif /* !__ASSEMBLY__ */
-
- #endif /* _ASM_X86_PGTABLE_64_H */
---
-2.18.0.597.ga71716f1ad-goog
-
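The PFN inversion for PROT_NONE entries can likewise be modelled outside the kernel. The sketch below uses a simplified, assumed PTE layout (only the Present bit, the Global/PROT_NONE bit and a PFN field in bits 12-51 are modelled); it mirrors the pgtable-invert.h helpers added by the patch but is not itself part of it.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define _PAGE_PRESENT   (1ULL << 0)
#define _PAGE_PROTNONE  (1ULL << 8)   /* Global bit, reused for PROT_NONE */
#define PTE_PFN_MASK    0x000ffffffffff000ULL

static int pte_needs_invert(uint64_t val)
{
        return (val & (_PAGE_PRESENT | _PAGE_PROTNONE)) == _PAGE_PROTNONE;
}

/* Mask to XOR with the PTE to hide (or recover) the real PFN. */
static uint64_t protnone_mask(uint64_t val)
{
        return pte_needs_invert(val) ? ~0ULL : 0;
}

static uint64_t pte_pfn(uint64_t pte)
{
        return ((pte ^ protnone_mask(pte)) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static uint64_t pfn_pte(uint64_t pfn, uint64_t prot)
{
        uint64_t phys = (pfn << PAGE_SHIFT) ^ protnone_mask(prot);

        return (phys & PTE_PFN_MASK) | prot;
}

int main(void)
{
        uint64_t pte = pfn_pte(0x1234, _PAGE_PROTNONE);

        /* The stored PFN bits are inverted... */
        assert(((pte & PTE_PFN_MASK) >> PAGE_SHIFT) != 0x1234);
        /* ...but pte_pfn() still reports the original PFN. */
        assert(pte_pfn(pte) == 0x1234);
        printf("raw=%#llx pfn=%#llx\n",
               (unsigned long long)pte, (unsigned long long)pte_pfn(pte));
        return 0;
}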
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0010-x86-speculation-l1tf-Make-sure-the-first-page-is-alw.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0010-x86-speculation-l1tf-Make-sure-the-first-page-is-alw.patch
deleted file mode 100644
index 17ad62f..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0010-x86-speculation-l1tf-Make-sure-the-first-page-is-alw.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From d0b4efacbfd2ccae1b476ae7e08fa19f234ed976 Mon Sep 17 00:00:00 2001
-From: Andi Kleen <ak@linux.intel.com>
-Date: Wed, 13 Jun 2018 15:48:25 -0700
-Subject: [PATCH 10/22] x86/speculation/l1tf: Make sure the first page is
- always reserved
-
-commit 10a70416e1f067f6c4efda6ffd8ea96002ac4223 upstream
-
-The L1TF workaround doesn't make any attempt to mitigate speculative accesses
-to the first physical page for zeroed PTEs. Normally it only contains some
-data from the early real mode BIOS.
-
-It's not entirely clear that the first page is reserved in all
-configurations, so add an extra reservation call to make sure it is really
-reserved. In most configurations (e.g. with the standard reservations)
-it's likely a nop.
-
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
-Acked-by: Dave Hansen <dave.hansen@intel.com>
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-(cherry picked from commit 53f222ca0d130562081abbb42c517e3e92ac2162)
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/kernel/setup.c | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index bbaae4cf9e8e..31c4bc0d3372 100644
---- a/arch/x86/kernel/setup.c
-+++ b/arch/x86/kernel/setup.c
-@@ -851,6 +851,12 @@ void __init setup_arch(char **cmdline_p)
- memblock_reserve(__pa_symbol(_text),
- (unsigned long)__bss_stop - (unsigned long)_text);
-
-+ /*
-+ * Make sure page 0 is always reserved because on systems with
-+ * L1TF its contents can be leaked to user processes.
-+ */
-+ memblock_reserve(0, PAGE_SIZE);
-+
- early_reserve_initrd();
-
- /*
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0011-CHROMIUM-x86-speculation-l1tf-Add-l1tf-detection-and.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0011-CHROMIUM-x86-speculation-l1tf-Add-l1tf-detection-and.patch
deleted file mode 100644
index cc79d34..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0011-CHROMIUM-x86-speculation-l1tf-Add-l1tf-detection-and.patch
+++ /dev/null
@@ -1,131 +0,0 @@
-From 3e7c663e28a1c30297264b4ab2d9985579848b63 Mon Sep 17 00:00:00 2001
-From: Aditya Kali <adityakali@google.com>
-Date: Mon, 13 Aug 2018 12:31:33 -0700
-Subject: [PATCH 11/22] CHROMIUM: x86/speculation/l1tf: Add l1tf detection and
- mitigation check
-
-This is a simplified version of "x86/speculation/l1tf: Add sysfs reporting
-for l1tf" (commit 17dbca119312b4e8173d4e25ff64262119fcef38 upstream) by
-Andi Kleen <ak@linux.intel.com>.
-This patch adds the X86_FEATURE_L1TF_PTEINV and X86_BUG_L1TF bits. All
-CPUs are assumed to be affected by X86_BUG_L1TF.
-This drops the sysfs reporting part from the original patch.
----
- arch/x86/include/asm/cpufeature.h | 3 +++
- arch/x86/include/asm/processor.h | 5 +++++
- arch/x86/kernel/cpu/bugs.c | 35 +++++++++++++++++++++++++++++++
- arch/x86/kernel/cpu/common.c | 4 ++++
- 4 files changed, 47 insertions(+)
-
-diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
-index 34c4106230f1..7a603b6619a9 100644
---- a/arch/x86/include/asm/cpufeature.h
-+++ b/arch/x86/include/asm/cpufeature.h
-@@ -200,6 +200,8 @@
- #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
- #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
-
-+#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
-+
- /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
- #define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
-
-@@ -277,6 +279,7 @@
- #define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
- #define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
- #define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
-+#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
-
- #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
-
-diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 0f8e88e82039..0949ff949bb4 100644
---- a/arch/x86/include/asm/processor.h
-+++ b/arch/x86/include/asm/processor.h
-@@ -173,6 +173,11 @@ extern const struct seq_operations cpuinfo_op;
-
- extern void cpu_detect(struct cpuinfo_x86 *c);
-
-+static inline unsigned long l1tf_pfn_limit(void)
-+{
-+ return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
-+}
-+
- extern void early_cpu_init(void);
- extern void identify_boot_cpu(void);
- extern void identify_secondary_cpu(struct cpuinfo_x86 *);
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index 0b6124315441..6af353ba845d 100644
---- a/arch/x86/kernel/cpu/bugs.c
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -16,6 +16,10 @@
- #include <asm/msr.h>
- #include <asm/paravirt.h>
- #include <asm/alternative.h>
-+#include <asm/pgtable.h>
-+#include <asm/e820.h>
-+
-+static void __init l1tf_select_mitigation(void);
-
- void __init check_bugs(void)
- {
-@@ -33,6 +37,8 @@ void __init check_bugs(void)
- print_cpu_info(&boot_cpu_data);
- #endif
-
-+ l1tf_select_mitigation();
-+
- /*
- * Check whether we are able to run this kernel safely on SMP.
- *
-@@ -49,3 +55,32 @@ void __init check_bugs(void)
-
- fpu__init_check_bugs();
- }
-+
-+#undef pr_fmt
-+#define pr_fmt(fmt) "L1TF: " fmt
-+static void __init l1tf_select_mitigation(void)
-+{
-+ u64 half_pa;
-+
-+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
-+ return;
-+
-+#if CONFIG_PGTABLE_LEVELS == 2
-+ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
-+ return;
-+#endif
-+
-+ /*
-+ * This is extremely unlikely to happen because almost all
-+ * systems have far more MAX_PA/2 than RAM can be fit into
-+ * DIMM slots.
-+ */
-+ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
-+ if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
-+ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
-+ return;
-+ }
-+
-+ setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
-+}
-+#undef pr_fmt
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 0199dd6368a3..9735ec99ca45 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -831,6 +831,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
- }
-
- setup_force_cpu_cap(X86_FEATURE_ALWAYS);
-+
-+ /* Assume all CPUs are affected. */
-+ setup_force_cpu_bug(X86_BUG_L1TF);
-+
- fpu__init_system(c);
- }
-
---
-2.18.0.597.ga71716f1ad-goog
-
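The MAX_PA/2 check in l1tf_select_mitigation() above boils down to simple arithmetic on the CPU's advertised physical address width. An illustrative stand-alone example follows (the 46-bit figure is only an assumed typical value, not taken from these patches):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static uint64_t l1tf_pfn_limit(unsigned int x86_phys_bits)
{
        /* Highest PFN still below half of the physical address space. */
        return (1ULL << (x86_phys_bits - 1 - PAGE_SHIFT)) - 1;
}

int main(void)
{
        unsigned int phys_bits = 46;   /* assumed typical server CPU */
        uint64_t half_pa = l1tf_pfn_limit(phys_bits) << PAGE_SHIFT;

        /* Prints 32767 GiB: RAM above ~32 TiB would defeat the mitigation. */
        printf("half_pa = %llu GiB\n", (unsigned long long)(half_pa >> 30));
        return 0;
}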
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0012-mm-Add-vm_insert_pfn_prot.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0012-mm-Add-vm_insert_pfn_prot.patch
deleted file mode 100644
index 3fe879b..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0012-mm-Add-vm_insert_pfn_prot.patch
+++ /dev/null
@@ -1,106 +0,0 @@
-From 01e710c1699839f8e1bfe05443c4a970e4cdc483 Mon Sep 17 00:00:00 2001
-From: Andy Lutomirski <luto@kernel.org>
-Date: Tue, 29 Dec 2015 20:12:20 -0800
-Subject: [PATCH 12/22] mm: Add vm_insert_pfn_prot()
-
-commit 1745cbc5d0dee0749a6bc0ea8e872c5db0074061 upstream
-
-The x86 vvar vma contains pages with differing cacheability
-flags. x86 currently implements this by manually inserting all
-the ptes using (io_)remap_pfn_range when the vma is set up.
-
-x86 wants to move to using .fault with VM_FAULT_NOPAGE to set up
-the mappings as needed. The correct API to use to insert a pfn
-in .fault is vm_insert_pfn(), but vm_insert_pfn() can't override the
-vma's cache mode, and the HPET page in particular needs to be
-uncached despite the fact that the rest of the VMA is cached.
-
-Add vm_insert_pfn_prot() to support varying cacheability within
-the same non-COW VMA in a more sane manner.
-
-x86 could alternatively use multiple VMAs, but that's messy,
-would break CRIU, and would create unnecessary VMAs that would
-waste memory.
-
-Signed-off-by: Andy Lutomirski <luto@kernel.org>
-Reviewed-by: Kees Cook <keescook@chromium.org>
-Acked-by: Andrew Morton <akpm@linux-foundation.org>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: Fenghua Yu <fenghua.yu@intel.com>
-Cc: H. Peter Anvin <hpa@zytor.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Oleg Nesterov <oleg@redhat.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Link: http://lkml.kernel.org/r/d2938d1eb37be7a5e4f86182db646551f11e45aa.1451446564.git.luto@kernel.org
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- include/linux/mm.h | 2 ++
- mm/memory.c | 25 +++++++++++++++++++++++--
- 2 files changed, 25 insertions(+), 2 deletions(-)
-
-diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 0cc2e4107a79..3231e9dbba74 100644
---- a/include/linux/mm.h
-+++ b/include/linux/mm.h
-@@ -2116,6 +2116,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
- int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
- int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn);
-+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
-+ unsigned long pfn, pgprot_t pgprot);
- int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn);
- int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
-diff --git a/mm/memory.c b/mm/memory.c
-index 8cca32df69e2..7bb7b35e9a82 100644
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -1604,9 +1604,30 @@ out:
- */
- int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn)
-+{
-+ return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
-+}
-+EXPORT_SYMBOL(vm_insert_pfn);
-+
-+/**
-+ * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
-+ * @vma: user vma to map to
-+ * @addr: target user address of this page
-+ * @pfn: source kernel pfn
-+ * @pgprot: pgprot flags for the inserted page
-+ *
-+ * This is exactly like vm_insert_pfn, except that it allows drivers to
-+ * to override pgprot on a per-page basis.
-+ *
-+ * This only makes sense for IO mappings, and it makes no sense for
-+ * cow mappings. In general, using multiple vmas is preferable;
-+ * vm_insert_pfn_prot should only be used if using multiple VMAs is
-+ * impractical.
-+ */
-+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
-+ unsigned long pfn, pgprot_t pgprot)
- {
- int ret;
-- pgprot_t pgprot = vma->vm_page_prot;
- /*
- * Technically, architectures with pte_special can avoid all these
- * restrictions (same for remap_pfn_range). However we would like
-@@ -1628,7 +1649,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-
- return ret;
- }
--EXPORT_SYMBOL(vm_insert_pfn);
-+EXPORT_SYMBOL(vm_insert_pfn_prot);
-
- int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn)
---
-2.18.0.597.ga71716f1ad-goog
-
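For reference, a minimal sketch of how a driver's .fault handler could use the vm_insert_pfn_prot() API added above to map a single uncached page (like the HPET page described in the changelog) inside an otherwise cached VMA. The device and PFN are hypothetical, and the sketch assumes the 4.4-era fault handler signature with vmf->virtual_address:

        #include <linux/mm.h>

        /* Hypothetical PFN of an uncached device register page. */
        static unsigned long example_reg_pfn;

        static int example_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        {
                unsigned long addr = (unsigned long)vmf->virtual_address;

                /*
                 * Only the first page of the VMA is backed by the register
                 * page, and it must be mapped uncached even though
                 * vma->vm_page_prot says cached for the rest of the mapping.
                 */
                if (vmf->pgoff != 0)
                        return VM_FAULT_SIGBUS;

                if (vm_insert_pfn_prot(vma, addr, example_reg_pfn,
                                       pgprot_noncached(vma->vm_page_prot)))
                        return VM_FAULT_SIGBUS;

                return VM_FAULT_NOPAGE;
        }
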
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0013-mm-fix-cache-mode-tracking-in-vm_insert_mixed.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0013-mm-fix-cache-mode-tracking-in-vm_insert_mixed.patch
deleted file mode 100644
index 7388916..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0013-mm-fix-cache-mode-tracking-in-vm_insert_mixed.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-From 85a0c72ceabff8266c96b32bee0d3a28a9048b9f Mon Sep 17 00:00:00 2001
-From: Dan Williams <dan.j.williams@intel.com>
-Date: Fri, 7 Oct 2016 17:00:18 -0700
-Subject: [PATCH 13/22] mm: fix cache mode tracking in vm_insert_mixed()
-
-commit 87744ab3832b83ba71b931f86f9cfdb000d07da5 upstream
-
-vm_insert_mixed(), unlike vm_insert_pfn_prot() and vmf_insert_pfn_pmd(),
-fails to check the pgprot_t it uses for the mapping against the one
-recorded in the memtype tracking tree. Add the missing call to
-track_pfn_insert() to preclude cases where incompatible aliased mappings
-are established for a given physical address range.
-
-[groeck: Backport to v4.4.y]
-
-Link: http://lkml.kernel.org/r/147328717909.35069.14256589123570653697.stgit@dwillia2-desk3.amr.corp.intel.com
-Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-Cc: David Airlie <airlied@linux.ie>
-Cc: Matthew Wilcox <mawilcox@microsoft.com>
-Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- mm/memory.c | 8 ++++++--
- 1 file changed, 6 insertions(+), 2 deletions(-)
-
-diff --git a/mm/memory.c b/mm/memory.c
-index 7bb7b35e9a82..1da8179178ba 100644
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -1654,10 +1654,14 @@ EXPORT_SYMBOL(vm_insert_pfn_prot);
- int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn)
- {
-+ pgprot_t pgprot = vma->vm_page_prot;
-+
- BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
-
- if (addr < vma->vm_start || addr >= vma->vm_end)
- return -EFAULT;
-+ if (track_pfn_insert(vma, &pgprot, pfn))
-+ return -EINVAL;
-
- /*
- * If we don't have pte special, then we have to use the pfn_valid()
-@@ -1670,9 +1674,9 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
- struct page *page;
-
- page = pfn_to_page(pfn);
-- return insert_page(vma, addr, page, vma->vm_page_prot);
-+ return insert_page(vma, addr, page, pgprot);
- }
-- return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
-+ return insert_pfn(vma, addr, pfn, pgprot);
- }
- EXPORT_SYMBOL(vm_insert_mixed);
-
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0014-x86-speculation-l1tf-Disallow-non-privileged-high-MM.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0014-x86-speculation-l1tf-Disallow-non-privileged-high-MM.patch
deleted file mode 100644
index e9d30dd..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0014-x86-speculation-l1tf-Disallow-non-privileged-high-MM.patch
+++ /dev/null
@@ -1,286 +0,0 @@
-From f47926ad5667fa2d968dab10478e289737f8cfe1 Mon Sep 17 00:00:00 2001
-From: Andi Kleen <ak@linux.intel.com>
-Date: Wed, 13 Jun 2018 15:48:27 -0700
-Subject: [PATCH 14/22] x86/speculation/l1tf: Disallow non privileged high MMIO
- PROT_NONE mappings
-
-commit 42e4089c7890725fcd329999252dc489b72f2921 upstream
-
-For L1TF, PROT_NONE mappings are protected by inverting the PFN in the page
-table entry. This sets the high bits in the CPU's address space, thus making
-sure not to point an unmapped entry at valid cached memory.
-
-Some server system BIOSes put the MMIO mappings high up in the physical
-address space. If such a high mapping were exposed to unprivileged users,
-they could attack low memory by setting such a mapping to PROT_NONE. This
-could happen through a special device driver which is not access
-protected. Normal /dev/mem is of course access protected.
-
-To avoid this, forbid PROT_NONE mappings or mprotect for high MMIO mappings.
-
-Valid page mappings are allowed because the system is then unsafe anyway.
-
-Users are not expected to commonly use PROT_NONE on MMIO. But to minimize
-any impact, this is only enforced if the mapping actually refers to a high
-MMIO address (defined as the MAX_PA-1 bit being set), and the check is also
-skipped for root.
-
-For mmaps this is straightforward and can be handled in vm_insert_pfn() and
-in remap_pfn_range().
-
-For mprotect it's a bit trickier. At the point where the actual PTEs are
-accessed, a lot of state has already been changed and it would be difficult
-to undo on an error. Since this is an uncommon case, use a separate early
-page table walk pass for MMIO PROT_NONE mappings that checks for this
-condition early. For non-MMIO and non-PROT_NONE there are no changes.
-
-[dwmw2: Backport to 4.9]
-[groeck: Backport to 4.4]
-
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
-Acked-by: Dave Hansen <dave.hansen@intel.com>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/pgtable.h | 8 ++++++
- arch/x86/mm/mmap.c | 21 +++++++++++++++
- include/asm-generic/pgtable.h | 12 +++++++++
- mm/memory.c | 29 +++++++++++++++-----
- mm/mprotect.c | 49 ++++++++++++++++++++++++++++++++++
- 5 files changed, 112 insertions(+), 7 deletions(-)
-
-diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index 2ed1556d99b1..70e2248353cb 100644
---- a/arch/x86/include/asm/pgtable.h
-+++ b/arch/x86/include/asm/pgtable.h
-@@ -942,6 +942,14 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
- }
- #endif
-
-+#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
-+extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
-+
-+static inline bool arch_has_pfn_modify_check(void)
-+{
-+ return boot_cpu_has_bug(X86_BUG_L1TF);
-+}
-+
- #include <asm-generic/pgtable.h>
- #endif /* __ASSEMBLY__ */
-
-diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
-index d2dc0438d654..5aad869fa205 100644
---- a/arch/x86/mm/mmap.c
-+++ b/arch/x86/mm/mmap.c
-@@ -121,3 +121,24 @@ const char *arch_vma_name(struct vm_area_struct *vma)
- return "[mpx]";
- return NULL;
- }
-+
-+/*
-+ * Only allow root to set high MMIO mappings to PROT_NONE.
-+ * This prevents an unpriv. user to set them to PROT_NONE and invert
-+ * them, then pointing to valid memory for L1TF speculation.
-+ *
-+ * Note: for locked down kernels may want to disable the root override.
-+ */
-+bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
-+{
-+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
-+ return true;
-+ if (!__pte_needs_invert(pgprot_val(prot)))
-+ return true;
-+ /* If it's real memory always allow */
-+ if (pfn_valid(pfn))
-+ return true;
-+ if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
-+ return false;
-+ return true;
-+}
-diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
-index 3a6803cb0ec9..e0dfc4b394d9 100644
---- a/include/asm-generic/pgtable.h
-+++ b/include/asm-generic/pgtable.h
-@@ -788,4 +788,16 @@ static inline int pmd_clear_huge(pmd_t *pmd)
- #define io_remap_pfn_range remap_pfn_range
- #endif
-
-+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
-+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
-+{
-+ return true;
-+}
-+
-+static inline bool arch_has_pfn_modify_check(void)
-+{
-+ return false;
-+}
-+#endif
-+
- #endif /* _ASM_GENERIC_PGTABLE_H */
-diff --git a/mm/memory.c b/mm/memory.c
-index 1da8179178ba..4308fe59b58a 100644
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -1645,6 +1645,9 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
- if (track_pfn_insert(vma, &pgprot, pfn))
- return -EINVAL;
-
-+ if (!pfn_modify_allowed(pfn, pgprot))
-+ return -EACCES;
-+
- ret = insert_pfn(vma, addr, pfn, pgprot);
-
- return ret;
-@@ -1663,6 +1666,9 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
- if (track_pfn_insert(vma, &pgprot, pfn))
- return -EINVAL;
-
-+ if (!pfn_modify_allowed(pfn, pgprot))
-+ return -EACCES;
-+
- /*
- * If we don't have pte special, then we have to use the pfn_valid()
- * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
-@@ -1691,6 +1697,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
- {
- pte_t *pte;
- spinlock_t *ptl;
-+ int err = 0;
-
- pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
- if (!pte)
-@@ -1698,12 +1705,16 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
- arch_enter_lazy_mmu_mode();
- do {
- BUG_ON(!pte_none(*pte));
-+ if (!pfn_modify_allowed(pfn, prot)) {
-+ err = -EACCES;
-+ break;
-+ }
- set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
- pfn++;
- } while (pte++, addr += PAGE_SIZE, addr != end);
- arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(pte - 1, ptl);
-- return 0;
-+ return err;
- }
-
- static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
-@@ -1712,6 +1723,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
- {
- pmd_t *pmd;
- unsigned long next;
-+ int err;
-
- pfn -= addr >> PAGE_SHIFT;
- pmd = pmd_alloc(mm, pud, addr);
-@@ -1720,9 +1732,10 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
- VM_BUG_ON(pmd_trans_huge(*pmd));
- do {
- next = pmd_addr_end(addr, end);
-- if (remap_pte_range(mm, pmd, addr, next,
-- pfn + (addr >> PAGE_SHIFT), prot))
-- return -ENOMEM;
-+ err = remap_pte_range(mm, pmd, addr, next,
-+ pfn + (addr >> PAGE_SHIFT), prot);
-+ if (err)
-+ return err;
- } while (pmd++, addr = next, addr != end);
- return 0;
- }
-@@ -1733,6 +1746,7 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
- {
- pud_t *pud;
- unsigned long next;
-+ int err;
-
- pfn -= addr >> PAGE_SHIFT;
- pud = pud_alloc(mm, pgd, addr);
-@@ -1740,9 +1754,10 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
- return -ENOMEM;
- do {
- next = pud_addr_end(addr, end);
-- if (remap_pmd_range(mm, pud, addr, next,
-- pfn + (addr >> PAGE_SHIFT), prot))
-- return -ENOMEM;
-+ err = remap_pmd_range(mm, pud, addr, next,
-+ pfn + (addr >> PAGE_SHIFT), prot);
-+ if (err)
-+ return err;
- } while (pud++, addr = next, addr != end);
- return 0;
- }
-diff --git a/mm/mprotect.c b/mm/mprotect.c
-index b8849a3930a0..fcd678c3bd24 100644
---- a/mm/mprotect.c
-+++ b/mm/mprotect.c
-@@ -255,6 +255,42 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
- return pages;
- }
-
-+static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
-+ unsigned long next, struct mm_walk *walk)
-+{
-+ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
-+ 0 : -EACCES;
-+}
-+
-+static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
-+ unsigned long addr, unsigned long next,
-+ struct mm_walk *walk)
-+{
-+ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
-+ 0 : -EACCES;
-+}
-+
-+static int prot_none_test(unsigned long addr, unsigned long next,
-+ struct mm_walk *walk)
-+{
-+ return 0;
-+}
-+
-+static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
-+ unsigned long end, unsigned long newflags)
-+{
-+ pgprot_t new_pgprot = vm_get_page_prot(newflags);
-+ struct mm_walk prot_none_walk = {
-+ .pte_entry = prot_none_pte_entry,
-+ .hugetlb_entry = prot_none_hugetlb_entry,
-+ .test_walk = prot_none_test,
-+ .mm = current->mm,
-+ .private = &new_pgprot,
-+ };
-+
-+ return walk_page_range(start, end, &prot_none_walk);
-+}
-+
- int
- mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
- unsigned long start, unsigned long end, unsigned long newflags)
-@@ -272,6 +308,19 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
- return 0;
- }
-
-+ /*
-+ * Do PROT_NONE PFN permission checks here when we can still
-+ * bail out without undoing a lot of state. This is a rather
-+ * uncommon case, so doesn't need to be very optimized.
-+ */
-+ if (arch_has_pfn_modify_check() &&
-+ (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
-+ (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
-+ error = prot_none_walk(vma, start, end, newflags);
-+ if (error)
-+ return error;
-+ }
-+
- /*
- * If we make a private mapping writable we increase our commit;
- * but (without finer accounting) cannot reduce our commit if we
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0015-x86-speculation-l1tf-Limit-swap-file-size-to-MAX_PA-.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0015-x86-speculation-l1tf-Limit-swap-file-size-to-MAX_PA-.patch
deleted file mode 100644
index 17a4379..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0015-x86-speculation-l1tf-Limit-swap-file-size-to-MAX_PA-.patch
+++ /dev/null
@@ -1,145 +0,0 @@
-From 7a990cc29bb1723bdb52ab3cd302862344a01343 Mon Sep 17 00:00:00 2001
-From: Andi Kleen <ak@linux.intel.com>
-Date: Wed, 13 Jun 2018 15:48:28 -0700
-Subject: [PATCH 15/22] x86/speculation/l1tf: Limit swap file size to MAX_PA/2
-
-commit 377eeaa8e11fe815b1d07c81c4a0e2843a8c15eb upstream
-
-For the L1TF workaround it's necessary to limit the swap file size to below
-MAX_PA/2, so that the higher bits of the inverted swap offset never point
-to valid memory.
-
-Add a mechanism for the architecture to override the swap file size check
-in swapfile.c and add a x86 specific max swapfile check function that
-enforces that limit.
-
-The check is only enabled if the CPU is vulnerable to L1TF.
-
-In VMs with 42bit MAX_PA the typical limit is 2TB now, on a native system
-with 46bit PA it is 32TB. The limit is only per individual swap file, so
-it's always possible to exceed these limits with multiple swap files or
-partitions.
-
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
-Acked-by: Michal Hocko <mhocko@suse.com>
-Acked-by: Dave Hansen <dave.hansen@intel.com>
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/mm/init.c | 15 +++++++++++++
- include/linux/swapfile.h | 2 ++
- mm/swapfile.c | 46 ++++++++++++++++++++++++++--------------
- 3 files changed, 47 insertions(+), 16 deletions(-)
-
-diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
-index 151fd33e9043..afde6da2768f 100644
---- a/arch/x86/mm/init.c
-+++ b/arch/x86/mm/init.c
-@@ -4,6 +4,8 @@
- #include <linux/swap.h>
- #include <linux/memblock.h>
- #include <linux/bootmem.h> /* for max_low_pfn */
-+#include <linux/swapfile.h>
-+#include <linux/swapops.h>
-
- #include <asm/cacheflush.h>
- #include <asm/e820.h>
-@@ -767,3 +769,16 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
- __cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
- __pte2cachemode_tbl[entry] = cache;
- }
-+
-+unsigned long max_swapfile_size(void)
-+{
-+ unsigned long pages;
-+
-+ pages = generic_max_swapfile_size();
-+
-+ if (boot_cpu_has_bug(X86_BUG_L1TF)) {
-+ /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
-+ pages = min_t(unsigned long, l1tf_pfn_limit() + 1, pages);
-+ }
-+ return pages;
-+}
-diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
-index 388293a91e8c..e4594de79bc4 100644
---- a/include/linux/swapfile.h
-+++ b/include/linux/swapfile.h
-@@ -9,5 +9,7 @@ extern spinlock_t swap_lock;
- extern struct plist_head swap_active_head;
- extern struct swap_info_struct *swap_info[];
- extern int try_to_unuse(unsigned int, bool, unsigned long);
-+extern unsigned long generic_max_swapfile_size(void);
-+extern unsigned long max_swapfile_size(void);
-
- #endif /* _LINUX_SWAPFILE_H */
-diff --git a/mm/swapfile.c b/mm/swapfile.c
-index eb1c32764ae9..c6f5befa62b7 100644
---- a/mm/swapfile.c
-+++ b/mm/swapfile.c
-@@ -2231,6 +2231,35 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
- return 0;
- }
-
-+
-+/*
-+ * Find out how many pages are allowed for a single swap device. There
-+ * are two limiting factors:
-+ * 1) the number of bits for the swap offset in the swp_entry_t type, and
-+ * 2) the number of bits in the swap pte, as defined by the different
-+ * architectures.
-+ *
-+ * In order to find the largest possible bit mask, a swap entry with
-+ * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
-+ * decoded to a swp_entry_t again, and finally the swap offset is
-+ * extracted.
-+ *
-+ * This will mask all the bits from the initial ~0UL mask that can't
-+ * be encoded in either the swp_entry_t or the architecture definition
-+ * of a swap pte.
-+ */
-+unsigned long generic_max_swapfile_size(void)
-+{
-+ return swp_offset(pte_to_swp_entry(
-+ swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
-+}
-+
-+/* Can be overridden by an architecture for additional checks. */
-+__weak unsigned long max_swapfile_size(void)
-+{
-+ return generic_max_swapfile_size();
-+}
-+
- static unsigned long read_swap_header(struct swap_info_struct *p,
- union swap_header *swap_header,
- struct inode *inode)
-@@ -2266,22 +2295,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
- p->cluster_next = 1;
- p->cluster_nr = 0;
-
-- /*
-- * Find out how many pages are allowed for a single swap
-- * device. There are two limiting factors: 1) the number
-- * of bits for the swap offset in the swp_entry_t type, and
-- * 2) the number of bits in the swap pte as defined by the
-- * different architectures. In order to find the
-- * largest possible bit mask, a swap entry with swap type 0
-- * and swap offset ~0UL is created, encoded to a swap pte,
-- * decoded to a swp_entry_t again, and finally the swap
-- * offset is extracted. This will mask all the bits from
-- * the initial ~0UL mask that can't be encoded in either
-- * the swp_entry_t or the architecture definition of a
-- * swap pte.
-- */
-- maxpages = swp_offset(pte_to_swp_entry(
-- swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
-+ maxpages = max_swapfile_size();
- last_page = swap_header->info.last_page;
- if (last_page > maxpages) {
- pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0016-x86-speculation-l1tf-Extend-64bit-swap-file-size-lim.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0016-x86-speculation-l1tf-Extend-64bit-swap-file-size-lim.patch
deleted file mode 100644
index 36491e4..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0016-x86-speculation-l1tf-Extend-64bit-swap-file-size-lim.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From 095be8cfb6d35515c3b93a45c53c9ee3e411ee13 Mon Sep 17 00:00:00 2001
-From: Vlastimil Babka <vbabka@suse.cz>
-Date: Thu, 21 Jun 2018 12:36:29 +0200
-Subject: [PATCH 16/22] x86/speculation/l1tf: Extend 64bit swap file size limit
-
-commit 1a7ed1ba4bba6c075d5ad61bb75e3fbc870840d6 upstream
-
-The previous patch has limited swap file size so that large offsets cannot
-clear bits above MAX_PA/2 in the pte and interfere with L1TF mitigation.
-
-It assumed that offsets are encoded starting with bit 12, same as pfn. But
-on x86_64, offsets are encoded starting with bit 9.
-
-Thus the limit can be raised by 3 bits. That means 16TB with 42bit MAX_PA
-and 256TB with 46bit MAX_PA.
-
-Fixes: 377eeaa8e11f ("x86/speculation/l1tf: Limit swap file size to MAX_PA/2")
-Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/mm/init.c | 10 +++++++++-
- 1 file changed, 9 insertions(+), 1 deletion(-)
-
-diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
-index afde6da2768f..8de904926d7f 100644
---- a/arch/x86/mm/init.c
-+++ b/arch/x86/mm/init.c
-@@ -778,7 +778,15 @@ unsigned long max_swapfile_size(void)
-
- if (boot_cpu_has_bug(X86_BUG_L1TF)) {
- /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
-- pages = min_t(unsigned long, l1tf_pfn_limit() + 1, pages);
-+ unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
-+ /*
-+ * We encode swap offsets also with 3 bits below those for pfn
-+ * which makes the usable limit higher.
-+ */
-+#ifdef CONFIG_X86_64
-+ l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
-+#endif
-+ pages = min_t(unsigned long, l1tf_limit, pages);
- }
- return pages;
- }
---
-2.18.0.597.ga71716f1ad-goog
-
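The limits quoted in the two patches above can be reproduced with a little arithmetic. A stand-alone sketch (user-space C); the constants mirror the commit messages, with swap offsets starting at bit 12 (same as the pfn) before the fix and at bit 9 on x86_64 after it:

        #include <stdio.h>

        /* Swap-file limit implied by L1TF: offsets must stay below MAX_PA/2. */
        static unsigned long long l1tf_limit_bytes(int max_pa_bits, int offset_first_bit)
        {
                /* Number of 4K pages below the MAX_PA/2 boundary. */
                unsigned long long pages = 1ULL << (max_pa_bits - 1 - 12);
                /*
                 * On x86_64 swap offsets start 3 bits below the pfn (bit 9
                 * instead of bit 12), so the usable limit is 8x higher.
                 */
                pages <<= 12 - offset_first_bit;
                return pages << 12; /* back to bytes */
        }

        int main(void)
        {
                printf("42-bit MAX_PA, base:     %llu TB\n", l1tf_limit_bytes(42, 12) >> 40);
                printf("46-bit MAX_PA, base:     %llu TB\n", l1tf_limit_bytes(46, 12) >> 40);
                printf("42-bit MAX_PA, extended: %llu TB\n", l1tf_limit_bytes(42, 9) >> 40);
                printf("46-bit MAX_PA, extended: %llu TB\n", l1tf_limit_bytes(46, 9) >> 40);
                return 0;
        }

This prints 2 TB / 32 TB for the base limit and 16 TB / 256 TB for the extended one, matching the figures in the changelogs.
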
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0017-x86-speculation-l1tf-Protect-PAE-swap-entries-agains.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0017-x86-speculation-l1tf-Protect-PAE-swap-entries-agains.patch
deleted file mode 100644
index 231e660..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0017-x86-speculation-l1tf-Protect-PAE-swap-entries-agains.patch
+++ /dev/null
@@ -1,97 +0,0 @@
-From e95a79b5f1cabc9225d12af3d85ca3f9b05583ad Mon Sep 17 00:00:00 2001
-From: Vlastimil Babka <vbabka@suse.cz>
-Date: Fri, 22 Jun 2018 17:39:33 +0200
-Subject: [PATCH 17/22] x86/speculation/l1tf: Protect PAE swap entries against
- L1TF
-
-commit 0d0f6249058834ffe1ceaad0bb31464af66f6e7a upstream
-
-The PAE 3-level paging code currently doesn't mitigate L1TF by flipping the
-offset bits, and uses the high PTE word, thus bits 32-36 for type, 37-63 for
-offset. The lower word is zeroed, thus systems with less than 4GB memory are
-safe. With 4GB to 128GB the swap type selects the memory locations vulnerable
-to L1TF; with even more memory, the swap offset also influences the address.
-This might be a problem with 32bit PAE guests running on large 64bit hosts.
-
-By continuing to keep the whole swap entry in either high or low 32bit word of
-PTE we would limit the swap size too much. Thus this patch uses the whole PAE
-PTE with the same layout as the 64bit version does. The macros just become a
-bit tricky since they assume the arch-dependent swp_entry_t to be 32bit.
-
-Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Michal Hocko <mhocko@suse.com>
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/pgtable-3level.h | 35 +++++++++++++++++++++++++--
- arch/x86/mm/init.c | 2 +-
- 2 files changed, 34 insertions(+), 3 deletions(-)
-
-diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
-index 0c89891c7b44..5c686382d84b 100644
---- a/arch/x86/include/asm/pgtable-3level.h
-+++ b/arch/x86/include/asm/pgtable-3level.h
-@@ -177,12 +177,43 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
- #endif
-
- /* Encode and de-code a swap entry */
-+#define SWP_TYPE_BITS 5
-+
-+#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
-+
-+/* We always extract/encode the offset by shifting it all the way up, and then down again */
-+#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
-+
- #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
- #define __swp_type(x) (((x).val) & 0x1f)
- #define __swp_offset(x) ((x).val >> 5)
- #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
--#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
--#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
-+
-+/*
-+ * Normally, __swp_entry() converts from arch-independent swp_entry_t to
-+ * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
-+ * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
-+ * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
-+ * __swp_entry_to_pte() through the following helper macro based on 64bit
-+ * __swp_entry().
-+ */
-+#define __swp_pteval_entry(type, offset) ((pteval_t) { \
-+ (~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
-+ | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })
-+
-+#define __swp_entry_to_pte(x) ((pte_t){ .pte = \
-+ __swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
-+/*
-+ * Analogically, __pte_to_swp_entry() doesn't just extract the arch-dependent
-+ * swp_entry_t, but also has to convert it from 64bit to the 32bit
-+ * intermediate representation, using the following macros based on 64bit
-+ * __swp_type() and __swp_offset().
-+ */
-+#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
-+#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))
-+
-+#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
-+ __pteval_swp_offset(pte)))
-
- #include <asm/pgtable-invert.h>
-
-diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
-index 8de904926d7f..3a8e9abe0667 100644
---- a/arch/x86/mm/init.c
-+++ b/arch/x86/mm/init.c
-@@ -783,7 +783,7 @@ unsigned long max_swapfile_size(void)
- * We encode swap offsets also with 3 bits below those for pfn
- * which makes the usable limit higher.
- */
--#ifdef CONFIG_X86_64
-+#if CONFIG_PGTABLE_LEVELS > 2
- l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
- #endif
- pages = min_t(unsigned long, l1tf_limit, pages);
---
-2.18.0.597.ga71716f1ad-goog
-
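The encode/decode dance in the PAE macros above can be illustrated with a small user-space round-trip test. The bit positions (type in the top 5 bits, inverted offset starting at bit 9) follow the values used in the patch; the helpers are illustrative stand-ins, not the kernel macros themselves:

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        #define SWP_TYPE_BITS           5
        #define SWP_OFFSET_FIRST_BIT    9       /* bit above _PAGE_BIT_PROTNONE */
        #define SWP_OFFSET_SHIFT        (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

        /* Encode: type in the top bits, offset stored inverted (L1TF) above bit 9. */
        static uint64_t swp_encode(unsigned type, uint64_t offset)
        {
                return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
                       ((uint64_t)type << (64 - SWP_TYPE_BITS));
        }

        static unsigned swp_type(uint64_t pte)
        {
                return pte >> (64 - SWP_TYPE_BITS);
        }

        static uint64_t swp_offset(uint64_t pte)
        {
                return ~pte << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT;
        }

        int main(void)
        {
                uint64_t pte = swp_encode(3, 0x12345678);

                assert(swp_type(pte) == 3);
                assert(swp_offset(pte) == 0x12345678);
                /*
                 * The stored offset bits are inverted, so a small offset ends
                 * up pointing at a high, normally unpopulated physical address
                 * in the non-present PTE -- which is the L1TF protection.
                 */
                printf("pte = %#llx\n", (unsigned long long)pte);
                return 0;
        }
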
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0018-x86-speculation-l1tf-Fix-up-pte-pfn-conversion-for-P.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0018-x86-speculation-l1tf-Fix-up-pte-pfn-conversion-for-P.patch
deleted file mode 100644
index b84d75a..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0018-x86-speculation-l1tf-Fix-up-pte-pfn-conversion-for-P.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From 81c7477c3eefc3337e109af41ee4922d20acd4ff Mon Sep 17 00:00:00 2001
-From: Michal Hocko <mhocko@suse.cz>
-Date: Wed, 27 Jun 2018 17:46:50 +0200
-Subject: [PATCH 18/22] x86/speculation/l1tf: Fix up pte->pfn conversion for
- PAE
-
-commit e14d7dfb41f5807a0c1c26a13f2b8ef16af24935 upstream
-
-Jan has noticed that pte_pfn() and co., resp. pfn_pte(), are incorrect for
-CONFIG_PAE because phys_addr_t is wider than unsigned long, so the result
-of pte_val() resp. of the shift left would get truncated. Fix this up by
-using proper types.
-
-[dwmw2: Backport to 4.9]
-
-Fixes: 6b28baca9b1f ("x86/speculation/l1tf: Protect PROT_NONE PTEs against speculation")
-Reported-by: Jan Beulich <JBeulich@suse.com>
-Signed-off-by: Michal Hocko <mhocko@suse.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Vlastimil Babka <vbabka@suse.cz>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/pgtable.h | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
-diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index 70e2248353cb..16c6886a1ece 100644
---- a/arch/x86/include/asm/pgtable.h
-+++ b/arch/x86/include/asm/pgtable.h
-@@ -154,21 +154,21 @@ static inline u64 protnone_mask(u64 val);
-
- static inline unsigned long pte_pfn(pte_t pte)
- {
-- unsigned long pfn = pte_val(pte);
-+ phys_addr_t pfn = pte_val(pte);
- pfn ^= protnone_mask(pfn);
- return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
- }
-
- static inline unsigned long pmd_pfn(pmd_t pmd)
- {
-- unsigned long pfn = pmd_val(pmd);
-+ phys_addr_t pfn = pmd_val(pmd);
- pfn ^= protnone_mask(pfn);
- return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
- }
-
- static inline unsigned long pud_pfn(pud_t pud)
- {
-- unsigned long pfn = pud_val(pud);
-+ phys_addr_t pfn = pud_val(pud);
- pfn ^= protnone_mask(pfn);
- return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
- }
-@@ -369,7 +369,7 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
-
- static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
- {
-- phys_addr_t pfn = page_nr << PAGE_SHIFT;
-+ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
- pfn ^= protnone_mask(pgprot_val(pgprot));
- pfn &= PTE_PFN_MASK;
- return __pte(pfn | massage_pgprot(pgprot));
-@@ -377,7 +377,7 @@ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-
- static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
- {
-- phys_addr_t pfn = page_nr << PAGE_SHIFT;
-+ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
- pfn ^= protnone_mask(pgprot_val(pgprot));
- pfn &= PHYSICAL_PMD_PAGE_MASK;
- return __pmd(pfn | massage_pgprot(pgprot));
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0019-x86-speculation-l1tf-Invert-all-not-present-mappings.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0019-x86-speculation-l1tf-Invert-all-not-present-mappings.patch
deleted file mode 100644
index bfbc4dc..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0019-x86-speculation-l1tf-Invert-all-not-present-mappings.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From b09c7934cee94a30ceecd25d89ff79990cd8c997 Mon Sep 17 00:00:00 2001
-From: Andi Kleen <ak@linux.intel.com>
-Date: Tue, 7 Aug 2018 15:09:36 -0700
-Subject: [PATCH 19/22] x86/speculation/l1tf: Invert all not present mappings
-
-commit f22cc87f6c1f771b57c407555cfefd811cdd9507 upstream
-
-For kernel mappings PAGE_PROTNONE is not necessarily set for a non-present
-mapping, but the inversion logic explicitly checks for !PRESENT and
-PROT_NONE.
-
-Remove the PROT_NONE check and make the inversion unconditional for all
-non-present mappings.
-
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/pgtable-invert.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h
-index 177564187fc0..44b1203ece12 100644
---- a/arch/x86/include/asm/pgtable-invert.h
-+++ b/arch/x86/include/asm/pgtable-invert.h
-@@ -6,7 +6,7 @@
-
- static inline bool __pte_needs_invert(u64 val)
- {
-- return (val & (_PAGE_PRESENT|_PAGE_PROTNONE)) == _PAGE_PROTNONE;
-+ return !(val & _PAGE_PRESENT);
- }
-
- /* Get a mask to xor with the page table entry to get the correct pfn. */
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0020-x86-speculation-l1tf-Make-pmd-pud_mknotpresent-inver.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0020-x86-speculation-l1tf-Make-pmd-pud_mknotpresent-inver.patch
deleted file mode 100644
index d7cdfa5..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0020-x86-speculation-l1tf-Make-pmd-pud_mknotpresent-inver.patch
+++ /dev/null
@@ -1,60 +0,0 @@
-From daecbd32d678c00ebeb9ab77c1a2eeec2bc97105 Mon Sep 17 00:00:00 2001
-From: Andi Kleen <ak@linux.intel.com>
-Date: Tue, 7 Aug 2018 15:09:37 -0700
-Subject: [PATCH 20/22] x86/speculation/l1tf: Make pmd/pud_mknotpresent()
- invert
-
-commit 0768f91530ff46683e0b372df14fd79fe8d156e5 upstream
-
-Some cases in THP like:
- - MADV_FREE
- - mprotect
- - split
-
-temporarily mark the PMD non-present to prevent races. The window for
-an L1TF attack in these contexts is very small, but it wants to be fixed
-for correctness' sake.
-
-Use the proper low level functions for pmd/pud_mknotpresent() to address
-this.
-
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/pgtable.h | 11 ++++++-----
- 1 file changed, 6 insertions(+), 5 deletions(-)
-
-diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index 16c6886a1ece..b5e157c065ae 100644
---- a/arch/x86/include/asm/pgtable.h
-+++ b/arch/x86/include/asm/pgtable.h
-@@ -315,11 +315,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
- return pmd_set_flags(pmd, _PAGE_RW);
- }
-
--static inline pmd_t pmd_mknotpresent(pmd_t pmd)
--{
-- return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
--}
--
- #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
- static inline int pte_soft_dirty(pte_t pte)
- {
-@@ -383,6 +378,12 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
- return __pmd(pfn | massage_pgprot(pgprot));
- }
-
-+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-+{
-+ return pfn_pmd(pmd_pfn(pmd),
-+ __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
-+}
-+
- static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
-
- static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0021-x86-mm-pat-Make-set_memory_np-L1TF-safe.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0021-x86-mm-pat-Make-set_memory_np-L1TF-safe.patch
deleted file mode 100644
index 3972ebf..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0021-x86-mm-pat-Make-set_memory_np-L1TF-safe.patch
+++ /dev/null
@@ -1,100 +0,0 @@
-From a3722246733b337e8d54885fc12b6660620ba67a Mon Sep 17 00:00:00 2001
-From: Andi Kleen <ak@linux.intel.com>
-Date: Tue, 7 Aug 2018 15:09:39 -0700
-Subject: [PATCH 21/22] x86/mm/pat: Make set_memory_np() L1TF safe
-
-commit 958f79b9ee55dfaf00c8106ed1c22a2919e0028b upstream
-
-set_memory_np() is used to mark kernel mappings not present, but it has
-its own open-coded mechanism which does not have the L1TF protection of
-inverting the address bits.
-
-Replace the open-coded PTE manipulation with the L1TF-protecting low-level
-PTE routines.
-
-Passes the CPA self test.
-
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-[ dwmw2: Pull in pud_mkhuge() from commit a00cc7d9dd, and pfn_pud() ]
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-[groeck: port to 4.4]
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/include/asm/pgtable.h | 27 +++++++++++++++++++++++++++
- arch/x86/mm/pageattr.c | 8 ++++----
- 2 files changed, 31 insertions(+), 4 deletions(-)
-
-diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index b5e157c065ae..4de6c282c02a 100644
---- a/arch/x86/include/asm/pgtable.h
-+++ b/arch/x86/include/asm/pgtable.h
-@@ -378,12 +378,39 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
- return __pmd(pfn | massage_pgprot(pgprot));
- }
-
-+static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
-+{
-+ phys_addr_t pfn = page_nr << PAGE_SHIFT;
-+ pfn ^= protnone_mask(pgprot_val(pgprot));
-+ pfn &= PHYSICAL_PUD_PAGE_MASK;
-+ return __pud(pfn | massage_pgprot(pgprot));
-+}
-+
- static inline pmd_t pmd_mknotpresent(pmd_t pmd)
- {
- return pfn_pmd(pmd_pfn(pmd),
- __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
- }
-
-+static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
-+{
-+ pudval_t v = native_pud_val(pud);
-+
-+ return __pud(v | set);
-+}
-+
-+static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
-+{
-+ pudval_t v = native_pud_val(pud);
-+
-+ return __pud(v & ~clear);
-+}
-+
-+static inline pud_t pud_mkhuge(pud_t pud)
-+{
-+ return pud_set_flags(pud, _PAGE_PSE);
-+}
-+
- static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
-
- static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-index 79377e2a7bcd..27610c2d1821 100644
---- a/arch/x86/mm/pageattr.c
-+++ b/arch/x86/mm/pageattr.c
-@@ -1006,8 +1006,8 @@ static int populate_pmd(struct cpa_data *cpa,
-
- pmd = pmd_offset(pud, start);
-
-- set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
-- massage_pgprot(pmd_pgprot)));
-+ set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
-+ canon_pgprot(pmd_pgprot))));
-
- start += PMD_SIZE;
- cpa->pfn += PMD_SIZE;
-@@ -1079,8 +1079,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
- * Map everything starting from the Gb boundary, possibly with 1G pages
- */
- while (end - start >= PUD_SIZE) {
-- set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
-- massage_pgprot(pud_pgprot)));
-+ set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
-+ canon_pgprot(pud_pgprot))));
-
- start += PUD_SIZE;
- cpa->pfn += PUD_SIZE;
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0022-x86-mm-kmmio-Make-the-tracer-robust-against-L1TF.patch b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0022-x86-mm-kmmio-Make-the-tracer-robust-against-L1TF.patch
deleted file mode 100644
index 4c5504b..0000000
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/files/0022-x86-mm-kmmio-Make-the-tracer-robust-against-L1TF.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From 2371b928413c7e1546ac347cd029fc860d6ae08d Mon Sep 17 00:00:00 2001
-From: Andi Kleen <ak@linux.intel.com>
-Date: Tue, 7 Aug 2018 15:09:38 -0700
-Subject: [PATCH 22/22] x86/mm/kmmio: Make the tracer robust against L1TF
-
-commit 1063711b57393c1999248cccb57bebfaf16739e7 upstream
-
-The mmio tracer sets io mapping PTEs and PMDs to non-present when enabled,
-without inverting the address bits, which makes the PTE entry vulnerable
-to L1TF.
-
-Make it use the right low-level macros to actually invert the address bits
-to protect against L1TF.
-
-In principle this could be avoided because MMIO tracing is not likely to be
-enabled on production machines, but the fix is straightforward and for
-consistency's sake it's better to get rid of the open-coded PTE manipulation.
-
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Guenter Roeck <linux@roeck-us.net>
----
- arch/x86/mm/kmmio.c | 25 +++++++++++++++----------
- 1 file changed, 15 insertions(+), 10 deletions(-)
-
-diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
-index ddb2244b06a1..3af96901a72b 100644
---- a/arch/x86/mm/kmmio.c
-+++ b/arch/x86/mm/kmmio.c
-@@ -125,24 +125,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
-
- static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
- {
-+ pmd_t new_pmd;
- pmdval_t v = pmd_val(*pmd);
- if (clear) {
-- *old = v & _PAGE_PRESENT;
-- v &= ~_PAGE_PRESENT;
-- } else /* presume this has been called with clear==true previously */
-- v |= *old;
-- set_pmd(pmd, __pmd(v));
-+ *old = v;
-+ new_pmd = pmd_mknotpresent(*pmd);
-+ } else {
-+ /* Presume this has been called with clear==true previously */
-+ new_pmd = __pmd(*old);
-+ }
-+ set_pmd(pmd, new_pmd);
- }
-
- static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
- {
- pteval_t v = pte_val(*pte);
- if (clear) {
-- *old = v & _PAGE_PRESENT;
-- v &= ~_PAGE_PRESENT;
-- } else /* presume this has been called with clear==true previously */
-- v |= *old;
-- set_pte_atomic(pte, __pte(v));
-+ *old = v;
-+ /* Nothing should care about address */
-+ pte_clear(&init_mm, 0, pte);
-+ } else {
-+ /* Presume this has been called with clear==true previously */
-+ set_pte_atomic(pte, __pte(*old));
-+ }
- }
-
- static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
---
-2.18.0.597.ga71716f1ad-goog
-
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/lakitu-kernel-4_4-4.4.111-r1320.ebuild b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/lakitu-kernel-4_4-4.4.111-r1321.ebuild
similarity index 100%
rename from overlay-lakitu/sys-kernel/lakitu-kernel-4_4/lakitu-kernel-4_4-4.4.111-r1320.ebuild
rename to overlay-lakitu/sys-kernel/lakitu-kernel-4_4/lakitu-kernel-4_4-4.4.111-r1321.ebuild
diff --git a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/lakitu-kernel-4_4-9999.ebuild b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/lakitu-kernel-4_4-9999.ebuild
index dbaf628..d005a8b 100644
--- a/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/lakitu-kernel-4_4-9999.ebuild
+++ b/overlay-lakitu/sys-kernel/lakitu-kernel-4_4/lakitu-kernel-4_4-9999.ebuild
@@ -33,4 +33,4 @@
# NOTE: There's nothing magic keeping this number prime but you just need to
# make _any_ change to this file. ...so why not keep it prime?
#
-# The coolest prime number is: 11
+# The coolest prime number is: 7