[SRU Bionic 2/3] mm: mmu_notifier fix for tlb_end_vma
Thadeu Lima de Souza Cascardo
cascardo at canonical.com
Mon Nov 29 18:52:46 UTC 2021
From: Nicholas Piggin <npiggin at gmail.com>
The generic tlb_end_vma does not call the invalidate_range mmu notifier, and
it resets the mmu_gather range, which means the notifier won't be called on
part of the range in case of an unmap that spans multiple vmas.
ARM64 seems to be the only arch I could see that has notifiers and uses
the generic tlb_end_vma. I have not actually tested it.
[ Catalin and Will point out that ARM64 currently only uses the
notifiers for KVM, which doesn't use the ->invalidate_range()
callback right now, so it's a bug, but one that happens to
not affect them. So not necessary for stable. - Linus ]
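To make the failure mode concrete, here is a minimal userspace sketch of an
unmap crossing two vmas; it is not kernel code, just simplified stand-ins
that reuse the kernel helper names. Before this patch the per-vma flush
resets the gather range without notifying, so by the time the final flush
would notify, the range is already empty:

#include <stdio.h>

/* Userspace simulation only: simplified stand-ins for the kernel's
 * mmu_gather helpers, kept just close enough to show the bug. */
struct mmu_gather {
	unsigned long start, end;	/* pending flush range; end == 0 means empty */
};

static void mmu_notifier_invalidate_range(unsigned long start, unsigned long end)
{
	printf("notifier invalidate [%#lx, %#lx)\n", start, end);
}

static void tlb_flush(struct mmu_gather *tlb)
{
	printf("hardware flush      [%#lx, %#lx)\n", tlb->start, tlb->end);
}

static void __tlb_reset_range(struct mmu_gather *tlb)
{
	tlb->start = ~0UL;
	tlb->end = 0;
}

/* Pre-patch __tlb_end_vma: flushes and resets, never notifies. */
static void tlb_end_vma_old(struct mmu_gather *tlb)
{
	if (tlb->end) {
		tlb_flush(tlb);
		__tlb_reset_range(tlb);	/* notifier range is lost here */
	}
}

/* Post-patch: the flush helper notifies before resetting. */
static void tlb_end_vma_new(struct mmu_gather *tlb)
{
	if (tlb->end) {
		tlb_flush(tlb);
		mmu_notifier_invalidate_range(tlb->start, tlb->end);
		__tlb_reset_range(tlb);
	}
}

int main(void)
{
	/* one munmap spanning vma A [0x1000,0x2000) and vma B [0x2000,0x3000) */
	struct mmu_gather tlb = { 0x1000, 0x2000 };

	puts("pre-patch:");
	tlb_end_vma_old(&tlb);		/* vma A: flushed, notifier never called */
	tlb.start = 0x2000; tlb.end = 0x3000;
	tlb_end_vma_old(&tlb);		/* vma B: same; final flush sees end == 0 */

	puts("post-patch:");
	tlb.start = 0x1000; tlb.end = 0x2000;
	tlb_end_vma_new(&tlb);		/* each vma's range is now notified */
	tlb.start = 0x2000; tlb.end = 0x3000;
	tlb_end_vma_new(&tlb);
	return 0;
}

Running the old path prints only hardware flushes and no notifier lines,
while the new path prints a notifier invalidate per vma range; that
per-range notification is what the hunks below add to __tlb_end_vma by
routing it through tlb_flush_mmu_tlbonly.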
Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
Acked-by: Catalin Marinas <catalin.marinas at arm.com>
Acked-by: Will Deacon <will.deacon at arm.com>
Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
(cherry picked from commit fd1102f0aadec7d18792b132e1d224290b2aecca)
CVE-2021-4002
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo at canonical.com>
---
 include/asm-generic/tlb.h | 17 +++++++++++++----
 mm/memory.c               | 10 ----------
 2 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index faddde44de8c..ca83b0927520 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -15,6 +15,7 @@
 #ifndef _ASM_GENERIC__TLB_H
 #define _ASM_GENERIC__TLB_H
 
+#include <linux/mmu_notifier.h>
 #include <linux/swap.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
@@ -138,6 +139,16 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 	}
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	if (!tlb->end)
+		return;
+
+	tlb_flush(tlb);
+	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
+	__tlb_reset_range(tlb);
+}
+
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
@@ -186,10 +197,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 
 #define __tlb_end_vma(tlb, vma)					\
 	do {							\
-		if (!tlb->fullmm && tlb->end) {			\
-			tlb_flush(tlb);				\
-			__tlb_reset_range(tlb);			\
-		}						\
+		if (!tlb->fullmm)				\
+			tlb_flush_mmu_tlbonly(tlb);		\
 	} while (0)
 
 #ifndef tlb_end_vma
diff --git a/mm/memory.c b/mm/memory.c
index 60449b16febd..c9786d33cb14 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -251,16 +251,6 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 	__tlb_reset_range(tlb);
 }
 
-static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-	if (!tlb->end)
-		return;
-
-	tlb_flush(tlb);
-	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
-	__tlb_reset_range(tlb);
-}
-
 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch;
--
2.32.0