[SRU][N:gke][PATCH 104/106] mm: make folio_pte_batch available outside of mm/memory.c
Tim Whisonant
tim.whisonant at canonical.com
Mon Jul 21 16:22:27 UTC 2025
From: Barry Song <v-songbaohua at oppo.com>

BugLink: https://bugs.launchpad.net/bugs/2117098

madvise, mprotect and some others might need folio_pte_batch to check
whether a range of PTEs is completely mapped to a large folio with
contiguous physical addresses. Let's make it available in mm/internal.h.

While at it, add proper kernel doc and sanity-check more input parameters
using two additional VM_WARN_ON_FOLIO().
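
As an illustration only (not part of this patch), a caller along the
lines of the madvise/mprotect users mentioned above might consume the
helper roughly as below; walk_one_folio() is a hypothetical name used
purely for this sketch:

	/*
	 * Hypothetical example: return how many consecutive PTEs, starting
	 * at @ptep/@addr, map consecutive pages of @folio.
	 */
	static int walk_one_folio(struct folio *folio, unsigned long addr,
				  pte_t *ptep, pte_t pte, int max_nr)
	{
		const fpb_t flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
		bool any_writable;

		if (!folio_test_large(folio))
			return 1;

		/* The caller must cap max_nr so the scan never crosses a page table. */
		return folio_pte_batch(folio, addr, ptep, pte, max_nr, flags,
				       &any_writable);
	}

Per the kernel doc added below, any_writable is optional and may be NULL
when the caller does not care about writability.
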
[21cnbao at gmail.com: build fix]
Link: https://lkml.kernel.org/r/CAGsJ_4wWzG-37D82vqP_zt+Fcbz+URVe5oXLBc4M5wbN8A_gpQ@mail.gmail.com
[david at redhat.com: improve the doc for the exported func]
Link: https://lkml.kernel.org/r/20240227104201.337988-1-21cnbao@gmail.com
Signed-off-by: David Hildenbrand <david at redhat.com>
Signed-off-by: Barry Song <v-songbaohua at oppo.com>
Suggested-by: David Hildenbrand <david at redhat.com>
Reviewed-by: Ryan Roberts <ryan.roberts at arm.com>
Acked-by: David Hildenbrand <david at redhat.com>
Cc: Lance Yang <ioworker0 at gmail.com>
Cc: Yin Fengwei <fengwei.yin at intel.com>
Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
(backported from commit ac96cc4d1ceda01d08deda1e45b9f1b55b0624d2)
[tswhison: Functions mmap_file() and vma_close() were brought
into the tree by a stable update prior to the inclusion of this
patch. Adjusted context accordingly.
3dd6ed34ce1f ("mm: avoid unsafe VMA hook invocation when error arises
on mmap hook") brings in mmap_file().
4080ef1579b2 ("mm: unconditionally close VMAs on error") brings
in vma_close().]
Signed-off-by: Tim Whisonant <tim.whisonant at canonical.com>
---
 mm/internal.h | 93 +++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/memory.c   | 76 -----------------------------------------
 2 files changed, 93 insertions(+), 76 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index b5a2263f11f15..9bfa8f5992f47 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -128,6 +128,99 @@ static inline void vma_close(struct vm_area_struct *vma)
 	}
 }
 
+#ifdef CONFIG_MMU
+
+/* Flags for folio_pte_batch(). */
+typedef int __bitwise fpb_t;
+
+/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
+#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))
+
+/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
+#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))
+
+static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
+{
+	if (flags & FPB_IGNORE_DIRTY)
+		pte = pte_mkclean(pte);
+	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
+		pte = pte_clear_soft_dirty(pte);
+	return pte_wrprotect(pte_mkold(pte));
+}
+
+/**
+ * folio_pte_batch - detect a PTE batch for a large folio
+ * @folio: The large folio to detect a PTE batch for.
+ * @addr: The user virtual address the first page is mapped at.
+ * @start_ptep: Page table pointer for the first entry.
+ * @pte: Page table entry for the first page.
+ * @max_nr: The maximum number of table entries to consider.
+ * @flags: Flags to modify the PTE batch semantics.
+ * @any_writable: Optional pointer to indicate whether any entry except the
+ *		  first one is writable.
+ *
+ * Detect a PTE batch: consecutive (present) PTEs that map consecutive
+ * pages of the same large folio.
+ *
+ * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
+ * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
+ * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
+ *
+ * start_ptep must map any page of the folio. max_nr must be at least one and
+ * must be limited by the caller so scanning cannot exceed a single page table.
+ *
+ * Return: the number of table entries in the batch.
+ */
+static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
+		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
+		bool *any_writable)
+{
+	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
+	const pte_t *end_ptep = start_ptep + max_nr;
+	pte_t expected_pte, *ptep;
+	bool writable;
+	int nr;
+
+	if (any_writable)
+		*any_writable = false;
+
+	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
+	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
+	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
+
+	nr = pte_batch_hint(start_ptep, pte);
+	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
+	ptep = start_ptep + nr;
+
+	while (ptep < end_ptep) {
+		pte = ptep_get(ptep);
+		if (any_writable)
+			writable = !!pte_write(pte);
+		pte = __pte_batch_clear_ignored(pte, flags);
+
+		if (!pte_same(pte, expected_pte))
+			break;
+
+		/*
+		 * Stop immediately once we reached the end of the folio. In
+		 * corner cases the next PFN might fall into a different
+		 * folio.
+		 */
+		if (pte_pfn(pte) >= folio_end_pfn)
+			break;
+
+		if (any_writable)
+			*any_writable |= writable;
+
+		nr = pte_batch_hint(ptep, pte);
+		expected_pte = pte_advance_pfn(expected_pte, nr);
+		ptep += nr;
+	}
+
+	return min(ptep - start_ptep, max_nr);
+}
+#endif /* CONFIG_MMU */
+
 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
 			int nr_throttled);
 static inline void acct_reclaim_writeback(struct folio *folio)
diff --git a/mm/memory.c b/mm/memory.c
index 46f1ebce10ad1..55dcf2c2854b5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -953,82 +953,6 @@ static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
 	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
 }
 
-/* Flags for folio_pte_batch(). */
-typedef int __bitwise fpb_t;
-
-/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
-#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))
-
-/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
-#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))
-
-static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
-{
-	if (flags & FPB_IGNORE_DIRTY)
-		pte = pte_mkclean(pte);
-	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
-		pte = pte_clear_soft_dirty(pte);
-	return pte_wrprotect(pte_mkold(pte));
-}
-
-/*
- * Detect a PTE batch: consecutive (present) PTEs that map consecutive
- * pages of the same folio.
- *
- * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
- * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
- * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
- *
- * If "any_writable" is set, it will indicate if any other PTE besides the
- * first (given) PTE is writable.
- */
-static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
-		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
-		bool *any_writable)
-{
-	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
-	const pte_t *end_ptep = start_ptep + max_nr;
-	pte_t expected_pte, *ptep;
-	bool writable;
-	int nr;
-
-	if (any_writable)
-		*any_writable = false;
-
-	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
-
-	nr = pte_batch_hint(start_ptep, pte);
-	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
-	ptep = start_ptep + nr;
-
-	while (ptep < end_ptep) {
-		pte = ptep_get(ptep);
-		if (any_writable)
-			writable = !!pte_write(pte);
-		pte = __pte_batch_clear_ignored(pte, flags);
-
-		if (!pte_same(pte, expected_pte))
-			break;
-
-		/*
-		 * Stop immediately once we reached the end of the folio. In
-		 * corner cases the next PFN might fall into a different
-		 * folio.
-		 */
-		if (pte_pfn(pte) >= folio_end_pfn)
-			break;
-
-		if (any_writable)
-			*any_writable |= writable;
-
-		nr = pte_batch_hint(ptep, pte);
-		expected_pte = pte_advance_pfn(expected_pte, nr);
-		ptep += nr;
-	}
-
-	return min(ptep - start_ptep, max_nr);
-}
-
 /*
  * Copy one present PTE, trying to batch-process subsequent PTEs that map
  * consecutive pages of the same folio by copying them as well.
--
2.43.0