[SRU][N:gke][PATCH 032/106] arm64/mm: __always_inline to improve fork() perf
Tim Whisonant
tim.whisonant at canonical.com
Mon Jul 21 16:21:15 UTC 2025
From: Ryan Roberts <ryan.roberts at arm.com>
BugLink: https://bugs.launchpad.net/bugs/2059316
BugLink: https://bugs.launchpad.net/bugs/2117098
As set_ptes() and wrprotect_ptes() become a bit more complex, the compiler
may choose not to inline them, yet inlining them is critical for fork()
performance. So mark both functions, along with contpte_try_unfold(), which
they call, as __always_inline. This is worth ~1% on the fork()
microbenchmark with order-0 folios (the common case).
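
For background (not part of the upstream change): __always_inline maps to the
compiler's always_inline attribute, which overrides the inliner's cost model,
whereas plain "inline" is only a hint the compiler may ignore once a function
grows. Below is a minimal, stand-alone C sketch of that difference;
set_one(), set_one_hint() and copy_loop() are hypothetical stand-ins for the
per-PTE helpers touched by this patch, not kernel code, and the #define only
mirrors (roughly) the kernel's own definition.

/*
 * Roughly the kernel's definition (see the compiler attribute headers):
 *   #define __always_inline  inline __attribute__((__always_inline__))
 */
#define __always_inline inline __attribute__((__always_inline__))

/* Hint only: the compiler's cost model may still emit an out-of-line call. */
static inline void set_one_hint(unsigned long *p, unsigned long v)
{
	*p = v;
}

/* Forced: stays inline even if the body grows past the inliner's threshold. */
static __always_inline void set_one(unsigned long *p, unsigned long v)
{
	*p = v;
}

void copy_loop(unsigned long *dst, const unsigned long *src, unsigned int nr)
{
	unsigned int i;

	/*
	 * With set_one() forced inline there is no call/ret per element,
	 * which is the property the fork() PTE-copy loop relies on.
	 */
	for (i = 0; i < nr; i++)
		set_one(&dst[i], src[i]);
}

If set_ptes()/wrprotect_ptes() were left as plain inline and the contpte
handling pushed them past the inliner's size threshold, every PTE copied
during fork() would pay a function call; forcing the inline keeps the
order-0 fast path a simple branch-and-store, which is presumably where the
~1% quoted above comes from.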
Link: https://lkml.kernel.org/r/20240215103205.2607016-18-ryan.roberts@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts at arm.com>
Acked-by: Mark Rutland <mark.rutland at arm.com>
Acked-by: Catalin Marinas <catalin.marinas at arm.com>
Cc: Alistair Popple <apopple at nvidia.com>
Cc: Andrey Ryabinin <ryabinin.a.a at gmail.com>
Cc: Ard Biesheuvel <ardb at kernel.org>
Cc: Barry Song <21cnbao at gmail.com>
Cc: Borislav Petkov (AMD) <bp at alien8.de>
Cc: Dave Hansen <dave.hansen at linux.intel.com>
Cc: David Hildenbrand <david at redhat.com>
Cc: "H. Peter Anvin" <hpa at zytor.com>
Cc: Ingo Molnar <mingo at redhat.com>
Cc: James Morse <james.morse at arm.com>
Cc: John Hubbard <jhubbard at nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang at huawei.com>
Cc: Marc Zyngier <maz at kernel.org>
Cc: Matthew Wilcox (Oracle) <willy at infradead.org>
Cc: Thomas Gleixner <tglx at linutronix.de>
Cc: Will Deacon <will at kernel.org>
Cc: Yang Shi <shy828301 at gmail.com>
Cc: Zi Yan <ziy at nvidia.com>
Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
(cherry picked from commit b972fc6afba002319fe23bc698ce6431ee43868c)
Signed-off-by: dann frazier <dann.frazier at canonical.com>
Acked-by: Brad Figg <bfigg at nvidia.com>
Acked-by: Noah Wager <noah.wager at canonical.com>
Acked-by: Jacob Martin <jacob.martin at canonical.com>
Signed-off-by: Brad Figg <bfigg at nvidia.com>
Signed-off-by: Tim Whisonant <tim.whisonant at canonical.com>
---
arch/arm64/include/asm/pgtable.h | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index d759a20d2929a..8310875133ffc 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1206,8 +1206,8 @@ extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
 					unsigned long addr, pte_t *ptep,
 					pte_t entry, int dirty);
 
-static inline void contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
-					pte_t *ptep, pte_t pte)
+static __always_inline void contpte_try_unfold(struct mm_struct *mm,
+					unsigned long addr, pte_t *ptep, pte_t pte)
 {
 	if (unlikely(pte_valid_cont(pte)))
 		__contpte_try_unfold(mm, addr, ptep, pte);
@@ -1278,7 +1278,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 }
 
 #define set_ptes set_ptes
-static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, unsigned int nr)
 {
 	pte = pte_mknoncont(pte);
@@ -1360,8 +1360,8 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 }
 
 #define wrprotect_ptes wrprotect_ptes
-static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
-				pte_t *ptep, unsigned int nr)
+static __always_inline void wrprotect_ptes(struct mm_struct *mm,
+					unsigned long addr, pte_t *ptep, unsigned int nr)
 {
 	if (likely(nr == 1)) {
 		/*
--
2.43.0