ACK: [Zesty PATCH 1/2] UBUNTU: SAUCE: s390/mm: fix local TLB flushing vs. detach of an mm address space
Colin Ian King
colin.king at canonical.com
Tue Sep 12 12:01:23 UTC 2017
On 12/09/17 10:45, Stefan Bader wrote:
> From: Martin Schwidefsky <schwidefsky at de.ibm.com>
>
> BugLink: http://bugs.launchpad.net/bugs/1708399
>
> The local TLB flushing code keeps an additional mask in the mm.context,
> the cpu_attach_mask. When a global flush of an address space is done,
> the cpu_attach_mask is copied to the mm_cpumask in order to avoid
> future global flushes in case the mm is used by only a single CPU
> after the flush.
>
> The trouble is that the reset of the mm_cpumask races with the detach
> of an mm address space by switch_mm. Currently the global TLB flush is
> done first and the cpu_attach_mask is copied to the mm_cpumask
> afterwards; the order needs to be the other way around.
>
> Cc: <stable at vger.kernel.org>
> Reviewed-by: Heiko Carstens <heiko.carstens at de.ibm.com>
> Signed-off-by: Martin Schwidefsky <schwidefsky at de.ibm.com>
> (cherry picked from commit b3e5dc45fd1ec2aa1de6b80008f9295eb17e0659 linux-next)
> Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
> ---
> arch/s390/include/asm/mmu_context.h | 4 ++--
> arch/s390/include/asm/tlbflush.h | 26 +++++---------------------
> 2 files changed, 7 insertions(+), 23 deletions(-)
>
> diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
> index 72e9ca8..8823e35 100644
> --- a/arch/s390/include/asm/mmu_context.h
> +++ b/arch/s390/include/asm/mmu_context.h
> @@ -103,7 +103,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
> if (prev == next)
> return;
> cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
> - cpumask_set_cpu(cpu, mm_cpumask(next));
> /* Clear old ASCE by loading the kernel ASCE. */
> __ctl_load(S390_lowcore.kernel_asce, 1, 1);
> __ctl_load(S390_lowcore.kernel_asce, 7, 7);
> @@ -121,7 +120,7 @@ static inline void finish_arch_post_lock_switch(void)
> preempt_disable();
> while (atomic_read(&mm->context.flush_count))
> cpu_relax();
> -
> + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
> if (mm->context.flush_mm)
> __tlb_flush_mm(mm);
> preempt_enable();
> @@ -136,6 +135,7 @@ static inline void activate_mm(struct mm_struct *prev,
> struct mm_struct *next)
> {
> switch_mm(prev, next, current);
> + cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
> set_user_asce(next);
> }
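
Moving the mm_cpumask update out of switch_mm() and behind the
flush_count wait looks right to me: the attaching CPU only publishes
itself once no flush (and thus no cpumask_copy) is in flight. A minimal
userspace sketch of that handshake, with invented names (flush_count,
mm_mask, attach_cpu) and C11 atomics standing in for the kernel
primitives:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int flush_count;  /* models mm->context.flush_count */
    static atomic_ulong mm_mask;    /* models mm_cpumask(mm) */

    /* Attach side, as in finish_arch_post_lock_switch(): wait until no
     * flush is in progress, then publish this CPU in the mask. The
     * kernel additionally runs this with preemption disabled and does a
     * local flush afterwards if one was pending. */
    static void attach_cpu(int cpu)
    {
            while (atomic_load(&flush_count))
                    ;  /* kernel: cpu_relax() */
            atomic_fetch_or(&mm_mask, 1UL << cpu);
    }

    int main(void)
    {
            attach_cpu(1);
            printf("mm_mask = %#lx\n", atomic_load(&mm_mask));
            return 0;
    }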
>
> diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
> index 4d759f8..16fe2a3 100644
> --- a/arch/s390/include/asm/tlbflush.h
> +++ b/arch/s390/include/asm/tlbflush.h
> @@ -48,23 +48,6 @@ static inline void __tlb_flush_global(void)
> * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
> * this implicates multiple ASCEs!).
> */
> -static inline void __tlb_flush_full(struct mm_struct *mm)
> -{
> - preempt_disable();
> - atomic_inc(&mm->context.flush_count);
> - if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
> - /* Local TLB flush */
> - __tlb_flush_local();
> - } else {
> - /* Global TLB flush */
> - __tlb_flush_global();
> - /* Reset TLB flush mask */
> - cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
> - }
> - atomic_dec(&mm->context.flush_count);
> - preempt_enable();
> -}
> -
> static inline void __tlb_flush_mm(struct mm_struct *mm)
> {
> unsigned long gmap_asce;
> @@ -76,16 +59,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
> */
> preempt_disable();
> atomic_inc(&mm->context.flush_count);
> + /* Reset TLB flush mask */
> + cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
> + barrier();
> gmap_asce = READ_ONCE(mm->context.gmap_asce);
> if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
> if (gmap_asce)
> __tlb_flush_idte(gmap_asce);
> __tlb_flush_idte(mm->context.asce);
> } else {
> - __tlb_flush_full(mm);
> + /* Global TLB flush */
> + __tlb_flush_global();
> }
> - /* Reset TLB flush mask */
> - cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
> atomic_dec(&mm->context.flush_count);
> preempt_enable();
> }
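
One detail worth calling out: the barrier() between the cpumask_copy()
and the flush is a pure compiler barrier (an empty asm with a "memory"
clobber), so it only keeps the compiler from sinking the mask reset
below the flush sequence; it emits no instruction and no CPU fence. A
trivial standalone illustration of that form, with made-up variable
names:

    #include <stdio.h>

    /* Same shape as the kernel's barrier(): forbids the compiler from
     * moving memory accesses across it, nothing more. */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    static unsigned long mask;
    static int flush_requested;

    int main(void)
    {
            mask = 0x3UL;           /* step 1: reset the mask */
            barrier();              /* step 2 may not be hoisted above step 1 */
            flush_requested = 1;    /* step 2: kick off the flush */
            printf("mask=%#lx flush_requested=%d\n", mask, flush_requested);
            return 0;
    }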
> @@ -99,7 +84,6 @@ static inline void __tlb_flush_kernel(void)
> }
> #else
> #define __tlb_flush_global() __tlb_flush_local()
> -#define __tlb_flush_full(mm) __tlb_flush_local()
>
> /*
> * Flush TLB entries for a specific ASCE on all CPUs.
>
Looks good to me.
Acked-by: Colin Ian King <colin.king at canonical.com>