Subject: Re: [PATCH 4/4] arm64: support batched/deferred tlb shootdown during page reclamation
On Fri, Jul 8, 2022 at 1:50 AM Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Fri, Jul 08, 2022 at 12:52:42AM +1200, Barry Song wrote:
>
> > diff --git a/arch/arm64/include/asm/tlbbatch.h b/arch/arm64/include/asm/tlbbatch.h
> > new file mode 100644
> > index 000000000000..fedb0b87b8db
> > --- /dev/null
> > +++ b/arch/arm64/include/asm/tlbbatch.h
> > @@ -0,0 +1,12 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +#ifndef _ARCH_ARM64_TLBBATCH_H
> > +#define _ARCH_ARM64_TLBBATCH_H
> > +
> > +struct arch_tlbflush_unmap_batch {
> > +	/*
> > +	 * For arm64, HW can do tlb shootdown, so we don't
> > +	 * need to record cpumask for sending IPI
> > +	 */
> > +};
> > +
> > +#endif /* _ARCH_ARM64_TLBBATCH_H */
> > diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> > index 412a3b9a3c25..b3ed163267ca 100644
> > --- a/arch/arm64/include/asm/tlbflush.h
> > +++ b/arch/arm64/include/asm/tlbflush.h
> > @@ -272,6 +272,19 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
> >  	dsb(ish);
> >  }
> >
> > +static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
> > +					struct mm_struct *mm,
> > +					struct vm_area_struct *vma,
> > +					unsigned long uaddr)
> > +{
> > +	flush_tlb_page_nosync(vma, uaddr);
> > +}
>
> You're passing that vma along just to get the mm; that's quite silly and
> trivially fixed.

Yes, this was silly. Will include your fix in v2.

>
>
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index 412a3b9a3c25..87505ecce1f0 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -254,17 +254,23 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
>  	dsb(ish);
>  }
>
> -static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
> -					 unsigned long uaddr)
> +static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
> +					   unsigned long uaddr)
>  {
>  	unsigned long addr;
>
>  	dsb(ishst);
> -	addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
> +	addr = __TLBI_VADDR(uaddr, ASID(mm));
>  	__tlbi(vale1is, addr);
>  	__tlbi_user(vale1is, addr);
>  }
>
> +static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
> +					 unsigned long uaddr)
> +{
> +	return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
> +}
> +
>  static inline void flush_tlb_page(struct vm_area_struct *vma,
>  				  unsigned long uaddr)
>  {
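
With that in place, I suppose the v2 arch_tlbbatch_add_mm() can simply drop
the vma argument and call your new mm-based primitive directly. A rough,
untested sketch of what I'd fold into v2 (the final signature may still
change once the generic caller side stops passing a vma):

/* tentative v2 shape: vma dropped, mm passed straight to the new primitive */
static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm,
					unsigned long uaddr)
{
	__flush_tlb_page_nosync(mm, uaddr);
}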

Thanks
Barry
