Subject: Re: [PATCH] x86/mm/tlb: ignore f->new_tlb_gen when zero
On 7/7/22 17:30, Nadav Amit wrote:

You might want to fix the clock on the system from which you sent this.
I was really scratching my head trying to figure out how you got this
patch out before Hugh's bug report.

> From: Nadav Amit <namit@vmware.com>
>
> Commit aa44284960d5 ("x86/mm/tlb: Avoid reading mm_tlb_gen when
> possible") introduced an optimization of skipping the flush if the TLB
> generation that is flushed (as provided in flush_tlb_info) was already
> flushed.
>
> However, arch_tlbbatch_flush() does not provide any generation in
> flush_tlb_info. As a result, try_to_unmap_one() would not perform any
> TLB flushes.
>
> Fix it by checking whether f->new_tlb_gen is nonzero. A zero value is
> anyhow an invalid generation value.

It is, but the check below uses 'f->end == TLB_FLUSH_ALL' as the marker
for f->new_tlb_gen being invalid. Being consistent seems like a good
idea on this stuff.
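
Or, if we keep the zero check, at least give the sentinel a name so
it's greppable and self-documenting. Just a sketch, not tested, and
the TLB_GENERATION_INVALID name is made up here:

	/* hypothetical: name the "no generation provided" sentinel */
	#define TLB_GENERATION_INVALID	0

	if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
		     f->new_tlb_gen <= local_tlb_gen))
		goto done;

arch_tlbbatch_flush() would then set f->new_tlb_gen =
TLB_GENERATION_INVALID explicitly instead of leaning on
zero-initialization.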

> In addition, add the missing unlikely() and jump to get tracing right.

There are currently five routes out of flush_tlb_func():
* Three early returns
* One 'goto done'
* One implicit return

The tracing code doesn't get run for any of the early returns, but
that's intentional because they don't *actually* flush the TLB. We
don't want to record that flush_tlb_func() flushed the TLB when it
didn't. There's another tracepoint on the TLB_REMOTE_SEND_IPI side to
tell where the flushes were requested.
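
(For reference, that one fires on the request side in
native_flush_tlb_multi(), roughly:

	if (info->end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);

so the request is still visible even when the receiving CPU ends up
skipping the flush.)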

That said, I think the

if (unlikely(local_tlb_gen == mm_tlb_gen))
goto done;

is arguably buggy, as is the 'goto done' in this hunk:

> arch/x86/mm/tlb.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
> index d9314cc8b81f..d81b4084bb8a 100644
> --- a/arch/x86/mm/tlb.c
> +++ b/arch/x86/mm/tlb.c
> @@ -771,14 +771,14 @@ static void flush_tlb_func(void *info)
> return;
> }
>
> - if (f->new_tlb_gen <= local_tlb_gen) {
> + if (unlikely(f->new_tlb_gen != 0 && f->new_tlb_gen <= local_tlb_gen)) {
> /*
> * The TLB is already up to date in respect to f->new_tlb_gen.
> * While the core might be still behind mm_tlb_gen, checking
> * mm_tlb_gen unnecessarily would have negative caching effects
> * so avoid it.
> */
> - return;
> + goto done;
> }
>
> /*

Both of those paths fall through to the trace_tlb_flush() call at the
done: label and record a shootdown even though nothing was actually
flushed.

We might want to (eventually) think about doing something like the
attached patch to make the skipped flushes explicit in the tracing and
make the return paths out of this function more consistent.

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index d400b6d9d246..44ba73601f50 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -720,7 +720,7 @@ void initialize_tlbstate_and_flush(void)
* because all x86 flush operations are serializing and the
* atomic64_read operation won't be reordered by the compiler.
*/
static void flush_tlb_func(void *info)
{
/*
* We have three different tlb_gen values in here. They are:
@@ -738,6 +738,7 @@ static void flush_tlb_func(void *info)
u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
bool local = smp_processor_id() == f->initiating_cpu;
unsigned long nr_invalidate = 0;
+ enum tlb_flush_reason flush_reason;

/* This code cannot presently handle being reentered. */
VM_WARN_ON(!irqs_disabled());
@@ -747,12 +748,21 @@ static void flush_tlb_func(void *info)
if (!local) {
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);

/* Can only happen on remote CPUs */
- if (f->mm && f->mm != loaded_mm)
- return;
- }
+ if (f->mm && f->mm != loaded_mm) {
+ flush_reason = TLB_FLUSH_SKIPPED;
+ goto done;
+ }
+ flush_reason = TLB_REMOTE_SHOOTDOWN;
+ } else if (f->mm == NULL) {
+ flush_reason = TLB_LOCAL_SHOOTDOWN;
+ } else {
+ flush_reason = TLB_LOCAL_MM_SHOOTDOWN;
+ }

- if (unlikely(loaded_mm == &init_mm))
- return;
+ if (unlikely(loaded_mm == &init_mm)) {
+ flush_reason = TLB_FLUSH_SKIPPED;
+ goto done;
+ }

VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
loaded_mm->context.ctx_id);
@@ -768,7 +778,8 @@ static void flush_tlb_func(void *info)
* IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
- return;
+ flush_reason = TLB_FLUSH_SKIPPED;
+ goto done;
}

if (unlikely(local_tlb_gen == mm_tlb_gen)) {
@@ -778,6 +789,7 @@ static void flush_tlb_func(void *info)
* be handled can catch us all the way up, leaving no work for
* the second flush.
*/
+ flush_reason = TLB_FLUSH_SKIPPED;
goto done;
}

@@ -849,10 +861,7 @@ static void flush_tlb_func(void *info)

/* Tracing is done in a unified manner to reduce the code size */
done:
- trace_tlb_flush(!local ? TLB_REMOTE_SHOOTDOWN :
- (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN :
- TLB_LOCAL_MM_SHOOTDOWN,
- nr_invalidate);
+ trace_tlb_flush(flush_reason, nr_invalidate);
}

static bool tlb_is_not_lazy(int cpu, void *data)
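
One more note on the sketch above: TLB_FLUSH_SKIPPED doesn't exist
today, so it would need a companion hunk along these lines
(hypothetical, untested) in include/linux/mm_types.h, plus a matching
entry in the TLB_FLUSH_REASON string table used by the tracepoint:

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	TLB_FLUSH_SKIPPED,	/* flush_tlb_func() bailed out without flushing */
	NR_TLB_FLUSH_REASONS,
};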