From: Michel Lespinasse <michel@lespinasse.org>
Subject: [PATCH v2 35/35] powerpc/mm: attempt speculative mm faults first
Date: 2022-01-28
Attempt speculative mm fault handling first, and fall back to the
existing (non-speculative) code if that fails.

This follows the lines of the x86 speculative fault handling code,
but with some minor arch differences such as the way that the
access_pkey_error case is handled.
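
In outline, the fast path added below works like this (a condensed
sketch of the hunk in this patch, not the literal code):

	seq = mmap_seq_read_start(mm);	/* snapshot the mmap_lock seqcount */
	if (seq & 1)			/* odd: a writer holds mmap_lock */
		goto spf_abort;
	rcu_read_lock();
	vma = __find_vma(mm, address);	/* VMA lookup without mmap_lock */
	/* ... abort unless vma is a suitable anonymous mapping ... */
	pvma = *vma;			/* work on a stack copy of the VMA */
	rcu_read_unlock();
	if (!mmap_seq_read_check(mm, seq, SPF_ABORT_VMA_COPY))
		goto spf_abort;		/* mmap changed; the copy may be stale */
	fault = do_handle_mm_fault(&pvma, address,
				   flags | FAULT_FLAG_SPECULATIVE, seq, regs);
	if (!(fault & VM_FAULT_RETRY))
		goto done;		/* speculative attempt resolved the fault */
spf_abort:
	/* fall through to the existing mmap_lock slow path */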

Signed-off-by: Michel Lespinasse <michel@lespinasse.org>
---
 arch/powerpc/mm/fault.c | 64 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
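
(Note for reviewers jumping into the series at this patch:
mmap_seq_read_start() and mmap_seq_read_check() come from the mmap_lock
patches earlier in this series. Roughly, and glossing over the exact
barriers and types used there, they behave like the sketch below; the
mm->mmap_seq counter is even while mmap_lock is free and odd while a
writer holds it, which is what the "seq & 1" test in this patch relies
on.)

	/* Rough behaviour only; see the earlier mmap_lock patches for
	 * the real definitions, barriers, and event accounting.
	 */
	static inline unsigned long mmap_seq_read_start(struct mm_struct *mm)
	{
		unsigned long seq = READ_ONCE(mm->mmap_seq);

		smp_rmb();	/* order against the VMA reads that follow */
		return seq;
	}

	static inline bool mmap_seq_read_check(struct mm_struct *mm,
					       unsigned long seq, int fail_event)
	{
		smp_rmb();	/* order the VMA reads against the re-read */
		if (seq == READ_ONCE(mm->mmap_seq))
			return true;
		count_vm_spf_event(fail_event);	/* e.g. SPF_ABORT_VMA_COPY */
		return false;
	}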

diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index eb8ecd7343a9..3f039504e8fd 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -395,6 +395,10 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	int is_write = page_fault_is_write(error_code);
 	vm_fault_t fault, major = 0;
 	bool kprobe_fault = kprobe_page_fault(regs, 11);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	struct vm_area_struct pvma;
+	unsigned long seq;
+#endif
 
 	if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
 		return 0;
@@ -451,6 +455,63 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (is_exec)
 		flags |= FAULT_FLAG_INSTRUCTION;
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	/*
+	 * No need to try speculative faults for kernel or
+	 * single threaded user space.
+	 */
+	if (!(flags & FAULT_FLAG_USER) || atomic_read(&mm->mm_users) == 1)
+		goto no_spf;
+
+	count_vm_event(SPF_ATTEMPT);
+	seq = mmap_seq_read_start(mm);
+	if (seq & 1) {
+		count_vm_spf_event(SPF_ABORT_ODD);
+		goto spf_abort;
+	}
+	rcu_read_lock();
+	vma = __find_vma(mm, address);
+	if (!vma || vma->vm_start > address) {
+		rcu_read_unlock();
+		count_vm_spf_event(SPF_ABORT_UNMAPPED);
+		goto spf_abort;
+	}
+	if (!vma_is_anonymous(vma)) {
+		rcu_read_unlock();
+		count_vm_spf_event(SPF_ABORT_NO_SPECULATE);
+		goto spf_abort;
+	}
+	pvma = *vma;
+	rcu_read_unlock();
+	if (!mmap_seq_read_check(mm, seq, SPF_ABORT_VMA_COPY))
+		goto spf_abort;
+	vma = &pvma;
+#ifdef CONFIG_PPC_MEM_KEYS
+	if (unlikely(access_pkey_error(is_write, is_exec,
+				       (error_code & DSISR_KEYFAULT), vma))) {
+		count_vm_spf_event(SPF_ABORT_ACCESS_ERROR);
+		goto spf_abort;
+	}
+#endif /* CONFIG_PPC_MEM_KEYS */
+	if (unlikely(access_error(is_write, is_exec, vma))) {
+		count_vm_spf_event(SPF_ABORT_ACCESS_ERROR);
+		goto spf_abort;
+	}
+	fault = do_handle_mm_fault(vma, address,
+				   flags | FAULT_FLAG_SPECULATIVE, seq, regs);
+	major |= fault & VM_FAULT_MAJOR;
+
+	if (fault_signal_pending(fault, regs))
+		return user_mode(regs) ? 0 : SIGBUS;
+	if (!(fault & VM_FAULT_RETRY))
+		goto done;
+
+spf_abort:
+	count_vm_event(SPF_ABORT);
+no_spf:
+
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
 	 * kernel and should generate an OOPS. Unfortunately, in the case of an
@@ -522,6 +583,9 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 
 	mmap_read_unlock(current->mm);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+done:
+#endif
 
 	if (unlikely(fault & VM_FAULT_ERROR))
 		return mm_fault_error(regs, address, fault);
--
2.20.1