From: Jason A. Donenfeld <Jason@zx2c4.com>
Subject: [PATCH v13 3/7] x86: mm: Skip faulting instruction for VM_DROPPABLE faults
Date: Wed, 21 Dec 2022
The prior commit introduced VM_DROPPABLE, but in a limited form where
the faulting instruction was retried instead of skipped. Finish that up
with the platform-specific aspect of skipping the actual instruction.

This works by copying the instruction bytes at userspace's %rip into a
stack buffer of size MAX_INSN_SIZE, decoding the instruction, and then
adding the decoded instruction's length to userspace's %rip. If any of
these steps fails, just fall back to not advancing %rip, so that the
faulting instruction is retried.
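
For illustration, here is a minimal userspace sketch of the semantics
(not part of this patch): a SIGSEGV handler advances the saved %rip
past a store to an unmapped page, so the write is dropped and execution
continues. Userspace can cheat with a hand-placed label right after the
store; the kernel has no such label, which is why this patch decodes
the instruction to learn its length. The after_store label and the
handler below are inventions of this demo, x86-64 only:

  /* Demo: drop a faulting store by skipping past it (x86-64). */
  #define _GNU_SOURCE
  #include <signal.h>
  #include <stdio.h>
  #include <sys/mman.h>
  #include <ucontext.h>

  extern const char after_store[];	/* label placed just past the store */

  static void segv_handler(int sig, siginfo_t *info, void *uctx)
  {
  	ucontext_t *uc = uctx;

  	/* Resume after the store, as if the instruction were skipped. */
  	uc->uc_mcontext.gregs[REG_RIP] = (greg_t)after_store;
  }

  int main(void)
  {
  	struct sigaction sa = { .sa_sigaction = segv_handler,
  				.sa_flags = SA_SIGINFO };
  	char *p = mmap(NULL, 4096, PROT_NONE,
  		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  	if (p == MAP_FAILED || sigaction(SIGSEGV, &sa, NULL))
  		return 1;

  	/* This store faults; the handler steers execution to after_store. */
  	asm volatile("movb $1, (%0)\n\t"
  		     ".globl after_store\n"
  		     "after_store:" :: "r"(p) : "memory");

  	puts("store was skipped, still alive");
  	return 0;
  }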

Cc: linux-mm@kvack.org
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
arch/x86/mm/fault.c | 19 +++++++++++++++++++
include/linux/mm_types.h | 5 ++++-
mm/memory.c | 4 +++-
3 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 7b0d4ab894c8..76ca99ab6eb7 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -33,6 +33,8 @@
 #include <asm/kvm_para.h>		/* kvm_handle_async_pf		*/
 #include <asm/vdso.h>			/* fixup_vdso_exception()	*/
 #include <asm/irq_stack.h>
+#include <asm/insn.h>			/* struct insn			*/
+#include <asm/insn-eval.h>		/* insn_fetch_from_user(), ...	*/

 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
@@ -1454,6 +1456,23 @@ void do_user_addr_fault(struct pt_regs *regs,
 	}

 	mmap_read_unlock(mm);
+
+	if (fault & VM_FAULT_SKIP_INSN) {
+		u8 buf[MAX_INSN_SIZE];
+		struct insn insn;
+		int nr_copied;
+
+		nr_copied = insn_fetch_from_user(regs, buf);
+		if (nr_copied <= 0)
+			return;
+
+		if (!insn_decode_from_regs(&insn, regs, buf, nr_copied))
+			return;
+
+		regs->ip += insn.length;
+		return;
+	}
+
 	if (likely(!(fault & VM_FAULT_ERROR)))
 		return;

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3b8475007734..e76ab9ad555c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -945,6 +945,7 @@ typedef __bitwise unsigned int vm_fault_t;
  *				fsync() to complete (for synchronous page faults
  *				in DAX)
  * @VM_FAULT_COMPLETED:	->fault completed, meanwhile mmap lock released
+ * @VM_FAULT_SKIP_INSN:	->handle the fault by skipping faulting instruction
  * @VM_FAULT_HINDEX_MASK:	mask HINDEX value
  *
  */
@@ -962,6 +963,7 @@ enum vm_fault_reason {
 	VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
 	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
 	VM_FAULT_COMPLETED      = (__force vm_fault_t)0x004000,
+	VM_FAULT_SKIP_INSN      = (__force vm_fault_t)0x008000,
 	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
 };

@@ -985,7 +987,8 @@ enum vm_fault_reason {
 	{ VM_FAULT_RETRY,               "RETRY" },	\
 	{ VM_FAULT_FALLBACK,            "FALLBACK" },	\
 	{ VM_FAULT_DONE_COW,            "DONE_COW" },	\
-	{ VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" }
+	{ VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" },	\
+	{ VM_FAULT_SKIP_INSN,           "SKIP_INSN" }

 struct vm_special_mapping {
 	const char *name;	/* The name, e.g. "[vdso]". */
diff --git a/mm/memory.c b/mm/memory.c
index 1ade407ccbf9..62ba9b7b713e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5221,8 +5221,10 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 	lru_gen_exit_fault();

 	/* If the mapping is droppable, then errors due to OOM aren't fatal. */
-	if (vma->vm_flags & VM_DROPPABLE)
+	if ((ret & VM_FAULT_OOM) && (vma->vm_flags & VM_DROPPABLE)) {
 		ret &= ~VM_FAULT_OOM;
+		ret |= VM_FAULT_SKIP_INSN;
+	}

 	if (flags & FAULT_FLAG_USER) {
 		mem_cgroup_exit_user_fault();
--
2.39.0