    Subject: Re: [PATCH v4 3/7] mm/swap: Add swp_offset_pfn() to fetch PFN from swap entry
    Peter Xu <peterx@redhat.com> writes:

    > We've got a bunch of special swap entries that store a PFN inside the
    > swap offset field. To fetch the PFN, users normally just call
    > swp_offset() and assume the result is the PFN.
    >
    > Add a helper swp_offset_pfn() to fetch the PFN instead. It masks off
    > everything beyond the maximum possible PFN width on the host, while a
    > BUILD_BUG_ON() in is_pfn_swap_entry() checks against MAX_PHYSMEM_BITS
    > to make sure the swap offset field can always store a full PFN.
    >
    > One reason to do so is that we never verified whether the swap offset
    > field can really fit a PFN. This patch also prepares for the future
    > possibility of storing more information inside the swp offset field,
    > after which assuming "swp_offset(entry)" to be the PFN will no longer
    > hold.
    >
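    To make the BUILD_BUG_ON() reasoning concrete, here is a minimal
    user-space sketch of the bit math. The constants are hard-coded
    assumptions for x86_64 with 4-level page tables (MAX_PHYSMEM_BITS = 46,
    PAGE_SHIFT = 12, BITS_PER_XA_VALUE = 63, MAX_SWAPFILES_SHIFT = 5);
    other configurations differ:

        /* Sketch only: mirrors the patch's SWP_PFN_BITS / SWP_TYPE_SHIFT
         * relation with hard-coded x86_64 values. */
        #include <assert.h>

        #define PAGE_SHIFT          12
        #define MAX_PHYSMEM_BITS    46   /* x86_64, 4-level page tables */
        #define BITS_PER_XA_VALUE   63   /* BITS_PER_LONG - 1 */
        #define MAX_SWAPFILES_SHIFT 5

        #define SWP_PFN_BITS   (MAX_PHYSMEM_BITS - PAGE_SHIFT)            /* 34 */
        #define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)  /* 58 */

        /* Same condition the kernel checks at build time: the offset
         * field (SWP_TYPE_SHIFT bits wide) must hold a full PFN. */
        static_assert(SWP_TYPE_SHIFT >= SWP_PFN_BITS,
                      "swp offset too narrow to store a PFN");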
    > Replace the swp_offset() callers with swp_offset_pfn() where
    > appropriate. Note that many of the existing users are not candidates
    > for the replacement, e.g.:
    >
    > (1) When the swap entry is not a pfn swap entry at all, or,
    > (2) when we want to keep the whole swp_offset but only change the swp type.
    >
    > The latter can happen when fork() hits a write-migration swap entry
    > pte: we may want to change only the migration type from write to read
    > but keep the rest, so it is "changing the swap type only" rather than
    > "fetching the PFN" (see the sketch below). Those callers are left
    > alone so that when more information is stored within the swp offset
    > it will be carried over naturally in those cases.
    >
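    For reference, the "change the swap type only" case is the pattern used
    on the fork() path, roughly as in mm/memory.c's copy_nonpresent_pte()
    (sketch only, not part of this patch):

        if (is_writable_migration_entry(entry)) {
                /* Downgrade write->read, but keep the whole swp offset
                 * intact, so any extra bits stored besides the PFN are
                 * carried over as-is. */
                entry = make_readable_migration_entry(swp_offset(entry));
                pte = swp_entry_to_pte(entry);
                set_pte_at(src_mm, addr, src_pte, pte);
        }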
    > While at it, drop hwpoison_entry_to_pfn(), since that is exactly what
    > the new swp_offset_pfn() is about.
    >
    > Signed-off-by: Peter Xu <peterx@redhat.com>

    LGTM, Thanks!

    Reviewed-by: "Huang, Ying" <ying.huang@intel.com>

    > ---
    > arch/arm64/mm/hugetlbpage.c |  2 +-
    > fs/proc/task_mmu.c          | 20 +++++++++++++++++---
    > include/linux/swapops.h     | 35 +++++++++++++++++++++++++++++------
    > mm/hmm.c                    |  2 +-
    > mm/memory-failure.c         |  2 +-
    > mm/page_vma_mapped.c        |  6 +++---
    > 6 files changed, 52 insertions(+), 15 deletions(-)
    >
    > diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
    > index 0795028f017c..35e9a468d13e 100644
    > --- a/arch/arm64/mm/hugetlbpage.c
    > +++ b/arch/arm64/mm/hugetlbpage.c
    > @@ -245,7 +245,7 @@ static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
    > {
    > VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));
    >
    > - return page_folio(pfn_to_page(swp_offset(entry)));
    > + return page_folio(pfn_to_page(swp_offset_pfn(entry)));
    > }
    >
    > void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
    > diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
    > index d56c65f98d00..b3e79128fca0 100644
    > --- a/fs/proc/task_mmu.c
    > +++ b/fs/proc/task_mmu.c
    > @@ -1419,9 +1419,19 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
    > if (pte_swp_uffd_wp(pte))
    > flags |= PM_UFFD_WP;
    > entry = pte_to_swp_entry(pte);
    > - if (pm->show_pfn)
    > + if (pm->show_pfn) {
    > + pgoff_t offset;
    > + /*
    > + * For PFN swap offsets, keep the offset field
    > + * as a bare PFN, to be compatible with old smaps.
    > + */
    > + if (is_pfn_swap_entry(entry))
    > + offset = swp_offset_pfn(entry);
    > + else
    > + offset = swp_offset(entry);
    > frame = swp_type(entry) |
    > - (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
    > + (offset << MAX_SWAPFILES_SHIFT);
    > + }
    > flags |= PM_SWAP;
    > migration = is_migration_entry(entry);
    > if (is_pfn_swap_entry(entry))
    > @@ -1478,7 +1488,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
    > unsigned long offset;
    >
    > if (pm->show_pfn) {
    > - offset = swp_offset(entry) +
    > + if (is_pfn_swap_entry(entry))
    > + offset = swp_offset_pfn(entry);
    > + else
    > + offset = swp_offset(entry);
    > + offset = offset +
    > ((addr & ~PMD_MASK) >> PAGE_SHIFT);
    > frame = swp_type(entry) |
    > (offset << MAX_SWAPFILES_SHIFT);
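    For anyone decoding pagemap from user space: the entry layout is
    unchanged by these hunks. A minimal decoder sketch (field layout per
    Documentation/admin-guide/mm/pagemap.rst; the sample value is made up):

        #include <stdint.h>
        #include <stdio.h>

        /* Decode one 64-bit /proc/<pid>/pagemap entry. For a PFN swap
         * entry, the offset field now carries only the PFN bits. */
        static void decode_pagemap_swap(uint64_t ent)
        {
                if ((ent >> 62) & 1) {                  /* PM_SWAP */
                        unsigned int type = ent & 0x1f; /* bits 0-4 */
                        uint64_t offset = (ent >> 5) & ((1ULL << 50) - 1); /* bits 5-54 */
                        printf("swap type %u, offset 0x%llx\n",
                               type, (unsigned long long)offset);
                }
        }

        int main(void)
        {
                decode_pagemap_swap(0x4000000000abc01ULL); /* made-up sample */
                return 0;
        }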
    > diff --git a/include/linux/swapops.h b/include/linux/swapops.h
    > index 3a2901ff4f1e..bd4c6f0c2103 100644
    > --- a/include/linux/swapops.h
    > +++ b/include/linux/swapops.h
    > @@ -23,6 +23,20 @@
    > #define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
    > #define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
    >
    > +/*
    > + * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
    > + * store a PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries
    > + * can use the extra bits to store other information besides PFN.
    > + */
    > +#ifdef MAX_PHYSMEM_BITS
    > +#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
    > +#else /* MAX_PHYSMEM_BITS */
    > +#define SWP_PFN_BITS (BITS_PER_LONG - PAGE_SHIFT)
    > +#endif /* MAX_PHYSMEM_BITS */
    > +#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
    > +
    > +static inline bool is_pfn_swap_entry(swp_entry_t entry);
    > +
    > /* Clear all flags but only keep swp_entry_t related information */
    > static inline pte_t pte_swp_clear_flags(pte_t pte)
    > {
    > @@ -64,6 +78,17 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
    > return entry.val & SWP_OFFSET_MASK;
    > }
    >
    > +/*
    > + * This should only be called upon a pfn swap entry to get the PFN stored
    > + * in the swap entry. Please refer to is_pfn_swap_entry() for the
    > + * definition of a pfn swap entry.
    > + */
    > +static inline unsigned long swp_offset_pfn(swp_entry_t entry)
    > +{
    > + VM_BUG_ON(!is_pfn_swap_entry(entry));
    > + return swp_offset(entry) & SWP_PFN_MASK;
    > +}
    > +
    > /* check whether a pte points to a swap entry */
    > static inline int is_swap_pte(pte_t pte)
    > {
    > @@ -369,7 +394,7 @@ static inline int pte_none_mostly(pte_t pte)
    >
    > static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
    > {
    > - struct page *p = pfn_to_page(swp_offset(entry));
    > + struct page *p = pfn_to_page(swp_offset_pfn(entry));
    >
    > /*
    > * Any use of migration entries may only occur while the
    > @@ -387,6 +412,9 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
    > */
    > static inline bool is_pfn_swap_entry(swp_entry_t entry)
    > {
    > + /* Make sure the swp offset can always store the needed fields */
    > + BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
    > +
    > return is_migration_entry(entry) || is_device_private_entry(entry) ||
    > is_device_exclusive_entry(entry);
    > }
    > @@ -475,11 +503,6 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
    > return swp_type(entry) == SWP_HWPOISON;
    > }
    >
    > -static inline unsigned long hwpoison_entry_to_pfn(swp_entry_t entry)
    > -{
    > - return swp_offset(entry);
    > -}
    > -
    > static inline void num_poisoned_pages_inc(void)
    > {
    > atomic_long_inc(&num_poisoned_pages);
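    A quick way to picture the new helper (hypothetical kernel-internal
    snippet, not part of the patch): packing a PFN into a migration entry
    and reading it back should round-trip:

        swp_entry_t e = make_readable_migration_entry(0x1234);

        /* swp_offset_pfn() masks with SWP_PFN_MASK, so as long as
         * 0x1234 fits in SWP_PFN_BITS this round-trips exactly. */
        VM_BUG_ON(swp_offset_pfn(e) != 0x1234);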
    > diff --git a/mm/hmm.c b/mm/hmm.c
    > index f2aa63b94d9b..3850fb625dda 100644
    > --- a/mm/hmm.c
    > +++ b/mm/hmm.c
    > @@ -253,7 +253,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
    > cpu_flags = HMM_PFN_VALID;
    > if (is_writable_device_private_entry(entry))
    > cpu_flags |= HMM_PFN_WRITE;
    > - *hmm_pfn = swp_offset(entry) | cpu_flags;
    > + *hmm_pfn = swp_offset_pfn(entry) | cpu_flags;
    > return 0;
    > }
    >
    > diff --git a/mm/memory-failure.c b/mm/memory-failure.c
    > index 0dfed9d7b273..e48f6f6a259d 100644
    > --- a/mm/memory-failure.c
    > +++ b/mm/memory-failure.c
    > @@ -632,7 +632,7 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
    > swp_entry_t swp = pte_to_swp_entry(pte);
    >
    > if (is_hwpoison_entry(swp))
    > - pfn = hwpoison_entry_to_pfn(swp);
    > + pfn = swp_offset_pfn(swp);
    > }
    >
    > if (!pfn || pfn != poisoned_pfn)
    > diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
    > index 8e9e574d535a..93e13fc17d3c 100644
    > --- a/mm/page_vma_mapped.c
    > +++ b/mm/page_vma_mapped.c
    > @@ -86,7 +86,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
    > !is_device_exclusive_entry(entry))
    > return false;
    >
    > - pfn = swp_offset(entry);
    > + pfn = swp_offset_pfn(entry);
    > } else if (is_swap_pte(*pvmw->pte)) {
    > swp_entry_t entry;
    >
    > @@ -96,7 +96,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
    > !is_device_exclusive_entry(entry))
    > return false;
    >
    > - pfn = swp_offset(entry);
    > + pfn = swp_offset_pfn(entry);
    > } else {
    > if (!pte_present(*pvmw->pte))
    > return false;
    > @@ -221,7 +221,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
    > return not_found(pvmw);
    > entry = pmd_to_swp_entry(pmde);
    > if (!is_migration_entry(entry) ||
    > - !check_pmd(swp_offset(entry), pvmw))
    > + !check_pmd(swp_offset_pfn(entry), pvmw))
    > return not_found(pvmw);
    > return true;
    > }
