Subject: [PATCH v2 1/2] mm/swapfile: Extract resource release operations from the swapoff path
To allow the resource release code in the swapoff path to be reused,
extract some of its operations into separate helper functions.

This is code movement only; there are no functional changes.

del_useless_swap_info():
Remove the given swap_info_struct from the available and active lists,
adjust the priorities of the remaining swap devices, update the global
swap page accounting and clear SWP_WRITEOK.

release_swap_info_memory():
Free the resources attached to the swap_info_struct (swap map, cluster
info, frontswap map, per-CPU structures) and release the backing swap
file or block device.
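
For illustration, an abridged sketch of how the tail of swapoff() reads
once the two helpers are factored out, assembled from the hunks below
(not a standalone compilable unit; the unchanged steps between the two
calls are elided with "..."):

	SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
	{
		struct swap_info_struct *p = NULL;
		...
		/* p looked up for 'specialfile', swap_lock held */
		del_useless_swap_info(p);	/* unlink from lists, stop new allocations */
		spin_unlock(&swap_lock);

		disable_swap_slots_cache_lock();
		...	/* try_to_unuse() and the rest of the teardown, unchanged */

		release_swap_info_memory(p);	/* free maps, release the swap file/bdev */
		...
	}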

Signed-off-by: liubo <liubo254@huawei.com>
---
v2:
Restrict the patch to code movement only; no functional changes.
Former: https://lore.kernel.org/linux-mm/20220528084941.28391-1-liubo254@huawei.com/
---
mm/swapfile.c | 168 +++++++++++++++++++++++++++-----------------------
1 file changed, 91 insertions(+), 77 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 3fa26f6971e9..2ef5e7b4918e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2386,18 +2386,103 @@ bool has_usable_swap(void)
return ret;
}

-SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+static void release_swap_info_memory(struct swap_info_struct *p)
{
- struct swap_info_struct *p = NULL;
unsigned char *swap_map;
struct swap_cluster_info *cluster_info;
unsigned long *frontswap_map;
- struct file *swap_file, *victim;
+ struct file *swap_file;
struct address_space *mapping;
struct inode *inode;
+ unsigned int old_block_size;
+
+ mutex_lock(&swapon_mutex);
+ spin_lock(&swap_lock);
+ spin_lock(&p->lock);
+ drain_mmlist();
+
+ /* wait for anyone still in scan_swap_map_slots */
+ p->highest_bit = 0; /* cuts scans short */
+ while (p->flags >= SWP_SCANNING) {
+ spin_unlock(&p->lock);
+ spin_unlock(&swap_lock);
+ schedule_timeout_uninterruptible(1);
+ spin_lock(&swap_lock);
+ spin_lock(&p->lock);
+ }
+
+ swap_file = p->swap_file;
+ mapping = p->swap_file->f_mapping;
+ old_block_size = p->old_block_size;
+ p->swap_file = NULL;
+ p->max = 0;
+ swap_map = p->swap_map;
+ p->swap_map = NULL;
+ cluster_info = p->cluster_info;
+ p->cluster_info = NULL;
+ frontswap_map = frontswap_map_get(p);
+ spin_unlock(&p->lock);
+ spin_unlock(&swap_lock);
+ arch_swap_invalidate_area(p->type);
+ frontswap_invalidate_area(p->type);
+ frontswap_map_set(p, NULL);
+ mutex_unlock(&swapon_mutex);
+ free_percpu(p->percpu_cluster);
+ p->percpu_cluster = NULL;
+ free_percpu(p->cluster_next_cpu);
+ p->cluster_next_cpu = NULL;
+ vfree(swap_map);
+ kvfree(cluster_info);
+ kvfree(frontswap_map);
+ /* Destroy swap account information */
+ swap_cgroup_swapoff(p->type);
+ exit_swap_address_space(p->type);
+
+ inode = mapping->host;
+ if (S_ISBLK(inode->i_mode)) {
+ struct block_device *bdev = I_BDEV(inode);
+
+ set_blocksize(bdev, old_block_size);
+ blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ }
+
+ inode_lock(inode);
+ inode->i_flags &= ~S_SWAPFILE;
+ inode_unlock(inode);
+ filp_close(swap_file, NULL);
+}
+
+static void del_useless_swap_info(struct swap_info_struct *p)
+{
+ del_from_avail_list(p);
+ spin_lock(&p->lock);
+ if (p->prio < 0) {
+ struct swap_info_struct *si = p;
+ int nid;
+
+ plist_for_each_entry_continue(si, &swap_active_head, list) {
+ si->prio++;
+ si->list.prio--;
+ for_each_node(nid) {
+ if (si->avail_lists[nid].prio != 1)
+ si->avail_lists[nid].prio--;
+ }
+ }
+ least_priority++;
+ }
+ plist_del(&p->list, &swap_active_head);
+ atomic_long_sub(p->pages, &nr_swap_pages);
+ p->flags &= ~SWP_WRITEOK;
+ spin_unlock(&p->lock);
+}
+
+SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+{
+ struct swap_info_struct *p = NULL;
+ struct file *victim;
+ struct address_space *mapping;
struct filename *pathname;
int err, found = 0;
- unsigned int old_block_size;
unsigned int inuse_pages;

if (!capable(CAP_SYS_ADMIN))
@@ -2440,26 +2525,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_unlock(&swap_lock);
goto out_dput;
}
- del_from_avail_list(p);
- spin_lock(&p->lock);
- if (p->prio < 0) {
- struct swap_info_struct *si = p;
- int nid;

- plist_for_each_entry_continue(si, &swap_active_head, list) {
- si->prio++;
- si->list.prio--;
- for_each_node(nid) {
- if (si->avail_lists[nid].prio != 1)
- si->avail_lists[nid].prio--;
- }
- }
- least_priority++;
- }
- plist_del(&p->list, &swap_active_head);
- atomic_long_sub(p->pages, &nr_swap_pages);
- p->flags &= ~SWP_WRITEOK;
- spin_unlock(&p->lock);
+ del_useless_swap_info(p);
spin_unlock(&swap_lock);

disable_swap_slots_cache_lock();
@@ -2497,60 +2564,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
if (!p->bdev || !bdev_nonrot(p->bdev))
atomic_dec(&nr_rotate_swap);

- mutex_lock(&swapon_mutex);
- spin_lock(&swap_lock);
- spin_lock(&p->lock);
- drain_mmlist();
-
- /* wait for anyone still in scan_swap_map_slots */
- p->highest_bit = 0; /* cuts scans short */
- while (p->flags >= SWP_SCANNING) {
- spin_unlock(&p->lock);
- spin_unlock(&swap_lock);
- schedule_timeout_uninterruptible(1);
- spin_lock(&swap_lock);
- spin_lock(&p->lock);
- }
-
- swap_file = p->swap_file;
- old_block_size = p->old_block_size;
- p->swap_file = NULL;
- p->max = 0;
- swap_map = p->swap_map;
- p->swap_map = NULL;
- cluster_info = p->cluster_info;
- p->cluster_info = NULL;
- frontswap_map = frontswap_map_get(p);
- spin_unlock(&p->lock);
- spin_unlock(&swap_lock);
- arch_swap_invalidate_area(p->type);
- frontswap_invalidate_area(p->type);
- frontswap_map_set(p, NULL);
- mutex_unlock(&swapon_mutex);
- free_percpu(p->percpu_cluster);
- p->percpu_cluster = NULL;
- free_percpu(p->cluster_next_cpu);
- p->cluster_next_cpu = NULL;
- vfree(swap_map);
- kvfree(cluster_info);
- kvfree(frontswap_map);
- /* Destroy swap account information */
- swap_cgroup_swapoff(p->type);
- exit_swap_address_space(p->type);
-
- inode = mapping->host;
- if (S_ISBLK(inode->i_mode)) {
- struct block_device *bdev = I_BDEV(inode);
-
- set_blocksize(bdev, old_block_size);
- blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
- }
-
- inode_lock(inode);
- inode->i_flags &= ~S_SWAPFILE;
- inode_unlock(inode);
- filp_close(swap_file, NULL);
-
+ release_swap_info_memory(p);
/*
* Clear the SWP_USED flag after all resources are freed so that swapon
* can reuse this swap_info in alloc_swap_info() safely. It is ok to
--
2.27.0