Subject: Re: [PATCH 3/5] f2fs: shrink node_write lock coverage
On 06/18, Chao Yu wrote:
> - to avoid the race between checkpoint and quota file writeback, it
> is enough to hold the read lock of node_write in the writeback path.
> - the node_write lock currently covers all LFS data write paths, which
> is unnecessary; we only need to hold node_write in the write path of
> quota files.

I've added this:

This refactors commit ca7f76e68074 ("f2fs: fix wrong discard space").
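
For context on why a read lock is enough here: checkpoint is the writer
side of this lock (block_operations() takes node_write as a writer before
flushing nodes, if I'm reading it right), so quota writeback only needs
the reader side to stay out of the checkpoint window, and concurrent
quota writes don't have to exclude each other. Roughly:

	/*
	 * Rough sketch of the two sides of sbi->node_write after this
	 * patch; simplified, not the literal code.
	 */

	/* checkpoint side (block_operations()/unblock_operations()) */
	down_write(&sbi->node_write);	/* keeps quota writeback out */
	/* ... flush node pages, write the checkpoint ... */
	up_write(&sbi->node_write);

	/* quota writeback side (data.c/compress.c in this patch) */
	down_read(&sbi->node_write);	/* waits if a checkpoint is running */
	/* ... allocate blocks and write the quota page ... */
	up_read(&sbi->node_write);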

>
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
> fs/f2fs/compress.c | 18 +++++++++++++++---
> fs/f2fs/data.c | 12 ++++++++++++
> fs/f2fs/segment.c | 11 -----------
> 3 files changed, 27 insertions(+), 14 deletions(-)
>
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 36b51795b0c3..3ff6c0305ec6 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -1096,8 +1096,16 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> loff_t psize;
> int i, err;
>
> - if (!IS_NOQUOTA(inode) && !f2fs_trylock_op(sbi))
> + if (IS_NOQUOTA(inode)) {
> + /*
> + * We need to wait for node_write to avoid block allocation during
> + * checkpoint. This can only happen to quota writes which can cause
> + * the below discard race condition.
> + */
> + down_read(&sbi->node_write);
> + } else if (!f2fs_trylock_op(sbi)) {
> return -EAGAIN;
> + }
>
> set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
>
> @@ -1203,7 +1211,9 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
>
> f2fs_put_dnode(&dn);
> - if (!IS_NOQUOTA(inode))
> + if (IS_NOQUOTA(inode))
> + up_read(&sbi->node_write);
> + else
> f2fs_unlock_op(sbi);
>
> spin_lock(&fi->i_size_lock);
> @@ -1230,7 +1240,9 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> out_put_dnode:
> f2fs_put_dnode(&dn);
> out_unlock_op:
> - if (!IS_NOQUOTA(inode))
> + if (IS_NOQUOTA(inode))
> + up_read(&sbi->node_write);
> + else
> f2fs_unlock_op(sbi);
> return -EAGAIN;
> }
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index c78ce08f6400..cbdf062d3562 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -2719,8 +2719,20 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
>
> /* Dentry/quota blocks are controlled by checkpoint */
> if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
> + /*
> + * We need to wait for node_write to avoid block allocation during
> + * checkpoint. This can only happen to quota writes which can cause
> + * the below discard race condition.
> + */
> + if (IS_NOQUOTA(inode))
> + down_read(&sbi->node_write);
> +
> fio.need_lock = LOCK_DONE;
> err = f2fs_do_write_data_page(&fio);
> +
> + if (IS_NOQUOTA(inode))
> + up_read(&sbi->node_write);
> +
> goto done;
> }
>
> diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
> index 5b2a6f865a6d..cb861ed98ee3 100644
> --- a/fs/f2fs/segment.c
> +++ b/fs/f2fs/segment.c
> @@ -3107,14 +3107,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
> type = CURSEG_COLD_DATA;
> }
>
> - /*
> - * We need to wait for node_write to avoid block allocation during
> - * checkpoint. This can only happen to quota writes which can cause
> - * the below discard race condition.
> - */
> - if (IS_DATASEG(type))
> - down_write(&sbi->node_write);
> -
> down_read(&SM_I(sbi)->curseg_lock);
>
> mutex_lock(&curseg->curseg_mutex);
> @@ -3180,9 +3172,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
>
> up_read(&SM_I(sbi)->curseg_lock);
>
> - if (IS_DATASEG(type))
> - up_write(&sbi->node_write);
> -
> if (put_pin_sem)
> up_read(&sbi->pin_sem);
> }
> --
> 2.18.0.rc1
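
To spell out the coverage change in the second bullet: the net effect is
to move the lock from every LFS data block allocation (taken as a writer
in f2fs_allocate_data_block()) to only quota page writes (taken as
readers around the whole page write). A simplified before/after,
condensed from the hunks above:

	/*
	 * Before: f2fs_allocate_data_block() took node_write as a writer
	 * for every LFS data block allocation.
	 */
	if (IS_DATASEG(type))
		down_write(&sbi->node_write);
	/* ... allocate one block ... */
	if (IS_DATASEG(type))
		up_write(&sbi->node_write);

	/*
	 * After: only quota writes take it, as readers, around the whole
	 * page write (see the data.c/compress.c hunks above).
	 */
	if (IS_NOQUOTA(inode))
		down_read(&sbi->node_write);
	err = f2fs_do_write_data_page(&fio);
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);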
