    Subject: Re: [PATCH 2/2] block: move request_queue member docs to kdoc
    On 2020/06/24 7:03, Luis Chamberlain wrote:
    > Now that we have a template, expand on the kdoc form for
    > the request_queue data structure with documentation from
    > the rest of the members, *only* for information we already
    > had.
    >
    > This does not add any new documentation. This just shifts
    > documentation to kdoc form.
    >
    > Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
    > ---
    > include/linux/blkdev.h | 139 ++++++++++++-----------------------------
    > 1 file changed, 40 insertions(+), 99 deletions(-)
    >
    > diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
    > index ea319c2b0593..d30bfef893b9 100644
    > --- a/include/linux/blkdev.h
    > +++ b/include/linux/blkdev.h
    > @@ -396,8 +396,38 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
    >
    > /**
    > * struct request_queue - block device driver request queue
    > + * @queue_ctx: software queue context
    > + * @queue_hw_ctx: hw dispatch queues
    > + * @queuedata: the queue owner gets to use this for whatever they like.
    > + * ll_rw_blk doesn't touch it.
    > + * @queue_flags: various queue flags, see %QUEUE_* below
    > + * @pm_only: Number of contexts that have called blk_set_pm_only(). If this
    > + * counter is above zero then only %RQF_PM and %RQF_PREEMPT requests are
    > + * processed.
    > + * @id: ida allocated id for this queue. Used to index queues from ioctx.
    > + * @bounce_gfp: queue needs bounce pages for pages above this limit
    > + * @kobj: queue kobject
    > + * @mq_kobj: mq queue kobject
    > + * @nr_requests: maximum number of requests
    > + * @ksm: Inline crypto capabilities
    > + * @nr_zones:

    Above line is not needed, no ?

    > + * @nr_zones: total number of zones of the device. This is always 0 for regular
    > + * block devices.

    Maybe: "total number of zones for a zoned block device. This is..."

    > + * @conv_zones_bitmap: bitmap of nr_zones bits which indicates if a zone
    > + * is conventional (bit set) or sequential (bit clear).
    > + * @seq_zones_wlock: bitmap of nr_zones bits which indicates if a zone
    > + * is write locked, that is, if a write request targeting the zone was
    > + * dispatched.
    > + * @debugfs_mutex: used to protect access to the @debugfs_dir
    > * @debugfs_mutex: used to protect access to the @debugfs_dir
    > * @blk_trace: used by blktrace to keep track of setup / tracing
    > + * @fq: for flush operations
    > + * @td: throttle data
    > + * @unused_hctx_list: list used for reusing dead hctx instance in case of
    > + * updating nr_hw_queues.
    > + * @unused_hctx_lock: used to protect the @unused_hctx_list
    > + * @mq_freeze_lock: protects concurrent access to q_usage_counter by
    > + * percpu_ref_kill() and percpu_ref_reinit().
    > * @debugfs_dir: directory created to place debugfs information. This is always
    > * created for make_request and request-based block drivers upon
    > * initialization. blktrace requires for this directory to be created,
    > @@ -413,67 +443,35 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
    > * o custom solutions such as scsi-generic
    > *
    > * All partitions share the same request_queue data structure.
    > + *
    > + * Zoned block device dispatch control is managed by the fields @nr_zones,
    > + * @conv_zones_bitmap and @seq_zones_wlock. These fields are
    > + * initialized by the low level device driver (e.g. scsi/sd.c). Stacking
    > + * drivers (device mappers) may or may not initialize these fields.
    > + * Reads of this information must be protected with blk_queue_enter() /
    > + * blk_queue_exit(). Modifying this information is only allowed while
    > + * no requests are being processed. See also blk_mq_freeze_queue() and
    > + * blk_mq_unfreeze_queue().

    Ooops... This is outdated... This should be:

    * Dispatch of write commands to zoned block devices is controlled using the
    * fields @nr_zones, @conv_zones_bitmap and @seq_zones_wlock. These fields are
    * initialized using the blk_revalidate_disk_zones() function from the low level
    * request based device drivers (e.g. scsi/sd.c). BIO based drivers, if needed,
    * can initialize these fields directly. Accesses to these fields must be
    * protected with blk_queue_enter() / blk_queue_exit(). Modification of the
    * @nr_zones field and reallocation of the @conv_zones_bitmap and
    * @seq_zones_wlock bitmaps is only allowed while no requests are being
    * processed. See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
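
    To make those access rules concrete, here is a rough sketch (illustration
    only, not part of this patch; the helper names are made up) of how a reader
    of the zone bitmaps and the code installing new bitmaps would follow them,
    assuming the usual <linux/blkdev.h> context:

    static bool example_zone_is_conventional(struct request_queue *q,
    					     unsigned int zno)
    {
    	bool conv = false;

    	/* Readers hold a queue reference across the bitmap access. */
    	if (blk_queue_enter(q, 0))
    		return false;
    	if (q->conv_zones_bitmap && zno < q->nr_zones)
    		conv = test_bit(zno, q->conv_zones_bitmap);
    	blk_queue_exit(q);

    	return conv;
    }

    static void example_install_zone_bitmaps(struct request_queue *q,
    					     unsigned int nr_zones,
    					     unsigned long *conv_bitmap,
    					     unsigned long *wlock_bitmap)
    {
    	/*
    	 * Updating @nr_zones and reallocating the bitmaps is only
    	 * allowed while no requests are in flight, i.e. with the
    	 * queue frozen.
    	 */
    	blk_mq_freeze_queue(q);
    	q->nr_zones = nr_zones;
    	swap(q->conv_zones_bitmap, conv_bitmap);
    	swap(q->seq_zones_wlock, wlock_bitmap);
    	blk_mq_unfreeze_queue(q);

    	/* The old bitmaps (now in the local pointers) can be freed. */
    	kfree(conv_bitmap);
    	kfree(wlock_bitmap);
    }

    If I remember correctly, the freeze/swap part is essentially what
    blk_revalidate_disk_zones() does when it installs the bitmaps it built.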

    > */
    > struct request_queue {
    > struct request *last_merge;
    > struct elevator_queue *elevator;
    > -
    > struct blk_queue_stats *stats;
    > struct rq_qos *rq_qos;
    > -
    > make_request_fn *make_request_fn;
    > -
    > const struct blk_mq_ops *mq_ops;
    > -
    > - /* sw queues */
    > struct blk_mq_ctx __percpu *queue_ctx;
    > -
    > unsigned int queue_depth;
    > -
    > - /* hw dispatch queues */
    > struct blk_mq_hw_ctx **queue_hw_ctx;
    > unsigned int nr_hw_queues;
    > -
    > struct backing_dev_info *backing_dev_info;
    > -
    > - /*
    > - * The queue owner gets to use this for whatever they like.
    > - * ll_rw_blk doesn't touch it.
    > - */
    > void *queuedata;
    > -
    > - /*
    > - * various queue flags, see QUEUE_* below
    > - */
    > unsigned long queue_flags;
    > - /*
    > - * Number of contexts that have called blk_set_pm_only(). If this
    > - * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
    > - * processed.
    > - */
    > atomic_t pm_only;
    > -
    > - /*
    > - * ida allocated id for this queue. Used to index queues from
    > - * ioctx.
    > - */
    > int id;
    > -
    > - /*
    > - * queue needs bounce pages for pages above this limit
    > - */
    > gfp_t bounce_gfp;
    > -
    > spinlock_t queue_lock;
    > -
    > - /*
    > - * queue kobject
    > - */
    > struct kobject kobj;
    > -
    > - /*
    > - * mq queue kobject
    > - */
    > struct kobject *mq_kobj;
    >
    > #ifdef CONFIG_BLK_DEV_INTEGRITY
    > @@ -485,66 +483,32 @@ struct request_queue {
    > int rpm_status;
    > unsigned int nr_pending;
    > #endif
    > -
    > - /*
    > - * queue settings
    > - */
    > - unsigned long nr_requests; /* Max # of requests */
    > -
    > + unsigned long nr_requests;
    > unsigned int dma_pad_mask;
    > unsigned int dma_alignment;
    > -
    > #ifdef CONFIG_BLK_INLINE_ENCRYPTION
    > - /* Inline crypto capabilities */
    > struct blk_keyslot_manager *ksm;
    > #endif
    > -
    > unsigned int rq_timeout;
    > int poll_nsec;
    > -
    > struct blk_stat_callback *poll_cb;
    > struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
    > -
    > struct timer_list timeout;
    > struct work_struct timeout_work;
    > -
    > struct list_head icq_list;
    > #ifdef CONFIG_BLK_CGROUP
    > DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
    > struct blkcg_gq *root_blkg;
    > struct list_head blkg_list;
    > #endif
    > -
    > struct queue_limits limits;
    > -
    > unsigned int required_elevator_features;
    > -
    > #ifdef CONFIG_BLK_DEV_ZONED
    > - /*
    > - * Zoned block device information for request dispatch control.
    > - * nr_zones is the total number of zones of the device. This is always
    > - * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
    > - * bits which indicates if a zone is conventional (bit set) or
    > - * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
    > - * bits which indicates if a zone is write locked, that is, if a write
    > - * request targeting the zone was dispatched. All three fields are
    > - * initialized by the low level device driver (e.g. scsi/sd.c).
    > - * Stacking drivers (device mappers) may or may not initialize
    > - * these fields.
    > - *
    > - * Reads of this information must be protected with blk_queue_enter() /
    > - * blk_queue_exit(). Modifying this information is only allowed while
    > - * no requests are being processed. See also blk_mq_freeze_queue() and
    > - * blk_mq_unfreeze_queue().
    > - */
    > unsigned int nr_zones;
    > unsigned long *conv_zones_bitmap;
    > unsigned long *seq_zones_wlock;
    > #endif /* CONFIG_BLK_DEV_ZONED */
    >
    > - /*
    > - * sg stuff
    > - */
    > unsigned int sg_timeout;
    > unsigned int sg_reserved_size;
    > int node;
    > @@ -552,59 +516,36 @@ struct request_queue {
    > #ifdef CONFIG_BLK_DEV_IO_TRACE
    > struct blk_trace __rcu *blk_trace;
    > #endif
    > - /*
    > - * for flush operations
    > - */
    > struct blk_flush_queue *fq;
    > -
    > struct list_head requeue_list;
    > spinlock_t requeue_lock;
    > struct delayed_work requeue_work;
    > -
    > struct mutex sysfs_lock;
    > struct mutex sysfs_dir_lock;
    > -
    > - /*
    > - * for reusing dead hctx instance in case of updating
    > - * nr_hw_queues
    > - */
    > struct list_head unused_hctx_list;
    > spinlock_t unused_hctx_lock;
    > -
    > int mq_freeze_depth;
    > -
    > #if defined(CONFIG_BLK_DEV_BSG)
    > struct bsg_class_device bsg_dev;
    > #endif
    >
    > #ifdef CONFIG_BLK_DEV_THROTTLING
    > - /* Throttle data */
    > struct throtl_data *td;
    > #endif
    > struct rcu_head rcu_head;
    > wait_queue_head_t mq_freeze_wq;
    > - /*
    > - * Protect concurrent access to q_usage_counter by
    > - * percpu_ref_kill() and percpu_ref_reinit().
    > - */
    > struct mutex mq_freeze_lock;
    > struct percpu_ref q_usage_counter;
    > -
    > struct blk_mq_tag_set *tag_set;
    > struct list_head tag_set_list;
    > struct bio_set bio_split;
    > -
    > struct dentry *debugfs_dir;
    > -
    > #ifdef CONFIG_BLK_DEBUG_FS
    > struct dentry *sched_debugfs_dir;
    > struct dentry *rqos_debugfs_dir;
    > #endif
    > -
    > bool mq_sysfs_init_done;
    > -
    > size_t cmd_size;
    > -
    > #define BLK_MAX_WRITE_HINTS 5
    > u64 write_hints[BLK_MAX_WRITE_HINTS];
    > };
    >


    --
    Damien Le Moal
    Western Digital Research
