Subject: Re: [PATCH 09/24] rcu/tree: cache specified number of objects
On Tue, Apr 28, 2020 at 10:58:48PM +0200, Uladzislau Rezki (Sony) wrote:
> Cache some extra objects per-CPU. During the reclaim process,
> some pages are cached instead of being released, by linking
> them into a list. This approach provides O(1) access time to
> the cache.
>
> That reduces the number of requests to the page allocator and
> also helps if a low-memory condition occurs.
>
> A parameter reflecting the minimum number of pages allowed to
> be cached per CPU is exposed via sysfs. It is read-only and is
> named "rcu_min_cached_objs".
>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> ---
> kernel/rcu/tree.c | 64 ++++++++++++++++++++++++++++++++++++++++++++---
> 1 file changed, 60 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 89e9ca3f4e3e..d8975819b1c9 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -178,6 +178,14 @@ module_param(gp_init_delay, int, 0444);
> static int gp_cleanup_delay;
> module_param(gp_cleanup_delay, int, 0444);
>
> +/*
> + * This rcu parameter is read-only, but can be write also.

You mean that although the parameter is read-only, you see no reason
why it could not be converted to writeable?

If it were writeable, and a given CPU had the maximum number of cached
objects, the rcu_min_cached_objs value was then decreased, but that CPU
never saw another kfree_rcu(), would the number of cached objects change?

(Just curious, not asking for a change in functionality.)
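(If it ever were made writeable, something like the following entirely
hypothetical helper, invoked from the parameter's set handler, could
drain the excess. This is just a sketch against the structures in this
patch, not a request:

	static void shrink_cached_bnodes(struct kfree_rcu_cpu *krcp)
	{
		struct kfree_rcu_bulk_data *bnode;
		unsigned long flags;

		raw_spin_lock_irqsave(&krcp->lock, flags);
		/* Drop cached pages until the cache honors the new limit. */
		while (krcp->nr_bkv_objs > rcu_min_cached_objs) {
			bnode = (struct kfree_rcu_bulk_data *)
				llist_del_first(&krcp->bkvcache);
			krcp->nr_bkv_objs--;
			free_page((unsigned long) bnode);
		}
		raw_spin_unlock_irqrestore(&krcp->lock, flags);
	}

Absent such a drain, the cache would presumably stay at its old size
until the next kfree_rcu() on that CPU.)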

> + * It reflects the minimum allowed number of objects which
> + * can be cached per-CPU. Object size is equal to one page.
> + */
> +int rcu_min_cached_objs = 2;
> +module_param(rcu_min_cached_objs, int, 0444);
> +
> /* Retrieve RCU kthreads priority for rcutorture */
> int rcu_get_gp_kthreads_prio(void)
> {
> @@ -2887,7 +2895,6 @@ struct kfree_rcu_cpu_work {
> * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
> * @head: List of kfree_rcu() objects not yet waiting for a grace period
> * @bhead: Bulk-List of kfree_rcu() objects not yet waiting for a grace period
> - * @bcached: Keeps at most one object for later reuse when build chain blocks
> * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
> * @lock: Synchronize access to this structure
> * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
> @@ -2902,7 +2909,6 @@ struct kfree_rcu_cpu_work {
> struct kfree_rcu_cpu {
> struct rcu_head *head;
> struct kfree_rcu_bulk_data *bhead;
> - struct kfree_rcu_bulk_data *bcached;
> struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
> raw_spinlock_t lock;
> struct delayed_work monitor_work;
> @@ -2910,6 +2916,15 @@ struct kfree_rcu_cpu {
> bool initialized;
> // Number of objects for which GP not started
> int count;
> +
> + /*
> + * Number of cached objects which are queued into
> + * the lock-less list. This cache is used by the
> + * kvfree_call_rcu() function and as of now its
> + * size is static.
> + */
> + struct llist_head bkvcache;
> + int nr_bkv_objs;
> };
>
> static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
> @@ -2946,6 +2961,31 @@ krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
> local_irq_restore(flags);
> }
>
> +static inline struct kfree_rcu_bulk_data *
> +get_cached_bnode(struct kfree_rcu_cpu *krcp)
> +{
> + if (!krcp->nr_bkv_objs)
> + return NULL;
> +
> + krcp->nr_bkv_objs--;
> + return (struct kfree_rcu_bulk_data *)
> + llist_del_first(&krcp->bkvcache);
> +}
> +
> +static inline bool
> +put_cached_bnode(struct kfree_rcu_cpu *krcp,
> + struct kfree_rcu_bulk_data *bnode)
> +{
> + /* Check the limit. */
> + if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
> + return false;
> +
> + llist_add((struct llist_node *) bnode, &krcp->bkvcache);
> + krcp->nr_bkv_objs++;
> + return true;
> +}
> +
> /*
> * This function is invoked in workqueue context after a grace period.
> * It frees all the objects queued on ->bhead_free or ->head_free.
> @@ -2981,7 +3021,12 @@ static void kfree_rcu_work(struct work_struct *work)
> kfree_bulk(bhead->nr_records, bhead->records);
> rcu_lock_release(&rcu_callback_map);
>
> - if (cmpxchg(&krcp->bcached, NULL, bhead))
> + krcp = krc_this_cpu_lock(&flags);

Presumably the list can also be accessed without holding this lock,
because otherwise we shouldn't need llist...
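(To make that concrete: the value of llist is that llist_add() is safe
from contexts that do not hold ->lock, for example a hypothetical
lockless producer:

	/* No krcp->lock held: llist_add() is atomic wrt other adders. */
	llist_add((struct llist_node *) bnode, &krcp->bkvcache);

If every access instead happens under krc_this_cpu_lock(), as in this
patch, a plain singly linked list would suffice.)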

Thanx, Paul

> + if (put_cached_bnode(krcp, bhead))
> + bhead = NULL;
> + krc_this_cpu_unlock(krcp, flags);
> +
> + if (bhead)
> free_page((unsigned long) bhead);
>
> cond_resched_tasks_rcu_qs();
> @@ -3114,7 +3159,7 @@ kfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp,
> /* Check if a new block is required. */
> if (!krcp->bhead ||
> krcp->bhead->nr_records == KFREE_BULK_MAX_ENTR) {
> - bnode = xchg(&krcp->bcached, NULL);
> + bnode = get_cached_bnode(krcp);
> if (!bnode) {
> WARN_ON_ONCE(sizeof(struct kfree_rcu_bulk_data) > PAGE_SIZE);
>
> @@ -4167,12 +4212,23 @@ static void __init kfree_rcu_batch_init(void)
>
> for_each_possible_cpu(cpu) {
> struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
> + struct kfree_rcu_bulk_data *bnode;
>
> for (i = 0; i < KFREE_N_BATCHES; i++) {
> INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
> krcp->krw_arr[i].krcp = krcp;
> }
>
> + for (i = 0; i < rcu_min_cached_objs; i++) {
> + bnode = (struct kfree_rcu_bulk_data *)
> + __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
> +
> + if (bnode)
> + put_cached_bnode(krcp, bnode);
> + else
> + pr_err("Failed to preallocate for CPU %d!\n", cpu);
> + }
> +
> INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
> krcp->initialized = true;
> }
> --
> 2.20.1
>
