Subject: [PATCH tip/core/rcu 12/14] rcu: Rename n_nocb_gp_requests to need_future_gp
From: "Paul E. McKenney" <paul.mckenney@linaro.org>

CPUs going idle need to be able to indicate their need for future grace
periods. A mechanism for doing this already exists for no-callbacks
CPUs, so the idea is to re-use that mechanism. This commit therefore
moves the ->n_nocb_gp_requests field of the rcu_node structure out from
under the CONFIG_RCU_NOCB_CPU #ifdef and renames it to ->need_future_gp.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
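For reviewers new to the no-CBs code, a quick illustration of the indexing
scheme (below the fold, so it stays out of the commit message). The sketch
is a standalone toy, not kernel code: struct toy_rcu_node, record_request(),
gp_cleanup(), and main() are invented names, and only the need_future_gp
field and the "(completed + 1) & 0x1" slot selection come from the patch
itself. Because a request can only target the grace period following the
most recently completed one, two counters indexed by grace-period parity
suffice:

#include <stdio.h>

/*
 * Standalone toy model of the two-slot grace-period request counter.
 * A request for the grace period after GP number "completed" lands in
 * slot (completed + 1) & 0x1; when a GP ends, its own slot is zeroed
 * and the other slot says whether yet another GP is needed.
 */
struct toy_rcu_node {
	unsigned long completed;	/* number of the last completed GP */
	int need_future_gp[2];		/* requests, indexed by GP parity */
};

/* Record one request for the GP after the most recently completed one. */
static void record_request(struct toy_rcu_node *rnp)
{
	rnp->need_future_gp[(rnp->completed + 1) & 0x1]++;
}

/* GP number c just ended: clear its slot, report whether more work waits. */
static int gp_cleanup(struct toy_rcu_node *rnp, unsigned long c)
{
	int needmore;

	rnp->need_future_gp[c & 0x1] = 0;
	needmore = rnp->need_future_gp[(c + 1) & 0x1];
	rnp->completed = c;
	return needmore;
}

int main(void)
{
	struct toy_rcu_node rnp = { .completed = 4 };

	record_request(&rnp);			/* ask for GP 5 */
	printf("after GP 5: needmore=%d\n", gp_cleanup(&rnp, 5));
	record_request(&rnp);			/* ask for GP 6 */
	record_request(&rnp);			/* and again for GP 6 */
	rnp.need_future_gp[(6 + 1) & 0x1] = 1;	/* pretend GP 7 is wanted too */
	printf("after GP 6: needmore=%d\n", gp_cleanup(&rnp, 6));
	return 0;
}

Running it prints needmore=0 after GP 5 and needmore=1 after GP 6, which
corresponds to the "Cleanup" and "CleanupMore" cases traced in
rcu_nocb_gp_cleanup() below.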
 kernel/rcutree.h        |  4 ++--
 kernel/rcutree_plugin.h | 18 +++++++++---------
 2 files changed, 11 insertions(+), 11 deletions(-)
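Likewise, a toy version of the adjustment that rcu_nocb_wait_gp() makes
when the leaf rcu_node's view of ->completed turns out to be stale (again
standalone and illustrative: toy_rcu_node and promote_request() are
invented names; only the counter manipulation mirrors the patch):

#include <stdio.h>

/* Same toy layout as the sketch above, repeated so this compiles alone. */
struct toy_rcu_node {
	unsigned long completed;
	int need_future_gp[2];
};

/*
 * The leaf counted a request against GP "c" computed from its own,
 * possibly stale, view of ->completed.  If the root disagrees, retract
 * that request and re-count it in the slot for the GP the root will
 * actually run, on both the leaf and the root -- the deliberate
 * double-count that the comment above rcu_nocb_gp_set() refers to.
 */
static void promote_request(struct toy_rcu_node *rnp,
			    struct toy_rcu_node *rnp_root,
			    unsigned long c)
{
	rnp->need_future_gp[c & 0x1]--;		/* retract the stale request */
	c = rnp_root->completed + 1;		/* GP the root will run next */
	rnp->need_future_gp[c & 0x1]++;		/* re-count at the leaf */
	rnp_root->need_future_gp[c & 0x1]++;	/* and at the root */
}

int main(void)
{
	struct toy_rcu_node leaf = { .completed = 4 };	/* stale view */
	struct toy_rcu_node root = { .completed = 6 };	/* root has moved on */
	unsigned long c = leaf.completed + 2;		/* leaf asked for GP 6 */

	leaf.need_future_gp[c & 0x1]++;			/* the original request */
	promote_request(&leaf, &root, c);
	printf("leaf: [%d %d]  root: [%d %d]\n",
	       leaf.need_future_gp[0], leaf.need_future_gp[1],
	       root.need_future_gp[0], root.need_future_gp[1]);
	return 0;
}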

diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 282b1d7..775d96c 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -198,9 +198,9 @@ struct rcu_node {
 #ifdef CONFIG_RCU_NOCB_CPU
 	wait_queue_head_t nocb_gp_wq[2];
 					/* Place for rcu_nocb_kthread() to wait GP. */
-	int n_nocb_gp_requests[2];
-					/* Counts of upcoming no-CB GP requests. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
+	int need_future_gp[2];
+					/* Counts of upcoming no-CB GP requests. */
 	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
 } ____cacheline_internodealigned_in_smp;
 

diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 736dd2c..e4037bd 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2057,7 +2057,7 @@ static int rcu_nocb_needs_gp(struct rcu_state *rsp)
 {
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
-	return rnp->n_nocb_gp_requests[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
+	return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
 }
 
 /*
@@ -2071,8 +2071,8 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 	int needmore;
 
 	wake_up_all(&rnp->nocb_gp_wq[c & 0x1]);
-	rnp->n_nocb_gp_requests[c & 0x1] = 0;
-	needmore = rnp->n_nocb_gp_requests[(c + 1) & 0x1];
+	rnp->need_future_gp[c & 0x1] = 0;
+	needmore = rnp->need_future_gp[(c + 1) & 0x1];
 	trace_rcu_future_grace_period(rsp->name, rnp->gpnum, rnp->completed,
 				      c, rnp->level, rnp->grplo, rnp->grphi,
 				      needmore ? "CleanupMore" : "Cleanup");
@@ -2080,7 +2080,7 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 }
 
 /*
- * Set the root rcu_node structure's ->n_nocb_gp_requests field
+ * Set the root rcu_node structure's ->need_future_gp field
  * based on the sum of those of all rcu_node structures.  This does
  * double-count the root rcu_node structure's requests, but this
  * is necessary to handle the possibility of a rcu_nocb_kthread()
@@ -2089,7 +2089,7 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
  */
 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
 {
-	rnp->n_nocb_gp_requests[(rnp->completed + 1) & 0x1] += nrq;
+	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
 }
 
 static void rcu_init_one_nocb(struct rcu_node *rnp)
@@ -2220,7 +2220,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	c = rnp->completed + 2;
 
 	/* Count our request for a grace period. */
-	rnp->n_nocb_gp_requests[c & 0x1]++;
+	rnp->need_future_gp[c & 0x1]++;
 	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
 				      rnp->completed, c, rnp->level,
 				      rnp->grplo, rnp->grphi, "Startleaf");
@@ -2264,10 +2264,10 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 		 * Adjust counters accordingly and start the
 		 * needed grace period.
 		 */
-		rnp->n_nocb_gp_requests[c & 0x1]--;
+		rnp->need_future_gp[c & 0x1]--;
 		c = rnp_root->completed + 1;
-		rnp->n_nocb_gp_requests[c & 0x1]++;
-		rnp_root->n_nocb_gp_requests[c & 0x1]++;
+		rnp->need_future_gp[c & 0x1]++;
+		rnp_root->need_future_gp[c & 0x1]++;
 		trace_rcu_future_grace_period(rdp->rsp->name,
 					      rnp->gpnum,
 					      rnp->completed,
--
1.7.8

