Date: 2020-04-14
Subject: Re: [PATCH 1/2] sched: eliminate bandwidth race between throttling and distribution
On Fri, Apr 10, 2020 at 03:52:07PM -0700, Josh Don wrote:

> -/* returns 0 on failure to allocate runtime */
> +/* returns 0 on failure to allocate runtime, called with cfs_b->lock held */

That's a gross mis-spelling of lockdep_assert_held(); and since I was
editing things anyway it now looks like so:

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4587,11 +4587,13 @@ static inline struct cfs_bandwidth *tg_c
 	return &tg->cfs_bandwidth;
 }
 
-/* returns 0 on failure to allocate runtime, called with cfs_b->lock held */
+/* returns 0 on failure to allocate runtime */
 static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b,
 				   struct cfs_rq *cfs_rq, u64 target_runtime)
 {
-	u64 amount = 0, min_amount;
+	u64 min_amount, amount = 0;
+
+	lockdep_assert_held(&cfs_b->lock);
 
 	/* note: this is a positive sum as runtime_remaining <= 0 */
 	min_amount = target_runtime - cfs_rq->runtime_remaining;
@@ -4616,12 +4618,11 @@ static int __assign_cfs_rq_runtime(struc
 /* returns 0 on failure to allocate runtime */
 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
-	int ret;
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+	int ret;
 
 	raw_spin_lock(&cfs_b->lock);
-	ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq,
-				      sched_cfs_bandwidth_slice());
+	ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
 	raw_spin_unlock(&cfs_b->lock);
 
 	return ret;
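
For anyone not fluent in lockdep: the point is that a "called with X held"
comment is a promise nobody checks, while lockdep_assert_held() turns the
same promise into a runtime assertion; it splats under CONFIG_PROVE_LOCKING
and compiles away when lockdep is off. Below is a rough userspace sketch of
the idea, with made-up names (lock_b, assert_b_lock_held, do_protected_work)
that are not the kernel API; real lockdep does much more, including tracking
lock ordering.

/*
 * Userspace analogue of documenting a locking precondition with an
 * assertion instead of a comment. All names here are hypothetical.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t b_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t b_lock_owner;	/* valid only while b_lock_held is set */
static int b_lock_held;		/* written under b_lock */

static void lock_b(void)
{
	pthread_mutex_lock(&b_lock);
	b_lock_owner = pthread_self();
	b_lock_held = 1;
}

static void unlock_b(void)
{
	b_lock_held = 0;
	pthread_mutex_unlock(&b_lock);
}

/*
 * Analogue of lockdep_assert_held(&b_lock). Simplified: the read on the
 * failure path is itself racy, which real lockdep avoids.
 */
static void assert_b_lock_held(void)
{
	assert(b_lock_held && pthread_equal(b_lock_owner, pthread_self()));
}

/* Instead of a "called with b_lock held" comment, assert it. */
static int do_protected_work(int *counter)
{
	assert_b_lock_held();
	return ++*counter;
}

int main(void)
{
	int counter = 0;

	lock_b();
	printf("counter=%d\n", do_protected_work(&counter));
	unlock_b();

	return 0;
}

Call do_protected_work() without lock_b() and the assert fires immediately,
which is exactly the failure mode a comment can never catch.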