Subject: [PATCH v4 13/24] sched: tasks: Use sequence counter with associated spinlock
A sequence counter write side critical section must be protected by some
form of locking to serialize writers. A plain seqcount_t does not record
which lock must be held when entering a write side critical section.

Use the new seqcount_spinlock_t data type, which allows associating a
spinlock with the sequence counter. This enables lockdep to verify that
the spinlock used for writer serialization is held when the write side
critical section is entered.

If lockdep is disabled, this lock association is compiled out and adds
neither storage nor runtime overhead.
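
As an illustration of the pattern this enforces, here is a minimal usage
sketch. It is not part of the patch, the struct and function names are
made up, and it assumes the seqcount_spinlock_t API introduced earlier in
this series (seqcount_spinlock_init() plus the generic
write_seqcount_begin()/end() and read_seqcount_begin()/retry() helpers):

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>

struct foo {
	spinlock_t		lock;	/* serializes writers */
	seqcount_spinlock_t	seq;	/* associated with ->lock */
	u64			a, b;
};

static void foo_init(struct foo *f)
{
	spin_lock_init(&f->lock);
	seqcount_spinlock_init(&f->seq, &f->lock);
}

/* Writer: hold the associated spinlock around the write side section. */
static void foo_update(struct foo *f, u64 a, u64 b)
{
	spin_lock(&f->lock);
	write_seqcount_begin(&f->seq);	/* lockdep asserts f->lock is held */
	f->a = a;
	f->b = b;
	write_seqcount_end(&f->seq);
	spin_unlock(&f->lock);
}

/* Lockless reader: retry if a writer raced with the reads. */
static u64 foo_read_sum(struct foo *f)
{
	unsigned int seq;
	u64 sum;

	do {
		seq = read_seqcount_begin(&f->seq);
		sum = f->a + f->b;
	} while (read_seqcount_retry(&f->seq, seq));

	return sum;
}

With lockdep enabled, entering the write section without holding the
associated spinlock triggers a lockdep splat; with lockdep disabled the
association compiles away entirely.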

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
---
 include/linux/sched.h | 2 +-
 init/init_task.c      | 3 ++-
 kernel/fork.c         | 2 +-
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3903a9500926..02b7fbd17bf6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1054,7 +1054,7 @@ struct task_struct {
 	/* Protected by ->alloc_lock: */
 	nodemask_t			mems_allowed;
 	/* Seqence number to catch updates: */
-	seqcount_t			mems_allowed_seq;
+	seqcount_spinlock_t		mems_allowed_seq;
 	int				cpuset_mem_spread_rotor;
 	int				cpuset_slab_spread_rotor;
 #endif
diff --git a/init/init_task.c b/init/init_task.c
index 15089d15010a..94fe3ba1bb60 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -154,7 +154,8 @@ struct task_struct init_task
 	.trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
 #endif
 #ifdef CONFIG_CPUSETS
-	.mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq),
+	.mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
+						 &init_task.alloc_lock),
 #endif
 #ifdef CONFIG_RT_MUTEXES
 	.pi_waiters = RB_ROOT_CACHED,
diff --git a/kernel/fork.c b/kernel/fork.c
index 70d9d0a4de2a..fc72f09a61b2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2032,7 +2032,7 @@ static __latent_entropy struct task_struct *copy_process(
 #ifdef CONFIG_CPUSETS
 	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
 	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
-	seqcount_init(&p->mems_allowed_seq);
+	seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	p->irq_events = 0;
--
2.20.1