    Subject: [tip: locking/core] seqlock: seqcount latch APIs: Only allow seqcount_latch_t
    The following commit has been merged into the locking/core branch of tip:

    Commit-ID: 0c9794c8b6781eb7dad8e19b78c5d4557790597a
    Gitweb: https://git.kernel.org/tip/0c9794c8b6781eb7dad8e19b78c5d4557790597a
    Author: Ahmed S. Darwish <a.darwish@linutronix.de>
    AuthorDate: Thu, 27 Aug 2020 13:40:44 +02:00
    Committer: Peter Zijlstra <peterz@infradead.org>
    CommitterDate: Thu, 10 Sep 2020 11:19:30 +02:00

    seqlock: seqcount latch APIs: Only allow seqcount_latch_t

    All latch sequence counter call-sites have now been converted from plain
    seqcount_t to the new seqcount_latch_t data type.

    Enforce type-safety by modifying seqlock.h latch APIs to only accept
    seqcount_latch_t.

    Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Link: https://lkml.kernel.org/r/20200827114044.11173-9-a.darwish@linutronix.de
    ---
    include/linux/seqlock.h | 36 +++++++++++++++---------------------
    1 file changed, 15 insertions(+), 21 deletions(-)
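
    To illustrate the enforcement described above (a hypothetical call site, not part of the patch): with the _Generic() dispatch removed, the latch helpers take a seqcount_latch_t pointer only, so a caller still passing a plain seqcount_t or seqcount_raw_spinlock_t hits an incompatible-pointer-type error at build time.

    #include <linux/seqlock.h>

    /* Hypothetical demo state, for illustration only. */
    static seqcount_latch_t demo_latch;

    static unsigned demo_read_sequence(void)
    {
            /*
             * OK: the latch helpers now accept seqcount_latch_t only.
             * Passing a plain seqcount_t or seqcount_raw_spinlock_t
             * pointer here would no longer build.
             */
            return raw_read_seqcount_latch(&demo_latch);
    }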

    diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
    index 88b917d..f2a7a46 100644
    --- a/include/linux/seqlock.h
    +++ b/include/linux/seqlock.h
    @@ -620,7 +620,7 @@ static inline void seqcount_latch_init(seqcount_latch_t *s)

    /**
    * raw_read_seqcount_latch() - pick even/odd latch data copy
    - * @s: Pointer to seqcount_t, seqcount_raw_spinlock_t, or seqcount_latch_t
    + * @s: Pointer to seqcount_latch_t
    *
    * See raw_write_seqcount_latch() for details and a full reader/writer
    * usage example.
    @@ -629,17 +629,14 @@ static inline void seqcount_latch_init(seqcount_latch_t *s)
    * picking which data copy to read. The full counter must then be checked
    * with read_seqcount_latch_retry().
    */
    -#define raw_read_seqcount_latch(s) \
    -({ \
    - /* \
    - * Pairs with the first smp_wmb() in raw_write_seqcount_latch(). \
    - * Due to the dependent load, a full smp_rmb() is not needed. \
    - */ \
    - _Generic(*(s), \
    - seqcount_t: READ_ONCE(((seqcount_t *)s)->sequence), \
    - seqcount_raw_spinlock_t: READ_ONCE(((seqcount_raw_spinlock_t *)s)->seqcount.sequence), \
    - seqcount_latch_t: READ_ONCE(((seqcount_latch_t *)s)->seqcount.sequence)); \
    -})
    +static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
    +{
    + /*
    + * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
    + * Due to the dependent load, a full smp_rmb() is not needed.
    + */
    + return READ_ONCE(s->seqcount.sequence);
    +}

    /**
    * read_seqcount_latch_retry() - end a seqcount_latch_t read section
    @@ -656,7 +653,7 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)

    /**
    * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
    - * @s: Pointer to seqcount_t, seqcount_raw_spinlock_t, or seqcount_latch_t
    + * @s: Pointer to seqcount_latch_t
    *
    * The latch technique is a multiversion concurrency control method that allows
    * queries during non-atomic modifications. If you can guarantee queries never
    @@ -735,14 +732,11 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
    * When data is a dynamic data structure; one should use regular RCU
    * patterns to manage the lifetimes of the objects within.
    */
    -#define raw_write_seqcount_latch(s) \
    -{ \
    - smp_wmb(); /* prior stores before incrementing "sequence" */ \
    - _Generic(*(s), \
    - seqcount_t: ((seqcount_t *)s)->sequence++, \
    - seqcount_raw_spinlock_t:((seqcount_raw_spinlock_t *)s)->seqcount.sequence++, \
    - seqcount_latch_t: ((seqcount_latch_t *)s)->seqcount.sequence++); \
    - smp_wmb(); /* increment "sequence" before following stores */ \
    +static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
    +{
    + smp_wmb(); /* prior stores before incrementing "sequence" */
    + s->seqcount.sequence++;
    + smp_wmb(); /* increment "sequence" before following stores */
    }

    /*
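
    For reference, a minimal sketch of the latch reader/writer pattern these latch-only helpers serve, adapted from the usage example documented next to raw_write_seqcount_latch(); the struct and function names below are illustrative placeholders, not kernel APIs.

    #include <linux/types.h>
    #include <linux/seqlock.h>

    /* Illustrative: two copies of the protected data plus the latch counter. */
    struct demo_latch_data {
            seqcount_latch_t        seq;
            u64                     val[2];
    };

    /* Writer: redirect readers to the stable copy, then update the other one. */
    static void demo_update(struct demo_latch_data *d, u64 new_val)
    {
            raw_write_seqcount_latch(&d->seq);      /* readers now use val[1] */
            d->val[0] = new_val;

            raw_write_seqcount_latch(&d->seq);      /* readers now use val[0] */
            d->val[1] = new_val;
    }

    /* Reader: pick a copy by sequence parity; retry if a writer raced with us. */
    static u64 demo_query(struct demo_latch_data *d)
    {
            unsigned int seq, idx;
            u64 ret;

            do {
                    seq = raw_read_seqcount_latch(&d->seq);
                    idx = seq & 0x01;
                    ret = d->val[idx];
            } while (read_seqcount_latch_retry(&d->seq, seq));

            return ret;
    }

    A reader that samples the counter is steered to the copy the writer is not currently modifying; if it nevertheless raced with an update, read_seqcount_latch_retry() detects the sequence change and forces another pass.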