    Subject: [PATCH v4 1/2] introduce test_bit_acquire and use it in wait_on_bit
    wait_on_bit tests the bit without any memory barriers; consequently, on
    architectures with weak memory ordering, code that follows wait_on_bit
    may be reordered before the bit test. When code waits for some event
    with wait_on_bit and then performs a load, the load may be unexpectedly
    reordered before wait_on_bit and return data that existed before the
    event occurred.

    Such bugs exist in fs/buffer.c:__wait_on_buffer,
    drivers/md/dm-bufio.c:new_read,
    drivers/media/usb/dvb-usb-v2/dvb_usb_core.c:dvb_usb_start_feed,
    drivers/bluetooth/btusb.c:btusb_mtk_hci_wmt_sync
    and perhaps in other places.
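
    For illustration only, the racy pattern looks roughly like the sketch
    below. The struct, the OBJ_BUSY bit and the function names are
    hypothetical, not taken from the files above; the point is that the
    load of o->data in the waiter can be observed before the bit test done
    inside wait_on_bit.

    /* Hypothetical sketch of the racy pattern (not from the affected files). */

    #include <linux/bitops.h>
    #include <linux/sched.h>
    #include <linux/wait_bit.h>

    #define OBJ_BUSY 0	/* set while an update is in flight */

    struct obj {
            unsigned long flags;
            int data;	/* written by the updater before it clears OBJ_BUSY */
    };

    /* Updater: publish the data, then clear the bit and wake the waiters. */
    static void updater_done(struct obj *o)
    {
            o->data = 42;
            clear_bit_unlock(OBJ_BUSY, &o->flags);	/* release ordering */
            smp_mb__after_atomic();	/* order the clear before the wakeup check */
            wake_up_bit(&o->flags, OBJ_BUSY);
    }

    /* Waiter: without acquire semantics in wait_on_bit, the load of o->data
     * may be satisfied before the bit test and observe the old value. */
    static int waiter(struct obj *o)
    {
            wait_on_bit(&o->flags, OBJ_BUSY, TASK_UNINTERRUPTIBLE);
            return o->data;
    }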

    We fix this class of bugs by adding a new function test_bit_acquire that
    reads the bit and provides acquire memory ordering semantics.
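
    With this change the acquire load happens inside wait_on_bit() itself,
    so the callers above need no further modification. Code that open-codes
    a similar fast path could call the new helper directly; a minimal
    sketch, reusing the hypothetical struct obj and OBJ_BUSY bit from the
    example above:

    #include <linux/errno.h>

    /* Hypothetical open-coded fast path using the new helper. */
    static int try_read(struct obj *o)
    {
            if (test_bit_acquire(OBJ_BUSY, &o->flags))
                    return -EBUSY;	/* update still in flight */
            /* The acquire load above orders this read after the bit test. */
            return o->data;
    }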

    Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
    Cc: stable@vger.kernel.org

    ---
    arch/s390/include/asm/bitops.h                        | 10 ++++++++++
    arch/x86/include/asm/bitops.h                         |  7 ++++++-
    include/asm-generic/bitops/instrumented-non-atomic.h  | 11 +++++++++++
    include/asm-generic/bitops/non-atomic.h               | 13 +++++++++++++
    include/linux/wait_bit.h                              |  8 ++++----
    kernel/sched/wait_bit.c                               |  6 +++---
    6 files changed, 47 insertions(+), 8 deletions(-)

    Index: linux-2.6/arch/x86/include/asm/bitops.h
    ===================================================================
    --- linux-2.6.orig/arch/x86/include/asm/bitops.h 2022-08-01 12:27:43.000000000 +0200
    +++ linux-2.6/arch/x86/include/asm/bitops.h 2022-08-01 12:27:43.000000000 +0200
    @@ -203,8 +203,10 @@ arch_test_and_change_bit(long nr, volati

    static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
    {
    - return ((1UL << (nr & (BITS_PER_LONG-1))) &
    + bool r = ((1UL << (nr & (BITS_PER_LONG-1))) &
    (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
    + barrier();
    + return r;
    }

    static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
    @@ -224,6 +226,9 @@ static __always_inline bool variable_tes
    ? constant_test_bit((nr), (addr)) \
    : variable_test_bit((nr), (addr)))

    +#define arch_test_bit_acquire(nr, addr) \
    + arch_test_bit(nr, addr)
    +
    /**
    * __ffs - find first set bit in word
    * @word: The word to search
    Index: linux-2.6/include/asm-generic/bitops/instrumented-non-atomic.h
    ===================================================================
    --- linux-2.6.orig/include/asm-generic/bitops/instrumented-non-atomic.h 2022-08-01 12:27:43.000000000 +0200
    +++ linux-2.6/include/asm-generic/bitops/instrumented-non-atomic.h 2022-08-01 12:28:33.000000000 +0200
    @@ -135,4 +135,15 @@ static __always_inline bool test_bit(lon
    return arch_test_bit(nr, addr);
    }

    +/**
    + * test_bit_acquire - Determine whether a bit is set with acquire semantics
    + * @nr: bit number to test
    + * @addr: Address to start counting from
    + */
    +static __always_inline bool test_bit_acquire(long nr, const volatile unsigned long *addr)
    +{
    + instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
    + return arch_test_bit_acquire(nr, addr);
    +}
    +
    #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
    Index: linux-2.6/include/asm-generic/bitops/non-atomic.h
    ===================================================================
    --- linux-2.6.orig/include/asm-generic/bitops/non-atomic.h 2022-08-01 12:27:43.000000000 +0200
    +++ linux-2.6/include/asm-generic/bitops/non-atomic.h 2022-08-01 12:27:43.000000000 +0200
    @@ -119,4 +119,17 @@ arch_test_bit(unsigned int nr, const vol
    }
    #define test_bit arch_test_bit

    +/**
    + * arch_test_bit_acquire - Determine whether a bit is set with acquire semantics
    + * @nr: bit number to test
    + * @addr: Address to start counting from
    + */
    +static __always_inline int
    +arch_test_bit_acquire(unsigned int nr, const volatile unsigned long *addr)
    +{
    + unsigned long val = smp_load_acquire(&addr[BIT_WORD(nr)]);
    + return 1UL & (val >> (nr & (BITS_PER_LONG-1)));
    +}
    +#define test_bit_acquire arch_test_bit_acquire
    +
    #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
    Index: linux-2.6/arch/s390/include/asm/bitops.h
    ===================================================================
    --- linux-2.6.orig/arch/s390/include/asm/bitops.h 2022-08-01 12:27:43.000000000 +0200
    +++ linux-2.6/arch/s390/include/asm/bitops.h 2022-08-01 12:27:43.000000000 +0200
    @@ -184,6 +184,16 @@ static inline bool arch_test_bit(unsigne
    return *addr & mask;
    }

    +static inline bool arch_test_bit_acquire(unsigned long nr,
    + const volatile unsigned long *ptr)
    +{
    + const volatile unsigned long *addr = __bitops_word(nr, ptr);
    + unsigned long val = smp_load_acquire(addr);
    + unsigned long mask = __bitops_mask(nr);
    +
    + return val & mask;
    +}
    +
    static inline bool arch_test_and_set_bit_lock(unsigned long nr,
    volatile unsigned long *ptr)
    {
    Index: linux-2.6/include/linux/wait_bit.h
    ===================================================================
    --- linux-2.6.orig/include/linux/wait_bit.h 2022-08-01 12:27:43.000000000 +0200
    +++ linux-2.6/include/linux/wait_bit.h 2022-08-01 12:27:43.000000000 +0200
    @@ -71,7 +71,7 @@ static inline int
    wait_on_bit(unsigned long *word, int bit, unsigned mode)
    {
    might_sleep();
    - if (!test_bit(bit, word))
    + if (!test_bit_acquire(bit, word))
    return 0;
    return out_of_line_wait_on_bit(word, bit,
    bit_wait,
    @@ -96,7 +96,7 @@ static inline int
    wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
    {
    might_sleep();
    - if (!test_bit(bit, word))
    + if (!test_bit_acquire(bit, word))
    return 0;
    return out_of_line_wait_on_bit(word, bit,
    bit_wait_io,
    @@ -123,7 +123,7 @@ wait_on_bit_timeout(unsigned long *word,
    unsigned long timeout)
    {
    might_sleep();
    - if (!test_bit(bit, word))
    + if (!test_bit_acquire(bit, word))
    return 0;
    return out_of_line_wait_on_bit_timeout(word, bit,
    bit_wait_timeout,
    @@ -151,7 +151,7 @@ wait_on_bit_action(unsigned long *word,
    unsigned mode)
    {
    might_sleep();
    - if (!test_bit(bit, word))
    + if (!test_bit_acquire(bit, word))
    return 0;
    return out_of_line_wait_on_bit(word, bit, action, mode);
    }
    Index: linux-2.6/kernel/sched/wait_bit.c
    ===================================================================
    --- linux-2.6.orig/kernel/sched/wait_bit.c 2022-08-01 12:27:43.000000000 +0200
    +++ linux-2.6/kernel/sched/wait_bit.c 2022-08-01 12:27:43.000000000 +0200
    @@ -25,7 +25,7 @@ int wake_bit_function(struct wait_queue_

    if (wait_bit->key.flags != key->flags ||
    wait_bit->key.bit_nr != key->bit_nr ||
    - test_bit(key->bit_nr, key->flags))
    + test_bit_acquire(key->bit_nr, key->flags))
    return 0;

    return autoremove_wake_function(wq_entry, mode, sync, key);
    @@ -45,9 +45,9 @@ __wait_on_bit(struct wait_queue_head *wq

    do {
    prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
    - if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
    + if (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags))
    ret = (*action)(&wbq_entry->key, mode);
    - } while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
    + } while (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);

    finish_wait(wq_head, &wbq_entry->wq_entry);
