From: Denys Vlasenko <dvlasenk@redhat.com>
Subject: [PATCH] force inlining of spinlock ops
Date: Mon, 11 May 2015
With both gcc 4.7.2 and 4.9.2, gcc sometimes mysteriously fails to inline
very small functions that we expect to be inlined. In particular,
with this config: http://busybox.net/~vda/kernel_config
the resulting vmlinux contains more than a thousand out-of-line copies of
tiny spinlock-related functions:

    $ nm --size-sort vmlinux | grep -iF ' t ' | uniq -c | grep -v '^ *1 ' | sort -rn | grep ' spin'
    473 000000000000000b t spin_unlock_irqrestore
    292 000000000000000b t spin_unlock
    215 000000000000000b t spin_lock
    134 000000000000000b t spin_unlock_irq
    130 000000000000000b t spin_unlock_bh
    120 000000000000000b t spin_lock_irq
    106 000000000000000b t spin_lock_bh

    Disassembly:

    ffffffff81004720 <spin_lock>:
    ffffffff81004720: 55 push %rbp
    ffffffff81004721: 48 89 e5 mov %rsp,%rbp
    ffffffff81004724: e8 f8 4e e2 02 callq <_raw_spin_lock>
    ffffffff81004729: 5d pop %rbp
    ffffffff8100472a: c3 retq
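
(For reference: __always_inline is the kernel's alias for gcc's
always_inline function attribute. In kernels of this vintage it is
defined in include/linux/compiler-gcc.h, modulo version-specific
guards, essentially as:

#define __always_inline inline __attribute__((always_inline))

Unlike plain inline, which gcc at -O2 treats only as a hint subject to
its inlining heuristics, always_inline makes gcc inline the call
regardless of its size estimates.)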

This patch fixes this via s/inline/__always_inline/ in spinlock.h.
This decreases vmlinux by about 40k (82375570 - 82335059 = 40511 bytes
of text):

    text     data      bss       dec     hex filename
82375570 22255544 20627456 125258570 7774b4a vmlinux.before
82335059 22255416 20627456 125217931 776ac8b vmlinux
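
The underlying gcc behavior is easy to demonstrate outside the kernel.
Below is a minimal standalone sketch (hypothetical file demo.c, not
kernel code); it uses -O0, where gcc predictably ignores the plain
inline hint, to make the effect deterministic:

/* demo.c - standalone illustration, not kernel code.
 * Build and inspect with: gcc -O0 -c demo.c && nm demo.o
 * At -O0, gcc ignores the plain "inline" hint, so hint() is emitted
 * as a local text symbol ("t hint") and called out of line, exactly
 * like the spin_lock copies above. always_inline is honored at any
 * optimization level, so forced() typically leaves no symbol behind. */
static inline int hint(int x) /* only a hint; gcc may outline it */
{
        return x + 1;
}

static inline __attribute__((always_inline)) int forced(int x)
{
        return x + 1; /* inlined into every caller unconditionally */
}

int caller(int x)
{
        return hint(x) + forced(x);
}

At -O2 the heuristics usually do inline functions this small, which is
what makes the kernel cases above surprising.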

    Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
    Cc: Thomas Graf <tgraf@suug.ch>
    Cc: David S. Miller <davem@davemloft.net>
    Cc: Bart Van Assche <bvanassche@acm.org>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: David Rientjes <rientjes@google.com>
    Cc: Andrew Morton <akpm@linux-foundation.org>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Oleg Nesterov <oleg@redhat.com>
    Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    Cc: Ingo Molnar <mingo@kernel.org>
Cc: linux-kernel@vger.kernel.org
    ---
    include/linux/spinlock.h | 30 +++++++++++++++---------------
    1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3e18379..073925d 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -296,7 +296,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */

-static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
 {
         return &lock->rlock;
 }
@@ -307,17 +307,17 @@ do { \
         raw_spin_lock_init(&(_lock)->rlock); \
 } while (0)

-static inline void spin_lock(spinlock_t *lock)
+static __always_inline void spin_lock(spinlock_t *lock)
 {
         raw_spin_lock(&lock->rlock);
 }

-static inline void spin_lock_bh(spinlock_t *lock)
+static __always_inline void spin_lock_bh(spinlock_t *lock)
 {
         raw_spin_lock_bh(&lock->rlock);
 }

-static inline int spin_trylock(spinlock_t *lock)
+static __always_inline int spin_trylock(spinlock_t *lock)
 {
         return raw_spin_trylock(&lock->rlock);
 }
@@ -337,7 +337,7 @@ do { \
         raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
 } while (0)

-static inline void spin_lock_irq(spinlock_t *lock)
+static __always_inline void spin_lock_irq(spinlock_t *lock)
 {
         raw_spin_lock_irq(&lock->rlock);
 }
@@ -352,32 +352,32 @@ do { \
         raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
 } while (0)

-static inline void spin_unlock(spinlock_t *lock)
+static __always_inline void spin_unlock(spinlock_t *lock)
 {
         raw_spin_unlock(&lock->rlock);
 }

-static inline void spin_unlock_bh(spinlock_t *lock)
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
 {
         raw_spin_unlock_bh(&lock->rlock);
 }

-static inline void spin_unlock_irq(spinlock_t *lock)
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
 {
         raw_spin_unlock_irq(&lock->rlock);
 }

-static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
         raw_spin_unlock_irqrestore(&lock->rlock, flags);
 }

-static inline int spin_trylock_bh(spinlock_t *lock)
+static __always_inline int spin_trylock_bh(spinlock_t *lock)
 {
         return raw_spin_trylock_bh(&lock->rlock);
 }

-static inline int spin_trylock_irq(spinlock_t *lock)
+static __always_inline int spin_trylock_irq(spinlock_t *lock)
 {
         return raw_spin_trylock_irq(&lock->rlock);
 }
@@ -387,22 +387,22 @@ static inline int spin_trylock_irq(spinlock_t *lock)
         raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })

-static inline void spin_unlock_wait(spinlock_t *lock)
+static __always_inline void spin_unlock_wait(spinlock_t *lock)
 {
         raw_spin_unlock_wait(&lock->rlock);
 }

-static inline int spin_is_locked(spinlock_t *lock)
+static __always_inline int spin_is_locked(spinlock_t *lock)
 {
         return raw_spin_is_locked(&lock->rlock);
 }

-static inline int spin_is_contended(spinlock_t *lock)
+static __always_inline int spin_is_contended(spinlock_t *lock)
 {
         return raw_spin_is_contended(&lock->rlock);
 }

-static inline int spin_can_lock(spinlock_t *lock)
+static __always_inline int spin_can_lock(spinlock_t *lock)
 {
         return raw_spin_can_lock(&lock->rlock);
 }
--
1.8.1.4

