Subject: Re: Kernel broken on processors without performance counters
On Fri, Jul 10, 2015 at 04:13:59PM +0200, Peter Zijlstra wrote:
> On Wed, Jul 08, 2015 at 05:36:43PM -0700, Andy Lutomirski wrote:
> > >> In what universe is "static_key_false" a reasonable name for a
> > >> function that returns true if a static key is true?
>
> > I think the current naming is almost maximally bad. The naming would
> > be less critical if it were typesafe, though.
>
> How about something like so on top? It would allow us to slowly migrate
> existing and new users over to a hopefully saner interface.

Anybody? (Adding more Cc's)
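
(For anyone joining via the new Cc's: the naming complaint quoted above is
about call sites like the following -- a minimal sketch of the *current*
API, where do_unlikely_stuff() is a made-up placeholder:

	static struct static_key key = STATIC_KEY_INIT_FALSE;

	if (static_key_false(&key))	/* taken when the key is enabled */
		do_unlikely_stuff();

That is, static_key_false() names the key's default value, not what the
test returns, which is exactly what Andy is objecting to.)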

> ---
> include/linux/jump_label.h | 67 +++++++++++++++++++++++++++++++++++++++++++++-
> kernel/sched/core.c        | 18 ++++++-------
> 2 files changed, 74 insertions(+), 11 deletions(-)
>
> diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
> index f4de473f226b..98ed3c2ec78d 100644
> --- a/include/linux/jump_label.h
> +++ b/include/linux/jump_label.h
> @@ -213,6 +213,71 @@ static inline bool static_key_enabled(struct static_key *key)
> 	return static_key_count(key) > 0;
> }
>
> -#endif /* _LINUX_JUMP_LABEL_H */
> +static inline void static_key_enable(struct static_key *key)
> +{
> +	int count = static_key_count(key);
> +
> +	WARN_ON_ONCE(count < 0 || count > 1);
> +
> +	if (!count)
> +		static_key_slow_inc(key);
> +}
> +
> +static inline void static_key_disable(struct static_key *key)
> +{
> +	int count = static_key_count(key);
> +
> +	WARN_ON_ONCE(count < 0 || count > 1);
> +
> +	if (count)
> +		static_key_slow_dec(key);
> +}
> +
> +/* -------------------------------------------------------------------------- */
> +
> +/*
> + * likely -- default enabled, puts the branch body in-line
> + */
> +
> +struct static_likely_key {
> +	struct static_key key;
> +};
> +
> +#define STATIC_LIKELY_KEY_INIT (struct static_likely_key){ .key = STATIC_KEY_INIT_TRUE, }
> +
> +static inline bool static_likely_branch(struct static_likely_key *key)
> +{
> +	return static_key_true(&key->key);
> +}
> +
> +/*
> + * unlikely -- default disabled, puts the branch body out-of-line
> + */
> +
> +struct static_unlikely_key {
> +	struct static_key key;
> +};
> +
> +#define STATIC_UNLIKELY_KEY_INIT (struct static_unlikely_key){ .key = STATIC_KEY_INIT_FALSE, }
> +
> +static inline bool static_unlikely_branch(struct static_unlikely_key *key)
> +{
> +	return static_key_false(&key->key);
> +}
> +
> +/*
> + * Advanced usage; refcount, branch is enabled when: count != 0
> + */
> +
> +#define static_branch_inc(_k) static_key_slow_inc(&(_k)->key)
> +#define static_branch_dec(_k) static_key_slow_dec(&(_k)->key)
> +
> +/*
> + * Normal usage; boolean enable/disable.
> + */
> +
> +#define static_branch_enable(_k) static_key_enable(&(_k)->key)
> +#define static_branch_disable(_k) static_key_disable(&(_k)->key)
>
> #endif /* __ASSEMBLY__ */
> +#endif /* _LINUX_JUMP_LABEL_H */
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 78b4bad10081..22ba92297375 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -164,14 +164,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
>
> static void sched_feat_disable(int i)
> {
> -	if (static_key_enabled(&sched_feat_keys[i]))
> -		static_key_slow_dec(&sched_feat_keys[i]);
> +	static_key_disable(&sched_feat_keys[i]);
> }
>
> static void sched_feat_enable(int i)
> {
> -	if (!static_key_enabled(&sched_feat_keys[i]))
> -		static_key_slow_inc(&sched_feat_keys[i]);
> +	static_key_enable(&sched_feat_keys[i]);
> }
> #else
> static void sched_feat_disable(int i) { };
> @@ -2318,17 +2316,17 @@ void wake_up_new_task(struct task_struct *p)
>
> #ifdef CONFIG_PREEMPT_NOTIFIERS
>
> -static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
> +static struct static_unlikely_key preempt_notifier_key = STATIC_UNLIKELY_KEY_INIT;
>
> void preempt_notifier_inc(void)
> {
> -	static_key_slow_inc(&preempt_notifier_key);
> +	static_branch_inc(&preempt_notifier_key);
> }
> EXPORT_SYMBOL_GPL(preempt_notifier_inc);
>
> void preempt_notifier_dec(void)
> {
> -	static_key_slow_dec(&preempt_notifier_key);
> +	static_branch_dec(&preempt_notifier_key);
> }
> EXPORT_SYMBOL_GPL(preempt_notifier_dec);
>
> @@ -2338,7 +2336,7 @@ EXPORT_SYMBOL_GPL(preempt_notifier_dec);
> */
> void preempt_notifier_register(struct preempt_notifier *notifier)
> {
> -	if (!static_key_false(&preempt_notifier_key))
> +	if (!static_unlikely_branch(&preempt_notifier_key))
> 		WARN(1, "registering preempt_notifier while notifiers disabled\n");
>
> 	hlist_add_head(&notifier->link, &current->preempt_notifiers);
> @@ -2367,7 +2365,7 @@ static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
>
> static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
> {
> -	if (static_key_false(&preempt_notifier_key))
> +	if (static_unlikely_branch(&preempt_notifier_key))
> 		__fire_sched_in_preempt_notifiers(curr);
> }
>
> @@ -2385,7 +2383,7 @@ static __always_inline void
> fire_sched_out_preempt_notifiers(struct task_struct *curr,
> 				 struct task_struct *next)
> {
> -	if (static_key_false(&preempt_notifier_key))
> +	if (static_unlikely_branch(&preempt_notifier_key))
> 		__fire_sched_out_preempt_notifiers(curr, next);
> }
>
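
For reviewers, a sketch of what a call site would look like with the
proposed wrappers; my_feature, my_feature_set() and do_my_feature() are
made-up names for illustration only:

	static struct static_unlikely_key my_feature = STATIC_UNLIKELY_KEY_INIT;

	void my_feature_set(bool on)
	{
		if (on)
			static_branch_enable(&my_feature);
		else
			static_branch_disable(&my_feature);
	}

	/* fast path: a patched NOP while disabled, branch body out-of-line */
	if (static_unlikely_branch(&my_feature))
		do_my_feature();

Because static_likely_branch() only accepts a struct static_likely_key *
and static_unlikely_branch() only a struct static_unlikely_key *, using
the wrong test for a given key now draws a compiler diagnostic instead of
silently compiling, which is the type safety Andy asked for.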

