Date:	Mon, 10 Feb 2014
From:	Jan Kara <jack@suse.cz>
Subject:	Re: [PATCH 07/11] smp: Consolidate the various smp_call_function_single() declensions
On Sat 08-02-14 17:18:36, Frederic Weisbecker wrote:
> __smp_call_function_single() and smp_call_function_single() share some
> code that can be factorized: execute inline when the target is local,
> check if the target is online, lock the csd, call generic_exec_single().
>
> Let's move the common parts to generic_exec_single().
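
In other words, after this patch both entry points reduce to a single call
into generic_exec_single(), roughly as in the condensed sketch below (this is
an illustration of the flow, not the literal diff; csd == NULL means "pick a
csd for me"):

	/* smp_call_function_single(): no caller-supplied csd, so pass NULL
	 * and let generic_exec_single() use an on-stack csd (wait) or the
	 * per-cpu csd_data (no wait). */
	err = generic_exec_single(cpu, NULL, func, info, wait);

	/* __smp_call_function_single(): the caller owns the csd, so hand it
	 * through together with its func/info. */
	err = generic_exec_single(cpu, csd, csd->func, csd->info, wait);
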
The patch looks good. You can add:
Reviewed-by: Jan Kara <jack@suse.cz>

Honza

>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Christoph Hellwig <hch@infradead.org>
> Cc: Ingo Molnar <mingo@kernel.org>
> Cc: Jan Kara <jack@suse.cz>
> Cc: Jens Axboe <jens.axboe@oracle.com>
> Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
> ---
> kernel/smp.c | 80 +++++++++++++++++++++++++++++-------------------------------
> 1 file changed, 39 insertions(+), 41 deletions(-)
>
> diff --git a/kernel/smp.c b/kernel/smp.c
> index 5ff14e3..64bb0d4 100644
> --- a/kernel/smp.c
> +++ b/kernel/smp.c
> @@ -117,13 +117,43 @@ static void csd_unlock(struct call_single_data *csd)
>  	csd->flags &= ~CSD_FLAG_LOCK;
>  }
>  
> +static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
> +
>  /*
>   * Insert a previously allocated call_single_data element
>   * for execution on the given CPU. data must already have
>   * ->func, ->info, and ->flags set.
>   */
> -static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
> +static int generic_exec_single(int cpu, struct call_single_data *csd,
> +			       smp_call_func_t func, void *info, int wait)
>  {
> +	struct call_single_data csd_stack = { .flags = 0 };
> +	unsigned long flags;
> +
> +
> +	if (cpu == smp_processor_id()) {
> +		local_irq_save(flags);
> +		func(info);
> +		local_irq_restore(flags);
> +		return 0;
> +	}
> +
> +
> +	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
> +		return -ENXIO;
> +
> +
> +	if (!csd) {
> +		csd = &csd_stack;
> +		if (!wait)
> +			csd = &__get_cpu_var(csd_data);
> +	}
> +
> +	csd_lock(csd);
> +
> +	csd->func = func;
> +	csd->info = info;
> +
>  	if (wait)
>  		csd->flags |= CSD_FLAG_WAIT;
>  
> @@ -143,6 +173,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
>  
>  	if (wait)
>  		csd_lock_wait(csd);
> +
> +	return 0;
>  }
>  
>  /*
> @@ -168,8 +200,6 @@ void generic_smp_call_function_single_interrupt(void)
>  	}
>  }
>  
> -static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
> -
>  /*
>   * smp_call_function_single - Run a function on a specific CPU
>   * @func: The function to run. This must be fast and non-blocking.
> @@ -181,12 +211,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
>  int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
>  			     int wait)
>  {
> -	struct call_single_data d = {
> -		.flags = 0,
> -	};
> -	unsigned long flags;
>  	int this_cpu;
> -	int err = 0;
> +	int err;
>  
>  	/*
>  	 * prevent preemption and reschedule on another processor,
> @@ -203,26 +229,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
>  	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
>  		     && !oops_in_progress);
>  
> -	if (cpu == this_cpu) {
> -		local_irq_save(flags);
> -		func(info);
> -		local_irq_restore(flags);
> -	} else {
> -		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
> -			struct call_single_data *csd = &d;
> -
> -			if (!wait)
> -				csd = &__get_cpu_var(csd_data);
> -
> -			csd_lock(csd);
> -
> -			csd->func = func;
> -			csd->info = info;
> -			generic_exec_single(cpu, csd, wait);
> -		} else {
> -			err = -ENXIO;	/* CPU not online */
> -		}
> -	}
> +	err = generic_exec_single(cpu, NULL, func, info, wait);
>  
>  	put_cpu();
>  
> @@ -285,9 +292,8 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
>   */
>  int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait)
>  {
> -	unsigned int this_cpu;
> -	unsigned long flags;
>  	int err = 0;
> +	int this_cpu;
>  
>  	this_cpu = get_cpu();
>  	/*
> @@ -296,20 +302,12 @@ int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait)
>  	 * send smp call function interrupt to this cpu and as such deadlocks
>  	 * can't happen.
>  	 */
> -	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
> +	WARN_ON_ONCE(cpu_online(this_cpu) && wait && irqs_disabled()
>  		     && !oops_in_progress);
>  
> -	if (cpu == this_cpu) {
> -		local_irq_save(flags);
> -		csd->func(csd->info);
> -		local_irq_restore(flags);
> -	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
> -		csd_lock(csd);
> -		generic_exec_single(cpu, csd, wait);
> -	} else {
> -		err = -ENXIO;	/* CPU not online */
> -	}
> +	err = generic_exec_single(cpu, csd, csd->func, csd->info, wait);
>  	put_cpu();
> +
>  	return err;
>  }
>  EXPORT_SYMBOL_GPL(__smp_call_function_single);
> --
> 1.8.3.1
>
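
For anyone not familiar with the interface being consolidated here, a minimal
caller looks like the sketch below. Only smp_call_function_single()'s
signature and the -ENXIO return for an offline CPU come from the patch above;
remote_tick() and poke_cpu() are invented for the example.

	#include <linux/smp.h>
	#include <linux/printk.h>

	/* Runs on the target CPU, with interrupts disabled. */
	static void remote_tick(void *info)
	{
		(*(int *)info)++;
	}

	static int poke_cpu(int cpu)
	{
		int count = 0;
		int err;

		/* wait == 1, so after this patch the csd comes from the
		 * stack inside generic_exec_single(). */
		err = smp_call_function_single(cpu, remote_tick, &count, 1);
		if (err == -ENXIO)
			pr_warn("CPU %d is not online\n", cpu);
		return err;
	}
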
--
Jan Kara <jack@suse.cz>
SUSE Labs, CR

