Date: Wed, 21 Aug 2013 14:49:12 -0400
From: Steven Rostedt <>
Subject: Re: [PATCH 2/4] irq_work: Provide a irq work that can be processed on any cpu

On Wed, 21 Aug 2013 10:08:30 +0200 Jan Kara <jack@suse.cz> wrote:
>  struct irq_work {
>  	unsigned long flags;
> diff --git a/kernel/irq_work.c b/kernel/irq_work.c
> index 55fcce6..446cd81 100644
> --- a/kernel/irq_work.c
> +++ b/kernel/irq_work.c
> @@ -22,6 +22,9 @@
>  static DEFINE_PER_CPU(struct llist_head, irq_work_list);
>  static DEFINE_PER_CPU(int, irq_work_raised);
> 
> +/* List of irq-work any CPU can pick up */
> +static LLIST_HEAD(unbound_irq_work_list);
> +
>  /*
>   * Claim the entry so that no one else will poke at it.
>   */
> @@ -70,12 +73,16 @@ void irq_work_queue(struct irq_work *work)
>  	/* Queue the entry and raise the IPI if needed. */
>  	preempt_disable();
> 
> -	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
> +	if (!(work->flags & __IRQ_WORK_UNBOUND))
> +		llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
> +	else
> +		llist_add(&work->llnode, &unbound_irq_work_list);
Just for better readability I would have:
	if (work->flags & __IRQ_WORK_UNBOUND)
		llist_add(&work->llnode, &unbound_irq_work_list);
	else
		llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
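Folding that in, the queueing path would then read roughly like this
(untested sketch, with the surrounding lines paraphrased from the
source the hunk above is against):

	void irq_work_queue(struct irq_work *work)
	{
		/* Only queue if not already pending */
		if (!irq_work_claim(work))
			return;

		/* Queue the entry and raise the IPI if needed. */
		preempt_disable();

		/* Unbound work may be picked up by any CPU */
		if (work->flags & __IRQ_WORK_UNBOUND)
			llist_add(&work->llnode, &unbound_irq_work_list);
		else
			llist_add(&work->llnode, &__get_cpu_var(irq_work_list));

		...
	}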
> 
>  	/*
>  	 * If the work is not "lazy" or the tick is stopped, raise the irq
>  	 * work interrupt (if supported by the arch), otherwise, just wait
> -	 * for the next tick.
> +	 * for the next tick. We do this even for unbound work to make sure
> +	 * *some* CPU will be doing the work.
>  	 */
>  	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
>  		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
> @@ -100,28 +107,11 @@ bool irq_work_needs_cpu(void)
>  	return true;
>  }
> 
> -static void __irq_work_run(void)
> +static void process_irq_work_list(struct llist_node *llnode)
>  {
>  	unsigned long flags;
>  	struct irq_work *work;
> -	struct llist_head *this_list;
> -	struct llist_node *llnode;
> -
> -
> -	/*
> -	 * Reset the "raised" state right before we check the list because
> -	 * an NMI may enqueue after we find the list empty from the runner.
> -	 */
> -	__this_cpu_write(irq_work_raised, 0);
> -	barrier();
> 
> -	this_list = &__get_cpu_var(irq_work_list);
> -	if (llist_empty(this_list))
> -		return;
> -
> -	BUG_ON(!irqs_disabled());
> -
> -	llnode = llist_del_all(this_list);
>  	while (llnode != NULL) {
>  		work = llist_entry(llnode, struct irq_work, llnode);
> 
> @@ -146,6 +136,27 @@ static void __irq_work_run(void)
>  	}
>  }
> 
> +static void __irq_work_run(void)
> +{
> +	struct llist_head *this_list;
> +
> +	/*
> +	 * Reset the "raised" state right before we check the list because
> +	 * an NMI may enqueue after we find the list empty from the runner.
> +	 */
> +	__this_cpu_write(irq_work_raised, 0);
> +	barrier();
> +
> +	this_list = &__get_cpu_var(irq_work_list);
> +	if (llist_empty(this_list) && llist_empty(&unbound_irq_work_list))
> +		return;
> +
> +	BUG_ON(!irqs_disabled());
> +
> +	process_irq_work_list(llist_del_all(this_list));
> +	process_irq_work_list(llist_del_all(&unbound_irq_work_list));
OK, I'm being a bit of a micro-optimization freak here, but...

How about passing the list head to process_irq_work_list() and moving
the llist_empty() and BUG_ON() (and the llist_del_all() itself) in
there? That is,
	if (llist_empty(list))
		return;

	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(list);
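Spelled out, process_irq_work_list() would then look something like
this (untested sketch; the while loop is just the one the patch already
moves over from __irq_work_run()):

	static void process_irq_work_list(struct llist_head *list)
	{
		unsigned long flags;
		struct irq_work *work;
		struct llist_node *llnode;

		/* Nothing queued: skip the xchg() in llist_del_all() */
		if (llist_empty(list))
			return;

		BUG_ON(!irqs_disabled());

		llnode = llist_del_all(list);
		while (llnode != NULL) {
			work = llist_entry(llnode, struct irq_work, llnode);

			llnode = llist_next(llnode);

			/*
			 * Clear the PENDING bit, after this point the
			 * @work can be re-used.
			 */
			flags = work->flags & ~IRQ_WORK_PENDING;
			xchg(&work->flags, flags);

			work->func(work);
			/*
			 * Clear the BUSY bit and return to the free state if
			 * no-one else claimed it meanwhile.
			 */
			(void)cmpxchg(&work->flags, flags,
				      flags & ~IRQ_WORK_BUSY);
		}
	}

and the two calls at the end of __irq_work_run() collapse to:

	process_irq_work_list(&__get_cpu_var(irq_work_list));
	process_irq_work_list(&unbound_irq_work_list);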
That way we avoid the double xchg() that is done with the two calls to
llist_del_all().

-- Steve
> +}
> +
>  /*
>   * Run the irq_work entries on this cpu. Requires to be ran from hardirq
>   * context with local IRQs disabled.