Subject: Re: [PATCH v9 1/2] ring-buffer: Introducing ring-buffer mapping functions
On Thu, 21 Dec 2023 17:35:22 +0000
Vincent Donnefort <vdonnefort@google.com> wrote:

> @@ -739,6 +747,22 @@ static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int f
>          return (dirty * 100) > (full * nr_pages);
>  }
>
> +static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
> +{
> +        if (unlikely(READ_ONCE(cpu_buffer->mapped))) {
> +                /* Ensure the meta_page is ready */
> +                smp_rmb();
> +                WRITE_ONCE(cpu_buffer->meta_page->entries,
> +                           local_read(&cpu_buffer->entries));
> +                WRITE_ONCE(cpu_buffer->meta_page->overrun,
> +                           local_read(&cpu_buffer->overrun));
> +                WRITE_ONCE(cpu_buffer->meta_page->subbufs_touched,
> +                           local_read(&cpu_buffer->pages_touched));
> +                WRITE_ONCE(cpu_buffer->meta_page->subbufs_lost,
> +                           local_read(&cpu_buffer->pages_lost));
> +        }
> +}
> +
>  /*
>   * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
>   *
> @@ -749,6 +773,18 @@ static void rb_wake_up_waiters(struct irq_work *work)
>  {
>          struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
>
> +        if (rbwork->is_cpu_buffer) {
> +                struct ring_buffer_per_cpu *cpu_buffer;
> +
> +                cpu_buffer = container_of(rbwork, struct ring_buffer_per_cpu,
> +                                          irq_work);
> +                /*
> +                 * If the waiter is a cpu_buffer, this might be due to a
> +                 * userspace mapping. Let's update the meta-page.
> +                 */
> +                rb_update_meta_page(cpu_buffer);
> +        }
> +
>          wake_up_all(&rbwork->waiters);
>          if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
>                  rbwork->wakeup_full = false;


I think this code would be cleaner if we did:

static void rb_update_meta_page(struct rb_irq_work *rbwork)
{
        struct ring_buffer_per_cpu *cpu_buffer;

        if (!rbwork->is_cpu_buffer)
                return;

        /*
         * If the waiter is a cpu_buffer, this might be due to a
         * userspace mapping. Let's update the meta-page.
         */
        cpu_buffer = container_of(rbwork, struct ring_buffer_per_cpu,
                                  irq_work);

        if (unlikely(READ_ONCE(cpu_buffer->mapped))) {
                // I don't think we need the "unlikely"

                /* Ensure the meta_page is ready */
                smp_rmb();
                WRITE_ONCE(cpu_buffer->meta_page->entries,
                           local_read(&cpu_buffer->entries));
                WRITE_ONCE(cpu_buffer->meta_page->overrun,
                           local_read(&cpu_buffer->overrun));
                WRITE_ONCE(cpu_buffer->meta_page->subbufs_touched,
                           local_read(&cpu_buffer->pages_touched));
                WRITE_ONCE(cpu_buffer->meta_page->subbufs_lost,
                           local_read(&cpu_buffer->pages_lost));
        }
}

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
        struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

        rb_update_meta_page(rbwork);

        wake_up_all(&rbwork->waiters);
        if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
                rbwork->wakeup_full = false;
                rbwork->full_waiters_pending = false;
                wake_up_all(&rbwork->full_waiters);
        }
}
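
For anyone reading this without the rest of the series handy: the fields mirrored above live in the meta-page that the patch exposes to user space via mmap(). A rough sketch of that layout, using only the field names visible in the hunk; the struct name, types and ordering here are my guess, so see the uapi header added by the patch for the real definition:

        /* Sketch only; not the actual uapi definition from the series */
        struct trace_buffer_meta {
                unsigned long   entries;                /* events written to the buffer */
                unsigned long   overrun;                /* events lost to overwrite */
                unsigned long   subbufs_touched;        /* sub-buffers that received writes */
                unsigned long   subbufs_lost;           /* sub-buffers lost to overrun */
                /* ... more fields in the actual patch ... */
        };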

-- Steve
