    Subject: Re: [PATCH v20 2/5] ring-buffer: Introducing ring-buffer mapping functions
    On Sat,  6 Apr 2024 18:36:46 +0100
    Vincent Donnefort <vdonnefort@google.com> wrote:

    > +int ring_buffer_map(struct trace_buffer *buffer, int cpu,
    > +                    struct vm_area_struct *vma)
    > +{
    > +        struct ring_buffer_per_cpu *cpu_buffer;
    > +        unsigned long flags, *subbuf_ids;
    > +        int err = 0;
    > +
    > +        if (!cpumask_test_cpu(cpu, buffer->cpumask))
    > +                return -EINVAL;
    > +
    > +        cpu_buffer = buffer->buffers[cpu];
    > +
    > +        mutex_lock(&cpu_buffer->mapping_lock);
    > +
    > +        if (cpu_buffer->mapped) {
    > +                err = __rb_map_vma(cpu_buffer, vma);
    > +                if (!err)
    > +                        err = __rb_inc_dec_mapped(cpu_buffer, true);
    > +                mutex_unlock(&cpu_buffer->mapping_lock);
    > +                return err;
    > +        }
    > +
    > +        /* prevent another thread from changing buffer/sub-buffer sizes */
    > +        mutex_lock(&buffer->mutex);
    > +
    > +        err = rb_alloc_meta_page(cpu_buffer);
    > +        if (err)
    > +                goto unlock;
    > +
    > +        /* subbuf_ids include the reader while nr_pages does not */
    > +        subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL);
    > +        if (!subbuf_ids) {
    > +                rb_free_meta_page(cpu_buffer);
    > +                err = -ENOMEM;
    > +                goto unlock;
    > +        }
    > +
    > +        atomic_inc(&cpu_buffer->resize_disabled);
    > +
    > +        /*
    > +         * Lock all readers to block any subbuf swap until the subbuf IDs are
    > +         * assigned.
    > +         */
    > +        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
    > +        rb_setup_ids_meta_page(cpu_buffer, subbuf_ids);
    > +        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
    > +
    > +        err = __rb_map_vma(cpu_buffer, vma);
    > +        if (!err) {
    > +                raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
    > +                cpu_buffer->mapped = 1;
    > +                raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
    > +        } else {
    > +                kfree(cpu_buffer->subbuf_ids);
    > +                cpu_buffer->subbuf_ids = NULL;
    > +                rb_free_meta_page(cpu_buffer);
    > +        }
    > +unlock:

    Nit: For all labels, please add a space before them. Otherwise, diffs will
    show "unlock" as the function and not "ring_buffer_map", making it harder
    to find where the change is.
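
    For example, with the label at column 0, a later hunk touching the end
    of this function gets attributed to the label (hunk offsets made up for
    illustration):

        @@ -6320,7 +6320,7 @@ unlock:

    With " unlock:" instead, diff skips the label and names the enclosing
    function:

        @@ -6320,7 +6320,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,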

    Same for the labels below.

    -- Steve


    > +        mutex_unlock(&buffer->mutex);
    > +        mutex_unlock(&cpu_buffer->mapping_lock);
    > +
    > +        return err;
    > +}
    > +
    > +int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
    > +{
    > +        struct ring_buffer_per_cpu *cpu_buffer;
    > +        unsigned long flags;
    > +        int err = 0;
    > +
    > +        if (!cpumask_test_cpu(cpu, buffer->cpumask))
    > +                return -EINVAL;
    > +
    > +        cpu_buffer = buffer->buffers[cpu];
    > +
    > +        mutex_lock(&cpu_buffer->mapping_lock);
    > +
    > +        if (!cpu_buffer->mapped) {
    > +                err = -ENODEV;
    > +                goto out;
    > +        } else if (cpu_buffer->mapped > 1) {
    > +                __rb_inc_dec_mapped(cpu_buffer, false);
    > +                goto out;
    > +        }
    > +
    > +        mutex_lock(&buffer->mutex);
    > +        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
    > +
    > +        cpu_buffer->mapped = 0;
    > +
    > +        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
    > +
    > +        kfree(cpu_buffer->subbuf_ids);
    > +        cpu_buffer->subbuf_ids = NULL;
    > +        rb_free_meta_page(cpu_buffer);
    > +        atomic_dec(&cpu_buffer->resize_disabled);
    > +
    > +        mutex_unlock(&buffer->mutex);
    > +out:
    > +        mutex_unlock(&cpu_buffer->mapping_lock);
    > +
    > +        return err;
    > +}
    > +
    > +int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu)
    > +{
    > +        struct ring_buffer_per_cpu *cpu_buffer;
    > +        unsigned long reader_size;
    > +        unsigned long flags;
    > +
    > +        cpu_buffer = rb_get_mapped_buffer(buffer, cpu);
    > +        if (IS_ERR(cpu_buffer))
    > +                return (int)PTR_ERR(cpu_buffer);
    > +
    > +        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
    > +consume:
    > +        if (rb_per_cpu_empty(cpu_buffer))
    > +                goto out;
    > +
    > +        reader_size = rb_page_size(cpu_buffer->reader_page);
    > +
    > +        /*
    > +         * There is data to be read on the current reader page. We can
    > +         * return to the caller, but we assume the caller will read all of
    > +         * it. Update the kernel reader accordingly.
    > +         */
    > +        if (cpu_buffer->reader_page->read < reader_size) {
    > +                while (cpu_buffer->reader_page->read < reader_size)
    > +                        rb_advance_reader(cpu_buffer);
    > +                goto out;
    > +        }
    > +
    > +        if (WARN_ON(!rb_get_reader_page(cpu_buffer)))
    > +                goto out;
    > +
    > +        goto consume;
    > +out:
    > +        /* Some archs do not have data cache coherency between kernel and user-space */
    > +        flush_dcache_folio(virt_to_folio(cpu_buffer->reader_page->page));
    > +
    > +        rb_update_meta_page(cpu_buffer);
    > +
    > +        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
    > +        rb_put_mapped_buffer(cpu_buffer);
    > +
    > +        return 0;
    > +}
    > +
    >  /*
    >   * We only allocate new buffers, never free them if the CPU goes down.
    >   * If we were to free the buffer, then the user would lose any trace that was in
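
    For reference, a minimal user-space sketch of how a mapping like this is
    meant to be consumed. Nothing below is part of this patch: the
    trace_pipe_raw path, the read-only-mapping assumption, and the
    reader-advance step come from the rest of the series, so treat them as
    assumptions rather than as this patch's interface:

        /* Hypothetical sketch, not part of the patch. */
        #include <fcntl.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
                              O_RDONLY | O_NONBLOCK);
                long page_size = sysconf(_SC_PAGE_SIZE);
                void *meta;

                if (fd < 0)
                        return 1;

                /* Offset 0 of the mapping is the meta page; the sub-buffers
                 * follow it. Only the meta page is mapped here, for brevity,
                 * and read-only, assuming writable mappings are rejected.
                 */
                meta = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
                if (meta == MAP_FAILED)
                        return 1;

                /* ... locate the reader sub-buffer via the meta page, consume
                 * it, then advance the kernel reader (the ioctl introduced
                 * later in the series, which ends up calling
                 * ring_buffer_map_get_reader()) ...
                 */

                munmap(meta, page_size);
                close(fd);
                return 0;
        }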

