Subject: Re: [PATCH v7 06/10] trace: Add lock-free tracing_map
On Mon,  8 Jun 2015 16:32:05 -0500
Tom Zanussi <tom.zanussi@linux.intel.com> wrote:


> +/**
> + * tracing_map_read_sum - Return the value of a tracing_map_elt's sum
> + * @elt: The tracing_map_elt

Eggs, lettuce, and tomato! Yummmm!



> +static int cmp_entries_dup(const struct tracing_map_sort_entry **a,
> +			   const struct tracing_map_sort_entry **b)
> +{
> +	int ret = 0;
> +
> +	if (memcmp((*a)->key, (*b)->key, (*a)->elt->map->key_size))
> +		ret = 1;
> +
> +	return ret;
> +}
> +
> +static int cmp_entries_sum(const struct tracing_map_sort_entry **a,
> +			   const struct tracing_map_sort_entry **b)
> +{
> +	const struct tracing_map_elt *elt_a, *elt_b;
> +	struct tracing_map_sort_key *sort_key;
> +	struct tracing_map_field *field;
> +	tracing_map_cmp_fn_t cmp_fn;
> +	void *val_a, *val_b;
> +	int ret = 0;
> +
> +	elt_a = (*a)->elt;
> +	elt_b = (*b)->elt;
> +
> +	sort_key = &elt_a->map->sort_key;
> +
> +	field = &elt_a->fields[sort_key->field_idx];
> +	cmp_fn = field->cmp_fn;
> +
> +	val_a = &elt_a->fields[sort_key->field_idx].sum;
> +	val_b = &elt_b->fields[sort_key->field_idx].sum;
> +
> +	ret = cmp_fn(val_a, val_b);
> +	if (sort_key->descending)
> +		ret = -ret;
> +
> +	return ret;
> +}
> +
> +static int cmp_entries_key(const struct tracing_map_sort_entry **a,
> +			   const struct tracing_map_sort_entry **b)
> +{
> +	const struct tracing_map_elt *elt_a, *elt_b;
> +	struct tracing_map_sort_key *sort_key;
> +	struct tracing_map_field *field;
> +	tracing_map_cmp_fn_t cmp_fn;
> +	void *val_a, *val_b;
> +	int ret = 0;
> +
> +	elt_a = (*a)->elt;
> +	elt_b = (*b)->elt;
> +
> +	sort_key = &elt_a->map->sort_key;
> +
> +	field = &elt_a->fields[sort_key->field_idx];
> +
> +	cmp_fn = field->cmp_fn;
> +
> +	val_a = elt_a->key + field->offset;
> +	val_b = elt_b->key + field->offset;
> +
> +	ret = cmp_fn(val_a, val_b);
> +	if (sort_key->descending)
> +		ret = -ret;
> +
> +	return ret;
> +}
> +
> +static void destroy_sort_entry(struct tracing_map_sort_entry *entry)
> +{
> +	if (!entry)
> +		return;
> +
> +	if (entry->elt_copied)
> +		tracing_map_elt_free(entry->elt);
> +
> +	kfree(entry);
> +}
> +
> +/**
> + * tracing_map_destroy_sort_entries - Destroy a tracing_map_sort_entries() array
> + * @entries: The entries to destroy
> + * @n_entries: The number of entries in the array
> + *
> + * Destroy the elements returned by a tracing_map_sort_entries() call.
> + */
> +void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
> +				      unsigned int n_entries)
> +{
> +	unsigned int i;
> +
> +	for (i = 0; i < n_entries; i++)
> +		destroy_sort_entry(entries[i]);
> +}
> +
> +static struct tracing_map_sort_entry *
> +create_sort_entry(void *key, struct tracing_map_elt *elt)
> +{
> +	struct tracing_map_sort_entry *sort_entry;
> +
> +	sort_entry = kzalloc(sizeof(*sort_entry), GFP_KERNEL);
> +	if (!sort_entry)
> +		return NULL;
> +
> +	sort_entry->key = key;
> +	sort_entry->elt = elt;
> +
> +	return sort_entry;
> +}
> +
> +static struct tracing_map_elt *copy_elt(struct tracing_map_elt *elt)
> +{
> +	struct tracing_map_elt *dup_elt;
> +	unsigned int i;
> +
> +	dup_elt = tracing_map_elt_alloc(elt->map);
> +	if (!dup_elt)
> +		return NULL;
> +
> +	if (elt->map->ops && elt->map->ops->elt_copy)
> +		elt->map->ops->elt_copy(dup_elt, elt);
> +
> +	dup_elt->private_data = elt->private_data;
> +	memcpy(dup_elt->key, elt->key, elt->map->key_size);
> +
> +	for (i = 0; i < elt->map->n_fields; i++) {
> +		atomic64_set(&dup_elt->fields[i].sum,
> +			     atomic64_read(&elt->fields[i].sum));
> +		dup_elt->fields[i].cmp_fn = elt->fields[i].cmp_fn;
> +	}
> +
> +	return dup_elt;
> +}
> +
> +static int merge_dup(struct tracing_map_sort_entry **sort_entries,
> +		     unsigned int target, unsigned int dup)
> +{
> +	struct tracing_map_elt *target_elt, *elt;
> +	bool first_dup = (target - dup) == 1;
> +	int i;
> +
> +	if (first_dup) {
> +		elt = sort_entries[target]->elt;
> +		target_elt = copy_elt(elt);
> +		if (!target_elt)
> +			return -ENOMEM;
> +		sort_entries[target]->elt = target_elt;
> +		sort_entries[target]->elt_copied = true;
> +	} else
> +		target_elt = sort_entries[target]->elt;
> +
> +	elt = sort_entries[dup]->elt;
> +
> +	for (i = 0; i < elt->map->n_fields; i++)
> +		atomic64_add(atomic64_read(&elt->fields[i].sum),
> +			     &target_elt->fields[i].sum);
> +
> +	sort_entries[dup]->dup = true;
> +
> +	return 0;
> +}
> +
> +static int merge_dups(struct tracing_map_sort_entry **sort_entries,
> +		      int n_entries, unsigned int key_size)
> +{
> +	unsigned int dups = 0, total_dups = 0;
> +	int err, i, j;
> +	void *key;
> +
> +	if (n_entries < 2)
> +		return total_dups;
> +
> +	sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *),
> +	     (int (*)(const void *, const void *))cmp_entries_dup, NULL);

Sort.

> +
> +	key = sort_entries[0]->key;
> +	for (i = 1; i < n_entries; i++) {
> +		if (!memcmp(sort_entries[i]->key, key, key_size)) {
> +			dups++; total_dups++;
> +			err = merge_dup(sort_entries, i - dups, i);
> +			if (err)
> +				return err;
> +			continue;
> +		}
> +		key = sort_entries[i]->key;
> +		dups = 0;
> +	}
> +
> +	if (!total_dups)
> +		return total_dups;
> +
> +	for (i = 0, j = 0; i < n_entries; i++) {
> +		if (!sort_entries[i]->dup) {
> +			sort_entries[j] = sort_entries[i];
> +			if (j++ != i)
> +				sort_entries[i] = NULL;
> +		} else {
> +			destroy_sort_entry(sort_entries[i]);
> +			sort_entries[i] = NULL;
> +		}
> +	}
> +
> +	return total_dups;
> +}
> +
> +static bool is_key(struct tracing_map *map, unsigned int field_idx)
> +{
> +	unsigned int i;
> +
> +	for (i = 0; i < map->n_keys; i++)
> +		if (map->key_idx[i] == field_idx)
> +			return true;
> +	return false;
> +}
> +
> +static void sort_secondary(struct tracing_map *map,
> +			   const struct tracing_map_sort_entry **entries,
> +			   unsigned int n_entries,
> +			   struct tracing_map_sort_key *primary_key,
> +			   struct tracing_map_sort_key *secondary_key)
> +{
> +	int (*primary_fn)(const struct tracing_map_sort_entry **,
> +			  const struct tracing_map_sort_entry **);
> +	int (*secondary_fn)(const struct tracing_map_sort_entry **,
> +			    const struct tracing_map_sort_entry **);
> +	unsigned i, start = 0, n_sub = 1;
> +
> +	if (is_key(map, primary_key->field_idx))
> +		primary_fn = cmp_entries_key;
> +	else
> +		primary_fn = cmp_entries_sum;
> +
> +	if (is_key(map, secondary_key->field_idx))
> +		secondary_fn = cmp_entries_key;
> +	else
> +		secondary_fn = cmp_entries_sum;
> +
> +	for (i = 0; i < n_entries - 1; i++) {
> +		const struct tracing_map_sort_entry **a = &entries[i];
> +		const struct tracing_map_sort_entry **b = &entries[i + 1];
> +
> +		if (primary_fn(a, b) == 0) {
> +			n_sub++;
> +			if (i < n_entries - 2)
> +				continue;
> +		}
> +
> +		if (n_sub < 2) {
> +			start = i + 1;
> +			n_sub = 1;
> +			continue;
> +		}
> +
> +		set_sort_key(map, secondary_key);
> +		sort(&entries[start], n_sub,
> +		     sizeof(struct tracing_map_sort_entry *),
> +		     (int (*)(const void *, const void *))secondary_fn, NULL);
> +		set_sort_key(map, primary_key);
> +
> +		start = i + 1;
> +		n_sub = 1;
> +	}
> +}
> +
> +/**
> + * tracing_map_sort_entries - Sort the current set of tracing_map_elts in a map
> + * @map: The tracing_map
> + * @sort_keys: The sort keys to use for sorting
> + * @n_sort_keys: The number of sort keys
> + * @sort_entries: outval: pointer to allocated and sorted array of entries
> + *
> + * tracing_map_sort_entries() sorts the current set of entries in the
> + * map and returns the list of tracing_map_sort_entries containing
> + * them to the client in the sort_entries param.
> + *
> + * Each sort_key has only two fields: field_idx and descending.
> + * 'field_idx' refers to the index of the field added via
> + * tracing_map_add_sum_field() or tracing_map_add_key_field() when
> + * the tracing_map was initialized. 'descending' is a flag that, if
> + * set, reverses the sort order, which by default is ascending.
> + *
> + * The client should not hold on to the returned array but use it
> + * and call tracing_map_destroy_sort_entries() when done.
> + *
> + * Return: the number of sort_entries in the tracing_map_sort_entry
> + * array, or a negative error code on failure
> + */
> +int tracing_map_sort_entries(struct tracing_map *map,
> +			     struct tracing_map_sort_key *sort_keys,
> +			     unsigned int n_sort_keys,
> +			     struct tracing_map_sort_entry ***sort_entries)
> +{
> +	int (*cmp_entries_fn)(const struct tracing_map_sort_entry **,
> +			      const struct tracing_map_sort_entry **);
> +	struct tracing_map_sort_entry *sort_entry, **entries;
> +	int i, n_entries, ret;
> +
> +	entries = kcalloc(map->max_elts, sizeof(sort_entry), GFP_KERNEL);
> +	if (!entries)
> +		return -ENOMEM;
> +
> +	for (i = 0, n_entries = 0; i < map->map_size; i++) {
> +		if (!map->map[i].key || !map->map[i].val)
> +			continue;
> +
> +		entries[n_entries] = create_sort_entry(map->map[i].val->key,
> +						       map->map[i].val);
> +		if (!entries[n_entries++]) {
> +			ret = -ENOMEM;
> +			goto free;
> +		}
> +	}
> +
> +	if (n_entries == 0) {
> +		ret = 0;
> +		goto free;
> +	}
> +
> +	if (n_entries == 1) {
> +		*sort_entries = entries;
> +		return 1;
> +	}
> +
> +	ret = merge_dups(entries, n_entries, map->key_size);

So this sorts.

> +	if (ret < 0)
> +		goto free;
> +	n_entries -= ret;
> +
> +	if (is_key(map, sort_keys[0].field_idx))
> +		cmp_entries_fn = cmp_entries_key;
> +	else
> +		cmp_entries_fn = cmp_entries_sum;
> +
> +	set_sort_key(map, &sort_keys[0]);
> +
> +	sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *),
> +	     (int (*)(const void *, const void *))cmp_entries_fn, NULL);

Then this sorts.

Why the double sort? Can't you just sort once, and then remove the dups?
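
To make sure I'm reading this right, here's the two-pass pattern boiled
down to a standalone userspace sketch (hypothetical struct and sample
data on my part, with qsort() standing in for the kernel's sort()). The
first pass exists only to make duplicate keys adjacent so they can be
merged in one linear walk; the second pass orders the merged entries by
the field the user asked for.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for a map entry: a fixed-size key plus a sum. */
struct entry {
	char key[8];
	long sum;
};

/* Pass 1 comparator: raw key only, so duplicate keys end up adjacent. */
static int cmp_key(const void *a, const void *b)
{
	const struct entry *ea = a, *eb = b;

	return memcmp(ea->key, eb->key, sizeof(ea->key));
}

/* Pass 2 comparator: accumulated sum, descending. */
static int cmp_sum_desc(const void *a, const void *b)
{
	long sa = ((const struct entry *)a)->sum;
	long sb = ((const struct entry *)b)->sum;

	return (sa < sb) - (sa > sb);
}

int main(void)
{
	struct entry e[] = {
		{ "read", 3 }, { "write", 5 }, { "read", 2 }, { "open", 1 },
	};
	size_t n = sizeof(e) / sizeof(e[0]), i, out = 0;

	/* First sort: group duplicate keys next to each other. */
	qsort(e, n, sizeof(e[0]), cmp_key);

	/* Linear merge of adjacent duplicates into their first occurrence. */
	for (i = 1; i < n; i++) {
		if (!memcmp(e[i].key, e[out].key, sizeof(e[i].key)))
			e[out].sum += e[i].sum;	/* fold dup into survivor */
		else
			e[++out] = e[i];	/* keep as a new unique entry */
	}
	n = out + 1;

	/* Second sort: order the merged entries for presentation. */
	qsort(e, n, sizeof(e[0]), cmp_sum_desc);

	for (i = 0; i < n; i++)
		printf("%-8s %ld\n", e[i].key, e[i].sum);

	return 0;
}

That prints the three unique keys with their merged sums, largest
first, which is what I understand the code above to be doing.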

-- Steve

> +
> +	if (n_sort_keys > 1)
> +		sort_secondary(map,
> +			       (const struct tracing_map_sort_entry **)entries,
> +			       n_entries,
> +			       &sort_keys[0],
> +			       &sort_keys[1]);
> +
> +	*sort_entries = entries;
> +
> +	return n_entries;
> + free:
> +	tracing_map_destroy_sort_entries(entries, n_entries);
> +
> +	return ret;
> +}
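
For anyone reading the kerneldoc above and wondering how a client would
drive this, here's a rough usage sketch. Only the
tracing_map_sort_entries() / tracing_map_destroy_sort_entries() calls
and the sort_key fields are taken from the patch; the header name, the
field index, the surrounding map setup, and the exact
tracing_map_read_sum() signature are assumptions on my part.

#include <linux/printk.h>
#include "tracing_map.h"	/* assumed header name from this series */

/* Hypothetical dump helper: assumes @map was set up elsewhere with one
 * key field and one sum field, the sum at field index 1.
 */
static void example_dump(struct tracing_map *map)
{
	struct tracing_map_sort_entry **entries;
	struct tracing_map_sort_key sort_key = {
		.field_idx	= 1,	/* assumed index of the sum field */
		.descending	= true,	/* largest sums first */
	};
	int i, n;

	n = tracing_map_sort_entries(map, &sort_key, 1, &entries);
	if (n <= 0)
		return;	/* negative means error, per the kerneldoc */

	for (i = 0; i < n; i++)
		pr_info("entry %d: sum=%llu\n", i, (unsigned long long)
			tracing_map_read_sum(entries[i]->elt, 1));

	/* The array is a snapshot; the caller frees it when done. */
	tracing_map_destroy_sort_entries(entries, n);
}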

