Date: Mon, 12 Aug 2019
From: Michel Lespinasse <walken@google.com>
Subject: Re: [PATCH 0/2] some cleanups related to RB_DECLARE_CALLBACKS_MAX
On Sun, Aug 11, 2019 at 11:46 AM Uladzislau Rezki (Sony) <urezki@gmail.com> wrote:
> Also, I have an open question related to validating the augmented tree,
> i.e. checking, for debugging purposes, that the augmented nodes are
> maintained correctly. Please have a look here:
> https://lkml.org/lkml/2019/7/29/304
>
> Basically, we could add one more function to the RB_DECLARE_CALLBACKS_MAX
> template, made public, that checks a tree and its augmented nodes. I see
> at least two users for it: vmalloc and lib/rbtree_test.c.

I think it would be sufficient to call RBCOMPUTE(node, true) on every
node and check that it returns true?
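
With exit=true, the generated _compute_max helper recomputes the max
from the node's own value and its children's cached values, and returns
true only if the cached value already matches. For rbtree_test, the
expansion looks roughly like this (a paraphrased sketch, not the exact
macro text):

	/* Sketch of what RB_DECLARE_CALLBACKS_MAX generates for
	 * lib/rbtree_test.c (augmented field: u32 augmented). */
	static bool augment_callbacks_compute_max(struct test_node *node,
						  bool exit)
	{
		struct test_node *child;
		u32 max = node->val;	/* the node's own value */

		if (node->rb.rb_left) {
			child = rb_entry(node->rb.rb_left,
					 struct test_node, rb);
			if (child->augmented > max)
				max = child->augmented;
		}
		if (node->rb.rb_right) {
			child = rb_entry(node->rb.rb_right,
					 struct test_node, rb);
			if (child->augmented > max)
				max = child->augmented;
		}
		if (exit && node->augmented == max)
			return true;	/* cached value is consistent */
		node->augmented = max;
		return false;		/* cached value was stale */
	}

So a plain tree walk calling it with exit=true warns exactly when some
node's cached value disagrees with a fresh recomputation.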

Something like the following (probably applicable in other files too):

---------------------------------- 8< ------------------------------------

augmented rbtree: use generated compute_max function for debug checks

In debug code, use the generated compute_max function instead of
reimplementing similar functionality in multiple places.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
lib/rbtree_test.c | 15 +-------------
mm/mmap.c | 26 +++--------------------
mm/vmalloc.c | 53 +++++++----------------------------------------
3 files changed, 12 insertions(+), 82 deletions(-)

diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 41ae3c7570d3..a5a04e820f77 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -222,20 +222,7 @@ static void check_augmented(int nr_nodes)
 	check(nr_nodes);
 	for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
 		struct test_node *node = rb_entry(rb, struct test_node, rb);
-		u32 subtree, max = node->val;
-		if (node->rb.rb_left) {
-			subtree = rb_entry(node->rb.rb_left, struct test_node,
-					   rb)->augmented;
-			if (max < subtree)
-				max = subtree;
-		}
-		if (node->rb.rb_right) {
-			subtree = rb_entry(node->rb.rb_right, struct test_node,
-					   rb)->augmented;
-			if (max < subtree)
-				max = subtree;
-		}
-		WARN_ON_ONCE(node->augmented != max);
+		WARN_ON_ONCE(!augment_callbacks_compute_max(node, true));
 	}
 }
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 24f0772d6afd..d6d23e6c2d10 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -311,24 +311,6 @@ static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_DEBUG_VM_RB
-static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
-{
-	unsigned long max = vma_compute_gap(vma), subtree_gap;
-	if (vma->vm_rb.rb_left) {
-		subtree_gap = rb_entry(vma->vm_rb.rb_left,
-				struct vm_area_struct, vm_rb)->rb_subtree_gap;
-		if (subtree_gap > max)
-			max = subtree_gap;
-	}
-	if (vma->vm_rb.rb_right) {
-		subtree_gap = rb_entry(vma->vm_rb.rb_right,
-				struct vm_area_struct, vm_rb)->rb_subtree_gap;
-		if (subtree_gap > max)
-			max = subtree_gap;
-	}
-	return max;
-}
-
 static int browse_rb(struct mm_struct *mm)
 {
 	struct rb_root *root = &mm->mm_rb;
@@ -355,10 +337,8 @@ static int browse_rb(struct mm_struct *mm)
 			bug = 1;
 		}
 		spin_lock(&mm->page_table_lock);
-		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
-			pr_emerg("free gap %lx, correct %lx\n",
-			       vma->rb_subtree_gap,
-			       vma_compute_subtree_gap(vma));
+		if (!vma_gap_callbacks_compute_max(vma, true)) {
+			pr_emerg("wrong subtree gap in vma %p\n", vma);
 			bug = 1;
 		}
 		spin_unlock(&mm->page_table_lock);
@@ -385,7 +365,7 @@ static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
 		struct vm_area_struct *vma;
 		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
 		VM_BUG_ON_VMA(vma != ignore &&
-			vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
+			!vma_gap_callbacks_compute_max(vma, true),
 			vma);
 	}
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f7c61accb0e2..ea23ccaf70fc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -553,48 +553,6 @@ unlink_va(struct vmap_area *va, struct rb_root *root)
 	RB_CLEAR_NODE(&va->rb_node);
 }
 
-#if DEBUG_AUGMENT_PROPAGATE_CHECK
-static void
-augment_tree_propagate_check(struct rb_node *n)
-{
-	struct vmap_area *va;
-	struct rb_node *node;
-	unsigned long size;
-	bool found = false;
-
-	if (n == NULL)
-		return;
-
-	va = rb_entry(n, struct vmap_area, rb_node);
-	size = va->subtree_max_size;
-	node = n;
-
-	while (node) {
-		va = rb_entry(node, struct vmap_area, rb_node);
-
-		if (get_subtree_max_size(node->rb_left) == size) {
-			node = node->rb_left;
-		} else {
-			if (va_size(va) == size) {
-				found = true;
-				break;
-			}
-
-			node = node->rb_right;
-		}
-	}
-
-	if (!found) {
-		va = rb_entry(n, struct vmap_area, rb_node);
-		pr_emerg("tree is corrupted: %lu, %lu\n",
-			va_size(va), va->subtree_max_size);
-	}
-
-	augment_tree_propagate_check(n->rb_left);
-	augment_tree_propagate_check(n->rb_right);
-}
-#endif
-
 /*
  * This function populates subtree_max_size from bottom to upper
  * levels starting from VA point. The propagation must be done
@@ -645,9 +603,14 @@ augment_tree_propagate_from(struct vmap_area *va)
 		node = rb_parent(&va->rb_node);
 	}
 
-#if DEBUG_AUGMENT_PROPAGATE_CHECK
-	augment_tree_propagate_check(free_vmap_area_root.rb_node);
-#endif
+	if (DEBUG_AUGMENT_PROPAGATE_CHECK) {
+		struct vmap_area *va;
+
+		list_for_each_entry(va, &free_vmap_area_list, list) {
+			WARN_ON(!free_vmap_area_rb_augment_cb_compute_max(
+					va, true));
+		}
+	}
 }
 
 static void
--
2.23.0.rc1.153.gdeed80330f-goog
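
For reference, the _compute_max names used above come from the existing
RB_DECLARE_CALLBACKS_MAX users; mm/mmap.c, for example, declares its
callbacks roughly as follows, which is what generates
vma_gap_callbacks_compute_max (lib/rbtree_test.c and mm/vmalloc.c get
augment_callbacks_compute_max and free_vmap_area_rb_augment_cb_compute_max
the same way):

	/* In mm/mmap.c (existing declaration, shown for reference): */
	RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
				 struct vm_area_struct, vm_rb,
				 unsigned long, rb_subtree_gap,
				 vma_compute_gap)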