Subject: [PATCH 4.20 61/65] of: __of_detach_node() - remove node from phandle cache
    4.20-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Frank Rowand <frank.rowand@sony.com>

    commit 5801169a2ed20003f771acecf3ac00574cf10a38 upstream.

    Non-overlay dynamic devicetree node removal may leave the node in
    the phandle cache. Subsequent calls to of_find_node_by_phandle()
    will incorrectly find the stale entry. Remove the node from the
    cache.
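
(Not part of the patch, added for illustration.) A minimal user-space sketch of the failure mode, with made-up names (struct node, cache_lookup(), detach_node()) standing in for the kernel's phandle_cache machinery; detaching a node without evicting its cache slot leaves a stale pointer that the next lookup happily returns:

  #include <stdbool.h>
  #include <stdio.h>

  #define CACHE_SIZE 4			/* power of two, mirroring phandle_cache */
  #define CACHE_MASK (CACHE_SIZE - 1)

  struct node {
  	unsigned int phandle;
  	bool detached;
  };

  static struct node *cache[CACHE_SIZE];

  /* pre-patch behaviour: trust the cached entry unconditionally */
  static struct node *cache_lookup(unsigned int handle)
  {
  	struct node *np = cache[handle & CACHE_MASK];

  	if (np && np->phandle == handle)
  		return np;
  	return NULL;
  }

  /* node removal that forgets to flush the cache slot */
  static void detach_node(struct node *np)
  {
  	np->detached = true;
  }

  int main(void)
  {
  	struct node n = { .phandle = 5, .detached = false };

  	cache[n.phandle & CACHE_MASK] = &n;	/* cached by an earlier lookup */
  	detach_node(&n);

  	/* stale hit: the detached node is still returned */
  	printf("lookup(5): %s\n",
  	       cache_lookup(5) ? "stale detached node" : "not found");
  	return 0;
  }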

    Add paranoia checks in of_find_node_by_phandle() as a second level
    of defense (do not return cached node if detached, do not add node
    to cache if detached).
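
(Again illustrative only, extending the toy model above, not the kernel implementation.) The same two-level defense the patch describes: detaching evicts the cache slot, and the lookup path neither returns a cached node that is detached nor hands back a detached node found by the fallback scan:

  /* detach now evicts the cache slot, analogous to __of_free_phandle_cache_entry() */
  static void detach_node_fixed(struct node *np)
  {
  	unsigned int slot = np->phandle & CACHE_MASK;

  	np->detached = true;
  	if (cache[slot] == np)
  		cache[slot] = NULL;
  }

  static struct node *cache_lookup_fixed(unsigned int handle)
  {
  	unsigned int slot = handle & CACHE_MASK;
  	struct node *np = cache[slot];

  	if (np && np->phandle == handle) {
  		/* paranoia check 1: never return a cached-but-detached node */
  		if (!np->detached)
  			return np;
  		cache[slot] = NULL;
  	}

  	/*
  	 * A full implementation would fall back to scanning all nodes here;
  	 * paranoia check 2 is that a detached node found by that scan is
  	 * neither returned nor added back into the cache.
  	 */
  	return NULL;
  }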

    Fixes: 0b3ce78e90fc ("of: cache phandle nodes to reduce cost of of_find_node_by_phandle()")
    Reported-by: Michael Bringmann <mwb@linux.vnet.ibm.com>
    Cc: stable@vger.kernel.org # v4.17+
    Signed-off-by: Frank Rowand <frank.rowand@sony.com>
    Signed-off-by: Rob Herring <robh@kernel.org>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

    ---
 drivers/of/base.c       | 31 ++++++++++++++++++++++++++++++-
 drivers/of/dynamic.c    |  3 +++
 drivers/of/of_private.h |  4 ++++
 3 files changed, 37 insertions(+), 1 deletion(-)

--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -162,6 +162,28 @@ int of_free_phandle_cache(void)
 late_initcall_sync(of_free_phandle_cache);
 #endif
 
+/*
+ * Caller must hold devtree_lock.
+ */
+void __of_free_phandle_cache_entry(phandle handle)
+{
+	phandle masked_handle;
+	struct device_node *np;
+
+	if (!handle)
+		return;
+
+	masked_handle = handle & phandle_cache_mask;
+
+	if (phandle_cache) {
+		np = phandle_cache[masked_handle];
+		if (np && handle == np->phandle) {
+			of_node_put(np);
+			phandle_cache[masked_handle] = NULL;
+		}
+	}
+}
+
 void of_populate_phandle_cache(void)
 {
 	unsigned long flags;
@@ -1209,11 +1231,18 @@ struct device_node *of_find_node_by_phan
 		if (phandle_cache[masked_handle] &&
 		    handle == phandle_cache[masked_handle]->phandle)
 			np = phandle_cache[masked_handle];
+		if (np && of_node_check_flag(np, OF_DETACHED)) {
+			WARN_ON(1); /* did not uncache np on node removal */
+			of_node_put(np);
+			phandle_cache[masked_handle] = NULL;
+			np = NULL;
+		}
 	}
 
 	if (!np) {
 		for_each_of_allnodes(np)
-			if (np->phandle == handle) {
+			if (np->phandle == handle &&
+			    !of_node_check_flag(np, OF_DETACHED)) {
 				if (phandle_cache) {
 					/* will put when removed from cache */
 					of_node_get(np);
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -268,6 +268,9 @@ void __of_detach_node(struct device_node
 	}
 
 	of_node_set_flag(np, OF_DETACHED);
+
+	/* race with of_find_node_by_phandle() prevented by devtree_lock */
+	__of_free_phandle_cache_entry(np->phandle);
 }
 
 /**
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -84,6 +84,10 @@ static inline void __of_detach_node_sysf
 int of_resolve_phandles(struct device_node *tree);
 #endif
 
+#if defined(CONFIG_OF_DYNAMIC)
+void __of_free_phandle_cache_entry(phandle handle);
+#endif
+
 #if defined(CONFIG_OF_OVERLAY)
 void of_overlay_mutex_lock(void);
 void of_overlay_mutex_unlock(void);
