Subject: Re: [PATCH resent] perf/x86/amd/uncore: Fix exception handling in amd_uncore_cpu_up_prepare()
On 25/03/23 16:15, Markus Elfring wrote:
> Date: Fri, 17 Mar 2023 13:13:14 +0100
>
> The label “fail” was used to jump to another pointer check even though
> the implementation of the function “amd_uncore_cpu_up_prepare” had
> already determined that the corresponding variable contained a null
> pointer (because a function call failed in two cases).
>
> 1. Thus return directly when the call of the function “amd_uncore_alloc”
> fails in the first if branch.
>
> 2. Use more appropriate labels instead.
>
> 3. Reorder jump targets at the end.
>
> 4. Delete a redundant check and kfree() call.
>
> 5. Omit an explicit initialisation for the local variable “uncore_llc”.
>
>
> This issue was detected by using the Coccinelle software.
>
> Fixes: 39621c5808f5dda75d03dc4b2d4d2b13a5a1c34b ("perf/x86/amd/uncore: Use dynamic events array")
> Fixes: 503d3291a937b726757c1f7c45fa02389d2f4324 ("perf/x86/amd: Try to fix some mem allocation failure handling")

The commit hash in a Fixes: tag should be only the first 12 characters.
Refer: https://docs.kernel.org/process/submitting-patches.html
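
For example, the first tag would then look like this:

  Fixes: 39621c5808f5 ("perf/x86/amd/uncore: Use dynamic events array")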

But this is not a fix. Redundant calls to kfree() do not break
anything, since kfree(NULL) is a no-op.
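
For reference, a minimal hypothetical sketch (same shape as the current
code, not the uncore driver itself) of that single-label cleanup style;
it relies on the pointers starting out NULL and on kfree(NULL) being a
no-op:

#include <linux/errno.h>
#include <linux/slab.h>

struct foo {
	int *events;
};

static int foo_up_prepare(void)
{
	/* Pointers start out NULL so the cleanup below is safe after any goto. */
	struct foo *a = NULL, *b = NULL;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		goto fail;
	a->events = kcalloc(4, sizeof(*a->events), GFP_KERNEL);
	if (!a->events)
		goto fail;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		goto fail;
	b->events = kcalloc(4, sizeof(*b->events), GFP_KERNEL);
	if (!b->events)
		goto fail;

	return 0;

fail:
	if (a) {
		kfree(a->events);	/* may still be NULL: kfree(NULL) is fine */
		kfree(a);
	}
	if (b) {
		kfree(b->events);
		kfree(b);
	}
	return -ENOMEM;
}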

Also avoid using the term "exception" since, in x86, exceptions are
hardware events. Better to just call it "error handling".

> Signed-off-by: Markus Elfring <elfring@users.sourceforge.net>
> ---
> arch/x86/events/amd/uncore.c | 20 +++++++++-----------
> 1 file changed, 9 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
> index 83f15fe411b3..0a9b5cb97bb4 100644
> --- a/arch/x86/events/amd/uncore.c
> +++ b/arch/x86/events/amd/uncore.c
> @@ -440,13 +440,13 @@ amd_uncore_events_alloc(unsigned int num, unsigned int cpu)
>
> static int amd_uncore_cpu_up_prepare(unsigned int cpu)
> {
> - struct amd_uncore *uncore_nb = NULL, *uncore_llc = NULL;
> + struct amd_uncore *uncore_nb = NULL, *uncore_llc;
>
> if (amd_uncore_nb) {
> *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
> uncore_nb = amd_uncore_alloc(cpu);
> if (!uncore_nb)
> - goto fail;
> + return -ENOMEM;
> uncore_nb->cpu = cpu;
> uncore_nb->num_counters = num_counters_nb;
> uncore_nb->rdpmc_base = RDPMC_BASE_NB;
> @@ -455,7 +455,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
> uncore_nb->pmu = &amd_nb_pmu;
> uncore_nb->events = amd_uncore_events_alloc(num_counters_nb, cpu);
> if (!uncore_nb->events)
> - goto fail;
> + goto free_nb;
> uncore_nb->id = -1;
> *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
> }
> @@ -464,7 +464,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
> *per_cpu_ptr(amd_uncore_llc, cpu) = NULL;
> uncore_llc = amd_uncore_alloc(cpu);
> if (!uncore_llc)
> - goto fail;
> + goto check_uncore_nb;
> uncore_llc->cpu = cpu;
> uncore_llc->num_counters = num_counters_llc;
> uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
> @@ -473,24 +473,22 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
> uncore_llc->pmu = &amd_llc_pmu;
> uncore_llc->events = amd_uncore_events_alloc(num_counters_llc, cpu);
> if (!uncore_llc->events)
> - goto fail;
> + goto free_llc;
> uncore_llc->id = -1;
> *per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
> }
>
> return 0;
>
> -fail:
> +free_llc:
> + kfree(uncore_llc);
> +check_uncore_nb:
> if (uncore_nb) {
> kfree(uncore_nb->events);
> +free_nb:
> kfree(uncore_nb);
> }
>
> - if (uncore_llc) {
> - kfree(uncore_llc->events);
> - kfree(uncore_llc);
> - }
> -
> return -ENOMEM;
> }
>
> --
> 2.40.0
>
