Subject: Re: [PATCH v6 4/7] arm: Use common cpu_topology structure and functions.
On 5/29/19 2:15 PM, Atish Patra wrote:
> Currently, ARM32 and ARM64 use different data structures to represent
> their cpu topologies. Since we are moving the ARM64 topology to common
> code to be used by other architectures, we can reuse that for ARM32 as
> well.
>
> Take this opportunity to remove the redundant functions from ARM32 and
> reuse the common code instead.
>
> To: Russell King <linux@armlinux.org.uk>
> Signed-off-by: Atish Patra <atish.patra@wdc.com>
> Tested-by: Sudeep Holla <sudeep.holla@arm.com> (on TC2)
> Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
>
> ---
> Hi Russell,
> Can we get an ACK for this patch? We are hoping that the entire
> series can be merged at one go.
> ---
> arch/arm/include/asm/topology.h | 20 -----------
> arch/arm/kernel/topology.c | 60 ++++-----------------------------
> drivers/base/arch_topology.c | 4 ++-
> include/linux/arch_topology.h | 6 ++--
> 4 files changed, 11 insertions(+), 79 deletions(-)
>
> diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
> index 2a786f54d8b8..8a0fae94d45e 100644
> --- a/arch/arm/include/asm/topology.h
> +++ b/arch/arm/include/asm/topology.h
> @@ -5,26 +5,6 @@
> #ifdef CONFIG_ARM_CPU_TOPOLOGY
>
> #include <linux/cpumask.h>
> -
> -struct cputopo_arm {
> - int thread_id;
> - int core_id;
> - int socket_id;
> - cpumask_t thread_sibling;
> - cpumask_t core_sibling;
> -};
> -
> -extern struct cputopo_arm cpu_topology[NR_CPUS];
> -
> -#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
> -#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
> -#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
> -#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
> -
> -void init_cpu_topology(void);
> -void store_cpu_topology(unsigned int cpuid);
> -const struct cpumask *cpu_coregroup_mask(int cpu);
> -
> #include <linux/arch_topology.h>
>
> /* Replace task scheduler's default frequency-invariant accounting */
> diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
> index 60e375ce1ab2..238f1da0219c 100644
> --- a/arch/arm/kernel/topology.c
> +++ b/arch/arm/kernel/topology.c
> @@ -177,17 +177,6 @@ static inline void parse_dt_topology(void) {}
> static inline void update_cpu_capacity(unsigned int cpuid) {}
> #endif
>
> - /*
> - * cpu topology table
> - */
> -struct cputopo_arm cpu_topology[NR_CPUS];
> -EXPORT_SYMBOL_GPL(cpu_topology);
> -
> -const struct cpumask *cpu_coregroup_mask(int cpu)
> -{
> - return &cpu_topology[cpu].core_sibling;
> -}
> -
> /*
> * The current assumption is that we can power gate each core independently.
> * This will be superseded by DT binding once available.
> @@ -197,32 +186,6 @@ const struct cpumask *cpu_corepower_mask(int cpu)
> return &cpu_topology[cpu].thread_sibling;
> }
>
> -static void update_siblings_masks(unsigned int cpuid)
> -{
> - struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
> - int cpu;
> -
> - /* update core and thread sibling masks */
> - for_each_possible_cpu(cpu) {
> - cpu_topo = &cpu_topology[cpu];
> -
> - if (cpuid_topo->socket_id != cpu_topo->socket_id)
> - continue;
> -
> - cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
> - if (cpu != cpuid)
> - cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
> -
> - if (cpuid_topo->core_id != cpu_topo->core_id)
> - continue;
> -
> - cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
> - if (cpu != cpuid)
> - cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
> - }
> - smp_wmb();
> -}
> -
> /*
> * store_cpu_topology is called at boot when only one cpu is running
> * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
> @@ -230,7 +193,7 @@ static void update_siblings_masks(unsigned int cpuid)
> */
> void store_cpu_topology(unsigned int cpuid)
> {
> - struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
> + struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
> unsigned int mpidr;
>
> /* If the cpu topology has been already set, just return */
> @@ -250,12 +213,12 @@ void store_cpu_topology(unsigned int cpuid)
> /* core performance interdependency */
> cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
> cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
> - cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
> + cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
> } else {
> /* largely independent cores */
> cpuid_topo->thread_id = -1;
> cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
> - cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
> + cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
> }
> } else {
> /*
> @@ -265,7 +228,7 @@ void store_cpu_topology(unsigned int cpuid)
> */
> cpuid_topo->thread_id = -1;
> cpuid_topo->core_id = 0;
> - cpuid_topo->socket_id = -1;
> + cpuid_topo->package_id = -1;
> }
>
> update_siblings_masks(cpuid);
> @@ -275,7 +238,7 @@ void store_cpu_topology(unsigned int cpuid)
> pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
> cpuid, cpu_topology[cpuid].thread_id,
> cpu_topology[cpuid].core_id,
> - cpu_topology[cpuid].socket_id, mpidr);
> + cpu_topology[cpuid].package_id, mpidr);
> }
>
> static inline int cpu_corepower_flags(void)
> @@ -298,18 +261,7 @@ static struct sched_domain_topology_level arm_topology[] = {
> */
> void __init init_cpu_topology(void)
> {
> - unsigned int cpu;
> -
> - /* init core mask and capacity */
> - for_each_possible_cpu(cpu) {
> - struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
> -
> - cpu_topo->thread_id = -1;
> - cpu_topo->core_id = -1;
> - cpu_topo->socket_id = -1;
> - cpumask_clear(&cpu_topo->core_sibling);
> - cpumask_clear(&cpu_topo->thread_sibling);
> - }
> + reset_cpu_topology();
> smp_wmb();
>
> parse_dt_topology();
> diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
> index 5781bb4c457c..797e3cd71bea 100644
> --- a/drivers/base/arch_topology.c
> +++ b/drivers/base/arch_topology.c
> @@ -426,6 +426,7 @@ static int __init parse_dt_topology(void)
> of_node_put(cn);
> return ret;
> }
> +#endif
>
> /*
> * cpu topology table
> @@ -491,7 +492,7 @@ static void clear_cpu_topology(int cpu)
> cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
> }
>
> -static void __init reset_cpu_topology(void)
> +void __init reset_cpu_topology(void)
> {
> unsigned int cpu;
>
> @@ -526,6 +527,7 @@ __weak int __init parse_acpi_topology(void)
> return 0;
> }
>
> +#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
> void __init init_cpu_topology(void)
> {
> reset_cpu_topology();
> diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
> index d4e76e0a283f..d4311127970d 100644
> --- a/include/linux/arch_topology.h
> +++ b/include/linux/arch_topology.h
> @@ -54,11 +54,9 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
> void init_cpu_topology(void);
> void store_cpu_topology(unsigned int cpuid);
> const struct cpumask *cpu_coregroup_mask(int cpu);
> -#endif
> -
> -#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
> void update_siblings_masks(unsigned int cpu);
> -#endif
> void remove_cpu_topology(unsigned int cpuid);
> +void reset_cpu_topology(void);
> +#endif
>
> #endif /* _LINUX_ARCH_TOPOLOGY_H_ */
>
Hi Russell,
Can we get an ACK for ARM if you don't have any objection to the series?
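
For reference, here is a rough sketch (not part of the patch itself) of the
shared layout in include/linux/arch_topology.h that ARM32 switches over to
with this series; the exact field set may differ slightly between trees, and
the llc_* members are simply left unused on DT-only ARM32:

/*
 * Sketch of the common cpu topology structure and the generic accessors
 * that replace the ARM32-private cputopo_arm definitions removed above.
 */
struct cpu_topology {
	int thread_id;
	int core_id;
	int package_id;		/* was socket_id in the ARM32-only struct */
	int llc_id;
	cpumask_t thread_sibling;
	cpumask_t core_sibling;
	cpumask_t llc_sibling;
};

extern struct cpu_topology cpu_topology[NR_CPUS];

#define topology_physical_package_id(cpu)	(cpu_topology[cpu].package_id)
#define topology_core_id(cpu)			(cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu)		(&cpu_topology[cpu].core_sibling)
#define topology_sibling_cpumask(cpu)		(&cpu_topology[cpu].thread_sibling)

The net effect on ARM32 is the socket_id -> package_id rename plus picking up
update_siblings_masks() and reset_cpu_topology() from
drivers/base/arch_topology.c instead of carrying private copies.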

--
Regards,
Atish
