Subject: [PATCH AUTOSEL 5.4 159/330] ARM: OMAP2+: Handle errors for cpu_pm
From: Tony Lindgren <tony@atomide.com>

[ Upstream commit 55be2f50336f67800513b46c5ba6270e4ed0e784 ]

We need to check for errors when calling cpu_pm_enter() and
cpu_cluster_pm_enter(). And we need to bail out on errors as
otherwise we can enter a deeper idle state when not desired.

I'm not aware of the lack of error handling causing issues yet,
but we need this at least for blocking deeper idle states when
a GPIO instance has pending interrupts.
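For reference, a minimal standalone sketch (not part of the patch; the function
name and flag are illustrative only) of the enter/exit pattern the hunks below
implement: bail out to a shallower idle path when a CPU_PM notifier returns an
error, and unwind in reverse order on the way out.

#include <linux/cpu_pm.h>
#include <linux/types.h>

/* Illustrative sketch only: mirrors the error handling added below. */
static void idle_enter_sketch(bool cluster_can_lose_context)
{
	int error;

	error = cpu_pm_enter();			/* save VFP/per-CPU IRQ context */
	if (error)
		return;				/* a notifier refused: skip deep idle */

	if (cluster_can_lose_context) {
		error = cpu_cluster_pm_enter();	/* save GIC/wakeupgen context */
		if (error)
			goto cpu_pm_out;	/* unwind only what succeeded */
	}

	/* ... enter the low-power state here ... */

	if (cluster_can_lose_context)
		cpu_cluster_pm_exit();		/* restore cluster context first */
cpu_pm_out:
	cpu_pm_exit();				/* then restore per-CPU context */
}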

Cc: Dave Gerlach <d-gerlach@ti.com>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Cc: Keerthy <j-keerthy@ti.com>
Cc: Ladislav Michl <ladis@linux-mips.org>
Cc: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Tero Kristo <t-kristo@ti.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
Link: https://lore.kernel.org/r/20200304225433.37336-2-tony@atomide.com
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
arch/arm/mach-omap2/cpuidle34xx.c | 9 +++++++--
arch/arm/mach-omap2/cpuidle44xx.c | 26 +++++++++++++++++---------
arch/arm/mach-omap2/pm34xx.c | 8 ++++++--
3 files changed, 30 insertions(+), 13 deletions(-)

diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 532a3e4b98c6f..090a8aafb25e1 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -109,6 +109,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
int index)
{
struct omap3_idle_statedata *cx = &omap3_idle_data[index];
+ int error;

if (omap_irq_pending() || need_resched())
goto return_sleep_time;
@@ -125,8 +126,11 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
* Call idle CPU PM enter notifier chain so that
* VFP context is saved.
*/
- if (cx->mpu_state == PWRDM_POWER_OFF)
- cpu_pm_enter();
+ if (cx->mpu_state == PWRDM_POWER_OFF) {
+ error = cpu_pm_enter();
+ if (error)
+ goto out_clkdm_set;
+ }

/* Execute ARM wfi */
omap_sram_idle();
@@ -139,6 +143,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
cpu_pm_exit();

+out_clkdm_set:
/* Re-allow idle for C1 */
if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index fe75d4fa60738..6f5f89711f256 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -122,6 +122,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
{
struct idle_statedata *cx = state_ptr + index;
u32 mpuss_can_lose_context = 0;
+ int error;

/*
* CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -159,7 +160,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
* Call idle CPU PM enter notifier chain so that
* VFP and per CPU interrupt context is saved.
*/
- cpu_pm_enter();
+ error = cpu_pm_enter();
+ if (error)
+ goto cpu_pm_out;

if (dev->cpu == 0) {
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
@@ -169,13 +172,17 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
* Call idle CPU cluster PM enter notifier chain
* to save GIC and wakeupgen context.
*/
- if (mpuss_can_lose_context)
- cpu_cluster_pm_enter();
+ if (mpuss_can_lose_context) {
+ error = cpu_cluster_pm_enter();
+ if (error)
+ goto cpu_cluster_pm_out;
+ }
}

omap4_enter_lowpower(dev->cpu, cx->cpu_state);
cpu_done[dev->cpu] = true;

+cpu_cluster_pm_out:
/* Wakeup CPU1 only if it is not offlined */
if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

@@ -197,12 +204,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
}
}

- /*
- * Call idle CPU PM exit notifier chain to restore
- * VFP and per CPU IRQ context.
- */
- cpu_pm_exit();
-
/*
* Call idle CPU cluster PM exit notifier chain
* to restore GIC and wakeupgen context.
@@ -210,6 +211,13 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
if (dev->cpu == 0 && mpuss_can_lose_context)
cpu_cluster_pm_exit();

+ /*
+ * Call idle CPU PM exit notifier chain to restore
+ * VFP and per CPU IRQ context.
+ */
+ cpu_pm_exit();
+
+cpu_pm_out:
tick_broadcast_exit();

fail:
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 54254fc92c2ed..fa66534a7ae22 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -194,6 +194,7 @@ void omap_sram_idle(void)
int per_next_state = PWRDM_POWER_ON;
int core_next_state = PWRDM_POWER_ON;
u32 sdrc_pwr = 0;
+ int error;

mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
switch (mpu_next_state) {
@@ -222,8 +223,11 @@ void omap_sram_idle(void)
pwrdm_pre_transition(NULL);

/* PER */
- if (per_next_state == PWRDM_POWER_OFF)
- cpu_cluster_pm_enter();
+ if (per_next_state == PWRDM_POWER_OFF) {
+ error = cpu_cluster_pm_enter();
+ if (error)
+ return;
+ }

/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
--
2.25.1