From: Clark Williams <williams@redhat.com>
Subject: [PATCH] x86_pkg_temp_thermal: make pkg_temp_lock a raw spinlock
Date: Mon, 15 Jul 2019
The spinlock pkg_temp_lock can be taken in atomic context on
v5.2-rt PREEMPT_RT, where a regular spinlock_t becomes a sleeping
lock and must not be used there. Since pkg_temp_lock is static and
of limited scope, make it a raw spinlock.

Signed-off-by: Clark Williams <williams@redhat.com>
---
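[ Note, not part of the changelog: on PREEMPT_RT a spinlock_t is
  substituted by a sleeping rt_mutex-based lock, so it cannot be taken
  in hard interrupt or other atomic context; raw_spinlock_t keeps the
  classic non-sleeping spinning semantics. A minimal sketch of the
  pattern this patch applies, with hypothetical names (demo_lock,
  demo_cnt, demo_event), not code from this driver:

    #include <linux/spinlock.h>

    /* A raw spinlock stays a real spinning lock even on PREEMPT_RT. */
    static DEFINE_RAW_SPINLOCK(demo_lock);
    static unsigned long demo_cnt;

    /* May be called from hard interrupt / atomic context. */
    static void demo_event(void)
    {
            unsigned long flags;

            /* raw_spin_lock_irqsave() never sleeps, so taking it
             * here is safe on RT, unlike spin_lock_irqsave() on a
             * plain spinlock_t. */
            raw_spin_lock_irqsave(&demo_lock, flags);
            demo_cnt++;
            raw_spin_unlock_irqrestore(&demo_lock, flags);
    }

  The trade-off is that raw spinlock critical sections add directly to
  RT latency, which is acceptable here because the sections are short. ]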
drivers/thermal/intel/x86_pkg_temp_thermal.c | 24 ++++++++++----------
1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 319b77126168..92ceed3de6f3 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -63,7 +63,7 @@ static int max_packages __read_mostly;
/* Array of package pointers */
static struct pkg_device **packages;
/* Serializes interrupt notification, work and hotplug */
-static DEFINE_SPINLOCK(pkg_temp_lock);
+static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
/* Protects zone operation in the work function against hotplug removal */
static DEFINE_MUTEX(thermal_zone_mutex);

@@ -279,12 +279,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
u64 msr_val, wr_val;

mutex_lock(&thermal_zone_mutex);
- spin_lock_irq(&pkg_temp_lock);
+ raw_spin_lock_irq(&pkg_temp_lock);
++pkg_work_cnt;

pkgdev = pkg_temp_thermal_get_dev(cpu);
if (!pkgdev) {
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
mutex_unlock(&thermal_zone_mutex);
return;
}
@@ -298,7 +298,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
}

enable_pkg_thres_interrupt();
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);

/*
* If tzone is not NULL, then thermal_zone_mutex will prevent the
@@ -323,7 +323,7 @@ static int pkg_thermal_notify(u64 msr_val)
struct pkg_device *pkgdev;
unsigned long flags;

- spin_lock_irqsave(&pkg_temp_lock, flags);
+ raw_spin_lock_irqsave(&pkg_temp_lock, flags);
++pkg_interrupt_cnt;

disable_pkg_thres_interrupt();
@@ -335,7 +335,7 @@ static int pkg_thermal_notify(u64 msr_val)
pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work);
}

- spin_unlock_irqrestore(&pkg_temp_lock, flags);
+ raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
return 0;
}

@@ -381,9 +381,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
pkgdev->msr_pkg_therm_high);

cpumask_set_cpu(cpu, &pkgdev->cpumask);
- spin_lock_irq(&pkg_temp_lock);
+ raw_spin_lock_irq(&pkg_temp_lock);
packages[pkgid] = pkgdev;
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
return 0;
}

@@ -420,7 +420,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
}

/* Protect against work and interrupts */
- spin_lock_irq(&pkg_temp_lock);
+ raw_spin_lock_irq(&pkg_temp_lock);

/*
* Check whether this cpu was the current target and store the new
@@ -452,9 +452,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
* To cancel the work we need to drop the lock, otherwise
* we might deadlock if the work needs to be flushed.
*/
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
cancel_delayed_work_sync(&pkgdev->work);
- spin_lock_irq(&pkg_temp_lock);
+ raw_spin_lock_irq(&pkg_temp_lock);
/*
* If this is not the last cpu in the package and the work
* did not run after we dropped the lock above, then we
@@ -465,7 +465,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
pkg_thermal_schedule_work(target, &pkgdev->work);
}

- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);

/* Final cleanup if this is the last cpu */
if (lastcpu)
--
2.21.0