author		Clark Williams <williams@redhat.com>	2019-10-08 13:00:21 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2020-03-28 13:21:08 +0100
commit		fc32150e6f43d6cb93ea75937bb6a88a1764cc37
tree		3b5699e3b9404305753a63106f0743d5f9abb676 /drivers/thermal/intel
parent		51e69e6551a8c6fffe0185ba305bb4e2d7223616
thermal/x86_pkg_temp: Make pkg_temp_lock a raw_spinlock_t
The pkg_temp_lock spinlock is acquired in the thermal vector handler, which
runs in truly atomic context even on PREEMPT_RT kernels.

The critical sections are tiny, so convert it to a raw spinlock.
Signed-off-by: Clark Williams <williams@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20191008110021.2j44ayunal7fkb7i@linutronix.de
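
Background note (not part of the commit message): on PREEMPT_RT a plain
spinlock_t is substituted by a sleeping rt_mutex, which must not be taken from
hard interrupt context such as the thermal vector handler, while a
raw_spinlock_t keeps the classic non-sleeping spinning behaviour. The
following is only a minimal sketch of the resulting pattern; the identifiers
(example_lock, example_counter, example_notify) are illustrative and not taken
from the driver.

/*
 * Sketch of the locking pattern the patch converts to: a lock taken from a
 * context that stays atomic on PREEMPT_RT must be a raw_spinlock_t.
 * Illustrative names only; not part of x86_pkg_temp_thermal.c.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* stays a real spinning lock on RT */
static unsigned long example_counter;

/* Called from the interrupt path: truly atomic, may not sleep */
static int example_notify(u64 msr_val)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	example_counter++;			/* tiny critical section */
	raw_spin_unlock_irqrestore(&example_lock, flags);

	return 0;
}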
Diffstat (limited to 'drivers/thermal/intel')
-rw-r--r--	drivers/thermal/intel/x86_pkg_temp_thermal.c	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index ddb4a973c698..e18758a8e911 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -63,7 +63,7 @@ static int max_id __read_mostly;
 /* Array of zone pointers */
 static struct zone_device **zones;
 /* Serializes interrupt notification, work and hotplug */
-static DEFINE_SPINLOCK(pkg_temp_lock);
+static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
 /* Protects zone operation in the work function against hotplug removal */
 static DEFINE_MUTEX(thermal_zone_mutex);
 
@@ -266,12 +266,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 	u64 msr_val, wr_val;
 
 	mutex_lock(&thermal_zone_mutex);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	++pkg_work_cnt;
 
 	zonedev = pkg_temp_thermal_get_dev(cpu);
 	if (!zonedev) {
-		spin_unlock_irq(&pkg_temp_lock);
+		raw_spin_unlock_irq(&pkg_temp_lock);
 		mutex_unlock(&thermal_zone_mutex);
 		return;
 	}
@@ -285,7 +285,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 	}
 
 	enable_pkg_thres_interrupt();
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/*
 	 * If tzone is not NULL, then thermal_zone_mutex will prevent the
@@ -310,7 +310,7 @@ static int pkg_thermal_notify(u64 msr_val)
 	struct zone_device *zonedev;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pkg_temp_lock, flags);
+	raw_spin_lock_irqsave(&pkg_temp_lock, flags);
 	++pkg_interrupt_cnt;
 
 	disable_pkg_thres_interrupt();
@@ -322,7 +322,7 @@ static int pkg_thermal_notify(u64 msr_val)
 		pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
 	}
 
-	spin_unlock_irqrestore(&pkg_temp_lock, flags);
+	raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
 	return 0;
 }
 
@@ -368,9 +368,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
 			zonedev->msr_pkg_therm_high);
 
 	cpumask_set_cpu(cpu, &zonedev->cpumask);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	zones[id] = zonedev;
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 	return 0;
 }
 
@@ -407,7 +407,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 	}
 
 	/* Protect against work and interrupts */
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 
 	/*
 	 * Check whether this cpu was the current target and store the new
@@ -439,9 +439,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 		 * To cancel the work we need to drop the lock, otherwise
 		 * we might deadlock if the work needs to be flushed.
 		 */
-		spin_unlock_irq(&pkg_temp_lock);
+		raw_spin_unlock_irq(&pkg_temp_lock);
 		cancel_delayed_work_sync(&zonedev->work);
-		spin_lock_irq(&pkg_temp_lock);
+		raw_spin_lock_irq(&pkg_temp_lock);
 		/*
 		 * If this is not the last cpu in the package and the work
 		 * did not run after we dropped the lock above, then we
@@ -452,7 +452,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 		pkg_thermal_schedule_work(target, &zonedev->work);
 	}
 
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/* Final cleanup if this is the last cpu */
 	if (lastcpu)