Merge branches 'pm-sleep', 'pm-cpuidle' and 'pm-em'

Merge updates related to system sleep, a cpuidle update and an Energy
Model handling code update for 6.14-rc1:

 - Allow configuring the system suspend-resume (DPM) watchdog to warn
   earlier than panic (Douglas Anderson).

 - Implement devm_device_init_wakeup() helper and introduce a device-
   managed variant of dev_pm_set_wake_irq() (Joe Hattori, Peng Fan).

 - Remove direct inclusions of 'pm_wakeup.h', which should only be
   included via 'device.h' (Wolfram Sang).

 - Clean up two comments in the core system-wide PM code (Rafael
   Wysocki, Randy Dunlap).

 - Add Clearwater Forest processor support to the intel_idle cpuidle
   driver (Artem Bityutskiy).

 - Move sched domains rebuild function from the schedutil cpufreq
   governor to the Energy Model handling code (Rafael Wysocki).

* pm-sleep:
  PM: sleep: wakeirq: Introduce device-managed variant of dev_pm_set_wake_irq()
  PM: sleep: Allow configuring the DPM watchdog to warn earlier than panic
  PM: sleep: convert comment from kernel-doc to plain comment
  PM: wakeup: implement devm_device_init_wakeup() helper
  PM: sleep: sysfs: don't include 'pm_wakeup.h' directly
  PM: sleep: autosleep: don't include 'pm_wakeup.h' directly
  PM: sleep: Update stale comment in device_resume()

* pm-cpuidle:
  intel_idle: add Clearwater Forest SoC support

* pm-em:
  PM: EM: Move sched domains rebuild function from schedutil to EM
This commit is contained in:
Rafael J. Wysocki 2025-01-20 19:14:15 +01:00
commit 1225bb42b8
13 changed files with 117 additions and 38 deletions

View file

@ -496,6 +496,7 @@ struct dpm_watchdog {
struct device *dev;
struct task_struct *tsk;
struct timer_list timer;
bool fatal;
};
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
@ -512,11 +513,23 @@ struct dpm_watchdog {
/*
 * dpm_watchdog_handler - Driver suspend/resume watchdog timer expiry handler.
 * @t: The timer that fired.
 *
 * Two-stage watchdog: on the first (warning) expiry, print a warning with a
 * stack trace of the stuck task and re-arm the timer for the time remaining
 * until the fatal deadline; on the second (fatal) expiry, or immediately when
 * the warning and fatal timeouts are configured equal (wd->fatal set at
 * setup), panic so the failure is captured.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);
	struct timer_list *timer = &wd->timer;
	unsigned int time_left;

	if (wd->fatal) {
		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
		show_stack(wd->tsk, NULL, KERN_EMERG);
		panic("%s %s: unrecoverable failure\n",
		      dev_driver_string(wd->dev), dev_name(wd->dev));
	}

	/* Warning stage: report, then give the device until the panic deadline. */
	time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
	dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
		 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
	show_stack(wd->tsk, NULL, KERN_WARNING);

	wd->fatal = true;
	mod_timer(timer, jiffies + HZ * time_left);
}
/**
@ -530,10 +543,11 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
wd->dev = dev;
wd->tsk = current;
wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
add_timer(timer);
}
@ -914,7 +928,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
if (dev->power.direct_complete) {
/* Match the pm_runtime_disable() in __device_suspend(). */
/* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
}

View file

@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"

View file

@ -103,6 +103,32 @@ void dev_pm_clear_wake_irq(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
/* devm_add_action_or_reset() callback: detach the wake IRQ on driver unbind. */
static void devm_pm_clear_wake_irq(void *dev)
{
	dev_pm_clear_wake_irq(dev);
}
/**
 * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq
 * @dev: Device entry
 * @irq: Device IO interrupt
 *
 * Attach a device IO interrupt as a wake IRQ, same as dev_pm_set_wake_irq(),
 * but the wake IRQ is automatically cleared on driver detach.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int devm_pm_set_wake_irq(struct device *dev, int irq)
{
	int ret;
	ret = dev_pm_set_wake_irq(dev, irq);
	if (ret)
		return ret;
	return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq);
/**
* handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
* @irq: Device specific dedicated wake-up interrupt

View file

@ -1538,7 +1538,7 @@ static int cpufreq_online(unsigned int cpu)
/*
* Register with the energy model before
* sugov_eas_rebuild_sd() is called, which will result
* em_rebuild_sched_domains() is called, which will result
* in rebuilding of the sched domains, which should only be done
* once the energy model is properly initialized for the policy
* first.

View file

@ -1651,6 +1651,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &idle_cpu_snr),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &idle_cpu_grr),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &idle_cpu_srf),
X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &idle_cpu_srf),
{}
};

View file

@ -179,6 +179,7 @@ int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
int em_dev_update_chip_binning(struct device *dev);
int em_update_performance_limits(struct em_perf_domain *pd,
unsigned long freq_min_khz, unsigned long freq_max_khz);
void em_rebuild_sched_domains(void);
/**
* em_pd_get_efficient_state() - Get an efficient performance state from the EM
@ -404,6 +405,7 @@ int em_update_performance_limits(struct em_perf_domain *pd,
{
return -EINVAL;
}
static inline void em_rebuild_sched_domains(void) {}
#endif
#endif

View file

@ -10,6 +10,7 @@ extern int dev_pm_set_wake_irq(struct device *dev, int irq);
extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
extern void dev_pm_clear_wake_irq(struct device *dev);
extern int devm_pm_set_wake_irq(struct device *dev, int irq);
#else /* !CONFIG_PM */
@ -32,5 +33,10 @@ static inline void dev_pm_clear_wake_irq(struct device *dev)
{
}
static inline int devm_pm_set_wake_irq(struct device *dev, int irq)
{
return 0;
}
#endif /* CONFIG_PM */
#endif /* _LINUX_PM_WAKEIRQ_H */

View file

@ -240,4 +240,21 @@ static inline int device_init_wakeup(struct device *dev, bool enable)
return 0;
}
/*
 * Resource-release callback for devm_device_init_wakeup(): disables the
 * wakeup capability that was enabled at probe time.
 *
 * Marked "inline": this is defined in a header, and a plain static
 * function would trigger unused-function warnings in every translation
 * unit that includes the header without calling it.
 */
static inline void device_disable_wakeup(void *dev)
{
	device_init_wakeup(dev, false);
}
/**
 * devm_device_init_wakeup - Resource managed device wakeup initialization.
 * @dev: Device to handle.
 *
 * This function is the devm managed version of device_init_wakeup(dev, true).
 * Wakeup capability is disabled automatically when @dev is released.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static inline int devm_device_init_wakeup(struct device *dev)
{
	device_init_wakeup(dev, true);
	return devm_add_action_or_reset(dev, device_disable_wakeup, dev);
}
#endif /* _LINUX_PM_WAKEUP_H */

View file

@ -257,11 +257,30 @@ config DPM_WATCHDOG
boot session.
config DPM_WATCHDOG_TIMEOUT
int "Watchdog timeout in seconds"
int "Watchdog timeout to panic in seconds"
range 1 120
default 120
depends on DPM_WATCHDOG
config DPM_WATCHDOG_WARNING_TIMEOUT
int "Watchdog timeout to warn in seconds"
range 1 DPM_WATCHDOG_TIMEOUT
default DPM_WATCHDOG_TIMEOUT
depends on DPM_WATCHDOG
help
If the DPM watchdog warning timeout and main timeout are
different then a non-fatal warning (with a stack trace of
the stuck suspend routine) will be printed when the warning
timeout expires. If the suspend routine gets un-stuck
before the main timeout expires then no other action is
taken. If the routine continues to be stuck and the main
timeout expires then an emergency-level message and stack
trace will be printed and the system will panic.
If the warning timeout is equal to the main timeout (the
default) then the warning will never happen and the system
will jump straight to panic when the main timeout expires.
config PM_TRACE
bool
help

View file

@ -9,7 +9,6 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pm_wakeup.h>
#include "power.h"

View file

@ -908,3 +908,20 @@ int em_update_performance_limits(struct em_perf_domain *pd,
return 0;
}
EXPORT_SYMBOL_GPL(em_update_performance_limits);
/* Deferred work callback: rebuild the energy-aware sched domains. */
static void rebuild_sd_workfn(struct work_struct *work)
{
	rebuild_sched_domains_energy();
}
/**
 * em_rebuild_sched_domains - Rebuild the sched domains for energy-aware
 *			      scheduling, asynchronously via a work item.
 */
void em_rebuild_sched_domains(void)
{
	static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
	/*
	 * When called from the cpufreq_register_driver() path, the
	 * cpu_hotplug_lock is already held, so use a work item to
	 * avoid nested locking in rebuild_sched_domains().
	 */
	schedule_work(&rebuild_sd_work);
}

View file

@ -110,7 +110,7 @@ extern int hibernate_preallocate_memory(void);
extern void clear_or_poison_free_pages(void);
/**
/*
* Auxiliary structure used for reading the snapshot image data and
* metadata from and writing them to the list of page backup entries
* (PBEs) which is the main data structure of swsusp.

View file

@ -604,31 +604,6 @@ static const struct kobj_type sugov_tunables_ktype = {
/********************** cpufreq governor interface *********************/
#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
/*
* EAS shouldn't be attempted without sugov, so rebuild the sched_domains
* on governor changes to make sure the scheduler knows about it.
*/
static void sugov_eas_rebuild_sd(void)
{
/*
* When called from the cpufreq_register_driver() path, the
* cpu_hotplug_lock is already held, so use a work item to
* avoid nested locking in rebuild_sched_domains().
*/
schedule_work(&rebuild_sd_work);
}
#else
static inline void sugov_eas_rebuild_sd(void) { };
#endif
struct cpufreq_governor schedutil_gov;
static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
@ -784,7 +759,11 @@ static int sugov_init(struct cpufreq_policy *policy)
goto fail;
out:
sugov_eas_rebuild_sd();
/*
* Schedutil is the preferred governor for EAS, so rebuild sched domains
* on governor changes to make sure the scheduler knows about them.
*/
em_rebuild_sched_domains();
mutex_unlock(&global_tunables_lock);
return 0;
@ -826,7 +805,7 @@ static void sugov_exit(struct cpufreq_policy *policy)
sugov_policy_free(sg_policy);
cpufreq_disable_fast_switch(policy);
sugov_eas_rebuild_sd();
em_rebuild_sched_domains();
}
static int sugov_start(struct cpufreq_policy *policy)