mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:24:45 +01:00
Merge branch 'pm-sleep'
Merge updates related to system suspend and hibernation for 6.20-rc1/7.0-rc1: - Stop flagging the PM runtime workqueue as freezable to avoid system suspend and resume deadlocks in subsystems that assume asynchronous runtime PM to work during system-wide PM transitions (Rafael Wysocki) - Drop redundant NULL pointer checks before acomp_request_free() from the hibernation code handling image saving (Rafael Wysocki) - Update wakeup_sources_walk_start() to handle empty lists of wakeup sources as appropriate (Samuel Wu) - Make dev_pm_clear_wake_irq() check the power.wakeirq value under power.lock to avoid race conditions (Gui-Dong Han) - Avoid bit field races related to power.work_in_progress in the core device suspend code (Xuewen Yan) * pm-sleep: PM: sleep: core: Avoid bit field races related to work_in_progress PM: sleep: wakeirq: harden dev_pm_clear_wake_irq() against races PM: wakeup: Handle empty list in wakeup_sources_walk_start() PM: hibernate: Drop NULL pointer checks before acomp_request_free() PM: sleep: Do not flag runtime PM workqueue as freezable
This commit is contained in:
commit
c233403593
7 changed files with 21 additions and 20 deletions
|
|
@@ -712,10 +712,9 @@ out the following operations:
|
|||
* During system suspend pm_runtime_get_noresume() is called for every device
|
||||
right before executing the subsystem-level .prepare() callback for it and
|
||||
pm_runtime_barrier() is called for every device right before executing the
|
||||
subsystem-level .suspend() callback for it. In addition to that the PM core
|
||||
calls __pm_runtime_disable() with 'false' as the second argument for every
|
||||
device right before executing the subsystem-level .suspend_late() callback
|
||||
for it.
|
||||
subsystem-level .suspend() callback for it. In addition to that, the PM
|
||||
core disables runtime PM for every device right before executing the
|
||||
subsystem-level .suspend_late() callback for it.
|
||||
|
||||
* During system resume pm_runtime_enable() and pm_runtime_put() are called for
|
||||
every device right after executing the subsystem-level .resume_early()
|
||||
|
|
|
|||
|
|
@@ -1647,10 +1647,11 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
|
|||
goto Complete;
|
||||
|
||||
/*
|
||||
* Disable runtime PM for the device without checking if there is a
|
||||
* pending resume request for it.
|
||||
* After this point, any runtime PM operations targeting the device
|
||||
* will fail until the corresponding pm_runtime_enable() call in
|
||||
* device_resume_early().
|
||||
*/
|
||||
__pm_runtime_disable(dev, false);
|
||||
pm_runtime_disable(dev);
|
||||
|
||||
if (dev->power.syscore)
|
||||
goto Skip;
|
||||
|
|
|
|||
|
|
@@ -83,13 +83,16 @@ EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
|
|||
*/
|
||||
void dev_pm_clear_wake_irq(struct device *dev)
|
||||
{
|
||||
struct wake_irq *wirq = dev->power.wakeirq;
|
||||
struct wake_irq *wirq;
|
||||
unsigned long flags;
|
||||
|
||||
if (!wirq)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&dev->power.lock, flags);
|
||||
wirq = dev->power.wakeirq;
|
||||
if (!wirq) {
|
||||
spin_unlock_irqrestore(&dev->power.lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
device_wakeup_detach_irq(dev);
|
||||
dev->power.wakeirq = NULL;
|
||||
spin_unlock_irqrestore(&dev->power.lock, flags);
|
||||
|
|
|
|||
|
|
@@ -275,9 +275,7 @@ EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
|
|||
*/
|
||||
struct wakeup_source *wakeup_sources_walk_start(void)
|
||||
{
|
||||
struct list_head *ws_head = &wakeup_sources;
|
||||
|
||||
return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
|
||||
return list_first_or_null_rcu(&wakeup_sources, struct wakeup_source, entry);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
|
||||
|
||||
|
|
|
|||
|
|
@@ -681,10 +681,10 @@ struct dev_pm_info {
|
|||
struct list_head entry;
|
||||
struct completion completion;
|
||||
struct wakeup_source *wakeup;
|
||||
bool work_in_progress; /* Owned by the PM core */
|
||||
bool wakeup_path:1;
|
||||
bool syscore:1;
|
||||
bool no_pm_callbacks:1; /* Owned by the PM core */
|
||||
bool work_in_progress:1; /* Owned by the PM core */
|
||||
bool smart_suspend:1; /* Owned by the PM core */
|
||||
bool must_resume:1; /* Owned by the PM core */
|
||||
bool may_skip_resume:1; /* Set by subsystems */
|
||||
|
|
|
|||
|
|
@@ -1125,7 +1125,7 @@ EXPORT_SYMBOL_GPL(pm_wq);
|
|||
|
||||
static int __init pm_start_workqueues(void)
|
||||
{
|
||||
pm_wq = alloc_workqueue("pm", WQ_FREEZABLE | WQ_UNBOUND, 0);
|
||||
pm_wq = alloc_workqueue("pm", WQ_UNBOUND, 0);
|
||||
if (!pm_wq)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
|||
|
|
@@ -902,8 +902,8 @@ out_clean:
|
|||
for (thr = 0; thr < nr_threads; thr++) {
|
||||
if (data[thr].thr)
|
||||
kthread_stop(data[thr].thr);
|
||||
if (data[thr].cr)
|
||||
acomp_request_free(data[thr].cr);
|
||||
|
||||
acomp_request_free(data[thr].cr);
|
||||
|
||||
if (!IS_ERR_OR_NULL(data[thr].cc))
|
||||
crypto_free_acomp(data[thr].cc);
|
||||
|
|
@@ -1502,8 +1502,8 @@ out_clean:
|
|||
for (thr = 0; thr < nr_threads; thr++) {
|
||||
if (data[thr].thr)
|
||||
kthread_stop(data[thr].thr);
|
||||
if (data[thr].cr)
|
||||
acomp_request_free(data[thr].cr);
|
||||
|
||||
acomp_request_free(data[thr].cr);
|
||||
|
||||
if (!IS_ERR_OR_NULL(data[thr].cc))
|
||||
crypto_free_acomp(data[thr].cc);
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue