1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
20 #define pr_fmt(fmt) "PM: " fmt
21
22 #include <linux/device.h>
23 #include <linux/export.h>
24 #include <linux/mutex.h>
25 #include <linux/pm.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/pm-trace.h>
28 #include <linux/pm_wakeirq.h>
29 #include <linux/interrupt.h>
30 #include <linux/sched.h>
31 #include <linux/sched/debug.h>
32 #include <linux/async.h>
33 #include <linux/suspend.h>
34 #include <trace/events/power.h>
35 #include <linux/cpufreq.h>
36 #include <linux/cpuidle.h>
37 #include <linux/devfreq.h>
38 #include <linux/timer.h>
39
40 #include "../base.h"
41 #include "power.h"
42
43 typedef int (*pm_callback_t)(struct device *);
44
45 /*
46  * The entries in dpm_list are in depth-first order, simply
47  * because children are guaranteed to be discovered after parents, and
48  * are inserted at the back of the list on discovery.
49  *
50  * Since device_pm_add() may be called with a device lock held,
51  * we must never try to acquire a device lock while holding
52  * dpm_list_mutex.
53  */
54
55 LIST_HEAD(dpm_list);
56 static LIST_HEAD(dpm_prepared_list);
57 static LIST_HEAD(dpm_suspended_list);
58 static LIST_HEAD(dpm_late_early_list);
59 static LIST_HEAD(dpm_noirq_list);
60
61 struct suspend_stats suspend_stats;
62 static DEFINE_MUTEX(dpm_list_mtx);
63 static pm_message_t pm_transition;
64
65 static int async_error;
66
67 static const char *pm_verb(int event)
68 {
69         switch (event) {
70         case PM_EVENT_SUSPEND:
71                 return "suspend";
72         case PM_EVENT_RESUME:
73                 return "resume";
74         case PM_EVENT_FREEZE:
75                 return "freeze";
76         case PM_EVENT_QUIESCE:
77                 return "quiesce";
78         case PM_EVENT_HIBERNATE:
79                 return "hibernate";
80         case PM_EVENT_THAW:
81                 return "thaw";
82         case PM_EVENT_RESTORE:
83                 return "restore";
84         case PM_EVENT_RECOVER:
85                 return "recover";
86         default:
87                 return "(unknown PM event)";
88         }
89 }
90
91 /**
92  * device_pm_sleep_init - Initialize system suspend-related device fields.
93  * @dev: Device object being initialized.
94  */
95 void device_pm_sleep_init(struct device *dev)
96 {
97         dev->power.is_prepared = false;
98         dev->power.is_suspended = false;
99         dev->power.is_noirq_suspended = false;
100         dev->power.is_late_suspended = false;
101         init_completion(&dev->power.completion);
102         complete_all(&dev->power.completion);
103         dev->power.wakeup = NULL;
104         INIT_LIST_HEAD(&dev->power.entry);
105 }
106
107 /**
108  * device_pm_lock - Lock the list of active devices used by the PM core.
109  */
110 void device_pm_lock(void)
111 {
112         mutex_lock(&dpm_list_mtx);
113 }
114
115 /**
116  * device_pm_unlock - Unlock the list of active devices used by the PM core.
117  */
118 void device_pm_unlock(void)
119 {
120         mutex_unlock(&dpm_list_mtx);
121 }
122
123 /**
124  * device_pm_add - Add a device to the PM core's list of active devices.
125  * @dev: Device to add to the list.
126  */
127 void device_pm_add(struct device *dev)
128 {
129         /* Skip PM setup/initialization. */
130         if (device_pm_not_required(dev))
131                 return;
132
133         pr_debug("Adding info for %s:%s\n",
134                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
135         device_pm_check_callbacks(dev);
136         mutex_lock(&dpm_list_mtx);
137         if (dev->parent && dev->parent->power.is_prepared)
138                 dev_warn(dev, "parent %s should not be sleeping\n",
139                         dev_name(dev->parent));
140         list_add_tail(&dev->power.entry, &dpm_list);
141         dev->power.in_dpm_list = true;
142         mutex_unlock(&dpm_list_mtx);
143 }
144
145 /**
146  * device_pm_remove - Remove a device from the PM core's list of active devices.
147  * @dev: Device to be removed from the list.
148  */
149 void device_pm_remove(struct device *dev)
150 {
151         if (device_pm_not_required(dev))
152                 return;
153
154         pr_debug("Removing info for %s:%s\n",
155                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
156         complete_all(&dev->power.completion);
157         mutex_lock(&dpm_list_mtx);
158         list_del_init(&dev->power.entry);
159         dev->power.in_dpm_list = false;
160         mutex_unlock(&dpm_list_mtx);
161         device_wakeup_disable(dev);
162         pm_runtime_remove(dev);
163         device_pm_check_callbacks(dev);
164 }
165
166 /**
167  * device_pm_move_before - Move device in the PM core's list of active devices.
168  * @deva: Device to move in dpm_list.
169  * @devb: Device @deva should come before.
170  */
171 void device_pm_move_before(struct device *deva, struct device *devb)
172 {
173         pr_debug("Moving %s:%s before %s:%s\n",
174                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
175                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
176         /* Delete deva from dpm_list and reinsert before devb. */
177         list_move_tail(&deva->power.entry, &devb->power.entry);
178 }
179
180 /**
181  * device_pm_move_after - Move device in the PM core's list of active devices.
182  * @deva: Device to move in dpm_list.
183  * @devb: Device @deva should come after.
184  */
185 void device_pm_move_after(struct device *deva, struct device *devb)
186 {
187         pr_debug("Moving %s:%s after %s:%s\n",
188                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
189                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
190         /* Delete deva from dpm_list and reinsert after devb. */
191         list_move(&deva->power.entry, &devb->power.entry);
192 }
193
194 /**
195  * device_pm_move_last - Move device to end of the PM core's list of devices.
196  * @dev: Device to move in dpm_list.
197  */
198 void device_pm_move_last(struct device *dev)
199 {
200         pr_debug("Moving %s:%s to end of list\n",
201                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
202         list_move_tail(&dev->power.entry, &dpm_list);
203 }
204
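/*
 * When pm_print_times is enabled, log every device PM callback invocation
 * together with the time it took to complete.
 */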
205 static ktime_t initcall_debug_start(struct device *dev, void *cb)
206 {
207         if (!pm_print_times_enabled)
208                 return 0;
209
210         dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
211                  task_pid_nr(current),
212                  dev->parent ? dev_name(dev->parent) : "none");
213         return ktime_get();
214 }
215
216 static void initcall_debug_report(struct device *dev, ktime_t calltime,
217                                   void *cb, int error)
218 {
219         ktime_t rettime;
220         s64 nsecs;
221
222         if (!pm_print_times_enabled)
223                 return;
224
225         rettime = ktime_get();
226         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
227
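        /* nsecs >> 10 approximates microseconds (divide by 1024, not 1000). */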
228         dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
229                  (unsigned long long)nsecs >> 10);
230 }
231
232 /**
233  * dpm_wait - Wait for a PM operation to complete.
234  * @dev: Device to wait for.
235  * @async: If unset, wait only if the device's power.async_suspend flag is set.
236  */
237 static void dpm_wait(struct device *dev, bool async)
238 {
239         if (!dev)
240                 return;
241
242         if (async || (pm_async_enabled && dev->power.async_suspend))
243                 wait_for_completion(&dev->power.completion);
244 }
245
246 static int dpm_wait_fn(struct device *dev, void *async_ptr)
247 {
248         dpm_wait(dev, *((bool *)async_ptr));
249         return 0;
250 }
251
252 static void dpm_wait_for_children(struct device *dev, bool async)
253 {
254         device_for_each_child(dev, &async, dpm_wait_fn);
255 }
256
257 static void dpm_wait_for_suppliers(struct device *dev, bool async)
258 {
259         struct device_link *link;
260         int idx;
261
262         idx = device_links_read_lock();
263
264         /*
265          * If the supplier goes away right after we've checked the link to it,
266          * we'll wait for its completion to change the state, but that's fine,
267          * because the only things that will block as a result are the SRCU
268          * callbacks freeing the link objects for the links in the list we're
269          * walking.
270          */
271         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
272                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
273                         dpm_wait(link->supplier, async);
274
275         device_links_read_unlock(idx);
276 }
277
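/* Wait for the device's parent and all of its suppliers to complete. */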
278 static void dpm_wait_for_superior(struct device *dev, bool async)
279 {
280         dpm_wait(dev->parent, async);
281         dpm_wait_for_suppliers(dev, async);
282 }
283
284 static void dpm_wait_for_consumers(struct device *dev, bool async)
285 {
286         struct device_link *link;
287         int idx;
288
289         idx = device_links_read_lock();
290
291         /*
292          * The status of a device link can only be changed from "dormant" by a
293          * probe, but that cannot happen during system suspend/resume.  In
294          * theory it can change to "dormant" at that time, but then it is
295          * reasonable to wait for the target device anyway (e.g. if it goes
296          * away, it's better to wait for it to go away completely and then
297          * continue instead of trying to continue in parallel with its
298          * unregistration).
299          */
300         list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
301                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
302                         dpm_wait(link->consumer, async);
303
304         device_links_read_unlock(idx);
305 }
306
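/* Wait for the device's children and all of its consumers to complete. */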
307 static void dpm_wait_for_subordinate(struct device *dev, bool async)
308 {
309         dpm_wait_for_children(dev, async);
310         dpm_wait_for_consumers(dev, async);
311 }
312
313 /**
314  * pm_op - Return the PM operation appropriate for given PM event.
315  * @ops: PM operations to choose from.
316  * @state: PM transition of the system being carried out.
317  */
318 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
319 {
320         switch (state.event) {
321 #ifdef CONFIG_SUSPEND
322         case PM_EVENT_SUSPEND:
323                 return ops->suspend;
324         case PM_EVENT_RESUME:
325                 return ops->resume;
326 #endif /* CONFIG_SUSPEND */
327 #ifdef CONFIG_HIBERNATE_CALLBACKS
328         case PM_EVENT_FREEZE:
329         case PM_EVENT_QUIESCE:
330                 return ops->freeze;
331         case PM_EVENT_HIBERNATE:
332                 return ops->poweroff;
333         case PM_EVENT_THAW:
334         case PM_EVENT_RECOVER:
335                 return ops->thaw;
337         case PM_EVENT_RESTORE:
338                 return ops->restore;
339 #endif /* CONFIG_HIBERNATE_CALLBACKS */
340         }
341
342         return NULL;
343 }
344
345 /**
346  * pm_late_early_op - Return the PM operation appropriate for given PM event.
347  * @ops: PM operations to choose from.
348  * @state: PM transition of the system being carried out.
349  *
350  * Runtime PM is disabled for the device while the returned callback runs.
351  */
352 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
353                                       pm_message_t state)
354 {
355         switch (state.event) {
356 #ifdef CONFIG_SUSPEND
357         case PM_EVENT_SUSPEND:
358                 return ops->suspend_late;
359         case PM_EVENT_RESUME:
360                 return ops->resume_early;
361 #endif /* CONFIG_SUSPEND */
362 #ifdef CONFIG_HIBERNATE_CALLBACKS
363         case PM_EVENT_FREEZE:
364         case PM_EVENT_QUIESCE:
365                 return ops->freeze_late;
366         case PM_EVENT_HIBERNATE:
367                 return ops->poweroff_late;
368         case PM_EVENT_THAW:
369         case PM_EVENT_RECOVER:
370                 return ops->thaw_early;
371         case PM_EVENT_RESTORE:
372                 return ops->restore_early;
373 #endif /* CONFIG_HIBERNATE_CALLBACKS */
374         }
375
376         return NULL;
377 }
378
379 /**
380  * pm_noirq_op - Return the PM operation appropriate for given PM event.
381  * @ops: PM operations to choose from.
382  * @state: PM transition of the system being carried out.
383  *
384  * The driver of the device will not receive interrupts while the returned
385  * callback is being executed.
386  */
387 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
388 {
389         switch (state.event) {
390 #ifdef CONFIG_SUSPEND
391         case PM_EVENT_SUSPEND:
392                 return ops->suspend_noirq;
393         case PM_EVENT_RESUME:
394                 return ops->resume_noirq;
395 #endif /* CONFIG_SUSPEND */
396 #ifdef CONFIG_HIBERNATE_CALLBACKS
397         case PM_EVENT_FREEZE:
398         case PM_EVENT_QUIESCE:
399                 return ops->freeze_noirq;
400         case PM_EVENT_HIBERNATE:
401                 return ops->poweroff_noirq;
402         case PM_EVENT_THAW:
403         case PM_EVENT_RECOVER:
404                 return ops->thaw_noirq;
405         case PM_EVENT_RESTORE:
406                 return ops->restore_noirq;
407 #endif /* CONFIG_HIBERNATE_CALLBACKS */
408         }
409
410         return NULL;
411 }
412
413 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
414 {
415         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
416                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
417                 ", may wakeup" : "");
418 }
419
420 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
421                         int error)
422 {
423         pr_err("Device %s failed to %s%s: error %d\n",
424                dev_name(dev), pm_verb(state.event), info, error);
425 }
426
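/* Log how long the given suspend/resume phase of devices took. */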
427 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
428                           const char *info)
429 {
430         ktime_t calltime;
431         u64 usecs64;
432         int usecs;
433
434         calltime = ktime_get();
435         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
436         do_div(usecs64, NSEC_PER_USEC);
437         usecs = usecs64;
438         if (usecs == 0)
439                 usecs = 1;
440
441         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
442                   info ?: "", info ? " " : "", pm_verb(state.event),
443                   error ? "aborted" : "complete",
444                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
445 }
446
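/*
 * Invoke one PM callback for @dev with tracing and initcall debug
 * instrumentation; a NULL callback is treated as success.
 */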
447 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
448                             pm_message_t state, const char *info)
449 {
450         ktime_t calltime;
451         int error;
452
453         if (!cb)
454                 return 0;
455
456         calltime = initcall_debug_start(dev, cb);
457
458         pm_dev_dbg(dev, state, info);
459         trace_device_pm_callback_start(dev, info, state.event);
460         error = cb(dev);
461         trace_device_pm_callback_end(dev, error);
462         suspend_report_result(cb, error);
463
464         initcall_debug_report(dev, calltime, cb, error);
465
466         return error;
467 }
468
469 #ifdef CONFIG_DPM_WATCHDOG
470 struct dpm_watchdog {
471         struct device           *dev;
472         struct task_struct      *tsk;
473         struct timer_list       timer;
474 };
475
476 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
477         struct dpm_watchdog wd
478
479 /**
480  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
481  * @t: The timer that fired, embedded in a struct dpm_watchdog.
482  *
483  * Called when a driver has timed out suspending or resuming.
484  * There's not much we can do here to recover so panic() to
485  * capture a crash-dump in pstore.
486  */
487 static void dpm_watchdog_handler(struct timer_list *t)
488 {
489         struct dpm_watchdog *wd = from_timer(wd, t, timer);
490
491         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
492         show_stack(wd->tsk, NULL);
493         panic("%s %s: unrecoverable failure\n",
494                 dev_driver_string(wd->dev), dev_name(wd->dev));
495 }
496
497 /**
498  * dpm_watchdog_set - Enable pm watchdog for given device.
499  * @wd: Watchdog. Must be allocated on the stack.
500  * @dev: Device to handle.
501  */
502 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
503 {
504         struct timer_list *timer = &wd->timer;
505
506         wd->dev = dev;
507         wd->tsk = current;
508
509         timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
510         /* use same timeout value for both suspend and resume */
511         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
512         add_timer(timer);
513 }
514
515 /**
516  * dpm_watchdog_clear - Disable suspend/resume watchdog.
517  * @wd: Watchdog to disable.
518  */
519 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
520 {
521         struct timer_list *timer = &wd->timer;
522
523         del_timer_sync(timer);
524         destroy_timer_on_stack(timer);
525 }
526 #else
527 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
528 #define dpm_watchdog_set(x, y)
529 #define dpm_watchdog_clear(x)
530 #endif
531
532 /*------------------------- Resume routines -------------------------*/
533
534 /**
535  * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
536  * @dev: Target device.
537  *
538  * Make the core skip the "early resume" and "resume" phases for @dev.
539  *
540  * This function can be called by middle-layer code during the "noirq" phase of
541  * system resume if necessary, but not by device drivers.
542  */
543 void dev_pm_skip_next_resume_phases(struct device *dev)
544 {
545         dev->power.is_late_suspended = false;
546         dev->power.is_suspended = false;
547 }
548
549 /**
550  * suspend_event - Return a "suspend" message for given "resume" one.
551  * @resume_msg: PM message representing a system-wide resume transition.
552  */
553 static pm_message_t suspend_event(pm_message_t resume_msg)
554 {
555         switch (resume_msg.event) {
556         case PM_EVENT_RESUME:
557                 return PMSG_SUSPEND;
558         case PM_EVENT_THAW:
559         case PM_EVENT_RESTORE:
560                 return PMSG_FREEZE;
561         case PM_EVENT_RECOVER:
562                 return PMSG_HIBERNATE;
563         }
564         return PMSG_ON;
565 }
566
567 /**
568  * dev_pm_may_skip_resume - System-wide device resume optimization check.
569  * @dev: Target device.
570  *
571  * Checks whether or not the device may be left in suspend after a system-wide
572  * transition to the working state.
573  */
574 bool dev_pm_may_skip_resume(struct device *dev)
575 {
576         return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
577 }
578
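/*
 * Return the subsystem-level (power domain, device type, class or bus) "noirq"
 * resume callback for @dev, if any, along with a description via @info_p.
 */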
579 static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
580                                                 pm_message_t state,
581                                                 const char **info_p)
582 {
583         pm_callback_t callback;
584         const char *info;
585
586         if (dev->pm_domain) {
587                 info = "noirq power domain ";
588                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
589         } else if (dev->type && dev->type->pm) {
590                 info = "noirq type ";
591                 callback = pm_noirq_op(dev->type->pm, state);
592         } else if (dev->class && dev->class->pm) {
593                 info = "noirq class ";
594                 callback = pm_noirq_op(dev->class->pm, state);
595         } else if (dev->bus && dev->bus->pm) {
596                 info = "noirq bus ";
597                 callback = pm_noirq_op(dev->bus->pm, state);
598         } else {
599                 return NULL;
600         }
601
602         if (info_p)
603                 *info_p = info;
604
605         return callback;
606 }
607
608 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
609                                                  pm_message_t state,
610                                                  const char **info_p);
611
612 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
613                                                 pm_message_t state,
614                                                 const char **info_p);
615
616 /**
617  * device_resume_noirq - Execute a "noirq resume" callback for given device.
618  * @dev: Device to handle.
619  * @state: PM transition of the system being carried out.
620  * @async: If true, the device is being resumed asynchronously.
621  *
622  * The driver of @dev will not receive interrupts while this function is being
623  * executed.
624  */
625 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
626 {
627         pm_callback_t callback;
628         const char *info;
629         bool skip_resume;
630         int error = 0;
631
632         TRACE_DEVICE(dev);
633         TRACE_RESUME(0);
634
635         if (dev->power.syscore || dev->power.direct_complete)
636                 goto Out;
637
638         if (!dev->power.is_noirq_suspended)
639                 goto Out;
640
641         dpm_wait_for_superior(dev, async);
642
643         skip_resume = dev_pm_may_skip_resume(dev);
644
645         callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
646         if (callback)
647                 goto Run;
648
649         if (skip_resume)
650                 goto Skip;
651
652         if (dev_pm_smart_suspend_and_suspended(dev)) {
653                 pm_message_t suspend_msg = suspend_event(state);
654
655                 /*
656                  * If "freeze" callbacks have been skipped during a transition
657                  * related to hibernation, the subsequent "thaw" callbacks must
658                  * be skipped too or bad things may happen.  Otherwise, resume
659                  * callbacks are going to be run for the device, so its runtime
660                  * PM status must be changed to reflect the new state after the
661                  * transition under way.
662                  */
663                 if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
664                     !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
665                         if (state.event == PM_EVENT_THAW) {
666                                 skip_resume = true;
667                                 goto Skip;
668                         } else {
669                                 pm_runtime_set_active(dev);
670                         }
671                 }
672         }
673
674         if (dev->driver && dev->driver->pm) {
675                 info = "noirq driver ";
676                 callback = pm_noirq_op(dev->driver->pm, state);
677         }
678
679 Run:
680         error = dpm_run_callback(callback, dev, state, info);
681
682 Skip:
683         dev->power.is_noirq_suspended = false;
684
685         if (skip_resume) {
686                 /*
687                  * The device is going to be left in suspend, but it might not
688                  * have been in runtime suspend before the system suspended, so
689                  * its runtime PM status needs to be updated to avoid confusing
690                  * the runtime PM framework when runtime PM is enabled for the
691                  * device again.
692                  */
693                 pm_runtime_set_suspended(dev);
694                 dev_pm_skip_next_resume_phases(dev);
695         }
696
697 Out:
698         complete_all(&dev->power.completion);
699         TRACE_RESUME(error);
700         return error;
701 }
702
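/*
 * Handle @dev asynchronously only if async suspend is enabled both globally
 * and for the device, and PM tracing is not active.
 */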
703 static bool is_async(struct device *dev)
704 {
705         return dev->power.async_suspend && pm_async_enabled
706                 && !pm_trace_is_enabled();
707 }
708
709 static void async_resume_noirq(void *data, async_cookie_t cookie)
710 {
711         struct device *dev = (struct device *)data;
712         int error;
713
714         error = device_resume_noirq(dev, pm_transition, true);
715         if (error)
716                 pm_dev_err(dev, pm_transition, " async", error);
717
718         put_device(dev);
719 }
720
721 void dpm_noirq_resume_devices(pm_message_t state)
722 {
723         struct device *dev;
724         ktime_t starttime = ktime_get();
725
726         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
727         mutex_lock(&dpm_list_mtx);
728         pm_transition = state;
729
730         /*
731          * Start the async threads upfront, so that they are not
732          * delayed behind the devices that have to be resumed
733          * synchronously.
734          */
735         list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
736                 reinit_completion(&dev->power.completion);
737                 if (is_async(dev)) {
738                         get_device(dev);
739                         async_schedule_dev(async_resume_noirq, dev);
740                 }
741         }
742
743         while (!list_empty(&dpm_noirq_list)) {
744                 dev = to_device(dpm_noirq_list.next);
745                 get_device(dev);
746                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
747                 mutex_unlock(&dpm_list_mtx);
748
749                 if (!is_async(dev)) {
750                         int error;
751
752                         error = device_resume_noirq(dev, state, false);
753                         if (error) {
754                                 suspend_stats.failed_resume_noirq++;
755                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
756                                 dpm_save_failed_dev(dev_name(dev));
757                                 pm_dev_err(dev, state, " noirq", error);
758                         }
759                 }
760
761                 mutex_lock(&dpm_list_mtx);
762                 put_device(dev);
763         }
764         mutex_unlock(&dpm_list_mtx);
765         async_synchronize_full();
766         dpm_show_time(starttime, state, 0, "noirq");
767         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
768 }
769
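/*
 * Undo dpm_noirq_begin(): re-enable device interrupts, disarm wakeup IRQs and
 * let cpuidle run again.
 */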
770 void dpm_noirq_end(void)
771 {
772         resume_device_irqs();
773         device_wakeup_disarm_wake_irqs();
774         cpuidle_resume();
775 }
776
777 /**
778  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
779  * @state: PM transition of the system being carried out.
780  *
781  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
782  * allow device drivers' interrupt handlers to be called.
783  */
784 void dpm_resume_noirq(pm_message_t state)
785 {
786         dpm_noirq_resume_devices(state);
787         dpm_noirq_end();
788 }
789
790 static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
791                                                 pm_message_t state,
792                                                 const char **info_p)
793 {
794         pm_callback_t callback;
795         const char *info;
796
797         if (dev->pm_domain) {
798                 info = "early power domain ";
799                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
800         } else if (dev->type && dev->type->pm) {
801                 info = "early type ";
802                 callback = pm_late_early_op(dev->type->pm, state);
803         } else if (dev->class && dev->class->pm) {
804                 info = "early class ";
805                 callback = pm_late_early_op(dev->class->pm, state);
806         } else if (dev->bus && dev->bus->pm) {
807                 info = "early bus ";
808                 callback = pm_late_early_op(dev->bus->pm, state);
809         } else {
810                 return NULL;
811         }
812
813         if (info_p)
814                 *info_p = info;
815
816         return callback;
817 }
818
819 /**
820  * device_resume_early - Execute an "early resume" callback for given device.
821  * @dev: Device to handle.
822  * @state: PM transition of the system being carried out.
823  * @async: If true, the device is being resumed asynchronously.
824  *
825  * Runtime PM is disabled for @dev while this function is being executed.
826  */
827 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
828 {
829         pm_callback_t callback;
830         const char *info;
831         int error = 0;
832
833         TRACE_DEVICE(dev);
834         TRACE_RESUME(0);
835
836         if (dev->power.syscore || dev->power.direct_complete)
837                 goto Out;
838
839         if (!dev->power.is_late_suspended)
840                 goto Out;
841
842         dpm_wait_for_superior(dev, async);
843
844         callback = dpm_subsys_resume_early_cb(dev, state, &info);
845
846         if (!callback && dev->driver && dev->driver->pm) {
847                 info = "early driver ";
848                 callback = pm_late_early_op(dev->driver->pm, state);
849         }
850
851         error = dpm_run_callback(callback, dev, state, info);
852         dev->power.is_late_suspended = false;
853
854  Out:
855         TRACE_RESUME(error);
856
857         pm_runtime_enable(dev);
858         complete_all(&dev->power.completion);
859         return error;
860 }
861
862 static void async_resume_early(void *data, async_cookie_t cookie)
863 {
864         struct device *dev = (struct device *)data;
865         int error;
866
867         error = device_resume_early(dev, pm_transition, true);
868         if (error)
869                 pm_dev_err(dev, pm_transition, " async", error);
870
871         put_device(dev);
872 }
873
874 /**
875  * dpm_resume_early - Execute "early resume" callbacks for all devices.
876  * @state: PM transition of the system being carried out.
877  */
878 void dpm_resume_early(pm_message_t state)
879 {
880         struct device *dev;
881         ktime_t starttime = ktime_get();
882
883         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
884         mutex_lock(&dpm_list_mtx);
885         pm_transition = state;
886
887         /*
888          * Start the async threads upfront, so that they are not
889          * delayed behind the devices that have to be resumed
890          * synchronously.
891          */
892         list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
893                 reinit_completion(&dev->power.completion);
894                 if (is_async(dev)) {
895                         get_device(dev);
896                         async_schedule_dev(async_resume_early, dev);
897                 }
898         }
899
900         while (!list_empty(&dpm_late_early_list)) {
901                 dev = to_device(dpm_late_early_list.next);
902                 get_device(dev);
903                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
904                 mutex_unlock(&dpm_list_mtx);
905
906                 if (!is_async(dev)) {
907                         int error;
908
909                         error = device_resume_early(dev, state, false);
910                         if (error) {
911                                 suspend_stats.failed_resume_early++;
912                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
913                                 dpm_save_failed_dev(dev_name(dev));
914                                 pm_dev_err(dev, state, " early", error);
915                         }
916                 }
917                 mutex_lock(&dpm_list_mtx);
918                 put_device(dev);
919         }
920         mutex_unlock(&dpm_list_mtx);
921         async_synchronize_full();
922         dpm_show_time(starttime, state, 0, "early");
923         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
924 }
925
926 /**
927  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
928  * @state: PM transition of the system being carried out.
929  */
930 void dpm_resume_start(pm_message_t state)
931 {
932         dpm_resume_noirq(state);
933         dpm_resume_early(state);
934 }
935 EXPORT_SYMBOL_GPL(dpm_resume_start);
936
937 /**
938  * device_resume - Execute "resume" callbacks for given device.
939  * @dev: Device to handle.
940  * @state: PM transition of the system being carried out.
941  * @async: If true, the device is being resumed asynchronously.
942  */
943 static int device_resume(struct device *dev, pm_message_t state, bool async)
944 {
945         pm_callback_t callback = NULL;
946         const char *info = NULL;
947         int error = 0;
948         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
949
950         TRACE_DEVICE(dev);
951         TRACE_RESUME(0);
952
953         if (dev->power.syscore)
954                 goto Complete;
955
956         if (dev->power.direct_complete) {
957                 /* Match the pm_runtime_disable() in __device_suspend(). */
958                 pm_runtime_enable(dev);
959                 goto Complete;
960         }
961
962         dpm_wait_for_superior(dev, async);
963         dpm_watchdog_set(&wd, dev);
964         device_lock(dev);
965
966         /*
967          * This is a fib.  But we'll allow new children to be added below
968          * a resumed device, even if the device hasn't been completed yet.
969          */
970         dev->power.is_prepared = false;
971
972         if (!dev->power.is_suspended)
973                 goto Unlock;
974
975         if (dev->pm_domain) {
976                 info = "power domain ";
977                 callback = pm_op(&dev->pm_domain->ops, state);
978                 goto Driver;
979         }
980
981         if (dev->type && dev->type->pm) {
982                 info = "type ";
983                 callback = pm_op(dev->type->pm, state);
984                 goto Driver;
985         }
986
987         if (dev->class && dev->class->pm) {
988                 info = "class ";
989                 callback = pm_op(dev->class->pm, state);
990                 goto Driver;
991         }
992
993         if (dev->bus) {
994                 if (dev->bus->pm) {
995                         info = "bus ";
996                         callback = pm_op(dev->bus->pm, state);
997                 } else if (dev->bus->resume) {
998                         info = "legacy bus ";
999                         callback = dev->bus->resume;
1000                         goto End;
1001                 }
1002         }
1003
1004  Driver:
1005         if (!callback && dev->driver && dev->driver->pm) {
1006                 info = "driver ";
1007                 callback = pm_op(dev->driver->pm, state);
1008         }
1009
1010  End:
1011         error = dpm_run_callback(callback, dev, state, info);
1012         dev->power.is_suspended = false;
1013
1014  Unlock:
1015         device_unlock(dev);
1016         dpm_watchdog_clear(&wd);
1017
1018  Complete:
1019         complete_all(&dev->power.completion);
1020
1021         TRACE_RESUME(error);
1022
1023         return error;
1024 }
1025
1026 static void async_resume(void *data, async_cookie_t cookie)
1027 {
1028         struct device *dev = (struct device *)data;
1029         int error;
1030
1031         error = device_resume(dev, pm_transition, true);
1032         if (error)
1033                 pm_dev_err(dev, pm_transition, " async", error);
1034         put_device(dev);
1035 }
1036
1037 /**
1038  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1039  * @state: PM transition of the system being carried out.
1040  *
1041  * Execute the appropriate "resume" callback for all devices whose status
1042  * indicates that they are suspended.
1043  */
1044 void dpm_resume(pm_message_t state)
1045 {
1046         struct device *dev;
1047         ktime_t starttime = ktime_get();
1048
1049         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1050         might_sleep();
1051
1052         mutex_lock(&dpm_list_mtx);
1053         pm_transition = state;
1054         async_error = 0;
1055
1056         list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1057                 reinit_completion(&dev->power.completion);
1058                 if (is_async(dev)) {
1059                         get_device(dev);
1060                         async_schedule_dev(async_resume, dev);
1061                 }
1062         }
1063
1064         while (!list_empty(&dpm_suspended_list)) {
1065                 dev = to_device(dpm_suspended_list.next);
1066                 get_device(dev);
1067                 if (!is_async(dev)) {
1068                         int error;
1069
1070                         mutex_unlock(&dpm_list_mtx);
1071
1072                         error = device_resume(dev, state, false);
1073                         if (error) {
1074                                 suspend_stats.failed_resume++;
1075                                 dpm_save_failed_step(SUSPEND_RESUME);
1076                                 dpm_save_failed_dev(dev_name(dev));
1077                                 pm_dev_err(dev, state, "", error);
1078                         }
1079
1080                         mutex_lock(&dpm_list_mtx);
1081                 }
1082                 if (!list_empty(&dev->power.entry))
1083                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1084                 put_device(dev);
1085         }
1086         mutex_unlock(&dpm_list_mtx);
1087         async_synchronize_full();
1088         dpm_show_time(starttime, state, 0, NULL);
1089
1090         cpufreq_resume();
1091         devfreq_resume();
1092         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1093 }
1094
1095 /**
1096  * device_complete - Complete a PM transition for given device.
1097  * @dev: Device to handle.
1098  * @state: PM transition of the system being carried out.
1099  */
1100 static void device_complete(struct device *dev, pm_message_t state)
1101 {
1102         void (*callback)(struct device *) = NULL;
1103         const char *info = NULL;
1104
1105         if (dev->power.syscore)
1106                 return;
1107
1108         device_lock(dev);
1109
1110         if (dev->pm_domain) {
1111                 info = "completing power domain ";
1112                 callback = dev->pm_domain->ops.complete;
1113         } else if (dev->type && dev->type->pm) {
1114                 info = "completing type ";
1115                 callback = dev->type->pm->complete;
1116         } else if (dev->class && dev->class->pm) {
1117                 info = "completing class ";
1118                 callback = dev->class->pm->complete;
1119         } else if (dev->bus && dev->bus->pm) {
1120                 info = "completing bus ";
1121                 callback = dev->bus->pm->complete;
1122         }
1123
1124         if (!callback && dev->driver && dev->driver->pm) {
1125                 info = "completing driver ";
1126                 callback = dev->driver->pm->complete;
1127         }
1128
1129         if (callback) {
1130                 pm_dev_dbg(dev, state, info);
1131                 callback(dev);
1132         }
1133
1134         device_unlock(dev);
1135
1136         pm_runtime_put(dev);
1137 }
1138
1139 /**
1140  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1141  * @state: PM transition of the system being carried out.
1142  *
1143  * Execute the ->complete() callbacks for all devices whose PM status is not
1144  * DPM_ON (this allows new devices to be registered).
1145  */
1146 void dpm_complete(pm_message_t state)
1147 {
1148         struct list_head list;
1149
1150         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1151         might_sleep();
1152
1153         INIT_LIST_HEAD(&list);
1154         mutex_lock(&dpm_list_mtx);
1155         while (!list_empty(&dpm_prepared_list)) {
1156                 struct device *dev = to_device(dpm_prepared_list.prev);
1157
1158                 get_device(dev);
1159                 dev->power.is_prepared = false;
1160                 list_move(&dev->power.entry, &list);
1161                 mutex_unlock(&dpm_list_mtx);
1162
1163                 trace_device_pm_callback_start(dev, "", state.event);
1164                 device_complete(dev, state);
1165                 trace_device_pm_callback_end(dev, 0);
1166
1167                 mutex_lock(&dpm_list_mtx);
1168                 put_device(dev);
1169         }
1170         list_splice(&list, &dpm_list);
1171         mutex_unlock(&dpm_list_mtx);
1172
1173         /* Allow device probing and trigger re-probing of deferred devices */
1174         device_unblock_probing();
1175         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1176 }
1177
1178 /**
1179  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1180  * @state: PM transition of the system being carried out.
1181  *
1182  * Execute "resume" callbacks for all devices and complete the PM transition of
1183  * the system.
1184  */
1185 void dpm_resume_end(pm_message_t state)
1186 {
1187         dpm_resume(state);
1188         dpm_complete(state);
1189 }
1190 EXPORT_SYMBOL_GPL(dpm_resume_end);
1191
1192
1193 /*------------------------- Suspend routines -------------------------*/
1194
1195 /**
1196  * resume_event - Return a "resume" message for given "suspend" sleep state.
1197  * @sleep_state: PM message representing a sleep state.
1198  *
1199  * Return a PM message representing the resume event corresponding to given
1200  * sleep state.
1201  */
1202 static pm_message_t resume_event(pm_message_t sleep_state)
1203 {
1204         switch (sleep_state.event) {
1205         case PM_EVENT_SUSPEND:
1206                 return PMSG_RESUME;
1207         case PM_EVENT_FREEZE:
1208         case PM_EVENT_QUIESCE:
1209                 return PMSG_RECOVER;
1210         case PM_EVENT_HIBERNATE:
1211                 return PMSG_RESTORE;
1212         }
1213         return PMSG_ON;
1214 }
1215
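/* Mark the parent and all suppliers of @dev as having to be resumed. */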
1216 static void dpm_superior_set_must_resume(struct device *dev)
1217 {
1218         struct device_link *link;
1219         int idx;
1220
1221         if (dev->parent)
1222                 dev->parent->power.must_resume = true;
1223
1224         idx = device_links_read_lock();
1225
1226         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1227                 link->supplier->power.must_resume = true;
1228
1229         device_links_read_unlock(idx);
1230 }
1231
1232 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1233                                                  pm_message_t state,
1234                                                  const char **info_p)
1235 {
1236         pm_callback_t callback;
1237         const char *info;
1238
1239         if (dev->pm_domain) {
1240                 info = "noirq power domain ";
1241                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1242         } else if (dev->type && dev->type->pm) {
1243                 info = "noirq type ";
1244                 callback = pm_noirq_op(dev->type->pm, state);
1245         } else if (dev->class && dev->class->pm) {
1246                 info = "noirq class ";
1247                 callback = pm_noirq_op(dev->class->pm, state);
1248         } else if (dev->bus && dev->bus->pm) {
1249                 info = "noirq bus ";
1250                 callback = pm_noirq_op(dev->bus->pm, state);
1251         } else {
1252                 return NULL;
1253         }
1254
1255         if (info_p)
1256                 *info_p = info;
1257
1258         return callback;
1259 }
1260
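/*
 * Decide whether @dev needs to be resumed when DPM_FLAG_LEAVE_SUSPENDED is set
 * for it.
 */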
1261 static bool device_must_resume(struct device *dev, pm_message_t state,
1262                                bool no_subsys_suspend_noirq)
1263 {
1264         pm_message_t resume_msg = resume_event(state);
1265
1266         /*
1267          * If all of the device driver's "noirq", "late" and "early" callbacks
1268          * are invoked directly by the core, the decision to allow the device to
1269          * stay in suspend can be based on its current runtime PM status and its
1270          * wakeup settings.
1271          */
1272         if (no_subsys_suspend_noirq &&
1273             !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1274             !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1275             !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1276                 return !pm_runtime_status_suspended(dev) &&
1277                         (resume_msg.event != PM_EVENT_RESUME ||
1278                          (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1279
1280         /*
1281          * The only safe strategy here is to require that if the device may not
1282          * be left in suspend, resume callbacks must be invoked for it.
1283          */
1284         return !dev->power.may_skip_resume;
1285 }
1286
1287 /**
1288  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1289  * @dev: Device to handle.
1290  * @state: PM transition of the system being carried out.
1291  * @async: If true, the device is being suspended asynchronously.
1292  *
1293  * The driver of @dev will not receive interrupts while this function is being
1294  * executed.
1295  */
1296 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1297 {
1298         pm_callback_t callback;
1299         const char *info;
1300         bool no_subsys_cb = false;
1301         int error = 0;
1302
1303         TRACE_DEVICE(dev);
1304         TRACE_SUSPEND(0);
1305
1306         dpm_wait_for_subordinate(dev, async);
1307
1308         if (async_error)
1309                 goto Complete;
1310
1311         if (pm_wakeup_pending()) {
1312                 async_error = -EBUSY;
1313                 goto Complete;
1314         }
1315
1316         if (dev->power.syscore || dev->power.direct_complete)
1317                 goto Complete;
1318
1319         callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1320         if (callback)
1321                 goto Run;
1322
1323         no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1324
1325         if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1326                 goto Skip;
1327
1328         if (dev->driver && dev->driver->pm) {
1329                 info = "noirq driver ";
1330                 callback = pm_noirq_op(dev->driver->pm, state);
1331         }
1332
1333 Run:
1334         error = dpm_run_callback(callback, dev, state, info);
1335         if (error) {
1336                 async_error = error;
1337                 goto Complete;
1338         }
1339
1340 Skip:
1341         dev->power.is_noirq_suspended = true;
1342
1343         if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1344                 dev->power.must_resume = dev->power.must_resume ||
1345                                 atomic_read(&dev->power.usage_count) > 1 ||
1346                                 device_must_resume(dev, state, no_subsys_cb);
1347         } else {
1348                 dev->power.must_resume = true;
1349         }
1350
1351         if (dev->power.must_resume)
1352                 dpm_superior_set_must_resume(dev);
1353
1354 Complete:
1355         complete_all(&dev->power.completion);
1356         TRACE_SUSPEND(error);
1357         return error;
1358 }
1359
1360 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1361 {
1362         struct device *dev = (struct device *)data;
1363         int error;
1364
1365         error = __device_suspend_noirq(dev, pm_transition, true);
1366         if (error) {
1367                 dpm_save_failed_dev(dev_name(dev));
1368                 pm_dev_err(dev, pm_transition, " async", error);
1369         }
1370
1371         put_device(dev);
1372 }
1373
1374 static int device_suspend_noirq(struct device *dev)
1375 {
1376         reinit_completion(&dev->power.completion);
1377
1378         if (is_async(dev)) {
1379                 get_device(dev);
1380                 async_schedule_dev(async_suspend_noirq, dev);
1381                 return 0;
1382         }
1383         return __device_suspend_noirq(dev, pm_transition, false);
1384 }
1385
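/*
 * Pause cpuidle, arm wakeup IRQs and disable device interrupt handlers in
 * preparation for the "noirq" suspend phase.
 */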
1386 void dpm_noirq_begin(void)
1387 {
1388         cpuidle_pause();
1389         device_wakeup_arm_wake_irqs();
1390         suspend_device_irqs();
1391 }
1392
1393 int dpm_noirq_suspend_devices(pm_message_t state)
1394 {
1395         ktime_t starttime = ktime_get();
1396         int error = 0;
1397
1398         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1399         mutex_lock(&dpm_list_mtx);
1400         pm_transition = state;
1401         async_error = 0;
1402
1403         while (!list_empty(&dpm_late_early_list)) {
1404                 struct device *dev = to_device(dpm_late_early_list.prev);
1405
1406                 get_device(dev);
1407                 mutex_unlock(&dpm_list_mtx);
1408
1409                 error = device_suspend_noirq(dev);
1410
1411                 mutex_lock(&dpm_list_mtx);
1412                 if (error) {
1413                         pm_dev_err(dev, state, " noirq", error);
1414                         dpm_save_failed_dev(dev_name(dev));
1415                         put_device(dev);
1416                         break;
1417                 }
1418                 if (!list_empty(&dev->power.entry))
1419                         list_move(&dev->power.entry, &dpm_noirq_list);
1420                 put_device(dev);
1421
1422                 if (async_error)
1423                         break;
1424         }
1425         mutex_unlock(&dpm_list_mtx);
1426         async_synchronize_full();
1427         if (!error)
1428                 error = async_error;
1429
1430         if (error) {
1431                 suspend_stats.failed_suspend_noirq++;
1432                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1433         }
1434         dpm_show_time(starttime, state, error, "noirq");
1435         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1436         return error;
1437 }
1438
1439 /**
1440  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1441  * @state: PM transition of the system being carried out.
1442  *
1443  * Prevent device drivers' interrupt handlers from being called and invoke
1444  * "noirq" suspend callbacks for all non-sysdev devices.
1445  */
1446 int dpm_suspend_noirq(pm_message_t state)
1447 {
1448         int ret;
1449
1450         dpm_noirq_begin();
1451         ret = dpm_noirq_suspend_devices(state);
1452         if (ret)
1453                 dpm_resume_noirq(resume_event(state));
1454
1455         return ret;
1456 }
1457
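/*
 * If @dev is part of a wakeup path, mark its parent as being part of one too,
 * unless the parent ignores its children.
 */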
1458 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1459 {
1460         struct device *parent = dev->parent;
1461
1462         if (!parent)
1463                 return;
1464
1465         spin_lock_irq(&parent->power.lock);
1466
1467         if (dev->power.wakeup_path && !parent->power.ignore_children)
1468                 parent->power.wakeup_path = true;
1469
1470         spin_unlock_irq(&parent->power.lock);
1471 }
1472
1473 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1474                                                 pm_message_t state,
1475                                                 const char **info_p)
1476 {
1477         pm_callback_t callback;
1478         const char *info;
1479
1480         if (dev->pm_domain) {
1481                 info = "late power domain ";
1482                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1483         } else if (dev->type && dev->type->pm) {
1484                 info = "late type ";
1485                 callback = pm_late_early_op(dev->type->pm, state);
1486         } else if (dev->class && dev->class->pm) {
1487                 info = "late class ";
1488                 callback = pm_late_early_op(dev->class->pm, state);
1489         } else if (dev->bus && dev->bus->pm) {
1490                 info = "late bus ";
1491                 callback = pm_late_early_op(dev->bus->pm, state);
1492         } else {
1493                 return NULL;
1494         }
1495
1496         if (info_p)
1497                 *info_p = info;
1498
1499         return callback;
1500 }
1501
1502 /**
1503  * __device_suspend_late - Execute a "late suspend" callback for given device.
1504  * @dev: Device to handle.
1505  * @state: PM transition of the system being carried out.
1506  * @async: If true, the device is being suspended asynchronously.
1507  *
1508  * Runtime PM is disabled for @dev while this function is being executed.
1509  */
1510 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1511 {
1512         pm_callback_t callback;
1513         const char *info;
1514         int error = 0;
1515
1516         TRACE_DEVICE(dev);
1517         TRACE_SUSPEND(0);
1518
1519         __pm_runtime_disable(dev, false);
1520
1521         dpm_wait_for_subordinate(dev, async);
1522
1523         if (async_error)
1524                 goto Complete;
1525
1526         if (pm_wakeup_pending()) {
1527                 async_error = -EBUSY;
1528                 goto Complete;
1529         }
1530
1531         if (dev->power.syscore || dev->power.direct_complete)
1532                 goto Complete;
1533
1534         callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1535         if (callback)
1536                 goto Run;
1537
1538         if (dev_pm_smart_suspend_and_suspended(dev) &&
1539             !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1540                 goto Skip;
1541
1542         if (dev->driver && dev->driver->pm) {
1543                 info = "late driver ";
1544                 callback = pm_late_early_op(dev->driver->pm, state);
1545         }
1546
1547 Run:
1548         error = dpm_run_callback(callback, dev, state, info);
1549         if (error) {
1550                 async_error = error;
1551                 goto Complete;
1552         }
1553         dpm_propagate_wakeup_to_parent(dev);
1554
1555 Skip:
1556         dev->power.is_late_suspended = true;
1557
1558 Complete:
1559         TRACE_SUSPEND(error);
1560         complete_all(&dev->power.completion);
1561         return error;
1562 }
1563
1564 static void async_suspend_late(void *data, async_cookie_t cookie)
1565 {
1566         struct device *dev = (struct device *)data;
1567         int error;
1568
1569         error = __device_suspend_late(dev, pm_transition, true);
1570         if (error) {
1571                 dpm_save_failed_dev(dev_name(dev));
1572                 pm_dev_err(dev, pm_transition, " async", error);
1573         }
1574         put_device(dev);
1575 }
1576
1577 static int device_suspend_late(struct device *dev)
1578 {
1579         reinit_completion(&dev->power.completion);
1580
1581         if (is_async(dev)) {
1582                 get_device(dev);
1583                 async_schedule_dev(async_suspend_late, dev);
1584                 return 0;
1585         }
1586
1587         return __device_suspend_late(dev, pm_transition, false);
1588 }
1589
1590 /**
1591  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1592  * @state: PM transition of the system being carried out.
1593  */
1594 int dpm_suspend_late(pm_message_t state)
1595 {
1596         ktime_t starttime = ktime_get();
1597         int error = 0;
1598
1599         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1600         mutex_lock(&dpm_list_mtx);
1601         pm_transition = state;
1602         async_error = 0;
1603
1604         while (!list_empty(&dpm_suspended_list)) {
1605                 struct device *dev = to_device(dpm_suspended_list.prev);
1606
1607                 get_device(dev);
1608                 mutex_unlock(&dpm_list_mtx);
1609
1610                 error = device_suspend_late(dev);
1611
1612                 mutex_lock(&dpm_list_mtx);
1613                 if (!list_empty(&dev->power.entry))
1614                         list_move(&dev->power.entry, &dpm_late_early_list);
1615
1616                 if (error) {
1617                         pm_dev_err(dev, state, " late", error);
1618                         dpm_save_failed_dev(dev_name(dev));
1619                         put_device(dev);
1620                         break;
1621                 }
1622                 put_device(dev);
1623
1624                 if (async_error)
1625                         break;
1626         }
1627         mutex_unlock(&dpm_list_mtx);
1628         async_synchronize_full();
1629         if (!error)
1630                 error = async_error;
1631         if (error) {
1632                 suspend_stats.failed_suspend_late++;
1633                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1634                 dpm_resume_early(resume_event(state));
1635         }
1636         dpm_show_time(starttime, state, error, "late");
1637         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1638         return error;
1639 }
1640
1641 /**
1642  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1643  * @state: PM transition of the system being carried out.
1644  */
1645 int dpm_suspend_end(pm_message_t state)
1646 {
1647         int error = dpm_suspend_late(state);
1648         if (error)
1649                 return error;
1650
1651         error = dpm_suspend_noirq(state);
1652         if (error) {
1653                 dpm_resume_early(resume_event(state));
1654                 return error;
1655         }
1656
1657         return 0;
1658 }
1659 EXPORT_SYMBOL_GPL(dpm_suspend_end);
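/*
 * Illustrative sketch, not part of the original file: a caller such as the
 * system suspend core is expected to pair dpm_suspend_end() with
 * dpm_resume_start() around the low-level platform sleep step.  The
 * example_enter_sleep() helper below is a hypothetical placeholder for that
 * step.
 */
static int example_enter_sleep(void)
{
        int error;

        /* Run the "late" and "noirq" suspend callbacks for all devices. */
        error = dpm_suspend_end(PMSG_SUSPEND);
        if (error)
                return error;

        /* ... platform code would put the system into the sleep state here ... */

        /* Undo the "noirq" and "early" phases on wakeup. */
        dpm_resume_start(PMSG_RESUME);
        return 0;
}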
1660
1661 /**
1662  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1663  * @dev: Device to suspend.
1664  * @state: PM transition of the system being carried out.
1665  * @cb: Suspend callback to execute.
1666  * @info: string description of caller.
1667  */
1668 static int legacy_suspend(struct device *dev, pm_message_t state,
1669                           int (*cb)(struct device *dev, pm_message_t state),
1670                           const char *info)
1671 {
1672         int error;
1673         ktime_t calltime;
1674
1675         calltime = initcall_debug_start(dev, cb);
1676
1677         trace_device_pm_callback_start(dev, info, state.event);
1678         error = cb(dev, state);
1679         trace_device_pm_callback_end(dev, error);
1680         suspend_report_result(cb, error);
1681
1682         initcall_debug_report(dev, calltime, cb, error);
1683
1684         return error;
1685 }
1686
1687 static void dpm_clear_superiors_direct_complete(struct device *dev)
1688 {
1689         struct device_link *link;
1690         int idx;
1691
1692         if (dev->parent) {
1693                 spin_lock_irq(&dev->parent->power.lock);
1694                 dev->parent->power.direct_complete = false;
1695                 spin_unlock_irq(&dev->parent->power.lock);
1696         }
1697
1698         idx = device_links_read_lock();
1699
1700         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1701                 spin_lock_irq(&link->supplier->power.lock);
1702                 link->supplier->power.direct_complete = false;
1703                 spin_unlock_irq(&link->supplier->power.lock);
1704         }
1705
1706         device_links_read_unlock(idx);
1707 }
1708
1709 /**
1710  * __device_suspend - Execute "suspend" callbacks for given device.
1711  * @dev: Device to handle.
1712  * @state: PM transition of the system being carried out.
1713  * @async: If true, the device is being suspended asynchronously.
1714  */
1715 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1716 {
1717         pm_callback_t callback = NULL;
1718         const char *info = NULL;
1719         int error = 0;
1720         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1721
1722         TRACE_DEVICE(dev);
1723         TRACE_SUSPEND(0);
1724
1725         dpm_wait_for_subordinate(dev, async);
1726
1727         if (async_error) {
1728                 dev->power.direct_complete = false;
1729                 goto Complete;
1730         }
1731
1732         /*
1733          * If a device configured to wake up the system from sleep states
1734          * has been suspended at run time and there's a resume request pending
1735          * for it, this is equivalent to the device signaling wakeup, so the
1736          * system suspend operation should be aborted.
1737          */
1738         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1739                 pm_wakeup_event(dev, 0);
1740
1741         if (pm_wakeup_pending()) {
1742                 dev->power.direct_complete = false;
1743                 async_error = -EBUSY;
1744                 goto Complete;
1745         }
1746
1747         if (dev->power.syscore)
1748                 goto Complete;
1749
1750         if (dev->power.direct_complete) {
1751                 if (pm_runtime_status_suspended(dev)) {
1752                         pm_runtime_disable(dev);
1753                         if (pm_runtime_status_suspended(dev)) {
1754                                 pm_dev_dbg(dev, state, "direct-complete ");
1755                                 goto Complete;
1756                         }
1757
1758                         pm_runtime_enable(dev);
1759                 }
1760                 dev->power.direct_complete = false;
1761         }
1762
1763         dev->power.may_skip_resume = false;
1764         dev->power.must_resume = false;
1765
1766         dpm_watchdog_set(&wd, dev);
1767         device_lock(dev);
1768
1769         if (dev->pm_domain) {
1770                 info = "power domain ";
1771                 callback = pm_op(&dev->pm_domain->ops, state);
1772                 goto Run;
1773         }
1774
1775         if (dev->type && dev->type->pm) {
1776                 info = "type ";
1777                 callback = pm_op(dev->type->pm, state);
1778                 goto Run;
1779         }
1780
1781         if (dev->class && dev->class->pm) {
1782                 info = "class ";
1783                 callback = pm_op(dev->class->pm, state);
1784                 goto Run;
1785         }
1786
1787         if (dev->bus) {
1788                 if (dev->bus->pm) {
1789                         info = "bus ";
1790                         callback = pm_op(dev->bus->pm, state);
1791                 } else if (dev->bus->suspend) {
1792                         pm_dev_dbg(dev, state, "legacy bus ");
1793                         error = legacy_suspend(dev, state, dev->bus->suspend,
1794                                                 "legacy bus ");
1795                         goto End;
1796                 }
1797         }
1798
1799  Run:
1800         if (!callback && dev->driver && dev->driver->pm) {
1801                 info = "driver ";
1802                 callback = pm_op(dev->driver->pm, state);
1803         }
1804
1805         error = dpm_run_callback(callback, dev, state, info);
1806
1807  End:
1808         if (!error) {
1809                 dev->power.is_suspended = true;
1810                 if (device_may_wakeup(dev))
1811                         dev->power.wakeup_path = true;
1812
1813                 dpm_propagate_wakeup_to_parent(dev);
1814                 dpm_clear_superiors_direct_complete(dev);
1815         }
1816
1817         device_unlock(dev);
1818         dpm_watchdog_clear(&wd);
1819
1820  Complete:
1821         if (error)
1822                 async_error = error;
1823
1824         complete_all(&dev->power.completion);
1825         TRACE_SUSPEND(error);
1826         return error;
1827 }
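/*
 * Illustrative sketch, not part of the original file: the wakeup handling in
 * __device_suspend() above only applies to devices for which wakeup has been
 * enabled, which a driver usually declares once, e.g. at probe time.
 * example_setup_wakeup() is a hypothetical placeholder.
 */
static int example_setup_wakeup(struct device *dev)
{
        /*
         * Mark the device wakeup-capable and enable wakeup, so that
         * device_may_wakeup() is true and power.wakeup_path gets set.
         */
        return device_init_wakeup(dev, true);
}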
1828
1829 static void async_suspend(void *data, async_cookie_t cookie)
1830 {
1831         struct device *dev = (struct device *)data;
1832         int error;
1833
1834         error = __device_suspend(dev, pm_transition, true);
1835         if (error) {
1836                 dpm_save_failed_dev(dev_name(dev));
1837                 pm_dev_err(dev, pm_transition, " async", error);
1838         }
1839
1840         put_device(dev);
1841 }
1842
1843 static int device_suspend(struct device *dev)
1844 {
1845         reinit_completion(&dev->power.completion);
1846
1847         if (is_async(dev)) {
1848                 get_device(dev);
1849                 async_schedule_dev(async_suspend, dev);
1850                 return 0;
1851         }
1852
1853         return __device_suspend(dev, pm_transition, false);
1854 }
1855
1856 /**
1857  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1858  * @state: PM transition of the system being carried out.
1859  */
1860 int dpm_suspend(pm_message_t state)
1861 {
1862         ktime_t starttime = ktime_get();
1863         int error = 0;
1864
1865         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1866         might_sleep();
1867
1868         devfreq_suspend();
1869         cpufreq_suspend();
1870
1871         mutex_lock(&dpm_list_mtx);
1872         pm_transition = state;
1873         async_error = 0;
1874         while (!list_empty(&dpm_prepared_list)) {
1875                 struct device *dev = to_device(dpm_prepared_list.prev);
1876
1877                 get_device(dev);
1878                 mutex_unlock(&dpm_list_mtx);
1879
1880                 error = device_suspend(dev);
1881
1882                 mutex_lock(&dpm_list_mtx);
1883                 if (error) {
1884                         pm_dev_err(dev, state, "", error);
1885                         dpm_save_failed_dev(dev_name(dev));
1886                         put_device(dev);
1887                         break;
1888                 }
1889                 if (!list_empty(&dev->power.entry))
1890                         list_move(&dev->power.entry, &dpm_suspended_list);
1891                 put_device(dev);
1892                 if (async_error)
1893                         break;
1894         }
1895         mutex_unlock(&dpm_list_mtx);
1896         async_synchronize_full();
1897         if (!error)
1898                 error = async_error;
1899         if (error) {
1900                 suspend_stats.failed_suspend++;
1901                 dpm_save_failed_step(SUSPEND_SUSPEND);
1902         }
1903         dpm_show_time(starttime, state, error, NULL);
1904         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1905         return error;
1906 }
1907
1908 /**
1909  * device_prepare - Prepare a device for system power transition.
1910  * @dev: Device to handle.
1911  * @state: PM transition of the system being carried out.
1912  *
1913  * Execute the ->prepare() callback(s) for given device.  No new children of the
1914  * device may be registered after this function has returned.
1915  */
1916 static int device_prepare(struct device *dev, pm_message_t state)
1917 {
1918         int (*callback)(struct device *) = NULL;
1919         int ret = 0;
1920
1921         if (dev->power.syscore)
1922                 return 0;
1923
1924         WARN_ON(!pm_runtime_enabled(dev) &&
1925                 dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1926                                               DPM_FLAG_LEAVE_SUSPENDED));
1927
1928         /*
1929          * If a device's parent goes into runtime suspend at the wrong time,
1930          * it won't be possible to resume the device.  To prevent this we
1931          * block runtime suspend here, during the prepare phase, and allow
1932          * it again during the complete phase.
1933          */
1934         pm_runtime_get_noresume(dev);
1935
1936         device_lock(dev);
1937
1938         dev->power.wakeup_path = false;
1939
1940         if (dev->power.no_pm_callbacks)
1941                 goto unlock;
1942
1943         if (dev->pm_domain)
1944                 callback = dev->pm_domain->ops.prepare;
1945         else if (dev->type && dev->type->pm)
1946                 callback = dev->type->pm->prepare;
1947         else if (dev->class && dev->class->pm)
1948                 callback = dev->class->pm->prepare;
1949         else if (dev->bus && dev->bus->pm)
1950                 callback = dev->bus->pm->prepare;
1951
1952         if (!callback && dev->driver && dev->driver->pm)
1953                 callback = dev->driver->pm->prepare;
1954
1955         if (callback)
1956                 ret = callback(dev);
1957
1958 unlock:
1959         device_unlock(dev);
1960
1961         if (ret < 0) {
1962                 suspend_report_result(callback, ret);
1963                 pm_runtime_put(dev);
1964                 return ret;
1965         }
1966         /*
1967          * A positive return value from ->prepare() means "this device appears
1968          * to be runtime-suspended and its state is fine, so if it really is
1969          * runtime-suspended, you can leave it in that state provided that you
1970          * will do the same thing with all of its descendants".  This only
1971          * applies to suspend transitions, however.
1972          */
1973         spin_lock_irq(&dev->power.lock);
1974         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1975                 ((pm_runtime_suspended(dev) && ret > 0) ||
1976                  dev->power.no_pm_callbacks) &&
1977                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1978         spin_unlock_irq(&dev->power.lock);
1979         return 0;
1980 }
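/*
 * Illustrative sketch, not part of the original file: the positive-return
 * convention described above lets a driver opt into the direct_complete
 * optimization from its ->prepare() callback.  example_prepare() is a
 * hypothetical placeholder.
 */
static int example_prepare(struct device *dev)
{
        /*
         * Returning 1 means "this device looks runtime-suspended and may be
         * left that way, provided all of its descendants are treated the
         * same"; returning 0 requests the normal suspend sequence.
         */
        return pm_runtime_suspended(dev) ? 1 : 0;
}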
1981
1982 /**
1983  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1984  * @state: PM transition of the system being carried out.
1985  *
1986  * Execute the ->prepare() callback(s) for all devices.
1987  */
1988 int dpm_prepare(pm_message_t state)
1989 {
1990         int error = 0;
1991
1992         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1993         might_sleep();
1994
1995         /*
1996          * Give the known devices a chance to complete their probes before
1997          * device probing is disabled. This sync point is important at least
1998          * at boot time and during hibernation restore.
1999          */
2000         wait_for_device_probe();
2001         /*
2002          * Probing devices during suspend or hibernation is unsafe and would
2003          * make system behavior unpredictable, so prohibit device probing here
2004          * and defer any probes instead. Normal behavior will be restored in
2005          * dpm_complete().
2006          */
2007         device_block_probing();
2008
2009         mutex_lock(&dpm_list_mtx);
2010         while (!list_empty(&dpm_list)) {
2011                 struct device *dev = to_device(dpm_list.next);
2012
2013                 get_device(dev);
2014                 mutex_unlock(&dpm_list_mtx);
2015
2016                 trace_device_pm_callback_start(dev, "", state.event);
2017                 error = device_prepare(dev, state);
2018                 trace_device_pm_callback_end(dev, error);
2019
2020                 mutex_lock(&dpm_list_mtx);
2021                 if (error) {
2022                         if (error == -EAGAIN) {
2023                                 put_device(dev);
2024                                 error = 0;
2025                                 continue;
2026                         }
2027                         pr_info("Device %s not prepared for power transition: code %d\n",
2028                                 dev_name(dev), error);
2029                         put_device(dev);
2030                         break;
2031                 }
2032                 dev->power.is_prepared = true;
2033                 if (!list_empty(&dev->power.entry))
2034                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
2035                 put_device(dev);
2036         }
2037         mutex_unlock(&dpm_list_mtx);
2038         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2039         return error;
2040 }
2041
2042 /**
2043  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2044  * @state: PM transition of the system being carried out.
2045  *
2046  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2047  * callbacks for them.
2048  */
2049 int dpm_suspend_start(pm_message_t state)
2050 {
2051         int error;
2052
2053         error = dpm_prepare(state);
2054         if (error) {
2055                 suspend_stats.failed_prepare++;
2056                 dpm_save_failed_step(SUSPEND_PREPARE);
2057         } else
2058                 error = dpm_suspend(state);
2059         return error;
2060 }
2061 EXPORT_SYMBOL_GPL(dpm_suspend_start);
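/*
 * Illustrative sketch, not part of the original file: at the top level, a
 * system sleep implementation is expected to balance dpm_suspend_start()
 * with dpm_resume_end(), roughly as below.  example_system_sleep() and the
 * platform step it stands in for are hypothetical.
 */
static int example_system_sleep(void)
{
        int error;

        /* Prepare all devices and run their "suspend" callbacks. */
        error = dpm_suspend_start(PMSG_SUSPEND);
        if (!error) {
                /* ... the "late"/"noirq" phases and the platform sleep go here ... */
        }

        /* Resume and complete every device that was prepared above. */
        dpm_resume_end(PMSG_RESUME);
        return error;
}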
2062
2063 void __suspend_report_result(const char *function, void *fn, int ret)
2064 {
2065         if (ret)
2066                 pr_err("%s(): %pF returns %d\n", function, fn, ret);
2067 }
2068 EXPORT_SYMBOL_GPL(__suspend_report_result);
2069
2070 /**
2071  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2072  * @dev: Device to wait for.
2073  * @subordinate: Device that needs to wait for @dev.
2074  */
2075 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2076 {
2077         dpm_wait(dev, subordinate->power.async_suspend);
2078         return async_error;
2079 }
2080 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
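/*
 * Illustrative sketch, not part of the original file: a driver whose suspend
 * callback must wait for another device outside of its parent/child chain
 * can use device_pm_wait_for_dev() for ordering.  struct example_data, its
 * supplier pointer and example_suspend() are hypothetical placeholders.
 */
struct example_data {
        struct device *supplier;
};

static int example_suspend(struct device *dev)
{
        struct example_data *prv = dev_get_drvdata(dev);
        int error;

        /* Do not proceed until the supplier's own PM transition has finished. */
        error = device_pm_wait_for_dev(dev, prv->supplier);
        if (error)
                return error;

        /* ... this device's own suspend work would follow here ... */
        return 0;
}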
2081
2082 /**
2083  * dpm_for_each_dev - device iterator.
2084  * @data: data for the callback.
2085  * @fn: function to be called for each device.
2086  *
2087  * Iterate over devices in dpm_list, and call @fn for each device,
2088  * passing it @data.
2089  */
2090 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2091 {
2092         struct device *dev;
2093
2094         if (!fn)
2095                 return;
2096
2097         device_pm_lock();
2098         list_for_each_entry(dev, &dpm_list, power.entry)
2099                 fn(dev, data);
2100         device_pm_unlock();
2101 }
2102 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
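/*
 * Illustrative sketch, not part of the original file: using dpm_for_each_dev()
 * to walk dpm_list with a private accumulator.  example_count_one() and
 * example_count_devices() are hypothetical placeholders.
 */
static void example_count_one(struct device *dev, void *data)
{
        (*(unsigned int *)data)++;
}

static unsigned int example_count_devices(void)
{
        unsigned int count = 0;

        /* fn is called for every device on dpm_list, with dpm_list_mtx held. */
        dpm_for_each_dev(&count, example_count_one);
        return count;
}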
2103
2104 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2105 {
2106         if (!ops)
2107                 return true;
2108
2109         return !ops->prepare &&
2110                !ops->suspend &&
2111                !ops->suspend_late &&
2112                !ops->suspend_noirq &&
2113                !ops->resume_noirq &&
2114                !ops->resume_early &&
2115                !ops->resume &&
2116                !ops->complete;
2117 }
2118
2119 void device_pm_check_callbacks(struct device *dev)
2120 {
2121         spin_lock_irq(&dev->power.lock);
2122         dev->power.no_pm_callbacks =
2123                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2124                  !dev->bus->suspend && !dev->bus->resume)) &&
2125                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2126                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2127                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2128                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2129                  !dev->driver->suspend && !dev->driver->resume));
2130         spin_unlock_irq(&dev->power.lock);
2131 }
2132
2133 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2134 {
2135         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2136                 pm_runtime_status_suspended(dev);
2137 }