1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm-trace.h>
26 #include <linux/pm_wakeirq.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/sched/debug.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 /*
44  * The entries in dpm_list are in depth-first order, simply because
45  * children are guaranteed to be discovered after their parents and
46  * are inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mutex.
51  */
52
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58
59 struct suspend_stats suspend_stats;
60 static DEFINE_MUTEX(dpm_list_mtx);
61 static pm_message_t pm_transition;
62
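/*
 * async_error records the first error returned by an asynchronous
 * suspend/resume thread.  The suspend loops below check it so that
 * the transition is aborted once any async callback has failed.
 */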
63 static int async_error;
64
65 static const char *pm_verb(int event)
66 {
67         switch (event) {
68         case PM_EVENT_SUSPEND:
69                 return "suspend";
70         case PM_EVENT_RESUME:
71                 return "resume";
72         case PM_EVENT_FREEZE:
73                 return "freeze";
74         case PM_EVENT_QUIESCE:
75                 return "quiesce";
76         case PM_EVENT_HIBERNATE:
77                 return "hibernate";
78         case PM_EVENT_THAW:
79                 return "thaw";
80         case PM_EVENT_RESTORE:
81                 return "restore";
82         case PM_EVENT_RECOVER:
83                 return "recover";
84         default:
85                 return "(unknown PM event)";
86         }
87 }
88
89 /**
90  * device_pm_sleep_init - Initialize system suspend-related device fields.
91  * @dev: Device object being initialized.
92  */
93 void device_pm_sleep_init(struct device *dev)
94 {
95         dev->power.is_prepared = false;
96         dev->power.is_suspended = false;
97         dev->power.is_noirq_suspended = false;
98         dev->power.is_late_suspended = false;
99         init_completion(&dev->power.completion);
100         complete_all(&dev->power.completion);
101         dev->power.wakeup = NULL;
102         INIT_LIST_HEAD(&dev->power.entry);
103 }
104
105 /**
106  * device_pm_lock - Lock the list of active devices used by the PM core.
107  */
108 void device_pm_lock(void)
109 {
110         mutex_lock(&dpm_list_mtx);
111 }
112
113 /**
114  * device_pm_unlock - Unlock the list of active devices used by the PM core.
115  */
116 void device_pm_unlock(void)
117 {
118         mutex_unlock(&dpm_list_mtx);
119 }
120
121 /**
122  * device_pm_add - Add a device to the PM core's list of active devices.
123  * @dev: Device to add to the list.
124  */
125 void device_pm_add(struct device *dev)
126 {
127         /* Skip PM setup/initialization. */
128         if (device_pm_not_required(dev))
129                 return;
130
131         pr_debug("PM: Adding info for %s:%s\n",
132                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
133         device_pm_check_callbacks(dev);
134         mutex_lock(&dpm_list_mtx);
135         if (dev->parent && dev->parent->power.is_prepared)
136                 dev_warn(dev, "parent %s should not be sleeping\n",
137                         dev_name(dev->parent));
138         list_add_tail(&dev->power.entry, &dpm_list);
139         dev->power.in_dpm_list = true;
140         mutex_unlock(&dpm_list_mtx);
141 }
142
143 /**
144  * device_pm_remove - Remove a device from the PM core's list of active devices.
145  * @dev: Device to be removed from the list.
146  */
147 void device_pm_remove(struct device *dev)
148 {
149         if (device_pm_not_required(dev))
150                 return;
151
152         pr_debug("PM: Removing info for %s:%s\n",
153                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
154         complete_all(&dev->power.completion);
155         mutex_lock(&dpm_list_mtx);
156         list_del_init(&dev->power.entry);
157         dev->power.in_dpm_list = false;
158         mutex_unlock(&dpm_list_mtx);
159         device_wakeup_disable(dev);
160         pm_runtime_remove(dev);
161         device_pm_check_callbacks(dev);
162 }
163
164 /**
165  * device_pm_move_before - Move device in the PM core's list of active devices.
166  * @deva: Device to move in dpm_list.
167  * @devb: Device @deva should come before.
168  */
169 void device_pm_move_before(struct device *deva, struct device *devb)
170 {
171         pr_debug("PM: Moving %s:%s before %s:%s\n",
172                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
173                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
174         /* Delete deva from dpm_list and reinsert before devb. */
175         list_move_tail(&deva->power.entry, &devb->power.entry);
176 }
177
178 /**
179  * device_pm_move_after - Move device in the PM core's list of active devices.
180  * @deva: Device to move in dpm_list.
181  * @devb: Device @deva should come after.
182  */
183 void device_pm_move_after(struct device *deva, struct device *devb)
184 {
185         pr_debug("PM: Moving %s:%s after %s:%s\n",
186                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
187                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
188         /* Delete deva from dpm_list and reinsert after devb. */
189         list_move(&deva->power.entry, &devb->power.entry);
190 }
191
192 /**
193  * device_pm_move_last - Move device to end of the PM core's list of devices.
194  * @dev: Device to move in dpm_list.
195  */
196 void device_pm_move_last(struct device *dev)
197 {
198         pr_debug("PM: Moving %s:%s to end of list\n",
199                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
200         list_move_tail(&dev->power.entry, &dpm_list);
201 }
202
203 static ktime_t initcall_debug_start(struct device *dev, void *cb)
204 {
205         if (!pm_print_times_enabled)
206                 return 0;
207
208         dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
209                  task_pid_nr(current),
210                  dev->parent ? dev_name(dev->parent) : "none");
211         return ktime_get();
212 }
213
214 static void initcall_debug_report(struct device *dev, ktime_t calltime,
215                                   void *cb, int error)
216 {
217         ktime_t rettime;
218         s64 nsecs;
219
220         if (!pm_print_times_enabled)
221                 return;
222
223         rettime = ktime_get();
224         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
225
226         dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
227                  (unsigned long long)nsecs >> 10);
228 }
229
230 /**
231  * dpm_wait - Wait for a PM operation to complete.
232  * @dev: Device to wait for.
233  * @async: If unset, wait only if the device's power.async_suspend flag is set.
234  */
235 static void dpm_wait(struct device *dev, bool async)
236 {
237         if (!dev)
238                 return;
239
240         if (async || (pm_async_enabled && dev->power.async_suspend))
241                 wait_for_completion(&dev->power.completion);
242 }
243
244 static int dpm_wait_fn(struct device *dev, void *async_ptr)
245 {
246         dpm_wait(dev, *((bool *)async_ptr));
247         return 0;
248 }
249
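/*
 * Wait for the PM completions of all children of @dev, subject to the
 * same async rules as dpm_wait().
 */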
250 static void dpm_wait_for_children(struct device *dev, bool async)
251 {
252         device_for_each_child(dev, &async, dpm_wait_fn);
253 }
254
255 static void dpm_wait_for_suppliers(struct device *dev, bool async)
256 {
257         struct device_link *link;
258         int idx;
259
260         idx = device_links_read_lock();
261
262         /*
263          * If the supplier goes away right after we've checked the link to it,
264          * we'll wait for its completion to change the state, but that's fine,
265          * because the only things that will block as a result are the SRCU
266          * callbacks freeing the link objects for the links in the list we're
267          * walking.
268          */
269         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
270                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
271                         dpm_wait(link->supplier, async);
272
273         device_links_read_unlock(idx);
274 }
275
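/*
 * Wait for the parent and all suppliers of @dev to complete their PM
 * callbacks.
 */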
276 static void dpm_wait_for_superior(struct device *dev, bool async)
277 {
278         dpm_wait(dev->parent, async);
279         dpm_wait_for_suppliers(dev, async);
280 }
281
282 static void dpm_wait_for_consumers(struct device *dev, bool async)
283 {
284         struct device_link *link;
285         int idx;
286
287         idx = device_links_read_lock();
288
289         /*
290          * The status of a device link can only be changed from "dormant" by a
291          * probe, but that cannot happen during system suspend/resume.  In
292          * theory it can change to "dormant" at that time, but then it is
293          * reasonable to wait for the target device anyway (eg. if it goes
294          * away, it's better to wait for it to go away completely and then
295          * continue instead of trying to continue in parallel with its
296          * unregistration).
297          */
298         list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
299                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
300                         dpm_wait(link->consumer, async);
301
302         device_links_read_unlock(idx);
303 }
304
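/*
 * Wait for all children and all consumers of @dev to complete their PM
 * callbacks.
 */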
305 static void dpm_wait_for_subordinate(struct device *dev, bool async)
306 {
307         dpm_wait_for_children(dev, async);
308         dpm_wait_for_consumers(dev, async);
309 }
310
311 /**
312  * pm_op - Return the PM operation appropriate for given PM event.
313  * @ops: PM operations to choose from.
314  * @state: PM transition of the system being carried out.
315  */
316 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
317 {
318         switch (state.event) {
319 #ifdef CONFIG_SUSPEND
320         case PM_EVENT_SUSPEND:
321                 return ops->suspend;
322         case PM_EVENT_RESUME:
323                 return ops->resume;
324 #endif /* CONFIG_SUSPEND */
325 #ifdef CONFIG_HIBERNATE_CALLBACKS
326         case PM_EVENT_FREEZE:
327         case PM_EVENT_QUIESCE:
328                 return ops->freeze;
329         case PM_EVENT_HIBERNATE:
330                 return ops->poweroff;
331         case PM_EVENT_THAW:
332         case PM_EVENT_RECOVER:
333                 return ops->thaw;
335         case PM_EVENT_RESTORE:
336                 return ops->restore;
337 #endif /* CONFIG_HIBERNATE_CALLBACKS */
338         }
339
340         return NULL;
341 }
342
343 /**
344  * pm_late_early_op - Return the PM operation appropriate for given PM event.
345  * @ops: PM operations to choose from.
346  * @state: PM transition of the system being carried out.
347  *
348  * Runtime PM is disabled for @dev while this function is being executed.
349  */
350 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
351                                       pm_message_t state)
352 {
353         switch (state.event) {
354 #ifdef CONFIG_SUSPEND
355         case PM_EVENT_SUSPEND:
356                 return ops->suspend_late;
357         case PM_EVENT_RESUME:
358                 return ops->resume_early;
359 #endif /* CONFIG_SUSPEND */
360 #ifdef CONFIG_HIBERNATE_CALLBACKS
361         case PM_EVENT_FREEZE:
362         case PM_EVENT_QUIESCE:
363                 return ops->freeze_late;
364         case PM_EVENT_HIBERNATE:
365                 return ops->poweroff_late;
366         case PM_EVENT_THAW:
367         case PM_EVENT_RECOVER:
368                 return ops->thaw_early;
369         case PM_EVENT_RESTORE:
370                 return ops->restore_early;
371 #endif /* CONFIG_HIBERNATE_CALLBACKS */
372         }
373
374         return NULL;
375 }
376
377 /**
378  * pm_noirq_op - Return the PM operation appropriate for given PM event.
379  * @ops: PM operations to choose from.
380  * @state: PM transition of the system being carried out.
381  *
382  * The driver of @dev will not receive interrupts while this function is being
383  * executed.
384  */
385 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
386 {
387         switch (state.event) {
388 #ifdef CONFIG_SUSPEND
389         case PM_EVENT_SUSPEND:
390                 return ops->suspend_noirq;
391         case PM_EVENT_RESUME:
392                 return ops->resume_noirq;
393 #endif /* CONFIG_SUSPEND */
394 #ifdef CONFIG_HIBERNATE_CALLBACKS
395         case PM_EVENT_FREEZE:
396         case PM_EVENT_QUIESCE:
397                 return ops->freeze_noirq;
398         case PM_EVENT_HIBERNATE:
399                 return ops->poweroff_noirq;
400         case PM_EVENT_THAW:
401         case PM_EVENT_RECOVER:
402                 return ops->thaw_noirq;
403         case PM_EVENT_RESTORE:
404                 return ops->restore_noirq;
405 #endif /* CONFIG_HIBERNATE_CALLBACKS */
406         }
407
408         return NULL;
409 }
410
411 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
412 {
413         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
414                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
415                 ", may wakeup" : "");
416 }
417
418 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
419                         int error)
420 {
421         printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
422                 dev_name(dev), pm_verb(state.event), info, error);
423 }
424
425 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
426                           const char *info)
427 {
428         ktime_t calltime;
429         u64 usecs64;
430         int usecs;
431
432         calltime = ktime_get();
433         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
434         do_div(usecs64, NSEC_PER_USEC);
435         usecs = usecs64;
436         if (usecs == 0)
437                 usecs = 1;
438
439         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
440                   info ?: "", info ? " " : "", pm_verb(state.event),
441                   error ? "aborted" : "complete",
442                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
443 }
444
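/*
 * Invoke a PM callback (if any) for @dev with tracing, optional
 * initcall-style timing and suspend error reporting.
 */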
445 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
446                             pm_message_t state, const char *info)
447 {
448         ktime_t calltime;
449         int error;
450
451         if (!cb)
452                 return 0;
453
454         calltime = initcall_debug_start(dev, cb);
455
456         pm_dev_dbg(dev, state, info);
457         trace_device_pm_callback_start(dev, info, state.event);
458         error = cb(dev);
459         trace_device_pm_callback_end(dev, error);
460         suspend_report_result(cb, error);
461
462         initcall_debug_report(dev, calltime, cb, error);
463
464         return error;
465 }
466
467 #ifdef CONFIG_DPM_WATCHDOG
468 struct dpm_watchdog {
469         struct device           *dev;
470         struct task_struct      *tsk;
471         struct timer_list       timer;
472 };
473
474 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
475         struct dpm_watchdog wd
476
477 /**
478  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
479  * @t: Timer that fired; used to obtain the containing watchdog object.
480  *
481  * Called when a driver has timed out suspending or resuming.
482  * There's not much we can do here to recover so panic() to
483  * capture a crash-dump in pstore.
484  */
485 static void dpm_watchdog_handler(struct timer_list *t)
486 {
487         struct dpm_watchdog *wd = from_timer(wd, t, timer);
488
489         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
490         show_stack(wd->tsk, NULL);
491         panic("%s %s: unrecoverable failure\n",
492                 dev_driver_string(wd->dev), dev_name(wd->dev));
493 }
494
495 /**
496  * dpm_watchdog_set - Enable pm watchdog for given device.
497  * @wd: Watchdog. Must be allocated on the stack.
498  * @dev: Device to handle.
499  */
500 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
501 {
502         struct timer_list *timer = &wd->timer;
503
504         wd->dev = dev;
505         wd->tsk = current;
506
507         timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
508         /* use same timeout value for both suspend and resume */
509         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
510         add_timer(timer);
511 }
512
513 /**
514  * dpm_watchdog_clear - Disable suspend/resume watchdog.
515  * @wd: Watchdog to disable.
516  */
517 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
518 {
519         struct timer_list *timer = &wd->timer;
520
521         del_timer_sync(timer);
522         destroy_timer_on_stack(timer);
523 }
524 #else
525 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
526 #define dpm_watchdog_set(x, y)
527 #define dpm_watchdog_clear(x)
528 #endif
529
530 /*------------------------- Resume routines -------------------------*/
531
532 /**
533  * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
534  * @dev: Target device.
535  *
536  * Make the core skip the "early resume" and "resume" phases for @dev.
537  *
538  * This function can be called by middle-layer code during the "noirq" phase of
539  * system resume if necessary, but not by device drivers.
540  */
541 void dev_pm_skip_next_resume_phases(struct device *dev)
542 {
543         dev->power.is_late_suspended = false;
544         dev->power.is_suspended = false;
545 }
546
547 /**
548  * suspend_event - Return a "suspend" message for given "resume" one.
549  * @resume_msg: PM message representing a system-wide resume transition.
550  */
551 static pm_message_t suspend_event(pm_message_t resume_msg)
552 {
553         switch (resume_msg.event) {
554         case PM_EVENT_RESUME:
555                 return PMSG_SUSPEND;
556         case PM_EVENT_THAW:
557         case PM_EVENT_RESTORE:
558                 return PMSG_FREEZE;
559         case PM_EVENT_RECOVER:
560                 return PMSG_HIBERNATE;
561         }
562         return PMSG_ON;
563 }
564
565 /**
566  * dev_pm_may_skip_resume - System-wide device resume optimization check.
567  * @dev: Target device.
568  *
569  * Checks whether or not the device may be left in suspend after a system-wide
570  * transition to the working state.
571  */
572 bool dev_pm_may_skip_resume(struct device *dev)
573 {
574         return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
575 }
576
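/*
 * Return the subsystem-level (power domain, type, class or bus) "noirq"
 * resume callback for @dev, if any, and optionally a string describing
 * its origin via @info_p.
 */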
577 static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
578                                                 pm_message_t state,
579                                                 const char **info_p)
580 {
581         pm_callback_t callback;
582         const char *info;
583
584         if (dev->pm_domain) {
585                 info = "noirq power domain ";
586                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
587         } else if (dev->type && dev->type->pm) {
588                 info = "noirq type ";
589                 callback = pm_noirq_op(dev->type->pm, state);
590         } else if (dev->class && dev->class->pm) {
591                 info = "noirq class ";
592                 callback = pm_noirq_op(dev->class->pm, state);
593         } else if (dev->bus && dev->bus->pm) {
594                 info = "noirq bus ";
595                 callback = pm_noirq_op(dev->bus->pm, state);
596         } else {
597                 return NULL;
598         }
599
600         if (info_p)
601                 *info_p = info;
602
603         return callback;
604 }
605
606 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
607                                                  pm_message_t state,
608                                                  const char **info_p);
609
610 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
611                                                 pm_message_t state,
612                                                 const char **info_p);
613
614 /**
615  * device_resume_noirq - Execute a "noirq resume" callback for given device.
616  * @dev: Device to handle.
617  * @state: PM transition of the system being carried out.
618  * @async: If true, the device is being resumed asynchronously.
619  *
620  * The driver of @dev will not receive interrupts while this function is being
621  * executed.
622  */
623 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
624 {
625         pm_callback_t callback;
626         const char *info;
627         bool skip_resume;
628         int error = 0;
629
630         TRACE_DEVICE(dev);
631         TRACE_RESUME(0);
632
633         if (dev->power.syscore || dev->power.direct_complete)
634                 goto Out;
635
636         if (!dev->power.is_noirq_suspended)
637                 goto Out;
638
639         dpm_wait_for_superior(dev, async);
640
641         skip_resume = dev_pm_may_skip_resume(dev);
642
643         callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
644         if (callback)
645                 goto Run;
646
647         if (skip_resume)
648                 goto Skip;
649
650         if (dev_pm_smart_suspend_and_suspended(dev)) {
651                 pm_message_t suspend_msg = suspend_event(state);
652
653                 /*
654                  * If "freeze" callbacks have been skipped during a transition
655                  * related to hibernation, the subsequent "thaw" callbacks must
656                  * be skipped too or bad things may happen.  Otherwise, resume
657                  * callbacks are going to be run for the device, so its runtime
658                  * PM status must be changed to reflect the new state after the
659                  * transition under way.
660                  */
661                 if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
662                     !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
663                         if (state.event == PM_EVENT_THAW) {
664                                 skip_resume = true;
665                                 goto Skip;
666                         } else {
667                                 pm_runtime_set_active(dev);
668                         }
669                 }
670         }
671
672         if (dev->driver && dev->driver->pm) {
673                 info = "noirq driver ";
674                 callback = pm_noirq_op(dev->driver->pm, state);
675         }
676
677 Run:
678         error = dpm_run_callback(callback, dev, state, info);
679
680 Skip:
681         dev->power.is_noirq_suspended = false;
682
683         if (skip_resume) {
684                 /*
685                  * The device is going to be left in suspend, but it might not
686                  * have been in runtime suspend before the system suspended, so
687                  * its runtime PM status needs to be updated to avoid confusing
688                  * the runtime PM framework when runtime PM is enabled for the
689                  * device again.
690                  */
691                 pm_runtime_set_suspended(dev);
692                 dev_pm_skip_next_resume_phases(dev);
693         }
694
695 Out:
696         complete_all(&dev->power.completion);
697         TRACE_RESUME(error);
698         return error;
699 }
700
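/*
 * A device is handled asynchronously only if it has set
 * power.async_suspend, async PM is globally enabled and PM tracing is
 * not active.
 */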
701 static bool is_async(struct device *dev)
702 {
703         return dev->power.async_suspend && pm_async_enabled
704                 && !pm_trace_is_enabled();
705 }
706
707 static void async_resume_noirq(void *data, async_cookie_t cookie)
708 {
709         struct device *dev = (struct device *)data;
710         int error;
711
712         error = device_resume_noirq(dev, pm_transition, true);
713         if (error)
714                 pm_dev_err(dev, pm_transition, " async", error);
715
716         put_device(dev);
717 }
718
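/*
 * Run the "noirq" resume callbacks for all devices in dpm_noirq_list
 * and move them to dpm_late_early_list.
 */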
719 void dpm_noirq_resume_devices(pm_message_t state)
720 {
721         struct device *dev;
722         ktime_t starttime = ktime_get();
723
724         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
725         mutex_lock(&dpm_list_mtx);
726         pm_transition = state;
727
728         /*
729          * Kick off the async resume threads upfront, so that they
730          * are not delayed behind devices that have to be resumed
731          * synchronously.
732          */
733         list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
734                 reinit_completion(&dev->power.completion);
735                 if (is_async(dev)) {
736                         get_device(dev);
737                         async_schedule_dev(async_resume_noirq, dev);
738                 }
739         }
740
741         while (!list_empty(&dpm_noirq_list)) {
742                 dev = to_device(dpm_noirq_list.next);
743                 get_device(dev);
744                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
745                 mutex_unlock(&dpm_list_mtx);
746
747                 if (!is_async(dev)) {
748                         int error;
749
750                         error = device_resume_noirq(dev, state, false);
751                         if (error) {
752                                 suspend_stats.failed_resume_noirq++;
753                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
754                                 dpm_save_failed_dev(dev_name(dev));
755                                 pm_dev_err(dev, state, " noirq", error);
756                         }
757                 }
758
759                 mutex_lock(&dpm_list_mtx);
760                 put_device(dev);
761         }
762         mutex_unlock(&dpm_list_mtx);
763         async_synchronize_full();
764         dpm_show_time(starttime, state, 0, "noirq");
765         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
766 }
767
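/*
 * Undo dpm_noirq_begin(): re-enable device interrupts, disarm wake IRQs
 * and resume cpuidle.
 */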
768 void dpm_noirq_end(void)
769 {
770         resume_device_irqs();
771         device_wakeup_disarm_wake_irqs();
772         cpuidle_resume();
773 }
774
775 /**
776  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
777  * @state: PM transition of the system being carried out.
778  *
779  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
780  * allow device drivers' interrupt handlers to be called.
781  */
782 void dpm_resume_noirq(pm_message_t state)
783 {
784         dpm_noirq_resume_devices(state);
785         dpm_noirq_end();
786 }
787
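/*
 * Return the subsystem-level "early" resume callback for @dev, if any,
 * and optionally a string describing its origin via @info_p.
 */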
788 static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
789                                                 pm_message_t state,
790                                                 const char **info_p)
791 {
792         pm_callback_t callback;
793         const char *info;
794
795         if (dev->pm_domain) {
796                 info = "early power domain ";
797                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
798         } else if (dev->type && dev->type->pm) {
799                 info = "early type ";
800                 callback = pm_late_early_op(dev->type->pm, state);
801         } else if (dev->class && dev->class->pm) {
802                 info = "early class ";
803                 callback = pm_late_early_op(dev->class->pm, state);
804         } else if (dev->bus && dev->bus->pm) {
805                 info = "early bus ";
806                 callback = pm_late_early_op(dev->bus->pm, state);
807         } else {
808                 return NULL;
809         }
810
811         if (info_p)
812                 *info_p = info;
813
814         return callback;
815 }
816
817 /**
818  * device_resume_early - Execute an "early resume" callback for given device.
819  * @dev: Device to handle.
820  * @state: PM transition of the system being carried out.
821  * @async: If true, the device is being resumed asynchronously.
822  *
823  * Runtime PM is disabled for @dev while this function is being executed.
824  */
825 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
826 {
827         pm_callback_t callback;
828         const char *info;
829         int error = 0;
830
831         TRACE_DEVICE(dev);
832         TRACE_RESUME(0);
833
834         if (dev->power.syscore || dev->power.direct_complete)
835                 goto Out;
836
837         if (!dev->power.is_late_suspended)
838                 goto Out;
839
840         dpm_wait_for_superior(dev, async);
841
842         callback = dpm_subsys_resume_early_cb(dev, state, &info);
843
844         if (!callback && dev->driver && dev->driver->pm) {
845                 info = "early driver ";
846                 callback = pm_late_early_op(dev->driver->pm, state);
847         }
848
849         error = dpm_run_callback(callback, dev, state, info);
850         dev->power.is_late_suspended = false;
851
852  Out:
853         TRACE_RESUME(error);
854
855         pm_runtime_enable(dev);
856         complete_all(&dev->power.completion);
857         return error;
858 }
859
860 static void async_resume_early(void *data, async_cookie_t cookie)
861 {
862         struct device *dev = (struct device *)data;
863         int error;
864
865         error = device_resume_early(dev, pm_transition, true);
866         if (error)
867                 pm_dev_err(dev, pm_transition, " async", error);
868
869         put_device(dev);
870 }
871
872 /**
873  * dpm_resume_early - Execute "early resume" callbacks for all devices.
874  * @state: PM transition of the system being carried out.
875  */
876 void dpm_resume_early(pm_message_t state)
877 {
878         struct device *dev;
879         ktime_t starttime = ktime_get();
880
881         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
882         mutex_lock(&dpm_list_mtx);
883         pm_transition = state;
884
885         /*
886          * Kick off the async resume threads upfront, so that they
887          * are not delayed behind devices that have to be resumed
888          * synchronously.
889          */
890         list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
891                 reinit_completion(&dev->power.completion);
892                 if (is_async(dev)) {
893                         get_device(dev);
894                         async_schedule_dev(async_resume_early, dev);
895                 }
896         }
897
898         while (!list_empty(&dpm_late_early_list)) {
899                 dev = to_device(dpm_late_early_list.next);
900                 get_device(dev);
901                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
902                 mutex_unlock(&dpm_list_mtx);
903
904                 if (!is_async(dev)) {
905                         int error;
906
907                         error = device_resume_early(dev, state, false);
908                         if (error) {
909                                 suspend_stats.failed_resume_early++;
910                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
911                                 dpm_save_failed_dev(dev_name(dev));
912                                 pm_dev_err(dev, state, " early", error);
913                         }
914                 }
915                 mutex_lock(&dpm_list_mtx);
916                 put_device(dev);
917         }
918         mutex_unlock(&dpm_list_mtx);
919         async_synchronize_full();
920         dpm_show_time(starttime, state, 0, "early");
921         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
922 }
923
924 /**
925  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
926  * @state: PM transition of the system being carried out.
927  */
928 void dpm_resume_start(pm_message_t state)
929 {
930         dpm_resume_noirq(state);
931         dpm_resume_early(state);
932 }
933 EXPORT_SYMBOL_GPL(dpm_resume_start);
934
935 /**
936  * device_resume - Execute "resume" callbacks for given device.
937  * @dev: Device to handle.
938  * @state: PM transition of the system being carried out.
939  * @async: If true, the device is being resumed asynchronously.
940  */
941 static int device_resume(struct device *dev, pm_message_t state, bool async)
942 {
943         pm_callback_t callback = NULL;
944         const char *info = NULL;
945         int error = 0;
946         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
947
948         TRACE_DEVICE(dev);
949         TRACE_RESUME(0);
950
951         if (dev->power.syscore)
952                 goto Complete;
953
954         if (dev->power.direct_complete) {
955                 /* Match the pm_runtime_disable() in __device_suspend(). */
956                 pm_runtime_enable(dev);
957                 goto Complete;
958         }
959
960         dpm_wait_for_superior(dev, async);
961         dpm_watchdog_set(&wd, dev);
962         device_lock(dev);
963
964         /*
965          * This is a fib.  But we'll allow new children to be added below
966          * a resumed device, even if the device hasn't been completed yet.
967          */
968         dev->power.is_prepared = false;
969
970         if (!dev->power.is_suspended)
971                 goto Unlock;
972
973         if (dev->pm_domain) {
974                 info = "power domain ";
975                 callback = pm_op(&dev->pm_domain->ops, state);
976                 goto Driver;
977         }
978
979         if (dev->type && dev->type->pm) {
980                 info = "type ";
981                 callback = pm_op(dev->type->pm, state);
982                 goto Driver;
983         }
984
985         if (dev->class && dev->class->pm) {
986                 info = "class ";
987                 callback = pm_op(dev->class->pm, state);
988                 goto Driver;
989         }
990
991         if (dev->bus) {
992                 if (dev->bus->pm) {
993                         info = "bus ";
994                         callback = pm_op(dev->bus->pm, state);
995                 } else if (dev->bus->resume) {
996                         info = "legacy bus ";
997                         callback = dev->bus->resume;
998                         goto End;
999                 }
1000         }
1001
1002  Driver:
1003         if (!callback && dev->driver && dev->driver->pm) {
1004                 info = "driver ";
1005                 callback = pm_op(dev->driver->pm, state);
1006         }
1007
1008  End:
1009         error = dpm_run_callback(callback, dev, state, info);
1010         dev->power.is_suspended = false;
1011
1012  Unlock:
1013         device_unlock(dev);
1014         dpm_watchdog_clear(&wd);
1015
1016  Complete:
1017         complete_all(&dev->power.completion);
1018
1019         TRACE_RESUME(error);
1020
1021         return error;
1022 }
1023
1024 static void async_resume(void *data, async_cookie_t cookie)
1025 {
1026         struct device *dev = (struct device *)data;
1027         int error;
1028
1029         error = device_resume(dev, pm_transition, true);
1030         if (error)
1031                 pm_dev_err(dev, pm_transition, " async", error);
1032         put_device(dev);
1033 }
1034
1035 /**
1036  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1037  * @state: PM transition of the system being carried out.
1038  *
1039  * Execute the appropriate "resume" callback for all devices whose status
1040  * indicates that they are suspended.
1041  */
1042 void dpm_resume(pm_message_t state)
1043 {
1044         struct device *dev;
1045         ktime_t starttime = ktime_get();
1046
1047         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1048         might_sleep();
1049
1050         mutex_lock(&dpm_list_mtx);
1051         pm_transition = state;
1052         async_error = 0;
1053
1054         list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1055                 reinit_completion(&dev->power.completion);
1056                 if (is_async(dev)) {
1057                         get_device(dev);
1058                         async_schedule_dev(async_resume, dev);
1059                 }
1060         }
1061
1062         while (!list_empty(&dpm_suspended_list)) {
1063                 dev = to_device(dpm_suspended_list.next);
1064                 get_device(dev);
1065                 if (!is_async(dev)) {
1066                         int error;
1067
1068                         mutex_unlock(&dpm_list_mtx);
1069
1070                         error = device_resume(dev, state, false);
1071                         if (error) {
1072                                 suspend_stats.failed_resume++;
1073                                 dpm_save_failed_step(SUSPEND_RESUME);
1074                                 dpm_save_failed_dev(dev_name(dev));
1075                                 pm_dev_err(dev, state, "", error);
1076                         }
1077
1078                         mutex_lock(&dpm_list_mtx);
1079                 }
1080                 if (!list_empty(&dev->power.entry))
1081                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1082                 put_device(dev);
1083         }
1084         mutex_unlock(&dpm_list_mtx);
1085         async_synchronize_full();
1086         dpm_show_time(starttime, state, 0, NULL);
1087
1088         cpufreq_resume();
1089         devfreq_resume();
1090         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1091 }
1092
1093 /**
1094  * device_complete - Complete a PM transition for given device.
1095  * @dev: Device to handle.
1096  * @state: PM transition of the system being carried out.
1097  */
1098 static void device_complete(struct device *dev, pm_message_t state)
1099 {
1100         void (*callback)(struct device *) = NULL;
1101         const char *info = NULL;
1102
1103         if (dev->power.syscore)
1104                 return;
1105
1106         device_lock(dev);
1107
1108         if (dev->pm_domain) {
1109                 info = "completing power domain ";
1110                 callback = dev->pm_domain->ops.complete;
1111         } else if (dev->type && dev->type->pm) {
1112                 info = "completing type ";
1113                 callback = dev->type->pm->complete;
1114         } else if (dev->class && dev->class->pm) {
1115                 info = "completing class ";
1116                 callback = dev->class->pm->complete;
1117         } else if (dev->bus && dev->bus->pm) {
1118                 info = "completing bus ";
1119                 callback = dev->bus->pm->complete;
1120         }
1121
1122         if (!callback && dev->driver && dev->driver->pm) {
1123                 info = "completing driver ";
1124                 callback = dev->driver->pm->complete;
1125         }
1126
1127         if (callback) {
1128                 pm_dev_dbg(dev, state, info);
1129                 callback(dev);
1130         }
1131
1132         device_unlock(dev);
1133
1134         pm_runtime_put(dev);
1135 }
1136
1137 /**
1138  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1139  * @state: PM transition of the system being carried out.
1140  *
1141  * Execute the ->complete() callbacks for all devices whose PM status is not
1142  * DPM_ON (this allows new devices to be registered).
1143  */
1144 void dpm_complete(pm_message_t state)
1145 {
1146         struct list_head list;
1147
1148         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1149         might_sleep();
1150
1151         INIT_LIST_HEAD(&list);
1152         mutex_lock(&dpm_list_mtx);
1153         while (!list_empty(&dpm_prepared_list)) {
1154                 struct device *dev = to_device(dpm_prepared_list.prev);
1155
1156                 get_device(dev);
1157                 dev->power.is_prepared = false;
1158                 list_move(&dev->power.entry, &list);
1159                 mutex_unlock(&dpm_list_mtx);
1160
1161                 trace_device_pm_callback_start(dev, "", state.event);
1162                 device_complete(dev, state);
1163                 trace_device_pm_callback_end(dev, 0);
1164
1165                 mutex_lock(&dpm_list_mtx);
1166                 put_device(dev);
1167         }
1168         list_splice(&list, &dpm_list);
1169         mutex_unlock(&dpm_list_mtx);
1170
1171         /* Allow device probing and trigger re-probing of deferred devices */
1172         device_unblock_probing();
1173         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1174 }
1175
1176 /**
1177  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1178  * @state: PM transition of the system being carried out.
1179  *
1180  * Execute "resume" callbacks for all devices and complete the PM transition of
1181  * the system.
1182  */
1183 void dpm_resume_end(pm_message_t state)
1184 {
1185         dpm_resume(state);
1186         dpm_complete(state);
1187 }
1188 EXPORT_SYMBOL_GPL(dpm_resume_end);
1189
1190
1191 /*------------------------- Suspend routines -------------------------*/
1192
1193 /**
1194  * resume_event - Return a "resume" message for given "suspend" sleep state.
1195  * @sleep_state: PM message representing a sleep state.
1196  *
1197  * Return a PM message representing the resume event corresponding to given
1198  * sleep state.
1199  */
1200 static pm_message_t resume_event(pm_message_t sleep_state)
1201 {
1202         switch (sleep_state.event) {
1203         case PM_EVENT_SUSPEND:
1204                 return PMSG_RESUME;
1205         case PM_EVENT_FREEZE:
1206         case PM_EVENT_QUIESCE:
1207                 return PMSG_RECOVER;
1208         case PM_EVENT_HIBERNATE:
1209                 return PMSG_RESTORE;
1210         }
1211         return PMSG_ON;
1212 }
1213
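/*
 * Mark the parent and all suppliers of @dev as devices that must be
 * resumed, so they are not left in suspend while @dev is resumed.
 */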
1214 static void dpm_superior_set_must_resume(struct device *dev)
1215 {
1216         struct device_link *link;
1217         int idx;
1218
1219         if (dev->parent)
1220                 dev->parent->power.must_resume = true;
1221
1222         idx = device_links_read_lock();
1223
1224         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1225                 link->supplier->power.must_resume = true;
1226
1227         device_links_read_unlock(idx);
1228 }
1229
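/*
 * Return the subsystem-level "noirq" suspend callback for @dev, if any,
 * and optionally a string describing its origin via @info_p.
 */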
1230 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1231                                                  pm_message_t state,
1232                                                  const char **info_p)
1233 {
1234         pm_callback_t callback;
1235         const char *info;
1236
1237         if (dev->pm_domain) {
1238                 info = "noirq power domain ";
1239                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1240         } else if (dev->type && dev->type->pm) {
1241                 info = "noirq type ";
1242                 callback = pm_noirq_op(dev->type->pm, state);
1243         } else if (dev->class && dev->class->pm) {
1244                 info = "noirq class ";
1245                 callback = pm_noirq_op(dev->class->pm, state);
1246         } else if (dev->bus && dev->bus->pm) {
1247                 info = "noirq bus ";
1248                 callback = pm_noirq_op(dev->bus->pm, state);
1249         } else {
1250                 return NULL;
1251         }
1252
1253         if (info_p)
1254                 *info_p = info;
1255
1256         return callback;
1257 }
1258
1259 static bool device_must_resume(struct device *dev, pm_message_t state,
1260                                bool no_subsys_suspend_noirq)
1261 {
1262         pm_message_t resume_msg = resume_event(state);
1263
1264         /*
1265          * If all of the device driver's "noirq", "late" and "early" callbacks
1266          * are invoked directly by the core, the decision to allow the device to
1267          * stay in suspend can be based on its current runtime PM status and its
1268          * wakeup settings.
1269          */
1270         if (no_subsys_suspend_noirq &&
1271             !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1272             !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1273             !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1274                 return !pm_runtime_status_suspended(dev) &&
1275                         (resume_msg.event != PM_EVENT_RESUME ||
1276                          (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1277
1278         /*
1279          * The only safe strategy here is to require that if the device may not
1280          * be left in suspend, resume callbacks must be invoked for it.
1281          */
1282         return !dev->power.may_skip_resume;
1283 }
1284
1285 /**
1286  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1287  * @dev: Device to handle.
1288  * @state: PM transition of the system being carried out.
1289  * @async: If true, the device is being suspended asynchronously.
1290  *
1291  * The driver of @dev will not receive interrupts while this function is being
1292  * executed.
1293  */
1294 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1295 {
1296         pm_callback_t callback;
1297         const char *info;
1298         bool no_subsys_cb = false;
1299         int error = 0;
1300
1301         TRACE_DEVICE(dev);
1302         TRACE_SUSPEND(0);
1303
1304         dpm_wait_for_subordinate(dev, async);
1305
1306         if (async_error)
1307                 goto Complete;
1308
1309         if (pm_wakeup_pending()) {
1310                 async_error = -EBUSY;
1311                 goto Complete;
1312         }
1313
1314         if (dev->power.syscore || dev->power.direct_complete)
1315                 goto Complete;
1316
1317         callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1318         if (callback)
1319                 goto Run;
1320
1321         no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1322
1323         if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1324                 goto Skip;
1325
1326         if (dev->driver && dev->driver->pm) {
1327                 info = "noirq driver ";
1328                 callback = pm_noirq_op(dev->driver->pm, state);
1329         }
1330
1331 Run:
1332         error = dpm_run_callback(callback, dev, state, info);
1333         if (error) {
1334                 async_error = error;
1335                 goto Complete;
1336         }
1337
1338 Skip:
1339         dev->power.is_noirq_suspended = true;
1340
1341         if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1342                 dev->power.must_resume = dev->power.must_resume ||
1343                                 atomic_read(&dev->power.usage_count) > 1 ||
1344                                 device_must_resume(dev, state, no_subsys_cb);
1345         } else {
1346                 dev->power.must_resume = true;
1347         }
1348
1349         if (dev->power.must_resume)
1350                 dpm_superior_set_must_resume(dev);
1351
1352 Complete:
1353         complete_all(&dev->power.completion);
1354         TRACE_SUSPEND(error);
1355         return error;
1356 }
1357
1358 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1359 {
1360         struct device *dev = (struct device *)data;
1361         int error;
1362
1363         error = __device_suspend_noirq(dev, pm_transition, true);
1364         if (error) {
1365                 dpm_save_failed_dev(dev_name(dev));
1366                 pm_dev_err(dev, pm_transition, " async", error);
1367         }
1368
1369         put_device(dev);
1370 }
1371
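/*
 * Run the "noirq" suspend of @dev either in an async thread (if the
 * device qualifies) or synchronously in the caller's context.
 */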
1372 static int device_suspend_noirq(struct device *dev)
1373 {
1374         reinit_completion(&dev->power.completion);
1375
1376         if (is_async(dev)) {
1377                 get_device(dev);
1378                 async_schedule_dev(async_suspend_noirq, dev);
1379                 return 0;
1380         }
1381         return __device_suspend_noirq(dev, pm_transition, false);
1382 }
1383
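/*
 * Pause cpuidle, arm wake IRQs and disable device interrupts before the
 * "noirq" suspend phase.
 */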
1384 void dpm_noirq_begin(void)
1385 {
1386         cpuidle_pause();
1387         device_wakeup_arm_wake_irqs();
1388         suspend_device_irqs();
1389 }
1390
1391 int dpm_noirq_suspend_devices(pm_message_t state)
1392 {
1393         ktime_t starttime = ktime_get();
1394         int error = 0;
1395
1396         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1397         mutex_lock(&dpm_list_mtx);
1398         pm_transition = state;
1399         async_error = 0;
1400
1401         while (!list_empty(&dpm_late_early_list)) {
1402                 struct device *dev = to_device(dpm_late_early_list.prev);
1403
1404                 get_device(dev);
1405                 mutex_unlock(&dpm_list_mtx);
1406
1407                 error = device_suspend_noirq(dev);
1408
1409                 mutex_lock(&dpm_list_mtx);
1410                 if (error) {
1411                         pm_dev_err(dev, state, " noirq", error);
1412                         dpm_save_failed_dev(dev_name(dev));
1413                         put_device(dev);
1414                         break;
1415                 }
1416                 if (!list_empty(&dev->power.entry))
1417                         list_move(&dev->power.entry, &dpm_noirq_list);
1418                 put_device(dev);
1419
1420                 if (async_error)
1421                         break;
1422         }
1423         mutex_unlock(&dpm_list_mtx);
1424         async_synchronize_full();
1425         if (!error)
1426                 error = async_error;
1427
1428         if (error) {
1429                 suspend_stats.failed_suspend_noirq++;
1430                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1431         }
1432         dpm_show_time(starttime, state, error, "noirq");
1433         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1434         return error;
1435 }
1436
1437 /**
1438  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1439  * @state: PM transition of the system being carried out.
1440  *
1441  * Prevent device drivers' interrupt handlers from being called and invoke
1442  * "noirq" suspend callbacks for all non-sysdev devices.
1443  */
1444 int dpm_suspend_noirq(pm_message_t state)
1445 {
1446         int ret;
1447
1448         dpm_noirq_begin();
1449         ret = dpm_noirq_suspend_devices(state);
1450         if (ret)
1451                 dpm_resume_noirq(resume_event(state));
1452
1453         return ret;
1454 }
1455
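/*
 * If @dev is part of a wakeup path, propagate that information to its
 * parent, unless the parent ignores its children for PM purposes.
 */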
1456 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1457 {
1458         struct device *parent = dev->parent;
1459
1460         if (!parent)
1461                 return;
1462
1463         spin_lock_irq(&parent->power.lock);
1464
1465         if (dev->power.wakeup_path && !parent->power.ignore_children)
1466                 parent->power.wakeup_path = true;
1467
1468         spin_unlock_irq(&parent->power.lock);
1469 }
1470
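/*
 * Return the subsystem-level "late" suspend callback for @dev, if any,
 * and optionally a string describing its origin via @info_p.
 */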
1471 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1472                                                 pm_message_t state,
1473                                                 const char **info_p)
1474 {
1475         pm_callback_t callback;
1476         const char *info;
1477
1478         if (dev->pm_domain) {
1479                 info = "late power domain ";
1480                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1481         } else if (dev->type && dev->type->pm) {
1482                 info = "late type ";
1483                 callback = pm_late_early_op(dev->type->pm, state);
1484         } else if (dev->class && dev->class->pm) {
1485                 info = "late class ";
1486                 callback = pm_late_early_op(dev->class->pm, state);
1487         } else if (dev->bus && dev->bus->pm) {
1488                 info = "late bus ";
1489                 callback = pm_late_early_op(dev->bus->pm, state);
1490         } else {
1491                 return NULL;
1492         }
1493
1494         if (info_p)
1495                 *info_p = info;
1496
1497         return callback;
1498 }
1499
1500 /**
1501  * __device_suspend_late - Execute a "late suspend" callback for given device.
1502  * @dev: Device to handle.
1503  * @state: PM transition of the system being carried out.
1504  * @async: If true, the device is being suspended asynchronously.
1505  *
1506  * Runtime PM is disabled for @dev while this function is being executed.
1507  */
1508 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1509 {
1510         pm_callback_t callback;
1511         const char *info;
1512         int error = 0;
1513
1514         TRACE_DEVICE(dev);
1515         TRACE_SUSPEND(0);
1516
1517         __pm_runtime_disable(dev, false);
1518
1519         dpm_wait_for_subordinate(dev, async);
1520
1521         if (async_error)
1522                 goto Complete;
1523
1524         if (pm_wakeup_pending()) {
1525                 async_error = -EBUSY;
1526                 goto Complete;
1527         }
1528
1529         if (dev->power.syscore || dev->power.direct_complete)
1530                 goto Complete;
1531
1532         callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1533         if (callback)
1534                 goto Run;
1535
1536         if (dev_pm_smart_suspend_and_suspended(dev) &&
1537             !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1538                 goto Skip;
1539
1540         if (dev->driver && dev->driver->pm) {
1541                 info = "late driver ";
1542                 callback = pm_late_early_op(dev->driver->pm, state);
1543         }
1544
1545 Run:
1546         error = dpm_run_callback(callback, dev, state, info);
1547         if (error) {
1548                 async_error = error;
1549                 goto Complete;
1550         }
1551         dpm_propagate_wakeup_to_parent(dev);
1552
1553 Skip:
1554         dev->power.is_late_suspended = true;
1555
1556 Complete:
1557         TRACE_SUSPEND(error);
1558         complete_all(&dev->power.completion);
1559         return error;
1560 }
1561
1562 static void async_suspend_late(void *data, async_cookie_t cookie)
1563 {
1564         struct device *dev = (struct device *)data;
1565         int error;
1566
1567         error = __device_suspend_late(dev, pm_transition, true);
1568         if (error) {
1569                 dpm_save_failed_dev(dev_name(dev));
1570                 pm_dev_err(dev, pm_transition, " async", error);
1571         }
1572         put_device(dev);
1573 }
1574
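/*
 * Run the "late" suspend of @dev either in an async thread (if the
 * device qualifies) or synchronously in the caller's context.
 */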
1575 static int device_suspend_late(struct device *dev)
1576 {
1577         reinit_completion(&dev->power.completion);
1578
1579         if (is_async(dev)) {
1580                 get_device(dev);
1581                 async_schedule_dev(async_suspend_late, dev);
1582                 return 0;
1583         }
1584
1585         return __device_suspend_late(dev, pm_transition, false);
1586 }
1587
1588 /**
1589  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1590  * @state: PM transition of the system being carried out.
1591  */
1592 int dpm_suspend_late(pm_message_t state)
1593 {
1594         ktime_t starttime = ktime_get();
1595         int error = 0;
1596
1597         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1598         mutex_lock(&dpm_list_mtx);
1599         pm_transition = state;
1600         async_error = 0;
1601
1602         while (!list_empty(&dpm_suspended_list)) {
1603                 struct device *dev = to_device(dpm_suspended_list.prev);
1604
1605                 get_device(dev);
1606                 mutex_unlock(&dpm_list_mtx);
1607
1608                 error = device_suspend_late(dev);
1609
1610                 mutex_lock(&dpm_list_mtx);
1611                 if (!list_empty(&dev->power.entry))
1612                         list_move(&dev->power.entry, &dpm_late_early_list);
1613
1614                 if (error) {
1615                         pm_dev_err(dev, state, " late", error);
1616                         dpm_save_failed_dev(dev_name(dev));
1617                         put_device(dev);
1618                         break;
1619                 }
1620                 put_device(dev);
1621
1622                 if (async_error)
1623                         break;
1624         }
1625         mutex_unlock(&dpm_list_mtx);
1626         async_synchronize_full();
1627         if (!error)
1628                 error = async_error;
1629         if (error) {
1630                 suspend_stats.failed_suspend_late++;
1631                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1632                 dpm_resume_early(resume_event(state));
1633         }
1634         dpm_show_time(starttime, state, error, "late");
1635         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1636         return error;
1637 }
1638
1639 /**
1640  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1641  * @state: PM transition of the system being carried out.
1642  */
1643 int dpm_suspend_end(pm_message_t state)
1644 {
1645         int error = dpm_suspend_late(state);
1646         if (error)
1647                 return error;
1648
1649         error = dpm_suspend_noirq(state);
1650         if (error) {
1651                 dpm_resume_early(resume_event(state));
1652                 return error;
1653         }
1654
1655         return 0;
1656 }
1657 EXPORT_SYMBOL_GPL(dpm_suspend_end);
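/*
 * Illustrative sketch (not part of the original file): a platform suspend
 * path is expected to pair dpm_suspend_end() with dpm_resume_start() on the
 * way back out, loosely following what kernel/power/suspend.c does:
 *
 *	error = dpm_suspend_end(PMSG_SUSPEND);
 *	if (error)
 *		goto Platform_finish;
 *
 *	... disable non-wakeup IRQs, enter the sleep state ...
 *
 *	dpm_resume_start(PMSG_RESUME);
 */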
1658
1659 /**
1660  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1661  * @dev: Device to suspend.
1662  * @state: PM transition of the system being carried out.
1663  * @cb: Suspend callback to execute.
1664  * @info: string description of caller.
1665  */
1666 static int legacy_suspend(struct device *dev, pm_message_t state,
1667                           int (*cb)(struct device *dev, pm_message_t state),
1668                           const char *info)
1669 {
1670         int error;
1671         ktime_t calltime;
1672
1673         calltime = initcall_debug_start(dev, cb);
1674
1675         trace_device_pm_callback_start(dev, info, state.event);
1676         error = cb(dev, state);
1677         trace_device_pm_callback_end(dev, error);
1678         suspend_report_result(cb, error);
1679
1680         initcall_debug_report(dev, calltime, cb, error);
1681
1682         return error;
1683 }
1684
1685 static void dpm_clear_superiors_direct_complete(struct device *dev)
1686 {
1687         struct device_link *link;
1688         int idx;
1689
1690         if (dev->parent) {
1691                 spin_lock_irq(&dev->parent->power.lock);
1692                 dev->parent->power.direct_complete = false;
1693                 spin_unlock_irq(&dev->parent->power.lock);
1694         }
1695
1696         idx = device_links_read_lock();
1697
1698         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1699                 spin_lock_irq(&link->supplier->power.lock);
1700                 link->supplier->power.direct_complete = false;
1701                 spin_unlock_irq(&link->supplier->power.lock);
1702         }
1703
1704         device_links_read_unlock(idx);
1705 }
1706
1707 /**
1708  * __device_suspend - Execute "suspend" callbacks for given device.
1709  * @dev: Device to handle.
1710  * @state: PM transition of the system being carried out.
1711  * @async: If true, the device is being suspended asynchronously.
1712  */
1713 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1714 {
1715         pm_callback_t callback = NULL;
1716         const char *info = NULL;
1717         int error = 0;
1718         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1719
1720         TRACE_DEVICE(dev);
1721         TRACE_SUSPEND(0);
1722
1723         dpm_wait_for_subordinate(dev, async);
1724
1725         if (async_error) {
1726                 dev->power.direct_complete = false;
1727                 goto Complete;
1728         }
1729
1730         /*
1731          * If a device configured to wake up the system from sleep states
1732          * has been suspended at run time and there's a resume request pending
1733          * for it, this is equivalent to the device signaling wakeup, so the
1734          * system suspend operation should be aborted.
1735          */
1736         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1737                 pm_wakeup_event(dev, 0);
1738
1739         if (pm_wakeup_pending()) {
1740                 dev->power.direct_complete = false;
1741                 async_error = -EBUSY;
1742                 goto Complete;
1743         }
1744
1745         if (dev->power.syscore)
1746                 goto Complete;
1747
1748         if (dev->power.direct_complete) {
1749                 if (pm_runtime_status_suspended(dev)) {
1750                         pm_runtime_disable(dev);
1751                         if (pm_runtime_status_suspended(dev)) {
1752                                 pm_dev_dbg(dev, state, "direct-complete ");
1753                                 goto Complete;
1754                         }
1755
1756                         pm_runtime_enable(dev);
1757                 }
1758                 dev->power.direct_complete = false;
1759         }
1760
1761         dev->power.may_skip_resume = false;
1762         dev->power.must_resume = false;
1763
1764         dpm_watchdog_set(&wd, dev);
1765         device_lock(dev);
1766
1767         if (dev->pm_domain) {
1768                 info = "power domain ";
1769                 callback = pm_op(&dev->pm_domain->ops, state);
1770                 goto Run;
1771         }
1772
1773         if (dev->type && dev->type->pm) {
1774                 info = "type ";
1775                 callback = pm_op(dev->type->pm, state);
1776                 goto Run;
1777         }
1778
1779         if (dev->class && dev->class->pm) {
1780                 info = "class ";
1781                 callback = pm_op(dev->class->pm, state);
1782                 goto Run;
1783         }
1784
1785         if (dev->bus) {
1786                 if (dev->bus->pm) {
1787                         info = "bus ";
1788                         callback = pm_op(dev->bus->pm, state);
1789                 } else if (dev->bus->suspend) {
1790                         pm_dev_dbg(dev, state, "legacy bus ");
1791                         error = legacy_suspend(dev, state, dev->bus->suspend,
1792                                                 "legacy bus ");
1793                         goto End;
1794                 }
1795         }
1796
1797  Run:
1798         if (!callback && dev->driver && dev->driver->pm) {
1799                 info = "driver ";
1800                 callback = pm_op(dev->driver->pm, state);
1801         }
1802
1803         error = dpm_run_callback(callback, dev, state, info);
1804
1805  End:
1806         if (!error) {
1807                 dev->power.is_suspended = true;
1808                 if (device_may_wakeup(dev))
1809                         dev->power.wakeup_path = true;
1810
1811                 dpm_propagate_wakeup_to_parent(dev);
1812                 dpm_clear_superiors_direct_complete(dev);
1813         }
1814
1815         device_unlock(dev);
1816         dpm_watchdog_clear(&wd);
1817
1818  Complete:
1819         if (error)
1820                 async_error = error;
1821
1822         complete_all(&dev->power.completion);
1823         TRACE_SUSPEND(error);
1824         return error;
1825 }
1826
1827 static void async_suspend(void *data, async_cookie_t cookie)
1828 {
1829         struct device *dev = (struct device *)data;
1830         int error;
1831
1832         error = __device_suspend(dev, pm_transition, true);
1833         if (error) {
1834                 dpm_save_failed_dev(dev_name(dev));
1835                 pm_dev_err(dev, pm_transition, " async", error);
1836         }
1837
1838         put_device(dev);
1839 }
1840
1841 static int device_suspend(struct device *dev)
1842 {
1843         reinit_completion(&dev->power.completion);
1844
1845         if (is_async(dev)) {
1846                 get_device(dev);
1847                 async_schedule_dev(async_suspend, dev);
1848                 return 0;
1849         }
1850
1851         return __device_suspend(dev, pm_transition, false);
1852 }
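/*
 * Illustrative sketch (not part of the original file): the async path above
 * is only taken for devices that have opted into asynchronous suspend and
 * only while the global pm_async switch is enabled.  A driver could opt a
 * device in from its (hypothetical) probe routine:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		...
 *		return 0;
 *	}
 */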
1853
1854 /**
1855  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1856  * @state: PM transition of the system being carried out.
1857  */
1858 int dpm_suspend(pm_message_t state)
1859 {
1860         ktime_t starttime = ktime_get();
1861         int error = 0;
1862
1863         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1864         might_sleep();
1865
1866         devfreq_suspend();
1867         cpufreq_suspend();
1868
1869         mutex_lock(&dpm_list_mtx);
1870         pm_transition = state;
1871         async_error = 0;
1872         while (!list_empty(&dpm_prepared_list)) {
1873                 struct device *dev = to_device(dpm_prepared_list.prev);
1874
1875                 get_device(dev);
1876                 mutex_unlock(&dpm_list_mtx);
1877
1878                 error = device_suspend(dev);
1879
1880                 mutex_lock(&dpm_list_mtx);
1881                 if (error) {
1882                         pm_dev_err(dev, state, "", error);
1883                         dpm_save_failed_dev(dev_name(dev));
1884                         put_device(dev);
1885                         break;
1886                 }
1887                 if (!list_empty(&dev->power.entry))
1888                         list_move(&dev->power.entry, &dpm_suspended_list);
1889                 put_device(dev);
1890                 if (async_error)
1891                         break;
1892         }
1893         mutex_unlock(&dpm_list_mtx);
1894         async_synchronize_full();
1895         if (!error)
1896                 error = async_error;
1897         if (error) {
1898                 suspend_stats.failed_suspend++;
1899                 dpm_save_failed_step(SUSPEND_SUSPEND);
1900         }
1901         dpm_show_time(starttime, state, error, NULL);
1902         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1903         return error;
1904 }
1905
1906 /**
1907  * device_prepare - Prepare a device for system power transition.
1908  * @dev: Device to handle.
1909  * @state: PM transition of the system being carried out.
1910  *
1911  * Execute the ->prepare() callback(s) for given device.  No new children of the
1912  * device may be registered after this function has returned.
1913  */
1914 static int device_prepare(struct device *dev, pm_message_t state)
1915 {
1916         int (*callback)(struct device *) = NULL;
1917         int ret = 0;
1918
1919         if (dev->power.syscore)
1920                 return 0;
1921
1922         WARN_ON(!pm_runtime_enabled(dev) &&
1923                 dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1924                                               DPM_FLAG_LEAVE_SUSPENDED));
1925
1926         /*
1927          * If a device's parent goes into runtime suspend at the wrong time,
1928          * it won't be possible to resume the device.  To prevent this we
1929          * block runtime suspend here, during the prepare phase, and allow
1930          * it again during the complete phase.
1931          */
1932         pm_runtime_get_noresume(dev);
1933
1934         device_lock(dev);
1935
1936         dev->power.wakeup_path = false;
1937
1938         if (dev->power.no_pm_callbacks)
1939                 goto unlock;
1940
1941         if (dev->pm_domain)
1942                 callback = dev->pm_domain->ops.prepare;
1943         else if (dev->type && dev->type->pm)
1944                 callback = dev->type->pm->prepare;
1945         else if (dev->class && dev->class->pm)
1946                 callback = dev->class->pm->prepare;
1947         else if (dev->bus && dev->bus->pm)
1948                 callback = dev->bus->pm->prepare;
1949
1950         if (!callback && dev->driver && dev->driver->pm)
1951                 callback = dev->driver->pm->prepare;
1952
1953         if (callback)
1954                 ret = callback(dev);
1955
1956 unlock:
1957         device_unlock(dev);
1958
1959         if (ret < 0) {
1960                 suspend_report_result(callback, ret);
1961                 pm_runtime_put(dev);
1962                 return ret;
1963         }
1964         /*
1965          * A positive return value from ->prepare() means "this device appears
1966          * to be runtime-suspended and its state is fine, so if it really is
1967          * runtime-suspended, you can leave it in that state provided that you
1968          * will do the same thing with all of its descendants".  This only
1969          * applies to suspend transitions, however.
1970          */
1971         spin_lock_irq(&dev->power.lock);
1972         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1973                 ((pm_runtime_suspended(dev) && ret > 0) ||
1974                  dev->power.no_pm_callbacks) &&
1975                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1976         spin_unlock_irq(&dev->power.lock);
1977         return 0;
1978 }
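/*
 * Illustrative sketch (not part of the original file): the positive-return
 * convention described above could be used by a hypothetical ->prepare()
 * callback to request the direct_complete optimization whenever its device
 * looks runtime-suspended:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_status_suspended(dev) ? 1 : 0;
 *	}
 *
 * Even then, device_prepare() only sets direct_complete for suspend
 * transitions and may still veto it via DPM_FLAG_NEVER_SKIP.
 */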
1979
1980 /**
1981  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1982  * @state: PM transition of the system being carried out.
1983  *
1984  * Execute the ->prepare() callback(s) for all devices.
1985  */
1986 int dpm_prepare(pm_message_t state)
1987 {
1988         int error = 0;
1989
1990         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1991         might_sleep();
1992
1993         /*
1994          * Give the known devices a chance to complete their probes before we
1995          * disable the probing of devices.  This sync point is important at
1996          * least at boot time and during hibernation restore.
1997          */
1998         wait_for_device_probe();
1999         /*
2000          * Probing a device during suspend or hibernation is unsafe and would
2001          * make system behavior unpredictable, so prohibit device probing here
2002          * and defer any probes instead.  The normal behavior will be restored
2003          * in dpm_complete().
2004          */
2005         device_block_probing();
2006
2007         mutex_lock(&dpm_list_mtx);
2008         while (!list_empty(&dpm_list)) {
2009                 struct device *dev = to_device(dpm_list.next);
2010
2011                 get_device(dev);
2012                 mutex_unlock(&dpm_list_mtx);
2013
2014                 trace_device_pm_callback_start(dev, "", state.event);
2015                 error = device_prepare(dev, state);
2016                 trace_device_pm_callback_end(dev, error);
2017
2018                 mutex_lock(&dpm_list_mtx);
2019                 if (error) {
2020                         if (error == -EAGAIN) {
2021                                 put_device(dev);
2022                                 error = 0;
2023                                 continue;
2024                         }
2025                         printk(KERN_INFO
2026                                 "PM: Device %s not prepared for power transition: code %d\n",
2027                                 dev_name(dev), error);
2028                         put_device(dev);
2029                         break;
2030                 }
2031                 dev->power.is_prepared = true;
2032                 if (!list_empty(&dev->power.entry))
2033                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
2034                 put_device(dev);
2035         }
2036         mutex_unlock(&dpm_list_mtx);
2037         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2038         return error;
2039 }
2040
2041 /**
2042  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2043  * @state: PM transition of the system being carried out.
2044  *
2045  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2046  * callbacks for them.
2047  */
2048 int dpm_suspend_start(pm_message_t state)
2049 {
2050         int error;
2051
2052         error = dpm_prepare(state);
2053         if (error) {
2054                 suspend_stats.failed_prepare++;
2055                 dpm_save_failed_step(SUSPEND_PREPARE);
2056         } else
2057                 error = dpm_suspend(state);
2058         return error;
2059 }
2060 EXPORT_SYMBOL_GPL(dpm_suspend_start);
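/*
 * Illustrative sketch (not part of the original file): dpm_suspend_start()
 * is balanced by dpm_resume_end(); the system suspend core roughly does:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (error)
 *		goto Recover_platform;
 *
 *	... dpm_suspend_end() and the actual sleep-state entry ...
 *
 *	dpm_resume_end(PMSG_RESUME);
 */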
2061
2062 void __suspend_report_result(const char *function, void *fn, int ret)
2063 {
2064         if (ret)
2065                 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
2066 }
2067 EXPORT_SYMBOL_GPL(__suspend_report_result);
2068
2069 /**
2070  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2071  * @dev: Device to wait for.
2072  * @subordinate: Device that needs to wait for @dev.
2073  */
2074 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2075 {
2076         dpm_wait(dev, subordinate->power.async_suspend);
2077         return async_error;
2078 }
2079 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
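/*
 * Illustrative sketch (not part of the original file): a driver whose
 * suspend callback must not run before some other device has completed its
 * own transition could use this helper.  The foo structure, its "companion"
 * pointer and foo_do_suspend() below are hypothetical:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *
 *		return foo_do_suspend(foo);
 *	}
 */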
2080
2081 /**
2082  * dpm_for_each_dev - device iterator.
2083  * @data: data for the callback.
2084  * @fn: function to be called for each device.
2085  *
2086  * Iterate over devices in dpm_list, and call @fn for each device,
2087  * passing it @data.
2088  */
2089 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2090 {
2091         struct device *dev;
2092
2093         if (!fn)
2094                 return;
2095
2096         device_pm_lock();
2097         list_for_each_entry(dev, &dpm_list, power.entry)
2098                 fn(dev, data);
2099         device_pm_unlock();
2100 }
2101 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
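/*
 * Illustrative sketch (not part of the original file): a minimal caller,
 * using a hypothetical callback that just counts the devices on dpm_list:
 *
 *	static void count_one(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, count_one);
 */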
2102
2103 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2104 {
2105         if (!ops)
2106                 return true;
2107
2108         return !ops->prepare &&
2109                !ops->suspend &&
2110                !ops->suspend_late &&
2111                !ops->suspend_noirq &&
2112                !ops->resume_noirq &&
2113                !ops->resume_early &&
2114                !ops->resume &&
2115                !ops->complete;
2116 }
2117
2118 void device_pm_check_callbacks(struct device *dev)
2119 {
2120         spin_lock_irq(&dev->power.lock);
2121         dev->power.no_pm_callbacks =
2122                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2123                  !dev->bus->suspend && !dev->bus->resume)) &&
2124                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2125                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2126                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2127                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2128                  !dev->driver->suspend && !dev->driver->resume));
2129         spin_unlock_irq(&dev->power.lock);
2130 }
2131
2132 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2133 {
2134         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2135                 pm_runtime_status_suspended(dev);
2136 }