1 /*
2  * drivers/base/power/runtime.c - Helper functions for device runtime PM
3  *
4  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
6  *
7  * This file is released under the GPLv2.
8  */
9
10 #include <linux/sched/mm.h>
11 #include <linux/ktime.h>
12 #include <linux/hrtimer.h>
13 #include <linux/export.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/pm_wakeirq.h>
16 #include <trace/events/rpm.h>
17
18 #include "../base.h"
19 #include "power.h"
20
21 typedef int (*pm_callback_t)(struct device *);
22
23 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
24 {
25         pm_callback_t cb;
26         const struct dev_pm_ops *ops;
27
28         if (dev->pm_domain)
29                 ops = &dev->pm_domain->ops;
30         else if (dev->type && dev->type->pm)
31                 ops = dev->type->pm;
32         else if (dev->class && dev->class->pm)
33                 ops = dev->class->pm;
34         else if (dev->bus && dev->bus->pm)
35                 ops = dev->bus->pm;
36         else
37                 ops = NULL;
38
39         if (ops)
40                 cb = *(pm_callback_t *)((void *)ops + cb_offset);
41         else
42                 cb = NULL;
43
44         if (!cb && dev->driver && dev->driver->pm)
45                 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
46
47         return cb;
48 }
49
50 #define RPM_GET_CALLBACK(dev, callback) \
51                 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
52
53 static int rpm_resume(struct device *dev, int rpmflags);
54 static int rpm_suspend(struct device *dev, int rpmflags);
55
56 /**
57  * update_pm_runtime_accounting - Update the time accounting of power states
58  * @dev: Device to update the accounting for
59  *
60  * In order to be able to have time accounting of the various power states
61  * (as used by programs such as PowerTOP to show the effectiveness of runtime
62  * PM), we need to track the time spent in each state.
63  * update_pm_runtime_accounting must be called each time before the
64  * runtime_status field is updated, to account the time in the old state
65  * correctly.
66  */
67 void update_pm_runtime_accounting(struct device *dev)
68 {
69         u64 now, last, delta;
70
71         if (dev->power.disable_depth > 0)
72                 return;
73
74         last = dev->power.accounting_timestamp;
75
76         now = ktime_get_mono_fast_ns();
77         dev->power.accounting_timestamp = now;
78
79         /*
80          * Because ktime_get_mono_fast_ns() is not monotonic during
81          * timekeeping updates, ensure that 'now' is after the last saved
82          * timestamp.
83          */
84         if (now < last)
85                 return;
86
87         delta = now - last;
88
89         if (dev->power.runtime_status == RPM_SUSPENDED)
90                 dev->power.suspended_time += delta;
91         else
92                 dev->power.active_time += delta;
93 }
94
95 static void __update_runtime_status(struct device *dev, enum rpm_status status)
96 {
97         update_pm_runtime_accounting(dev);
98         dev->power.runtime_status = status;
99 }
100
101 u64 pm_runtime_suspended_time(struct device *dev)
102 {
103         u64 time;
104         unsigned long flags;
105
106         spin_lock_irqsave(&dev->power.lock, flags);
107
108         update_pm_runtime_accounting(dev);
109         time = dev->power.suspended_time;
110
111         spin_unlock_irqrestore(&dev->power.lock, flags);
112
113         return time;
114 }
115 EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
116
117 /**
118  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
119  * @dev: Device to handle.
120  */
121 static void pm_runtime_deactivate_timer(struct device *dev)
122 {
123         if (dev->power.timer_expires > 0) {
124                 hrtimer_try_to_cancel(&dev->power.suspend_timer);
125                 dev->power.timer_expires = 0;
126         }
127 }
128
129 /**
130  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
131  * @dev: Device to handle.
132  */
133 static void pm_runtime_cancel_pending(struct device *dev)
134 {
135         pm_runtime_deactivate_timer(dev);
136         /*
137          * In case there's a request pending, make sure its work function will
138          * return without doing anything.
139          */
140         dev->power.request = RPM_REQ_NONE;
141 }
142
143 /**
144  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
145  * @dev: Device to handle.
146  *
147  * Compute the autosuspend-delay expiration time based on the device's
148  * power.last_busy time.  If the delay has already expired or is disabled
149  * (negative) or the power.use_autosuspend flag isn't set, return 0.
150  * Otherwise return the expiration time in nanoseconds.
151  *
152  * This function may be called either with or without dev->power.lock held.
153  * Either way it can be racy, since power.last_busy may be updated at any time.
154  */
155 u64 pm_runtime_autosuspend_expiration(struct device *dev)
156 {
157         int autosuspend_delay;
158         u64 expires;
159
160         if (!dev->power.use_autosuspend)
161                 return 0;
162
163         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
164         if (autosuspend_delay < 0)
165                 return 0;
166
167         expires  = READ_ONCE(dev->power.last_busy);
168         expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
169         if (expires > ktime_get_mono_fast_ns())
170                 return expires; /* Expires in the future */
171
172         return 0;
173 }
174 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
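/*
 * Worked example (illustrative, values are arbitrary): with
 * power.last_busy == 1,000,000,000 ns and autosuspend_delay == 200 ms,
 * the expiration time is 1,000,000,000 + 200 * NSEC_PER_MSEC =
 * 1,200,000,000 ns.  That value is returned as long as it is still in
 * the future; once the current time passes it, the function returns 0.
 */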
175
176 static int dev_memalloc_noio(struct device *dev, void *data)
177 {
178         return dev->power.memalloc_noio;
179 }
180
181 /**
182  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
183  * @dev: Device to handle.
184  * @enable: True for setting the flag and False for clearing the flag.
185  *
186  * Set the flag for all devices in the path from the device to the
187  * root device in the device tree if @enable is true, otherwise clear
188  * the flag for devices in the path whose siblings don't set the flag.
189  *
190  * The function should only be called by block device or network device
191  * drivers, to solve the deadlock problem during runtime resume/suspend:
192  *
193  *     If memory allocation with GFP_KERNEL is called inside the runtime
194  *     resume/suspend callback of any one of its ancestors (or the block
195  *     device itself), a deadlock may be triggered inside the memory
196  *     allocation, since it might not complete until the block device
197  *     becomes active and the involved page I/O finishes.  This situation
198  *     was first pointed out by Alan Stern.  Network devices are involved
199  *     in iSCSI-like situations.
200  *
201  * The dev_hotplug_mutex lock is held in this function to handle the
202  * hotplug race, because pm_runtime_set_memalloc_noio() may be called
203  * from an async probe().
204  *
205  * The function should be called between device_add() and device_del()
206  * on the affected device (i.e. the block or network device for which
207  * the flag is being changed).
208  */
209 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
210 {
211         static DEFINE_MUTEX(dev_hotplug_mutex);
212
213         mutex_lock(&dev_hotplug_mutex);
214         for (;;) {
215                 bool enabled;
216
217                 /* hold power lock since bitfield is not SMP-safe. */
218                 spin_lock_irq(&dev->power.lock);
219                 enabled = dev->power.memalloc_noio;
220                 dev->power.memalloc_noio = enable;
221                 spin_unlock_irq(&dev->power.lock);
222
223                 /*
224                  * No need to update the ancestors any more if the flag
225                  * was already set for this device.
226                  */
227                 if (enabled && enable)
228                         break;
229
230                 dev = dev->parent;
231
232                 /*
233                  * Clear the parent's flag only if none of its children
234                  * have the flag set, because an ancestor's flag may have
235                  * been set by any one of its descendants.
236                  */
237                 if (!dev || (!enable &&
238                              device_for_each_child(dev, NULL,
239                                                    dev_memalloc_noio)))
240                         break;
241         }
242         mutex_unlock(&dev_hotplug_mutex);
243 }
244 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
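/*
 * Illustrative sketch (not part of this file's logic): a block or network
 * device driver would typically set the flag right after device_add() and
 * clear it again before device_del().  The mydev pointer below is
 * hypothetical.
 *
 *        ret = device_add(&mydev->dev);
 *        if (ret)
 *                return ret;
 *        pm_runtime_set_memalloc_noio(&mydev->dev, true);
 *        ...
 *        pm_runtime_set_memalloc_noio(&mydev->dev, false);
 *        device_del(&mydev->dev);
 */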
245
246 /**
247  * rpm_check_suspend_allowed - Test whether a device may be suspended.
248  * @dev: Device to test.
249  */
250 static int rpm_check_suspend_allowed(struct device *dev)
251 {
252         int retval = 0;
253
254         if (dev->power.runtime_error)
255                 retval = -EINVAL;
256         else if (dev->power.disable_depth > 0)
257                 retval = -EACCES;
258         else if (atomic_read(&dev->power.usage_count) > 0)
259                 retval = -EAGAIN;
260         else if (!dev->power.ignore_children &&
261                         atomic_read(&dev->power.child_count))
262                 retval = -EBUSY;
263
264         /* Pending resume requests take precedence over suspends. */
265         else if ((dev->power.deferred_resume
266                         && dev->power.runtime_status == RPM_SUSPENDING)
267             || (dev->power.request_pending
268                         && dev->power.request == RPM_REQ_RESUME))
269                 retval = -EAGAIN;
270         else if (__dev_pm_qos_read_value(dev) == 0)
271                 retval = -EPERM;
272         else if (dev->power.runtime_status == RPM_SUSPENDED)
273                 retval = 1;
274
275         return retval;
276 }
277
278 static int rpm_get_suppliers(struct device *dev)
279 {
280         struct device_link *link;
281
282         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
283                 int retval;
284
285                 if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
286                     READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
287                         continue;
288
289                 retval = pm_runtime_get_sync(link->supplier);
290                 /* Ignore suppliers with disabled runtime PM. */
291                 if (retval < 0 && retval != -EACCES) {
292                         pm_runtime_put_noidle(link->supplier);
293                         return retval;
294                 }
295                 refcount_inc(&link->rpm_active);
296         }
297         return 0;
298 }
299
300 static void rpm_put_suppliers(struct device *dev)
301 {
302         struct device_link *link;
303
304         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
305                 if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
306                         continue;
307
308                 while (refcount_dec_not_one(&link->rpm_active))
309                         pm_runtime_put(link->supplier);
310         }
311 }
312
313 /**
314  * __rpm_callback - Run a given runtime PM callback for a given device.
315  * @cb: Runtime PM callback to run.
316  * @dev: Device to run the callback for.
317  */
318 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
319         __releases(&dev->power.lock) __acquires(&dev->power.lock)
320 {
321         int retval, idx;
322         bool use_links = dev->power.links_count > 0;
323
324         if (dev->power.irq_safe) {
325                 spin_unlock(&dev->power.lock);
326         } else {
327                 spin_unlock_irq(&dev->power.lock);
328
329                 /*
330                  * Resume suppliers if necessary.
331                  *
332                  * The device's runtime PM status cannot change until this
333                  * routine returns, so it is safe to read the status outside of
334                  * the lock.
335                  */
336                 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
337                         idx = device_links_read_lock();
338
339                         retval = rpm_get_suppliers(dev);
340                         if (retval)
341                                 goto fail;
342
343                         device_links_read_unlock(idx);
344                 }
345         }
346
347         retval = cb(dev);
348
349         if (dev->power.irq_safe) {
350                 spin_lock(&dev->power.lock);
351         } else {
352                 /*
353                  * If the device is suspending and the callback has returned
354                  * success, drop the usage counters of the suppliers that have
355                  * been reference counted on its resume.
356                  *
357                  * Do that if resume fails too.
358                  */
359                 if (use_links
360                     && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
361                     || (dev->power.runtime_status == RPM_RESUMING && retval))) {
362                         idx = device_links_read_lock();
363
364  fail:
365                         rpm_put_suppliers(dev);
366
367                         device_links_read_unlock(idx);
368                 }
369
370                 spin_lock_irq(&dev->power.lock);
371         }
372
373         return retval;
374 }
375
376 /**
377  * rpm_idle - Notify device bus type if the device can be suspended.
378  * @dev: Device to notify the bus type about.
379  * @rpmflags: Flag bits.
380  *
381  * Check if the device's runtime PM status allows it to be suspended.  If
382  * another idle notification has been started earlier, return immediately.  If
383  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
384  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
385  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
386  *
387  * This function must be called under dev->power.lock with interrupts disabled.
388  */
389 static int rpm_idle(struct device *dev, int rpmflags)
390 {
391         int (*callback)(struct device *);
392         int retval;
393
394         trace_rpm_idle_rcuidle(dev, rpmflags);
395         retval = rpm_check_suspend_allowed(dev);
396         if (retval < 0)
397                 ;       /* Conditions are wrong. */
398
399         /* Idle notifications are allowed only in the RPM_ACTIVE state. */
400         else if (dev->power.runtime_status != RPM_ACTIVE)
401                 retval = -EAGAIN;
402
403         /*
404          * Any pending request other than an idle notification takes
405          * precedence over us, except that the timer may be running.
406          */
407         else if (dev->power.request_pending &&
408             dev->power.request > RPM_REQ_IDLE)
409                 retval = -EAGAIN;
410
411         /* Act as though RPM_NOWAIT is always set. */
412         else if (dev->power.idle_notification)
413                 retval = -EINPROGRESS;
414         if (retval)
415                 goto out;
416
417         /* Pending requests need to be canceled. */
418         dev->power.request = RPM_REQ_NONE;
419
420         if (dev->power.no_callbacks)
421                 goto out;
422
423         /* Carry out an asynchronous or a synchronous idle notification. */
424         if (rpmflags & RPM_ASYNC) {
425                 dev->power.request = RPM_REQ_IDLE;
426                 if (!dev->power.request_pending) {
427                         dev->power.request_pending = true;
428                         queue_work(pm_wq, &dev->power.work);
429                 }
430                 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
431                 return 0;
432         }
433
434         dev->power.idle_notification = true;
435
436         callback = RPM_GET_CALLBACK(dev, runtime_idle);
437
438         if (callback)
439                 retval = __rpm_callback(callback, dev);
440
441         dev->power.idle_notification = false;
442         wake_up_all(&dev->power.wait_queue);
443
444  out:
445         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
446         return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
447 }
448
449 /**
450  * rpm_callback - Run a given runtime PM callback for a given device.
451  * @cb: Runtime PM callback to run.
452  * @dev: Device to run the callback for.
453  */
454 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
455 {
456         int retval;
457
458         if (!cb)
459                 return -ENOSYS;
460
461         if (dev->power.memalloc_noio) {
462                 unsigned int noio_flag;
463
464                 /*
465                  * A deadlock might be caused if memory allocation with
466                  * GFP_KERNEL happens inside the runtime_suspend and
467                  * runtime_resume callbacks of a block device's ancestor
468                  * or of the block device itself.  A network device may
469                  * be considered part of an iSCSI block device, so the
470                  * network device and its ancestors should be marked as
471                  * memalloc_noio too.
472                  */
473                 noio_flag = memalloc_noio_save();
474                 retval = __rpm_callback(cb, dev);
475                 memalloc_noio_restore(noio_flag);
476         } else {
477                 retval = __rpm_callback(cb, dev);
478         }
479
480         dev->power.runtime_error = retval;
481         return retval != -EACCES ? retval : -EIO;
482 }
483
484 /**
485  * rpm_suspend - Carry out runtime suspend of given device.
486  * @dev: Device to suspend.
487  * @rpmflags: Flag bits.
488  *
489  * Check if the device's runtime PM status allows it to be suspended.
490  * Cancel a pending idle notification, autosuspend or suspend. If
491  * another suspend has been started earlier, either return immediately
492  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
493  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
494  * otherwise run the ->runtime_suspend() callback directly.  If
495  * ->runtime_suspend() succeeds and a deferred resume was requested while
496  * the callback was running, carry out that resume; otherwise send an idle
497  * notification for the device's parent (provided the suspend succeeded and
498  * neither parent->power.ignore_children nor dev->power.irq_safe is set).
499  * If ->runtime_suspend() fails with -EAGAIN or -EBUSY, and if the RPM_AUTO
500  * flag is set and the next autosuspend-delay expiration time is in the
501  * future, schedule another autosuspend attempt.
502  *
503  * This function must be called under dev->power.lock with interrupts disabled.
504  */
505 static int rpm_suspend(struct device *dev, int rpmflags)
506         __releases(&dev->power.lock) __acquires(&dev->power.lock)
507 {
508         int (*callback)(struct device *);
509         struct device *parent = NULL;
510         int retval;
511
512         trace_rpm_suspend_rcuidle(dev, rpmflags);
513
514  repeat:
515         retval = rpm_check_suspend_allowed(dev);
516
517         if (retval < 0)
518                 ;       /* Conditions are wrong. */
519
520         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
521         else if (dev->power.runtime_status == RPM_RESUMING &&
522             !(rpmflags & RPM_ASYNC))
523                 retval = -EAGAIN;
524         if (retval)
525                 goto out;
526
527         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
528         if ((rpmflags & RPM_AUTO)
529             && dev->power.runtime_status != RPM_SUSPENDING) {
530                 u64 expires = pm_runtime_autosuspend_expiration(dev);
531
532                 if (expires != 0) {
533                         /* Pending requests need to be canceled. */
534                         dev->power.request = RPM_REQ_NONE;
535
536                         /*
537                          * Optimization: If the timer is already running and is
538                          * set to expire at or before the autosuspend delay,
539                          * avoid the overhead of resetting it.  Just let it
540                          * expire; pm_suspend_timer_fn() will take care of the
541                          * rest.
542                          */
543                         if (!(dev->power.timer_expires &&
544                                         dev->power.timer_expires <= expires)) {
545                                 /*
546                                  * We add a slack of 25% to gather wakeups
547                                  * without sacrificing the granularity.
548                                  */
549                                 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
550                                                     (NSEC_PER_MSEC >> 2);
551
552                                 dev->power.timer_expires = expires;
553                                 hrtimer_start_range_ns(&dev->power.suspend_timer,
554                                                 ns_to_ktime(expires),
555                                                 slack,
556                                                 HRTIMER_MODE_ABS);
557                         }
558                         dev->power.timer_autosuspends = 1;
559                         goto out;
560                 }
561         }
562
563         /* Other scheduled or pending requests need to be canceled. */
564         pm_runtime_cancel_pending(dev);
565
566         if (dev->power.runtime_status == RPM_SUSPENDING) {
567                 DEFINE_WAIT(wait);
568
569                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
570                         retval = -EINPROGRESS;
571                         goto out;
572                 }
573
574                 if (dev->power.irq_safe) {
575                         spin_unlock(&dev->power.lock);
576
577                         cpu_relax();
578
579                         spin_lock(&dev->power.lock);
580                         goto repeat;
581                 }
582
583                 /* Wait for the other suspend running in parallel with us. */
584                 for (;;) {
585                         prepare_to_wait(&dev->power.wait_queue, &wait,
586                                         TASK_UNINTERRUPTIBLE);
587                         if (dev->power.runtime_status != RPM_SUSPENDING)
588                                 break;
589
590                         spin_unlock_irq(&dev->power.lock);
591
592                         schedule();
593
594                         spin_lock_irq(&dev->power.lock);
595                 }
596                 finish_wait(&dev->power.wait_queue, &wait);
597                 goto repeat;
598         }
599
600         if (dev->power.no_callbacks)
601                 goto no_callback;       /* Assume success. */
602
603         /* Carry out an asynchronous or a synchronous suspend. */
604         if (rpmflags & RPM_ASYNC) {
605                 dev->power.request = (rpmflags & RPM_AUTO) ?
606                     RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
607                 if (!dev->power.request_pending) {
608                         dev->power.request_pending = true;
609                         queue_work(pm_wq, &dev->power.work);
610                 }
611                 goto out;
612         }
613
614         __update_runtime_status(dev, RPM_SUSPENDING);
615
616         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
617
618         dev_pm_enable_wake_irq_check(dev, true);
619         retval = rpm_callback(callback, dev);
620         if (retval)
621                 goto fail;
622
623  no_callback:
624         __update_runtime_status(dev, RPM_SUSPENDED);
625         pm_runtime_deactivate_timer(dev);
626
627         if (dev->parent) {
628                 parent = dev->parent;
629                 atomic_add_unless(&parent->power.child_count, -1, 0);
630         }
631         wake_up_all(&dev->power.wait_queue);
632
633         if (dev->power.deferred_resume) {
634                 dev->power.deferred_resume = false;
635                 rpm_resume(dev, 0);
636                 retval = -EAGAIN;
637                 goto out;
638         }
639
640         /* Maybe the parent is now able to suspend. */
641         if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
642                 spin_unlock(&dev->power.lock);
643
644                 spin_lock(&parent->power.lock);
645                 rpm_idle(parent, RPM_ASYNC);
646                 spin_unlock(&parent->power.lock);
647
648                 spin_lock(&dev->power.lock);
649         }
650
651  out:
652         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
653
654         return retval;
655
656  fail:
657         dev_pm_disable_wake_irq_check(dev);
658         __update_runtime_status(dev, RPM_ACTIVE);
659         dev->power.deferred_resume = false;
660         wake_up_all(&dev->power.wait_queue);
661
662         if (retval == -EAGAIN || retval == -EBUSY) {
663                 dev->power.runtime_error = 0;
664
665                 /*
666                  * If the callback routine failed an autosuspend, and
667                  * if the last_busy time has been updated so that there
668                  * is a new autosuspend expiration time, automatically
669                  * reschedule another autosuspend.
670                  */
671                 if ((rpmflags & RPM_AUTO) &&
672                     pm_runtime_autosuspend_expiration(dev) != 0)
673                         goto repeat;
674         } else {
675                 pm_runtime_cancel_pending(dev);
676         }
677         goto out;
678 }
679
680 /**
681  * rpm_resume - Carry out runtime resume of given device.
682  * @dev: Device to resume.
683  * @rpmflags: Flag bits.
684  *
685  * Check if the device's runtime PM status allows it to be resumed.  Cancel
686  * any scheduled or pending requests.  If another resume has been started
687  * earlier, either return immediately or wait for it to finish, depending on the
688  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
689  * parallel with this function, either tell the other process to resume after
690  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
691  * flag is set then queue a resume request; otherwise run the
692  * ->runtime_resume() callback directly.  Queue an idle notification for the
693  * device if the resume succeeded.
694  *
695  * This function must be called under dev->power.lock with interrupts disabled.
696  */
697 static int rpm_resume(struct device *dev, int rpmflags)
698         __releases(&dev->power.lock) __acquires(&dev->power.lock)
699 {
700         int (*callback)(struct device *);
701         struct device *parent = NULL;
702         int retval = 0;
703
704         trace_rpm_resume_rcuidle(dev, rpmflags);
705
706  repeat:
707         if (dev->power.runtime_error)
708                 retval = -EINVAL;
709         else if (dev->power.disable_depth == 1 && dev->power.is_suspended
710             && dev->power.runtime_status == RPM_ACTIVE)
711                 retval = 1;
712         else if (dev->power.disable_depth > 0)
713                 retval = -EACCES;
714         if (retval)
715                 goto out;
716
717         /*
718          * Other scheduled or pending requests need to be canceled.  Small
719          * optimization: If an autosuspend timer is running, leave it running
720          * rather than cancelling it now only to restart it again in the near
721          * future.
722          */
723         dev->power.request = RPM_REQ_NONE;
724         if (!dev->power.timer_autosuspends)
725                 pm_runtime_deactivate_timer(dev);
726
727         if (dev->power.runtime_status == RPM_ACTIVE) {
728                 retval = 1;
729                 goto out;
730         }
731
732         if (dev->power.runtime_status == RPM_RESUMING
733             || dev->power.runtime_status == RPM_SUSPENDING) {
734                 DEFINE_WAIT(wait);
735
736                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
737                         if (dev->power.runtime_status == RPM_SUSPENDING)
738                                 dev->power.deferred_resume = true;
739                         else
740                                 retval = -EINPROGRESS;
741                         goto out;
742                 }
743
744                 if (dev->power.irq_safe) {
745                         spin_unlock(&dev->power.lock);
746
747                         cpu_relax();
748
749                         spin_lock(&dev->power.lock);
750                         goto repeat;
751                 }
752
753                 /* Wait for the operation carried out in parallel with us. */
754                 for (;;) {
755                         prepare_to_wait(&dev->power.wait_queue, &wait,
756                                         TASK_UNINTERRUPTIBLE);
757                         if (dev->power.runtime_status != RPM_RESUMING
758                             && dev->power.runtime_status != RPM_SUSPENDING)
759                                 break;
760
761                         spin_unlock_irq(&dev->power.lock);
762
763                         schedule();
764
765                         spin_lock_irq(&dev->power.lock);
766                 }
767                 finish_wait(&dev->power.wait_queue, &wait);
768                 goto repeat;
769         }
770
771         /*
772          * See if we can skip waking up the parent.  This is safe only if
773          * power.no_callbacks is set, because otherwise we don't know whether
774          * the resume will actually succeed.
775          */
776         if (dev->power.no_callbacks && !parent && dev->parent) {
777                 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
778                 if (dev->parent->power.disable_depth > 0
779                     || dev->parent->power.ignore_children
780                     || dev->parent->power.runtime_status == RPM_ACTIVE) {
781                         atomic_inc(&dev->parent->power.child_count);
782                         spin_unlock(&dev->parent->power.lock);
783                         retval = 1;
784                         goto no_callback;       /* Assume success. */
785                 }
786                 spin_unlock(&dev->parent->power.lock);
787         }
788
789         /* Carry out an asynchronous or a synchronous resume. */
790         if (rpmflags & RPM_ASYNC) {
791                 dev->power.request = RPM_REQ_RESUME;
792                 if (!dev->power.request_pending) {
793                         dev->power.request_pending = true;
794                         queue_work(pm_wq, &dev->power.work);
795                 }
796                 retval = 0;
797                 goto out;
798         }
799
800         if (!parent && dev->parent) {
801                 /*
802                  * Increment the parent's usage counter and resume it if
803                  * necessary.  Not needed if dev is irq-safe; then the
804                  * parent is permanently resumed.
805                  */
806                 parent = dev->parent;
807                 if (dev->power.irq_safe)
808                         goto skip_parent;
809                 spin_unlock(&dev->power.lock);
810
811                 pm_runtime_get_noresume(parent);
812
813                 spin_lock(&parent->power.lock);
814                 /*
815                  * Resume the parent if it has runtime PM enabled and not been
816                  * set to ignore its children.
817                  */
818                 if (!parent->power.disable_depth
819                     && !parent->power.ignore_children) {
820                         rpm_resume(parent, 0);
821                         if (parent->power.runtime_status != RPM_ACTIVE)
822                                 retval = -EBUSY;
823                 }
824                 spin_unlock(&parent->power.lock);
825
826                 spin_lock(&dev->power.lock);
827                 if (retval)
828                         goto out;
829                 goto repeat;
830         }
831  skip_parent:
832
833         if (dev->power.no_callbacks)
834                 goto no_callback;       /* Assume success. */
835
836         __update_runtime_status(dev, RPM_RESUMING);
837
838         callback = RPM_GET_CALLBACK(dev, runtime_resume);
839
840         dev_pm_disable_wake_irq_check(dev);
841         retval = rpm_callback(callback, dev);
842         if (retval) {
843                 __update_runtime_status(dev, RPM_SUSPENDED);
844                 pm_runtime_cancel_pending(dev);
845                 dev_pm_enable_wake_irq_check(dev, false);
846         } else {
847  no_callback:
848                 __update_runtime_status(dev, RPM_ACTIVE);
849                 pm_runtime_mark_last_busy(dev);
850                 if (parent)
851                         atomic_inc(&parent->power.child_count);
852         }
853         wake_up_all(&dev->power.wait_queue);
854
855         if (retval >= 0)
856                 rpm_idle(dev, RPM_ASYNC);
857
858  out:
859         if (parent && !dev->power.irq_safe) {
860                 spin_unlock_irq(&dev->power.lock);
861
862                 pm_runtime_put(parent);
863
864                 spin_lock_irq(&dev->power.lock);
865         }
866
867         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
868
869         return retval;
870 }
871
872 /**
873  * pm_runtime_work - Universal runtime PM work function.
874  * @work: Work structure used for scheduling the execution of this function.
875  *
876  * Use @work to get the device object the work is to be done for, determine what
877  * is to be done and execute the appropriate runtime PM function.
878  */
879 static void pm_runtime_work(struct work_struct *work)
880 {
881         struct device *dev = container_of(work, struct device, power.work);
882         enum rpm_request req;
883
884         spin_lock_irq(&dev->power.lock);
885
886         if (!dev->power.request_pending)
887                 goto out;
888
889         req = dev->power.request;
890         dev->power.request = RPM_REQ_NONE;
891         dev->power.request_pending = false;
892
893         switch (req) {
894         case RPM_REQ_NONE:
895                 break;
896         case RPM_REQ_IDLE:
897                 rpm_idle(dev, RPM_NOWAIT);
898                 break;
899         case RPM_REQ_SUSPEND:
900                 rpm_suspend(dev, RPM_NOWAIT);
901                 break;
902         case RPM_REQ_AUTOSUSPEND:
903                 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
904                 break;
905         case RPM_REQ_RESUME:
906                 rpm_resume(dev, RPM_NOWAIT);
907                 break;
908         }
909
910  out:
911         spin_unlock_irq(&dev->power.lock);
912 }
913
914 /**
915  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
916  * @timer: hrtimer used by pm_schedule_suspend().
917  *
918  * Check if the time is right and queue a suspend request.
919  */
920 static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
921 {
922         struct device *dev = container_of(timer, struct device, power.suspend_timer);
923         unsigned long flags;
924         u64 expires;
925
926         spin_lock_irqsave(&dev->power.lock, flags);
927
928         expires = dev->power.timer_expires;
929         /*
930          * If 'expires' is after the current time, we've been called
931          * too early.
932          */
933         if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
934                 dev->power.timer_expires = 0;
935                 rpm_suspend(dev, dev->power.timer_autosuspends ?
936                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
937         }
938
939         spin_unlock_irqrestore(&dev->power.lock, flags);
940
941         return HRTIMER_NORESTART;
942 }
943
944 /**
945  * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
946  * @dev: Device to suspend.
947  * @delay: Time to wait before submitting a suspend request, in milliseconds.
948  */
949 int pm_schedule_suspend(struct device *dev, unsigned int delay)
950 {
951         unsigned long flags;
952         u64 expires;
953         int retval;
954
955         spin_lock_irqsave(&dev->power.lock, flags);
956
957         if (!delay) {
958                 retval = rpm_suspend(dev, RPM_ASYNC);
959                 goto out;
960         }
961
962         retval = rpm_check_suspend_allowed(dev);
963         if (retval)
964                 goto out;
965
966         /* Other scheduled or pending requests need to be canceled. */
967         pm_runtime_cancel_pending(dev);
968
969         expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
970         dev->power.timer_expires = expires;
971         dev->power.timer_autosuspends = 0;
972         hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
973
974  out:
975         spin_unlock_irqrestore(&dev->power.lock, flags);
976
977         return retval;
978 }
979 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
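/*
 * Illustrative sketch (hypothetical driver code): ask for a suspend attempt
 * 100 ms from now instead of suspending synchronously.  A negative return
 * value means the request could not be scheduled (e.g. -EAGAIN or -EACCES
 * from rpm_check_suspend_allowed()).
 *
 *        ret = pm_schedule_suspend(dev, 100);
 *        if (ret < 0)
 *                dev_dbg(dev, "suspend not scheduled: %d\n", ret);
 */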
980
981 /**
982  * __pm_runtime_idle - Entry point for runtime idle operations.
983  * @dev: Device to send idle notification for.
984  * @rpmflags: Flag bits.
985  *
986  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
987  * return immediately if it is larger than zero.  Then carry out an idle
988  * notification, either synchronous or asynchronous.
989  *
990  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
991  * or if pm_runtime_irq_safe() has been called.
992  */
993 int __pm_runtime_idle(struct device *dev, int rpmflags)
994 {
995         unsigned long flags;
996         int retval;
997
998         if (rpmflags & RPM_GET_PUT) {
999                 if (!atomic_dec_and_test(&dev->power.usage_count))
1000                         return 0;
1001         }
1002
1003         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1004
1005         spin_lock_irqsave(&dev->power.lock, flags);
1006         retval = rpm_idle(dev, rpmflags);
1007         spin_unlock_irqrestore(&dev->power.lock, flags);
1008
1009         return retval;
1010 }
1011 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1012
1013 /**
1014  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1015  * @dev: Device to suspend.
1016  * @rpmflags: Flag bits.
1017  *
1018  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1019  * return immediately if it is larger than zero.  Then carry out a suspend,
1020  * either synchronous or asynchronous.
1021  *
1022  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1023  * or if pm_runtime_irq_safe() has been called.
1024  */
1025 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1026 {
1027         unsigned long flags;
1028         int retval;
1029
1030         if (rpmflags & RPM_GET_PUT) {
1031                 if (!atomic_dec_and_test(&dev->power.usage_count))
1032                         return 0;
1033         }
1034
1035         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1036
1037         spin_lock_irqsave(&dev->power.lock, flags);
1038         retval = rpm_suspend(dev, rpmflags);
1039         spin_unlock_irqrestore(&dev->power.lock, flags);
1040
1041         return retval;
1042 }
1043 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1044
1045 /**
1046  * __pm_runtime_resume - Entry point for runtime resume operations.
1047  * @dev: Device to resume.
1048  * @rpmflags: Flag bits.
1049  *
1050  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1051  * carry out a resume, either synchronous or asynchronous.
1052  *
1053  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1054  * or if pm_runtime_irq_safe() has been called.
1055  */
1056 int __pm_runtime_resume(struct device *dev, int rpmflags)
1057 {
1058         unsigned long flags;
1059         int retval;
1060
1061         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1062                         dev->power.runtime_status != RPM_ACTIVE);
1063
1064         if (rpmflags & RPM_GET_PUT)
1065                 atomic_inc(&dev->power.usage_count);
1066
1067         spin_lock_irqsave(&dev->power.lock, flags);
1068         retval = rpm_resume(dev, rpmflags);
1069         spin_unlock_irqrestore(&dev->power.lock, flags);
1070
1071         return retval;
1072 }
1073 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
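/*
 * Illustrative sketch: drivers normally reach the three entry points above
 * through the inline wrappers in include/linux/pm_runtime.h, e.g.
 * pm_runtime_get_sync() ends up in __pm_runtime_resume(dev, RPM_GET_PUT) and
 * pm_runtime_put() in __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC).
 * Hypothetical I/O path:
 *
 *        ret = pm_runtime_get_sync(dev);
 *        if (ret < 0) {
 *                pm_runtime_put_noidle(dev);
 *                return ret;
 *        }
 *        ... talk to the hardware ...
 *        pm_runtime_put(dev);
 */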
1074
1075 /**
1076  * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
1077  * @dev: Device to handle.
1078  *
1079  * Return -EINVAL if runtime PM is disabled for the device.
1080  *
1081  * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
1082  * and the runtime PM usage counter is nonzero, increment the counter and
1083  * return 1.  Otherwise return 0 without changing the counter.
1084  */
1085 int pm_runtime_get_if_in_use(struct device *dev)
1086 {
1087         unsigned long flags;
1088         int retval;
1089
1090         spin_lock_irqsave(&dev->power.lock, flags);
1091         retval = dev->power.disable_depth > 0 ? -EINVAL :
1092                 dev->power.runtime_status == RPM_ACTIVE
1093                         && atomic_inc_not_zero(&dev->power.usage_count);
1094         spin_unlock_irqrestore(&dev->power.lock, flags);
1095         return retval;
1096 }
1097 EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
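/*
 * Illustrative sketch: a caller that only wants to touch the hardware when
 * it is already powered up and in use might do something like this
 * (error handling elided):
 *
 *        if (pm_runtime_get_if_in_use(dev) > 0) {
 *                ... access registers ...
 *                pm_runtime_put(dev);
 *        }
 */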
1098
1099 /**
1100  * __pm_runtime_set_status - Set runtime PM status of a device.
1101  * @dev: Device to handle.
1102  * @status: New runtime PM status of the device.
1103  *
1104  * If runtime PM of the device is disabled or its power.runtime_error field is
1105  * different from zero, the status may be changed either to RPM_ACTIVE, or to
1106  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1107  * However, if the device has a parent and the parent is not active, and the
1108  * parent's power.ignore_children flag is unset, the device's status cannot be
1109  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1110  *
1111  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1112  * and the device parent's counter of unsuspended children is modified to
1113  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1114  * notification request for the parent is submitted.
1115  *
1116  * If @dev has any suppliers (as reflected by device links to them), and @status
1117  * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1118  * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1119  * of the @status value) and the suppliers will be deactivated on exit.  The
1120  * error returned by the failing supplier activation will be returned in that
1121  * case.
1122  */
1123 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1124 {
1125         struct device *parent = dev->parent;
1126         bool notify_parent = false;
1127         int error = 0;
1128
1129         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1130                 return -EINVAL;
1131
1132         spin_lock_irq(&dev->power.lock);
1133
1134         /*
1135          * Prevent PM-runtime from being enabled for the device or return an
1136          * error if it is enabled already and working.
1137          */
1138         if (dev->power.runtime_error || dev->power.disable_depth)
1139                 dev->power.disable_depth++;
1140         else
1141                 error = -EAGAIN;
1142
1143         spin_unlock_irq(&dev->power.lock);
1144
1145         if (error)
1146                 return error;
1147
1148         /*
1149          * If the new status is RPM_ACTIVE, the suppliers can be activated
1150          * upfront regardless of the current status, because next time
1151          * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1152          * involved will be dropped down to one anyway.
1153          */
1154         if (status == RPM_ACTIVE) {
1155                 int idx = device_links_read_lock();
1156
1157                 error = rpm_get_suppliers(dev);
1158                 if (error)
1159                         status = RPM_SUSPENDED;
1160
1161                 device_links_read_unlock(idx);
1162         }
1163
1164         spin_lock_irq(&dev->power.lock);
1165
1166         if (dev->power.runtime_status == status || !parent)
1167                 goto out_set;
1168
1169         if (status == RPM_SUSPENDED) {
1170                 atomic_add_unless(&parent->power.child_count, -1, 0);
1171                 notify_parent = !parent->power.ignore_children;
1172         } else {
1173                 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1174
1175                 /*
1176                  * It is invalid to put an active child under a parent that is
1177                  * not active, has runtime PM enabled and the
1178                  * 'power.ignore_children' flag unset.
1179                  */
1180                 if (!parent->power.disable_depth
1181                     && !parent->power.ignore_children
1182                     && parent->power.runtime_status != RPM_ACTIVE) {
1183                         dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1184                                 dev_name(dev),
1185                                 dev_name(parent));
1186                         error = -EBUSY;
1187                 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1188                         atomic_inc(&parent->power.child_count);
1189                 }
1190
1191                 spin_unlock(&parent->power.lock);
1192
1193                 if (error) {
1194                         status = RPM_SUSPENDED;
1195                         goto out;
1196                 }
1197         }
1198
1199  out_set:
1200         __update_runtime_status(dev, status);
1201         if (!error)
1202                 dev->power.runtime_error = 0;
1203
1204  out:
1205         spin_unlock_irq(&dev->power.lock);
1206
1207         if (notify_parent)
1208                 pm_request_idle(parent);
1209
1210         if (status == RPM_SUSPENDED) {
1211                 int idx = device_links_read_lock();
1212
1213                 rpm_put_suppliers(dev);
1214
1215                 device_links_read_unlock(idx);
1216         }
1217
1218         pm_runtime_enable(dev);
1219
1220         return error;
1221 }
1222 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
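/*
 * Illustrative sketch: __pm_runtime_set_status() is normally reached through
 * the pm_runtime_set_active()/pm_runtime_set_suspended() wrappers, e.g. in a
 * hypothetical probe() that hands over a device which is already powered on:
 *
 *        pm_runtime_set_active(dev);
 *        pm_runtime_enable(dev);
 */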
1223
1224 /**
1225  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1226  * @dev: Device to handle.
1227  *
1228  * Flush all pending requests for the device from pm_wq and wait for all
1229  * runtime PM operations involving the device in progress to complete.
1230  *
1231  * Should be called under dev->power.lock with interrupts disabled.
1232  */
1233 static void __pm_runtime_barrier(struct device *dev)
1234 {
1235         pm_runtime_deactivate_timer(dev);
1236
1237         if (dev->power.request_pending) {
1238                 dev->power.request = RPM_REQ_NONE;
1239                 spin_unlock_irq(&dev->power.lock);
1240
1241                 cancel_work_sync(&dev->power.work);
1242
1243                 spin_lock_irq(&dev->power.lock);
1244                 dev->power.request_pending = false;
1245         }
1246
1247         if (dev->power.runtime_status == RPM_SUSPENDING
1248             || dev->power.runtime_status == RPM_RESUMING
1249             || dev->power.idle_notification) {
1250                 DEFINE_WAIT(wait);
1251
1252                 /* Suspend, wake-up or idle notification in progress. */
1253                 for (;;) {
1254                         prepare_to_wait(&dev->power.wait_queue, &wait,
1255                                         TASK_UNINTERRUPTIBLE);
1256                         if (dev->power.runtime_status != RPM_SUSPENDING
1257                             && dev->power.runtime_status != RPM_RESUMING
1258                             && !dev->power.idle_notification)
1259                                 break;
1260                         spin_unlock_irq(&dev->power.lock);
1261
1262                         schedule();
1263
1264                         spin_lock_irq(&dev->power.lock);
1265                 }
1266                 finish_wait(&dev->power.wait_queue, &wait);
1267         }
1268 }
1269
1270 /**
1271  * pm_runtime_barrier - Flush pending requests and wait for completions.
1272  * @dev: Device to handle.
1273  *
1274  * Prevent the device from being suspended by incrementing its usage counter and
1275  * if there's a pending resume request for the device, wake the device up.
1276  * Next, make sure that all pending requests for the device have been flushed
1277  * from pm_wq and wait for all runtime PM operations involving the device in
1278  * progress to complete.
1279  *
1280  * Return value:
1281  * 1, if there was a resume request pending and the device had to be woken up,
1282  * 0, otherwise
1283  */
1284 int pm_runtime_barrier(struct device *dev)
1285 {
1286         int retval = 0;
1287
1288         pm_runtime_get_noresume(dev);
1289         spin_lock_irq(&dev->power.lock);
1290
1291         if (dev->power.request_pending
1292             && dev->power.request == RPM_REQ_RESUME) {
1293                 rpm_resume(dev, 0);
1294                 retval = 1;
1295         }
1296
1297         __pm_runtime_barrier(dev);
1298
1299         spin_unlock_irq(&dev->power.lock);
1300         pm_runtime_put_noidle(dev);
1301
1302         return retval;
1303 }
1304 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1305
1306 /**
1307  * __pm_runtime_disable - Disable runtime PM of a device.
1308  * @dev: Device to handle.
1309  * @check_resume: If set, check if there's a resume request for the device.
1310  *
1311  * Increment power.disable_depth for the device and if it was zero previously,
1312  * cancel all pending runtime PM requests for the device and wait for all
1313  * operations in progress to complete.  The device can be either active or
1314  * suspended after its runtime PM has been disabled.
1315  *
1316  * If @check_resume is set and there's a resume request pending when
1317  * __pm_runtime_disable() is called and power.disable_depth is zero, the
1318  * function will wake up the device before disabling its runtime PM.
1319  */
1320 void __pm_runtime_disable(struct device *dev, bool check_resume)
1321 {
1322         spin_lock_irq(&dev->power.lock);
1323
1324         if (dev->power.disable_depth > 0) {
1325                 dev->power.disable_depth++;
1326                 goto out;
1327         }
1328
1329         /*
1330          * Wake up the device if there's a resume request pending, because that
1331          * means there probably is some I/O to process and disabling runtime PM
1332          * shouldn't prevent the device from processing the I/O.
1333          */
1334         if (check_resume && dev->power.request_pending
1335             && dev->power.request == RPM_REQ_RESUME) {
1336                 /*
1337                  * Prevent suspends and idle notifications from being carried
1338                  * out after we have woken up the device.
1339                  */
1340                 pm_runtime_get_noresume(dev);
1341
1342                 rpm_resume(dev, 0);
1343
1344                 pm_runtime_put_noidle(dev);
1345         }
1346
1347         /* Update time accounting before disabling PM-runtime. */
1348         update_pm_runtime_accounting(dev);
1349
1350         if (!dev->power.disable_depth++)
1351                 __pm_runtime_barrier(dev);
1352
1353  out:
1354         spin_unlock_irq(&dev->power.lock);
1355 }
1356 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1357
1358 /**
1359  * pm_runtime_enable - Enable runtime PM of a device.
1360  * @dev: Device to handle.
1361  */
1362 void pm_runtime_enable(struct device *dev)
1363 {
1364         unsigned long flags;
1365
1366         spin_lock_irqsave(&dev->power.lock, flags);
1367
1368         if (dev->power.disable_depth > 0) {
1369                 dev->power.disable_depth--;
1370
1371                 /* About to enable runtime pm, set accounting_timestamp to now */
1372                 if (!dev->power.disable_depth)
1373                         dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1374         } else {
1375                 dev_warn(dev, "Unbalanced %s!\n", __func__);
1376         }
1377
1378         WARN(!dev->power.disable_depth &&
1379              dev->power.runtime_status == RPM_SUSPENDED &&
1380              !dev->power.ignore_children &&
1381              atomic_read(&dev->power.child_count) > 0,
1382              "Enabling runtime PM for inactive device (%s) with active children\n",
1383              dev_name(dev));
1384
1385         spin_unlock_irqrestore(&dev->power.lock, flags);
1386 }
1387 EXPORT_SYMBOL_GPL(pm_runtime_enable);
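/*
 * Illustrative sketch: pm_runtime_enable() is normally balanced by
 * pm_runtime_disable() in the driver's remove path (hypothetical driver):
 *
 *        probe:  pm_runtime_enable(dev);
 *        remove: pm_runtime_disable(dev);
 */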
1388
1389 /**
1390  * pm_runtime_forbid - Block runtime PM of a device.
1391  * @dev: Device to handle.
1392  *
1393  * Increase the device's usage count and clear its power.runtime_auto flag,
1394  * so that it cannot be suspended at run time until pm_runtime_allow() is called
1395  * for it.
1396  */
1397 void pm_runtime_forbid(struct device *dev)
1398 {
1399         spin_lock_irq(&dev->power.lock);
1400         if (!dev->power.runtime_auto)
1401                 goto out;
1402
1403         dev->power.runtime_auto = false;
1404         atomic_inc(&dev->power.usage_count);
1405         rpm_resume(dev, 0);
1406
1407  out:
1408         spin_unlock_irq(&dev->power.lock);
1409 }
1410 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1411
1412 /**
1413  * pm_runtime_allow - Unblock runtime PM of a device.
1414  * @dev: Device to handle.
1415  *
1416  * Decrease the device's usage count and set its power.runtime_auto flag.
1417  */
1418 void pm_runtime_allow(struct device *dev)
1419 {
1420         spin_lock_irq(&dev->power.lock);
1421         if (dev->power.runtime_auto)
1422                 goto out;
1423
1424         dev->power.runtime_auto = true;
1425         if (atomic_dec_and_test(&dev->power.usage_count))
1426                 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1427
1428  out:
1429         spin_unlock_irq(&dev->power.lock);
1430 }
1431 EXPORT_SYMBOL_GPL(pm_runtime_allow);
1432
1433 /**
1434  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1435  * @dev: Device to handle.
1436  *
1437  * Set the power.no_callbacks flag, which tells the PM core that this
1438  * device is power-managed through its parent and has no runtime PM
1439  * callbacks of its own.  The runtime sysfs attributes will be removed.
1440  */
1441 void pm_runtime_no_callbacks(struct device *dev)
1442 {
1443         spin_lock_irq(&dev->power.lock);
1444         dev->power.no_callbacks = 1;
1445         spin_unlock_irq(&dev->power.lock);
1446         if (device_is_registered(dev))
1447                 rpm_sysfs_remove(dev);
1448 }
1449 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1450
1451 /**
1452  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1453  * @dev: Device to handle
1454  *
1455  * Set the power.irq_safe flag, which tells the PM core that the
1456  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1457  * always be invoked with the spinlock held and interrupts disabled.  It also
1458  * causes the parent's usage counter to be permanently incremented, preventing
1459  * the parent from runtime suspending -- otherwise an irq-safe child might have
1460  * to wait for a non-irq-safe parent.
1461  */
1462 void pm_runtime_irq_safe(struct device *dev)
1463 {
1464         if (dev->parent)
1465                 pm_runtime_get_sync(dev->parent);
1466         spin_lock_irq(&dev->power.lock);
1467         dev->power.irq_safe = 1;
1468         spin_unlock_irq(&dev->power.lock);
1469 }
1470 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
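/*
 * Illustrative sketch: a driver whose ->runtime_suspend()/->runtime_resume()
 * callbacks only poke a few registers and are safe to run with interrupts
 * off could declare that from its (hypothetical) probe():
 *
 *        pm_runtime_irq_safe(dev);
 *        pm_runtime_enable(dev);
 */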
1471
1472 /**
1473  * update_autosuspend - Handle a change to a device's autosuspend settings.
1474  * @dev: Device to handle.
1475  * @old_delay: The former autosuspend_delay value.
1476  * @old_use: The former use_autosuspend value.
1477  *
1478  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1479  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1480  *
1481  * This function must be called under dev->power.lock with interrupts disabled.
1482  */
1483 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1484 {
1485         int delay = dev->power.autosuspend_delay;
1486
1487         /* Should runtime suspend be prevented now? */
1488         if (dev->power.use_autosuspend && delay < 0) {
1489
1490                 /* If it used to be allowed then prevent it. */
1491                 if (!old_use || old_delay >= 0) {
1492                         atomic_inc(&dev->power.usage_count);
1493                         rpm_resume(dev, 0);
1494                 }
1495         }
1496
1497         /* Runtime suspend should be allowed now. */
1498         else {
1499
1500                 /* If it used to be prevented then allow it. */
1501                 if (old_use && old_delay < 0)
1502                         atomic_dec(&dev->power.usage_count);
1503
1504                 /* Maybe we can autosuspend now. */
1505                 rpm_idle(dev, RPM_AUTO);
1506         }
1507 }
1508
1509 /**
1510  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1511  * @dev: Device to handle.
1512  * @delay: Value of the new delay in milliseconds.
1513  *
1514  * Set the device's power.autosuspend_delay value.  If it changes to negative
1515  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1516  * changes the other way, allow runtime suspends.
1517  */
1518 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1519 {
1520         int old_delay, old_use;
1521
1522         spin_lock_irq(&dev->power.lock);
1523         old_delay = dev->power.autosuspend_delay;
1524         old_use = dev->power.use_autosuspend;
1525         dev->power.autosuspend_delay = delay;
1526         update_autosuspend(dev, old_delay, old_use);
1527         spin_unlock_irq(&dev->power.lock);
1528 }
1529 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1530
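/*
 * Illustrative sketch (hypothetical device pointer, arbitrary 2000 ms value):
 * the delay is normally chosen in probe and can later be overridden through
 * the autosuspend_delay_ms sysfs attribute; a negative value prevents runtime
 * suspend for as long as use_autosuspend is set.
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 */
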
1531 /**
1532  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1533  * @dev: Device to handle.
1534  * @use: New value for use_autosuspend.
1535  *
1536  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1537  * suspends as needed.
1538  */
1539 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1540 {
1541         int old_delay, old_use;
1542
1543         spin_lock_irq(&dev->power.lock);
1544         old_delay = dev->power.autosuspend_delay;
1545         old_use = dev->power.use_autosuspend;
1546         dev->power.use_autosuspend = use;
1547         update_autosuspend(dev, old_delay, old_use);
1548         spin_unlock_irq(&dev->power.lock);
1549 }
1550 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1551
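/*
 * Illustrative sketch (hypothetical device pointer): drivers normally call
 * this through the pm_runtime_use_autosuspend() and
 * pm_runtime_dont_use_autosuspend() wrappers and then drop their usage counts
 * with the autosuspend variants of the put helpers.
 *
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *	...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */
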
1552 /**
1553  * pm_runtime_init - Initialize runtime PM fields in given device object.
1554  * @dev: Device object to initialize.
1555  */
1556 void pm_runtime_init(struct device *dev)
1557 {
1558         dev->power.runtime_status = RPM_SUSPENDED;
1559         dev->power.idle_notification = false;
1560
1561         dev->power.disable_depth = 1;
1562         atomic_set(&dev->power.usage_count, 0);
1563
1564         dev->power.runtime_error = 0;
1565
1566         atomic_set(&dev->power.child_count, 0);
1567         pm_suspend_ignore_children(dev, false);
1568         dev->power.runtime_auto = true;
1569
1570         dev->power.request_pending = false;
1571         dev->power.request = RPM_REQ_NONE;
1572         dev->power.deferred_resume = false;
1573         INIT_WORK(&dev->power.work, pm_runtime_work);
1574
1575         dev->power.timer_expires = 0;
1576         hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1577         dev->power.suspend_timer.function = pm_suspend_timer_fn;
1578
1579         init_waitqueue_head(&dev->power.wait_queue);
1580 }
1581
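/*
 * Note that pm_runtime_init() leaves the device with disable_depth == 1 and a
 * runtime PM status of RPM_SUSPENDED, so runtime PM stays inert until it is
 * enabled.  Illustrative sketch (hypothetical device pointer) of what a driver
 * that powers its device up in probe typically does afterwards:
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */
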
1582 /**
1583  * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1584  * @dev: Device object to re-initialize.
1585  */
1586 void pm_runtime_reinit(struct device *dev)
1587 {
1588         if (!pm_runtime_enabled(dev)) {
1589                 if (dev->power.runtime_status == RPM_ACTIVE)
1590                         pm_runtime_set_suspended(dev);
1591                 if (dev->power.irq_safe) {
1592                         spin_lock_irq(&dev->power.lock);
1593                         dev->power.irq_safe = 0;
1594                         spin_unlock_irq(&dev->power.lock);
1595                         if (dev->parent)
1596                                 pm_runtime_put(dev->parent);
1597                 }
1598         }
1599 }
1600
1601 /**
1602  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1603  * @dev: Device object being removed from device hierarchy.
1604  */
1605 void pm_runtime_remove(struct device *dev)
1606 {
1607         __pm_runtime_disable(dev, false);
1608         pm_runtime_reinit(dev);
1609 }
1610
1611 /**
1612  * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
1613  * @dev: Device whose driver is going to be removed.
1614  *
1615  * Check links from this device to any consumers and, if any of them have
1616  * active runtime PM references to the device, drop the device's usage counter
1617  * (as many times as needed).
1618  *
1619  * Links with the DL_FLAG_STATELESS flag set are ignored.
1620  *
1621  * Since the device is guaranteed to be runtime-active at the point this is
1622  * called, nothing else needs to be done here.
1623  *
1624  * Moreover, this is called after device_links_busy() has returned 'false', so
1625  * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
1626  * therefore rpm_active can't be manipulated concurrently.
1627  */
1628 void pm_runtime_clean_up_links(struct device *dev)
1629 {
1630         struct device_link *link;
1631         int idx;
1632
1633         idx = device_links_read_lock();
1634
1635         list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
1636                 if (link->flags & DL_FLAG_STATELESS)
1637                         continue;
1638
1639                 while (refcount_dec_not_one(&link->rpm_active))
1640                         pm_runtime_put_noidle(dev);
1641         }
1642
1643         device_links_read_unlock(idx);
1644 }
1645
1646 /**
1647  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1648  * @dev: Consumer device.
1649  */
1650 void pm_runtime_get_suppliers(struct device *dev)
1651 {
1652         struct device_link *link;
1653         int idx;
1654
1655         idx = device_links_read_lock();
1656
1657         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1658                 if (link->flags & DL_FLAG_PM_RUNTIME) {
1659                         link->supplier_preactivated = true;
1660                         refcount_inc(&link->rpm_active);
1661                         pm_runtime_get_sync(link->supplier);
1662                 }
1663
1664         device_links_read_unlock(idx);
1665 }
1666
1667 /**
1668  * pm_runtime_put_suppliers - Drop references to supplier devices.
1669  * @dev: Consumer device.
1670  */
1671 void pm_runtime_put_suppliers(struct device *dev)
1672 {
1673         struct device_link *link;
1674         int idx;
1675
1676         idx = device_links_read_lock();
1677
1678         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1679                 if (link->supplier_preactivated) {
1680                         link->supplier_preactivated = false;
1681                         if (refcount_dec_not_one(&link->rpm_active))
1682                                 pm_runtime_put(link->supplier);
1683                 }
1684
1685         device_links_read_unlock(idx);
1686 }
1687
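/*
 * Illustrative sketch (hypothetical consumer pointer): the two helpers above
 * are meant to be used as a pair, bracketing an operation, such as probing a
 * consumer, during which its DL_FLAG_PM_RUNTIME suppliers must remain active.
 *
 *	pm_runtime_get_suppliers(consumer);
 *	... operate on the consumer while its suppliers are held active ...
 *	pm_runtime_put_suppliers(consumer);
 */
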
1688 void pm_runtime_new_link(struct device *dev)
1689 {
1690         spin_lock_irq(&dev->power.lock);
1691         dev->power.links_count++;
1692         spin_unlock_irq(&dev->power.lock);
1693 }
1694
1695 void pm_runtime_drop_link(struct device *dev)
1696 {
1697         spin_lock_irq(&dev->power.lock);
1698         WARN_ON(dev->power.links_count == 0);
1699         dev->power.links_count--;
1700         spin_unlock_irq(&dev->power.lock);
1701 }
1702
1703 static bool pm_runtime_need_not_resume(struct device *dev)
1704 {
1705         return atomic_read(&dev->power.usage_count) <= 1 &&
1706                 (atomic_read(&dev->power.child_count) == 0 ||
1707                  dev->power.ignore_children);
1708 }
1709
1710 /**
1711  * pm_runtime_force_suspend - Force a device into suspend state if needed.
1712  * @dev: Device to suspend.
1713  *
1714  * Disable runtime PM so we can safely check the device's runtime PM status
1715  * and, if it is active, invoke its ->runtime_suspend callback to suspend it
1716  * and change its runtime PM status field to RPM_SUSPENDED.  Also, if the
1717  * device's usage and children counters don't indicate that the device was in
1718  * use before the system-wide transition under way, decrement its parent's
1719  * children counter (if there is a parent).  Keep runtime PM disabled to
1720  * preserve the state unless we encounter errors.
1721  *
1722  * Typically this function may be invoked from a system suspend callback to
1723  * make sure the device is put into a low-power state, and it should only be
1724  * used during system-wide PM transitions to sleep states.  It assumes that
1725  * the analogous pm_runtime_force_resume() will be used to resume the device.
1726  */
1727 int pm_runtime_force_suspend(struct device *dev)
1728 {
1729         int (*callback)(struct device *);
1730         int ret;
1731
1732         pm_runtime_disable(dev);
1733         if (pm_runtime_status_suspended(dev))
1734                 return 0;
1735
1736         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1737
1738         ret = callback ? callback(dev) : 0;
1739         if (ret)
1740                 goto err;
1741
1742         /*
1743          * If the device can stay in suspend after the system-wide transition
1744          * to the working state that will follow, drop the children counter of
1745          * its parent, but set its status to RPM_SUSPENDED anyway in case this
1746          * function is called again for it in the meantime.
1747          */
1748         if (pm_runtime_need_not_resume(dev))
1749                 pm_runtime_set_suspended(dev);
1750         else
1751                 __update_runtime_status(dev, RPM_SUSPENDED);
1752
1753         return 0;
1754
1755 err:
1756         pm_runtime_enable(dev);
1757         return ret;
1758 }
1759 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1760
1761 /**
1762  * pm_runtime_force_resume - Force a device into resume state if needed.
1763  * @dev: Device to resume.
1764  *
1765  * Prior to invoking this function we expect the user to have brought the
1766  * device into a low-power state by a call to pm_runtime_force_suspend().
1767  * Here we reverse those actions and bring the device back to full power, if
1768  * it is expected to be used on system resume.  Otherwise, we defer the resume
1769  * so that it can be managed via runtime PM.
1770  *
1771  * Typically this function may be invoked from a system resume callback.
1772  */
1773 int pm_runtime_force_resume(struct device *dev)
1774 {
1775         int (*callback)(struct device *);
1776         int ret = 0;
1777
1778         if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
1779                 goto out;
1780
1781         /*
1782          * The value of the parent's children counter is correct already, so
1783          * just update the status of the device.
1784          */
1785         __update_runtime_status(dev, RPM_ACTIVE);
1786
1787         callback = RPM_GET_CALLBACK(dev, runtime_resume);
1788
1789         ret = callback ? callback(dev) : 0;
1790         if (ret) {
1791                 pm_runtime_set_suspended(dev);
1792                 goto out;
1793         }
1794
1795         pm_runtime_mark_last_busy(dev);
1796 out:
1797         pm_runtime_enable(dev);
1798         return ret;
1799 }
1800 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
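
/*
 * Illustrative sketch (hypothetical foo_pm_ops name): the usual way to consume
 * the pair above is to point a driver's system sleep callbacks straight at it,
 * for instance via the SET_SYSTEM_SLEEP_PM_OPS() helper.
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *	};
 */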