a5fb884a136da01dcb911059df2e09d5fb4ff283
[muen/linux.git] / drivers / base / firmware_class.c
1 /*
2  * firmware_class.c - Multi purpose firmware loading support
3  *
4  * Copyright (c) 2003 Manuel Estrada Sainz
5  *
6  * Please see Documentation/firmware_class/ for more information.
7  *
8  */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/capability.h>
13 #include <linux/device.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/timer.h>
17 #include <linux/vmalloc.h>
18 #include <linux/interrupt.h>
19 #include <linux/bitops.h>
20 #include <linux/mutex.h>
21 #include <linux/workqueue.h>
22 #include <linux/highmem.h>
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/sched.h>
26 #include <linux/file.h>
27 #include <linux/list.h>
28 #include <linux/fs.h>
29 #include <linux/async.h>
30 #include <linux/pm.h>
31 #include <linux/suspend.h>
32 #include <linux/syscore_ops.h>
33 #include <linux/reboot.h>
34 #include <linux/security.h>
35
36 #include <generated/utsrelease.h>
37
38 #include "base.h"
39
40 MODULE_AUTHOR("Manuel Estrada Sainz");
41 MODULE_DESCRIPTION("Multi purpose firmware loading support");
42 MODULE_LICENSE("GPL");
43
44 /* Builtin firmware support */
45
46 #ifdef CONFIG_FW_LOADER
47
48 extern struct builtin_fw __start_builtin_fw[];
49 extern struct builtin_fw __end_builtin_fw[];
50
/*
 * Look up @name in the firmware images linked into the kernel image
 * (between __start_builtin_fw and __end_builtin_fw).  On a match, point
 * @fw at the builtin data and return true; otherwise return false.
 *
 * If a preallocated buffer @buf is supplied and is large enough, the
 * image is also copied into it.
 * NOTE(review): when @buf is too small the copy is silently skipped but
 * the function still returns true — callers presumably check fw->size
 * against their buffer; confirm at call sites.
 */
static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
				    void *buf, size_t size)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (strcmp(name, b_fw->name) == 0) {
			fw->size = b_fw->size;
			fw->data = b_fw->data;

			if (buf && fw->size <= size)
				memcpy(buf, fw->data, fw->size);
			return true;
		}
	}

	return false;
}
69
70 static bool fw_is_builtin_firmware(const struct firmware *fw)
71 {
72         struct builtin_fw *b_fw;
73
74         for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
75                 if (fw->data == b_fw->data)
76                         return true;
77
78         return false;
79 }
80
81 #else /* Module case - no builtin firmware support */
82
/* Modular build: no builtin firmware section exists, lookup never hits. */
static inline bool fw_get_builtin_firmware(struct firmware *fw,
					   const char *name, void *buf,
					   size_t size)
{
	return false;
}
89
/* Modular build: nothing can be a builtin image. */
static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
94 #endif
95
/* Lifecycle states of one firmware load; tracked in struct fw_state. */
enum fw_status {
	FW_STATUS_UNKNOWN,	/* freshly initialized, no load started */
	FW_STATUS_LOADING,	/* set by fw_state_start(); data being written */
	FW_STATUS_DONE,		/* terminal: load succeeded, waiters woken */
	FW_STATUS_ABORTED,	/* terminal: load failed/cancelled, waiters woken */
};
102
103 static int loading_timeout = 60;        /* In seconds */
104
/*
 * Convert the 'loading_timeout' module setting (seconds) to jiffies.
 * A value of 0 means "wait forever" and maps to MAX_JIFFY_OFFSET.
 */
static inline long firmware_loading_timeout(void)
{
	return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
}
109
110 /*
111  * Concurrent request_firmware() for the same firmware need to be
112  * serialized.  struct fw_state is simple state machine which hold the
113  * state of the firmware loading.
114  */
struct fw_state {
	struct completion completion;	/* completed when status becomes DONE/ABORTED */
	enum fw_status status;		/* written via WRITE_ONCE in __fw_state_set() */
};
119
/* Prepare a fw_state for a fresh load: no waiters, status unknown. */
static void fw_state_init(struct fw_state *fw_st)
{
	init_completion(&fw_st->completion);
	fw_st->status = FW_STATUS_UNKNOWN;
}
125
126 static inline bool __fw_state_is_done(enum fw_status status)
127 {
128         return status == FW_STATUS_DONE || status == FW_STATUS_ABORTED;
129 }
130
131 static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
132 {
133         long ret;
134
135         ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
136         if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
137                 return -ENOENT;
138         if (!ret)
139                 return -ETIMEDOUT;
140
141         return ret < 0 ? ret : 0;
142 }
143
144 static void __fw_state_set(struct fw_state *fw_st,
145                            enum fw_status status)
146 {
147         WRITE_ONCE(fw_st->status, status);
148
149         if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
150                 complete_all(&fw_st->completion);
151 }
152
153 #define fw_state_start(fw_st)                                   \
154         __fw_state_set(fw_st, FW_STATUS_LOADING)
155 #define fw_state_done(fw_st)                                    \
156         __fw_state_set(fw_st, FW_STATUS_DONE)
157 #define fw_state_aborted(fw_st)                                 \
158         __fw_state_set(fw_st, FW_STATUS_ABORTED)
159 #define fw_state_wait(fw_st)                                    \
160         __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
161
162 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
163 {
164         return fw_st->status == status;
165 }
166
167 #define fw_state_is_aborted(fw_st)                              \
168         __fw_state_check(fw_st, FW_STATUS_ABORTED)
169
170 #ifdef CONFIG_FW_LOADER_USER_HELPER
171
172 #define fw_state_aborted(fw_st)                                 \
173         __fw_state_set(fw_st, FW_STATUS_ABORTED)
174 #define fw_state_is_done(fw_st)                                 \
175         __fw_state_check(fw_st, FW_STATUS_DONE)
176 #define fw_state_is_loading(fw_st)                              \
177         __fw_state_check(fw_st, FW_STATUS_LOADING)
178 #define fw_state_wait_timeout(fw_st, timeout)                   \
179         __fw_state_wait_common(fw_st, timeout)
180
181 #endif /* CONFIG_FW_LOADER_USER_HELPER */
182
183 /* firmware behavior options */
184 #define FW_OPT_UEVENT   (1U << 0)
185 #define FW_OPT_NOWAIT   (1U << 1)
186 #ifdef CONFIG_FW_LOADER_USER_HELPER
187 #define FW_OPT_USERHELPER       (1U << 2)
188 #else
189 #define FW_OPT_USERHELPER       0
190 #endif
191 #ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
192 #define FW_OPT_FALLBACK         FW_OPT_USERHELPER
193 #else
194 #define FW_OPT_FALLBACK         0
195 #endif
196 #define FW_OPT_NO_WARN  (1U << 3)
197 #define FW_OPT_NOCACHE  (1U << 4)
198
struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;		/* protects 'head' */
	struct list_head head;		/* all live firmware_buf objects */
	int state;			/* FW_LOADER_NO_CACHE / FW_LOADER_START_CACHE */

#ifdef CONFIG_PM_SLEEP
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;		/* protects 'fw_names' */
	struct list_head fw_names;	/* list of fw_cache_entry */

	struct delayed_work work;	/* deferred uncache after resume */

	struct notifier_block	pm_notify;	/* suspend/resume hooks */
#endif
};
220
/* Refcounted backing store for one firmware image, shared by requests. */
struct firmware_buf {
	struct kref ref;		/* released via __fw_free_buf() */
	struct list_head list;		/* linked on firmware_cache.head */
	struct firmware_cache *fwc;	/* owning cache */
	struct fw_state fw_st;		/* load state machine */
	void *data;			/* image data (vmalloc'd or preallocated) */
	size_t size;			/* bytes of valid image data */
	size_t allocated_size;		/* nonzero iff 'data' was preallocated by caller */
#ifdef CONFIG_FW_LOADER_USER_HELPER
	bool is_paged_buf;		/* 'data' is a vmap of 'pages' */
	bool need_uevent;		/* request used the uevent mechanism */
	struct page **pages;		/* pages filled via sysfs 'data' writes */
	int nr_pages;			/* pages currently allocated */
	int page_array_size;		/* capacity of 'pages' array */
	struct list_head pending_list;	/* linked on pending_fw_head while loading */
#endif
	const char *fw_id;		/* firmware name (kstrdup_const'd) */
};
239
/* One cached firmware name, linked on firmware_cache.fw_names. */
struct fw_cache_entry {
	struct list_head list;
	const char *name;	/* firmware name owned by this entry */
};
244
/* devres payload recording a firmware name used by a device. */
struct fw_name_devm {
	unsigned long magic;	/* set to &fw_cache to identify our devres */
	const char *name;	/* firmware name (kstrdup_const'd) */
};
249
250 #define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
251
252 #define FW_LOADER_NO_CACHE      0
253 #define FW_LOADER_START_CACHE   1
254
255 static int fw_cache_piggyback_on_request(const char *name);
256
257 /* fw_lock could be moved to 'struct firmware_priv' but since it is just
258  * guarding for corner cases a global lock should be OK */
259 static DEFINE_MUTEX(fw_lock);
260
261 static bool __enable_firmware = false;
262
/* Allow firmware lookups beyond the builtin section and the cache. */
static void enable_firmware(void)
{
	mutex_lock(&fw_lock);
	__enable_firmware = true;
	mutex_unlock(&fw_lock);
}
269
/* Restrict firmware lookups to the builtin section and the cache. */
static void disable_firmware(void)
{
	mutex_lock(&fw_lock);
	__enable_firmware = false;
	mutex_unlock(&fw_lock);
}
276
277 /*
278  * When disabled only the built-in firmware and the firmware cache will be
279  * used to look for firmware.
280  */
281 static bool firmware_enabled(void)
282 {
283         bool enabled = false;
284
285         mutex_lock(&fw_lock);
286         if (__enable_firmware)
287                 enabled = true;
288         mutex_unlock(&fw_lock);
289
290         return enabled;
291 }
292
293 static struct firmware_cache fw_cache;
294
295 static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
296                                               struct firmware_cache *fwc,
297                                               void *dbuf, size_t size)
298 {
299         struct firmware_buf *buf;
300
301         buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
302         if (!buf)
303                 return NULL;
304
305         buf->fw_id = kstrdup_const(fw_name, GFP_ATOMIC);
306         if (!buf->fw_id) {
307                 kfree(buf);
308                 return NULL;
309         }
310
311         kref_init(&buf->ref);
312         buf->fwc = fwc;
313         buf->data = dbuf;
314         buf->allocated_size = size;
315         fw_state_init(&buf->fw_st);
316 #ifdef CONFIG_FW_LOADER_USER_HELPER
317         INIT_LIST_HEAD(&buf->pending_list);
318 #endif
319
320         pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
321
322         return buf;
323 }
324
325 static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
326 {
327         struct firmware_buf *tmp;
328         struct firmware_cache *fwc = &fw_cache;
329
330         list_for_each_entry(tmp, &fwc->head, list)
331                 if (!strcmp(tmp->fw_id, fw_name))
332                         return tmp;
333         return NULL;
334 }
335
336 /* Returns 1 for batching firmware requests with the same name */
/* Returns 1 for batching firmware requests with the same name */
/*
 * Look up @fw_name under fwc->lock; if a buf already exists, take an
 * extra reference and return 1 so the caller can piggyback on the
 * in-flight load.  Otherwise allocate a fresh buf (with one reference),
 * link it into the cache list, and return 0.  Returns -ENOMEM on
 * allocation failure (*buf set to NULL).
 */
static int fw_lookup_and_allocate_buf(const char *fw_name,
				      struct firmware_cache *fwc,
				      struct firmware_buf **buf, void *dbuf,
				      size_t size)
{
	struct firmware_buf *tmp;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	if (tmp) {
		kref_get(&tmp->ref);
		spin_unlock(&fwc->lock);
		*buf = tmp;
		pr_debug("batched request - sharing the same struct firmware_buf and lookup for multiple requests\n");
		return 1;
	}
	/* allocation is GFP_ATOMIC inside __allocate_fw_buf: lock is held */
	tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
	if (tmp)
		list_add(&tmp->list, &fwc->head);
	spin_unlock(&fwc->lock);

	*buf = tmp;

	return tmp ? 0 : -ENOMEM;
}
362
/*
 * kref release callback: unlink the buf from its cache and free all of
 * its resources.  Called with fwc->lock held (see fw_free_buf()) and
 * drops that lock — hence the __releases() annotation.
 */
static void __fw_free_buf(struct kref *ref)
	__releases(&fwc->lock)
{
	struct firmware_buf *buf = to_fwbuf(ref);
	struct firmware_cache *fwc = buf->fwc;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);

	list_del(&buf->list);
	spin_unlock(&fwc->lock);

#ifdef CONFIG_FW_LOADER_USER_HELPER
	if (buf->is_paged_buf) {
		int i;
		/* data is a vmap over 'pages'; unmap, then free each page */
		vunmap(buf->data);
		for (i = 0; i < buf->nr_pages; i++)
			__free_page(buf->pages[i]);
		vfree(buf->pages);
	} else
#endif
	/* caller-preallocated buffers (allocated_size != 0) are not ours to free */
	if (!buf->allocated_size)
		vfree(buf->data);
	kfree_const(buf->fw_id);
	kfree(buf);
}
390
/*
 * Drop one reference on @buf.  The lock is taken here because the
 * release path (__fw_free_buf) must unlink the buf from fwc->head
 * atomically; kref_put() returns nonzero when the release callback ran,
 * in which case the lock was already dropped inside __fw_free_buf().
 */
static void fw_free_buf(struct firmware_buf *buf)
{
	struct firmware_cache *fwc = buf->fwc;
	spin_lock(&fwc->lock);
	if (!kref_put(&buf->ref, __fw_free_buf))
		spin_unlock(&fwc->lock);
}
398
399 /* direct firmware loading support */
400 static char fw_path_para[256];
401 static const char * const fw_path[] = {
402         fw_path_para,
403         "/lib/firmware/updates/" UTS_RELEASE,
404         "/lib/firmware/updates",
405         "/lib/firmware/" UTS_RELEASE,
406         "/lib/firmware"
407 };
408
409 /*
410  * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH'
411  * from kernel command line because firmware_class is generally built in
412  * kernel instead of module.
413  */
414 module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
415 MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
416
/*
 * Try to load the firmware image directly from the filesystem, probing
 * each entry of fw_path[] in order.  On success buf->data/->size are
 * populated and the load state is marked DONE.  Returns 0 on success,
 * -ENOENT if no path had the file, or another negative errno.
 */
static int
fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
{
	loff_t size;
	int i, len;
	int rc = -ENOENT;
	char *path;
	enum kernel_read_file_id id = READING_FIRMWARE;
	size_t msize = INT_MAX;

	/* Already populated data member means we're loading into a buffer */
	if (buf->data) {
		id = READING_FIRMWARE_PREALLOC_BUFFER;
		msize = buf->allocated_size;
	}

	/* PATH_MAX-sized scratch buffer from the names cache */
	path = __getname();
	if (!path)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		len = snprintf(path, PATH_MAX, "%s/%s",
			       fw_path[i], buf->fw_id);
		if (len >= PATH_MAX) {
			rc = -ENAMETOOLONG;
			break;
		}

		buf->size = 0;
		rc = kernel_read_file_from_path(path, &buf->data, &size, msize,
						id);
		if (rc) {
			/* -ENOENT is expected while probing candidate paths */
			if (rc == -ENOENT)
				dev_dbg(device, "loading %s failed with error %d\n",
					 path, rc);
			else
				dev_warn(device, "loading %s failed with error %d\n",
					 path, rc);
			continue;
		}
		dev_dbg(device, "direct-loading %s\n", buf->fw_id);
		buf->size = size;
		fw_state_done(&buf->fw_st);
		break;
	}
	__putname(path);

	return rc;
}
470
471 /* firmware holds the ownership of pages */
472 static void firmware_free_data(const struct firmware *fw)
473 {
474         /* Loaded directly? */
475         if (!fw->priv) {
476                 vfree(fw->data);
477                 return;
478         }
479         fw_free_buf(fw->priv);
480 }
481
482 /* store the pages buffer info firmware from buf */
483 static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
484 {
485         fw->priv = buf;
486 #ifdef CONFIG_FW_LOADER_USER_HELPER
487         fw->pages = buf->pages;
488 #endif
489         fw->size = buf->size;
490         fw->data = buf->data;
491
492         pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
493                  __func__, buf->fw_id, buf, buf->data,
494                  (unsigned int)buf->size);
495 }
496
497 #ifdef CONFIG_PM_SLEEP
/* devres destructor: free the recorded firmware name. */
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	/* magic identifies devres entries created by fw_add_devm_name() */
	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
				__func__, fwn->name, res);
	kfree_const(fwn->name);
}
507
508 static int fw_devm_match(struct device *dev, void *res,
509                 void *match_data)
510 {
511         struct fw_name_devm *fwn = res;
512
513         return (fwn->magic == (unsigned long)&fw_cache) &&
514                 !strcmp(fwn->name, match_data);
515 }
516
517 static struct fw_name_devm *fw_find_devm_name(struct device *dev,
518                 const char *name)
519 {
520         struct fw_name_devm *fwn;
521
522         fwn = devres_find(dev, fw_name_devm_release,
523                           fw_devm_match, (void *)name);
524         return fwn;
525 }
526
527 /* add firmware name into devres list */
/* add firmware name into devres list */
/*
 * Record that @dev uses firmware @name, so the cache code can find all
 * names per device across suspend/resume.  Returns 1 if already
 * recorded, 0 on success, -ENOMEM on allocation failure.
 */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	fwn = fw_find_devm_name(dev, name);
	if (fwn)
		return 1;

	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
			   GFP_KERNEL);
	if (!fwn)
		return -ENOMEM;
	fwn->name = kstrdup_const(name, GFP_KERNEL);
	if (!fwn->name) {
		devres_free(fwn);
		return -ENOMEM;
	}

	fwn->magic = (unsigned long)&fw_cache;
	devres_add(dev, fwn);

	return 0;
}
551 #else
/* Without PM sleep support there is no firmware cache to feed. */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
556 #endif
557
/*
 * Hand the (now loaded) buf over to the firmware struct the caller will
 * receive, and hook it into the caching machinery as dictated by
 * @opt_flags.  Returns -ENOENT if the load produced no data or was
 * aborted.
 */
static int assign_firmware_buf(struct firmware *fw, struct device *device,
			       unsigned int opt_flags)
{
	struct firmware_buf *buf = fw->priv;

	mutex_lock(&fw_lock);
	if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * device may have been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	/* don't cache firmware handled without uevent */
	if (device && (opt_flags & FW_OPT_UEVENT) &&
	    !(opt_flags & FW_OPT_NOCACHE))
		fw_add_devm_name(device, buf->fw_id);

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (!(opt_flags & FW_OPT_NOCACHE) &&
	    buf->fwc->state == FW_LOADER_START_CACHE) {
		/* cache holds its own reference on the buf */
		if (fw_cache_piggyback_on_request(buf->fw_id))
			kref_get(&buf->ref);
	}

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(buf, fw);
	mutex_unlock(&fw_lock);
	return 0;
}
596
597 /*
598  * user-mode helper code
599  */
600 #ifdef CONFIG_FW_LOADER_USER_HELPER
/* Per-request state for the user-mode helper sysfs interface. */
struct firmware_priv {
	bool nowait;			/* request came via the nowait API */
	struct device dev;		/* sysfs device exposing loading/data */
	struct firmware_buf *buf;	/* backing buffer being filled */
	struct firmware *fw;		/* firmware handed back to the requester */
};
607
/* Map the embedded sysfs device back to its firmware_priv. */
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
	return container_of(dev, struct firmware_priv, dev);
}
612
/*
 * Abort an in-flight user-helper load: drop it from the pending list
 * and mark the state ABORTED (waking waiters).  Must be called with
 * fw_lock held — see the callers in this file.
 */
static void __fw_load_abort(struct firmware_buf *buf)
{
	/*
	 * There is a small window in which user can write to 'loading'
	 * between loading done and disappearance of 'loading'
	 */
	if (fw_state_is_done(&buf->fw_st))
		return;

	list_del_init(&buf->pending_list);
	fw_state_aborted(&buf->fw_st);
}
625
626 static void fw_load_abort(struct firmware_priv *fw_priv)
627 {
628         struct firmware_buf *buf = fw_priv->buf;
629
630         __fw_load_abort(buf);
631 }
632
633 static LIST_HEAD(pending_fw_head);
634
635 static void kill_pending_fw_fallback_reqs(bool only_kill_custom)
636 {
637         struct firmware_buf *buf;
638         struct firmware_buf *next;
639
640         mutex_lock(&fw_lock);
641         list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
642                 if (!buf->need_uevent || !only_kill_custom)
643                          __fw_load_abort(buf);
644         }
645         mutex_unlock(&fw_lock);
646 }
647
/* Show the current firmware loading timeout (seconds) via sysfs. */
static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", loading_timeout);
}
653
654 /**
655  * firmware_timeout_store - set number of seconds to wait for firmware
656  * @class: device class pointer
657  * @attr: device attribute pointer
658  * @buf: buffer to scan for timeout value
659  * @count: number of bytes in @buf
660  *
661  *      Sets the number of seconds to wait for the firmware.  Once
662  *      this expires an error will be returned to the driver and no
663  *      firmware will be provided.
664  *
665  *      Note: zero means 'wait forever'.
666  **/
667 static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
668                              const char *buf, size_t count)
669 {
670         loading_timeout = simple_strtol(buf, NULL, 10);
671         if (loading_timeout < 0)
672                 loading_timeout = 0;
673
674         return count;
675 }
676 static CLASS_ATTR_RW(timeout);
677
678 static struct attribute *firmware_class_attrs[] = {
679         &class_attr_timeout.attr,
680         NULL,
681 };
682 ATTRIBUTE_GROUPS(firmware_class);
683
/* Device release callback: free the per-request state. */
static void fw_dev_release(struct device *dev)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);

	kfree(fw_priv);
}
690
691 static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
692 {
693         if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
694                 return -ENOMEM;
695         if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
696                 return -ENOMEM;
697         if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
698                 return -ENOMEM;
699
700         return 0;
701 }
702
/*
 * Class uevent hook.  fw_lock guards against fw_priv->buf being torn
 * down concurrently; when the buf is already gone the uevent is sent
 * without firmware variables.
 */
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int err = 0;

	mutex_lock(&fw_lock);
	if (fw_priv->buf)
		err = do_firmware_uevent(fw_priv, env);
	mutex_unlock(&fw_lock);
	return err;
}
714
/* The 'firmware' device class backing the user-helper sysfs interface. */
static struct class firmware_class = {
	.name		= "firmware",
	.class_groups	= firmware_class_groups,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};
721
/* Show 1 while user-space is loading data for this request, else 0. */
static ssize_t firmware_loading_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int loading = 0;

	mutex_lock(&fw_lock);
	/* buf may already be gone if the request completed/aborted */
	if (fw_priv->buf)
		loading = fw_state_is_loading(&fw_priv->buf->fw_st);
	mutex_unlock(&fw_lock);

	return sprintf(buf, "%d\n", loading);
}
735
736 /* Some architectures don't have PAGE_KERNEL_RO */
737 #ifndef PAGE_KERNEL_RO
738 #define PAGE_KERNEL_RO PAGE_KERNEL
739 #endif
740
741 /* one pages buffer should be mapped/unmapped only once */
/* one pages buffer should be mapped/unmapped only once */
/*
 * (Re)map the page array into a contiguous, read-only virtual range in
 * buf->data.  Any previous mapping is dropped first.  No-op for bufs
 * that are not page-backed.  Returns -ENOMEM if vmap fails.
 */
static int fw_map_pages_buf(struct firmware_buf *buf)
{
	if (!buf->is_paged_buf)
		return 0;

	vunmap(buf->data);
	buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
	if (!buf->data)
		return -ENOMEM;
	return 0;
}
753
754 /**
755  * firmware_loading_store - set value in the 'loading' control file
756  * @dev: device pointer
757  * @attr: device attribute pointer
758  * @buf: buffer to scan for loading control value
759  * @count: number of bytes in @buf
760  *
761  *      The relevant values are:
762  *
763  *       1: Start a load, discarding any previous partial load.
764  *       0: Conclude the load and hand the data to the driver code.
765  *      -1: Conclude the load with an error and discard any written data.
766  **/
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *fw_buf;
	ssize_t written = count;
	int loading = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&fw_lock);
	fw_buf = fw_priv->buf;
	/* a concurrent abort wins: ignore further control writes */
	if (fw_state_is_aborted(&fw_buf->fw_st))
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!fw_state_is_done(&fw_buf->fw_st)) {
			for (i = 0; i < fw_buf->nr_pages; i++)
				__free_page(fw_buf->pages[i]);
			vfree(fw_buf->pages);
			fw_buf->pages = NULL;
			fw_buf->page_array_size = 0;
			fw_buf->nr_pages = 0;
			fw_state_start(&fw_buf->fw_st);
		}
		break;
	case 0:
		if (fw_state_is_loading(&fw_buf->fw_st)) {
			int rc;

			/*
			 * Several loading requests may be pending on
			 * one same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 * */
			rc = fw_map_pages_buf(fw_buf);
			if (rc)
				dev_err(dev, "%s: map pages failed\n",
					__func__);
			else
				/* give the LSM a chance to veto the image */
				rc = security_kernel_post_read_file(NULL,
						fw_buf->data, fw_buf->size,
						READING_FIRMWARE);

			/*
			 * Same logic as fw_load_abort, only the DONE bit
			 * is ignored and we set ABORT only on failure.
			 */
			list_del_init(&fw_buf->pending_list);
			if (rc) {
				fw_state_aborted(&fw_buf->fw_st);
				written = rc;
			} else {
				fw_state_done(&fw_buf->fw_st);
			}
			break;
		}
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_priv);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return written;
}
839
840 static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
841
842 static void firmware_rw_buf(struct firmware_buf *buf, char *buffer,
843                            loff_t offset, size_t count, bool read)
844 {
845         if (read)
846                 memcpy(buffer, buf->data + offset, count);
847         else
848                 memcpy(buf->data + offset, buffer, count);
849 }
850
/*
 * Copy @count bytes between @buffer and the paged backing store at
 * @offset, one page at a time (kmap/kunmap each page so highmem pages
 * work).  Direction as in firmware_rw_buf().  Callers guarantee the
 * range lies within the allocated pages.
 */
static void firmware_rw(struct firmware_buf *buf, char *buffer,
			loff_t offset, size_t count, bool read)
{
	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE-1);
		/* copy up to the end of the current page */
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		if (read)
			memcpy(buffer, page_data + page_ofs, page_cnt);
		else
			memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
}
873
/*
 * sysfs 'data' read: let user-space read back what has been written so
 * far.  Fails with -ENODEV once the load is finished (the buf is handed
 * off) or if the buf is gone.  Reads past the current size return 0.
 */
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || fw_state_is_done(&buf->fw_st)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > buf->size) {
		ret_count = 0;
		goto out;
	}
	/* clamp to the bytes actually present */
	if (count > buf->size - offset)
		count = buf->size - offset;

	ret_count = count;

	/* linear (preallocated) buffer vs. page-array backing store */
	if (buf->data)
		firmware_rw_buf(buf, buffer, offset, count, true);
	else
		firmware_rw(buf, buffer, offset, count, true);

out:
	mutex_unlock(&fw_lock);
	return ret_count;
}
907
/*
 * Grow the page-array backing store so it can hold at least @min_size
 * bytes: enlarge the pointer array (at least doubling) and then allocate
 * any missing pages.  Aborts the whole load on allocation failure and
 * returns -ENOMEM; returns 0 on success.
 *
 * NOTE(review): @min_size is an int but callers pass offset + count
 * (loff_t + size_t) — presumably bounded elsewhere, but worth confirming
 * against very large sysfs writes.
 */
static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
	struct firmware_buf *buf = fw_priv->buf;
	int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;

	/* If the array of pages is too small, grow it... */
	if (buf->page_array_size < pages_needed) {
		/* at least double to keep the growth amortized */
		int new_array_size = max(pages_needed,
					 buf->page_array_size * 2);
		struct page **new_pages;

		new_pages = vmalloc(new_array_size * sizeof(void *));
		if (!new_pages) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		memcpy(new_pages, buf->pages,
		       buf->page_array_size * sizeof(void *));
		/* zero the tail so partially-filled arrays stay consistent */
		memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
		       (new_array_size - buf->page_array_size));
		vfree(buf->pages);
		buf->pages = new_pages;
		buf->page_array_size = new_array_size;
	}

	while (buf->nr_pages < pages_needed) {
		buf->pages[buf->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!buf->pages[buf->nr_pages]) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		buf->nr_pages++;
	}
	return 0;
}
945
946 /**
947  * firmware_data_write - write method for firmware
948  * @filp: open sysfs file
949  * @kobj: kobject for the device
950  * @bin_attr: bin_attr structure
951  * @buffer: buffer being written
952  * @offset: buffer offset for write in total data store area
953  * @count: buffer size
954  *
955  *      Data written to the 'data' attribute will be later handed to
956  *      the driver as a firmware image.
957  **/
958 static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
959                                    struct bin_attribute *bin_attr,
960                                    char *buffer, loff_t offset, size_t count)
961 {
962         struct device *dev = kobj_to_dev(kobj);
963         struct firmware_priv *fw_priv = to_firmware_priv(dev);
964         struct firmware_buf *buf;
965         ssize_t retval;
966
967         if (!capable(CAP_SYS_RAWIO))
968                 return -EPERM;
969
970         mutex_lock(&fw_lock);
971         buf = fw_priv->buf;
972         if (!buf || fw_state_is_done(&buf->fw_st)) {
973                 retval = -ENODEV;
974                 goto out;
975         }
976
977         if (buf->data) {
978                 if (offset + count > buf->allocated_size) {
979                         retval = -ENOMEM;
980                         goto out;
981                 }
982                 firmware_rw_buf(buf, buffer, offset, count, false);
983                 retval = count;
984         } else {
985                 retval = fw_realloc_buffer(fw_priv, offset + count);
986                 if (retval)
987                         goto out;
988
989                 retval = count;
990                 firmware_rw(buf, buffer, offset, count, false);
991         }
992
993         buf->size = max_t(size_t, offset + count, buf->size);
994 out:
995         mutex_unlock(&fw_lock);
996         return retval;
997 }
998
/* sysfs "data" binary attribute: userspace feeds the image through here. */
static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};

/* Plain attributes of the transient firmware device ("loading" knob). */
static struct attribute *fw_dev_attrs[] = {
	&dev_attr_loading.attr,
	NULL
};

/* Binary attributes of the transient firmware device ("data" payload). */
static struct bin_attribute *fw_dev_bin_attrs[] = {
	&firmware_attr_data,
	NULL
};

/* Combined group so both attribute kinds appear on device_add(). */
static const struct attribute_group fw_dev_attr_group = {
	.attrs = fw_dev_attrs,
	.bin_attrs = fw_dev_bin_attrs,
};

static const struct attribute_group *fw_dev_attr_groups[] = {
	&fw_dev_attr_group,
	NULL
};
1025
1026 static struct firmware_priv *
1027 fw_create_instance(struct firmware *firmware, const char *fw_name,
1028                    struct device *device, unsigned int opt_flags)
1029 {
1030         struct firmware_priv *fw_priv;
1031         struct device *f_dev;
1032
1033         fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
1034         if (!fw_priv) {
1035                 fw_priv = ERR_PTR(-ENOMEM);
1036                 goto exit;
1037         }
1038
1039         fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
1040         fw_priv->fw = firmware;
1041         f_dev = &fw_priv->dev;
1042
1043         device_initialize(f_dev);
1044         dev_set_name(f_dev, "%s", fw_name);
1045         f_dev->parent = device;
1046         f_dev->class = &firmware_class;
1047         f_dev->groups = fw_dev_attr_groups;
1048 exit:
1049         return fw_priv;
1050 }
1051
/* load a firmware via user helper */
/*
 * Expose a transient sysfs device, wait for userspace to feed the image
 * through the "data" attribute (or for the timeout/abort), then tear the
 * device down again.  Returns 0 on success or a negative errno.
 */
static int _request_firmware_load(struct firmware_priv *fw_priv,
				  unsigned int opt_flags, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_priv->dev;
	struct firmware_buf *buf = fw_priv->buf;

	/* fall back on userspace loading */
	if (!buf->data)
		buf->is_paged_buf = true;

	/* Hold back the ADD uevent until we are ready to accept data. */
	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	/* Make this request visible to the global abort paths. */
	mutex_lock(&fw_lock);
	list_add(&buf->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	if (opt_flags & FW_OPT_UEVENT) {
		buf->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
	} else {
		/* Custom fallback: wait (effectively) forever for userspace. */
		timeout = MAX_JIFFY_OFFSET;
	}

	retval = fw_state_wait_timeout(&buf->fw_st, timeout);
	if (retval < 0) {
		/* Timed out or interrupted: cancel the pending load. */
		mutex_lock(&fw_lock);
		fw_load_abort(fw_priv);
		mutex_unlock(&fw_lock);
	}

	if (fw_state_is_aborted(&buf->fw_st)) {
		/* Map abort reasons: signal -> -EINTR, anything else -> -EAGAIN. */
		if (retval == -ERESTARTSYS)
			retval = -EINTR;
		else
			retval = -EAGAIN;
	} else if (buf->is_paged_buf && !buf->data)
		/* Loader finished but pages were never mapped linearly. */
		retval = -ENOMEM;

	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}
1105
/*
 * Drive one user-helper fallback load: take the usermodehelper read lock
 * (waiting only for async/NOWAIT requests), run the sysfs loader, and on
 * success bind the resulting buf to @firmware.
 */
static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    unsigned int opt_flags)
{
	struct firmware_priv *fw_priv;
	long timeout;
	int ret;

	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		/*
		 * Async requests may sleep until usermode helpers become
		 * available again; 0 left means we gave up waiting.
		 */
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			return -EBUSY;
		}
	} else {
		/* Sync requests must not block if helpers are disabled. */
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			return ret;
		}
	}

	fw_priv = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_priv)) {
		ret = PTR_ERR(fw_priv);
		goto out_unlock;
	}

	fw_priv->buf = firmware->priv;
	ret = _request_firmware_load(fw_priv, opt_flags, timeout);

	if (!ret)
		ret = assign_firmware_buf(firmware, device, opt_flags);

out_unlock:
	usermodehelper_read_unlock();

	return ret;
}
1148
#else /* CONFIG_FW_LOADER_USER_HELPER */
/* No sysfs fallback compiled in: report that no loader is available. */
static inline int
fw_load_from_user_helper(struct firmware *firmware, const char *name,
			 struct device *device, unsigned int opt_flags)
{
	return -ENOENT;
}

/* Nothing can be pending without the user helper, so this is a no-op. */
static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }

#endif /* CONFIG_FW_LOADER_USER_HELPER */
1160
1161 /* prepare firmware and firmware_buf structs;
1162  * return 0 if a firmware is already assigned, 1 if need to load one,
1163  * or a negative error code
1164  */
1165 static int
1166 _request_firmware_prepare(struct firmware **firmware_p, const char *name,
1167                           struct device *device, void *dbuf, size_t size)
1168 {
1169         struct firmware *firmware;
1170         struct firmware_buf *buf;
1171         int ret;
1172
1173         *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
1174         if (!firmware) {
1175                 dev_err(device, "%s: kmalloc(struct firmware) failed\n",
1176                         __func__);
1177                 return -ENOMEM;
1178         }
1179
1180         if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
1181                 dev_dbg(device, "using built-in %s\n", name);
1182                 return 0; /* assigned */
1183         }
1184
1185         ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);
1186
1187         /*
1188          * bind with 'buf' now to avoid warning in failure path
1189          * of requesting firmware.
1190          */
1191         firmware->priv = buf;
1192
1193         if (ret > 0) {
1194                 ret = fw_state_wait(&buf->fw_st);
1195                 if (!ret) {
1196                         fw_set_page_data(buf, firmware);
1197                         return 0; /* assigned */
1198                 }
1199         }
1200
1201         if (ret < 0)
1202                 return ret;
1203         return 1; /* need to load */
1204 }
1205
1206 /*
1207  * Batched requests need only one wake, we need to do this step last due to the
1208  * fallback mechanism. The buf is protected with kref_get(), and it won't be
1209  * released until the last user calls release_firmware().
1210  *
1211  * Failed batched requests are possible as well, in such cases we just share
1212  * the struct firmware_buf and won't release it until all requests are woken
1213  * and have gone through this same path.
1214  */
1215 static void fw_abort_batch_reqs(struct firmware *fw)
1216 {
1217         struct firmware_buf *buf;
1218
1219         /* Loaded directly? */
1220         if (!fw || !fw->priv)
1221                 return;
1222
1223         buf = fw->priv;
1224         if (!fw_state_is_aborted(&buf->fw_st))
1225                 fw_state_aborted(&buf->fw_st);
1226 }
1227
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, void *buf, size_t size,
		  unsigned int opt_flags)
{
	struct firmware *fw = NULL;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	/* An empty name can never match an image; reject it up front. */
	if (!name || name[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	/* 0: builtin/cache hit, <0: error, >0: still needs loading. */
	ret = _request_firmware_prepare(&fw, name, device, buf, size);
	if (ret <= 0) /* error or already assigned */
		goto out;

	if (!firmware_enabled()) {
		WARN(1, "firmware request while host is not available\n");
		ret = -EHOSTDOWN;
		goto out;
	}

	/* Try loading straight from the filesystem first... */
	ret = fw_get_filesystem_firmware(device, fw->priv);
	if (ret) {
		if (!(opt_flags & FW_OPT_NO_WARN))
			dev_warn(device,
				 "Direct firmware load for %s failed with error %d\n",
				 name, ret);
		/* ...then fall back to the user helper, if allowed. */
		if (opt_flags & FW_OPT_USERHELPER) {
			dev_warn(device, "Falling back to user helper\n");
			ret = fw_load_from_user_helper(fw, name, device,
						       opt_flags);
		}
	} else
		ret = assign_firmware_buf(fw, device, opt_flags);

 out:
	if (ret < 0) {
		/* Wake any batched waiters sharing this buf, then drop it. */
		fw_abort_batch_reqs(fw);
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}
1279
1280 /**
1281  * request_firmware: - send firmware request and wait for it
1282  * @firmware_p: pointer to firmware image
1283  * @name: name of firmware file
1284  * @device: device for which firmware is being loaded
1285  *
1286  *      @firmware_p will be used to return a firmware image by the name
1287  *      of @name for device @device.
1288  *
1289  *      Should be called from user context where sleeping is allowed.
1290  *
1291  *      @name will be used as $FIRMWARE in the uevent environment and
1292  *      should be distinctive enough not to be confused with any other
1293  *      firmware image for this or any other device.
1294  *
1295  *      Caller must hold the reference count of @device.
1296  *
1297  *      The function can be called safely inside device's suspend and
1298  *      resume callback.
1299  **/
1300 int
1301 request_firmware(const struct firmware **firmware_p, const char *name,
1302                  struct device *device)
1303 {
1304         int ret;
1305
1306         /* Need to pin this module until return */
1307         __module_get(THIS_MODULE);
1308         ret = _request_firmware(firmware_p, name, device, NULL, 0,
1309                                 FW_OPT_UEVENT | FW_OPT_FALLBACK);
1310         module_put(THIS_MODULE);
1311         return ret;
1312 }
1313 EXPORT_SYMBOL(request_firmware);
1314
1315 /**
1316  * request_firmware_direct: - load firmware directly without usermode helper
1317  * @firmware_p: pointer to firmware image
1318  * @name: name of firmware file
1319  * @device: device for which firmware is being loaded
1320  *
1321  * This function works pretty much like request_firmware(), but this doesn't
1322  * fall back to usermode helper even if the firmware couldn't be loaded
1323  * directly from fs.  Hence it's useful for loading optional firmwares, which
1324  * aren't always present, without extra long timeouts of udev.
1325  **/
1326 int request_firmware_direct(const struct firmware **firmware_p,
1327                             const char *name, struct device *device)
1328 {
1329         int ret;
1330
1331         __module_get(THIS_MODULE);
1332         ret = _request_firmware(firmware_p, name, device, NULL, 0,
1333                                 FW_OPT_UEVENT | FW_OPT_NO_WARN);
1334         module_put(THIS_MODULE);
1335         return ret;
1336 }
1337 EXPORT_SYMBOL_GPL(request_firmware_direct);
1338
1339 /**
1340  * request_firmware_into_buf - load firmware into a previously allocated buffer
1341  * @firmware_p: pointer to firmware image
1342  * @name: name of firmware file
1343  * @device: device for which firmware is being loaded and DMA region allocated
1344  * @buf: address of buffer to load firmware into
1345  * @size: size of buffer
1346  *
1347  * This function works pretty much like request_firmware(), but it doesn't
1348  * allocate a buffer to hold the firmware data. Instead, the firmware
1349  * is loaded directly into the buffer pointed to by @buf and the @firmware_p
1350  * data member is pointed at @buf.
1351  *
1352  * This function doesn't cache firmware either.
1353  */
1354 int
1355 request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
1356                           struct device *device, void *buf, size_t size)
1357 {
1358         int ret;
1359
1360         __module_get(THIS_MODULE);
1361         ret = _request_firmware(firmware_p, name, device, buf, size,
1362                                 FW_OPT_UEVENT | FW_OPT_FALLBACK |
1363                                 FW_OPT_NOCACHE);
1364         module_put(THIS_MODULE);
1365         return ret;
1366 }
1367 EXPORT_SYMBOL(request_firmware_into_buf);
1368
/**
 * release_firmware: - release the resource associated with a firmware image
 * @fw: firmware resource to release, may be NULL
 **/
void release_firmware(const struct firmware *fw)
{
	if (!fw)
		return;

	/* Builtin images alias kernel data and must not be freed. */
	if (!fw_is_builtin_firmware(fw))
		firmware_free_data(fw);
	kfree(fw);
}
EXPORT_SYMBOL(release_firmware);
1382
/* Async support */

/* Bookkeeping for one request_firmware_nowait() invocation. */
struct firmware_work {
	struct work_struct work;	/* queued work item */
	struct module *module;		/* pinned requester module */
	const char *name;		/* owned copy of the image name */
	struct device *device;		/* target device (reference held) */
	void *context;			/* opaque cookie handed to @cont */
	void (*cont)(const struct firmware *fw, void *context);	/* completion callback */
	unsigned int opt_flags;		/* FW_OPT_* flags for the load */
};
1393
1394 static void request_firmware_work_func(struct work_struct *work)
1395 {
1396         struct firmware_work *fw_work;
1397         const struct firmware *fw;
1398
1399         fw_work = container_of(work, struct firmware_work, work);
1400
1401         _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
1402                           fw_work->opt_flags);
1403         fw_work->cont(fw, fw_work->context);
1404         put_device(fw_work->device); /* taken in request_firmware_nowait() */
1405
1406         module_put(fw_work->module);
1407         kfree_const(fw_work->name);
1408         kfree(fw_work);
1409 }
1410
1411 /**
1412  * request_firmware_nowait - asynchronous version of request_firmware
1413  * @module: module requesting the firmware
1414  * @uevent: sends uevent to copy the firmware image if this flag
1415  *      is non-zero else the firmware copy must be done manually.
1416  * @name: name of firmware file
1417  * @device: device for which firmware is being loaded
1418  * @gfp: allocation flags
1419  * @context: will be passed over to @cont, and
1420  *      @fw may be %NULL if firmware request fails.
1421  * @cont: function will be called asynchronously when the firmware
1422  *      request is over.
1423  *
1424  *      Caller must hold the reference count of @device.
1425  *
1426  *      Asynchronous variant of request_firmware() for user contexts:
1427  *              - sleep for as small periods as possible since it may
1428  *                increase kernel boot time of built-in device drivers
1429  *                requesting firmware in their ->probe() methods, if
1430  *                @gfp is GFP_KERNEL.
1431  *
1432  *              - can't sleep at all if @gfp is GFP_ATOMIC.
1433  **/
1434 int
1435 request_firmware_nowait(
1436         struct module *module, bool uevent,
1437         const char *name, struct device *device, gfp_t gfp, void *context,
1438         void (*cont)(const struct firmware *fw, void *context))
1439 {
1440         struct firmware_work *fw_work;
1441
1442         fw_work = kzalloc(sizeof(struct firmware_work), gfp);
1443         if (!fw_work)
1444                 return -ENOMEM;
1445
1446         fw_work->module = module;
1447         fw_work->name = kstrdup_const(name, gfp);
1448         if (!fw_work->name) {
1449                 kfree(fw_work);
1450                 return -ENOMEM;
1451         }
1452         fw_work->device = device;
1453         fw_work->context = context;
1454         fw_work->cont = cont;
1455         fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
1456                 (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
1457
1458         if (!try_module_get(module)) {
1459                 kfree_const(fw_work->name);
1460                 kfree(fw_work);
1461                 return -EFAULT;
1462         }
1463
1464         get_device(fw_work->device);
1465         INIT_WORK(&fw_work->work, request_firmware_work_func);
1466         schedule_work(&fw_work->work);
1467         return 0;
1468 }
1469 EXPORT_SYMBOL(request_firmware_nowait);
1470
#ifdef CONFIG_PM_SLEEP
/* Exclusive async domain used for the suspend-time firmware cache jobs. */
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
1473
/**
 * cache_firmware - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when
 * system isn't ready for them to request firmware image from userspace.
 * Once it returns successfully, driver can use request_firmware or its
 * nowait version to get the cached firmware without any interacting
 * with userspace
 *
 * Return 0 if the firmware image has been cached successfully
 * Return !0 otherwise
 *
 */
static int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	ret = request_firmware(&fw, fw_name, NULL);
	if (!ret)
		/*
		 * Free only the struct firmware wrapper; the underlying
		 * buf reference taken by request_firmware() is deliberately
		 * kept, which is what keeps the image cached.
		 * NOTE(review): relies on fw_cache retaining the buf —
		 * confirm against fw_lookup_and_allocate_buf().
		 */
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}
1503
1504 static struct firmware_buf *fw_lookup_buf(const char *fw_name)
1505 {
1506         struct firmware_buf *tmp;
1507         struct firmware_cache *fwc = &fw_cache;
1508
1509         spin_lock(&fwc->lock);
1510         tmp = __fw_lookup_buf(fw_name);
1511         spin_unlock(&fwc->lock);
1512
1513         return tmp;
1514 }
1515
1516 /**
1517  * uncache_firmware - remove one cached firmware image
1518  * @fw_name: the firmware image name
1519  *
1520  * Uncache one firmware image which has been cached successfully
1521  * before.
1522  *
1523  * Return 0 if the firmware cache has been removed successfully
1524  * Return !0 otherwise
1525  *
1526  */
1527 static int uncache_firmware(const char *fw_name)
1528 {
1529         struct firmware_buf *buf;
1530         struct firmware fw;
1531
1532         pr_debug("%s: %s\n", __func__, fw_name);
1533
1534         if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
1535                 return 0;
1536
1537         buf = fw_lookup_buf(fw_name);
1538         if (buf) {
1539                 fw_free_buf(buf);
1540                 return 0;
1541         }
1542
1543         return -EINVAL;
1544 }
1545
1546 static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1547 {
1548         struct fw_cache_entry *fce;
1549
1550         fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
1551         if (!fce)
1552                 goto exit;
1553
1554         fce->name = kstrdup_const(name, GFP_ATOMIC);
1555         if (!fce->name) {
1556                 kfree(fce);
1557                 fce = NULL;
1558                 goto exit;
1559         }
1560 exit:
1561         return fce;
1562 }
1563
1564 static int __fw_entry_found(const char *name)
1565 {
1566         struct firmware_cache *fwc = &fw_cache;
1567         struct fw_cache_entry *fce;
1568
1569         list_for_each_entry(fce, &fwc->fw_names, list) {
1570                 if (!strcmp(fce->name, name))
1571                         return 1;
1572         }
1573         return 0;
1574 }
1575
1576 static int fw_cache_piggyback_on_request(const char *name)
1577 {
1578         struct firmware_cache *fwc = &fw_cache;
1579         struct fw_cache_entry *fce;
1580         int ret = 0;
1581
1582         spin_lock(&fwc->name_lock);
1583         if (__fw_entry_found(name))
1584                 goto found;
1585
1586         fce = alloc_fw_cache_entry(name);
1587         if (fce) {
1588                 ret = 1;
1589                 list_add(&fce->list, &fwc->fw_names);
1590                 pr_debug("%s: fw: %s\n", __func__, name);
1591         }
1592 found:
1593         spin_unlock(&fwc->name_lock);
1594         return ret;
1595 }
1596
/* Release a cache entry and its owned name copy. */
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree_const(fce->name);
	kfree(fce);
}
1602
1603 static void __async_dev_cache_fw_image(void *fw_entry,
1604                                        async_cookie_t cookie)
1605 {
1606         struct fw_cache_entry *fce = fw_entry;
1607         struct firmware_cache *fwc = &fw_cache;
1608         int ret;
1609
1610         ret = cache_firmware(fce->name);
1611         if (ret) {
1612                 spin_lock(&fwc->name_lock);
1613                 list_del(&fce->list);
1614                 spin_unlock(&fwc->name_lock);
1615
1616                 free_fw_cache_entry(fce);
1617         }
1618 }
1619
1620 /* called with dev->devres_lock held */
1621 static void dev_create_fw_entry(struct device *dev, void *res,
1622                                 void *data)
1623 {
1624         struct fw_name_devm *fwn = res;
1625         const char *fw_name = fwn->name;
1626         struct list_head *head = data;
1627         struct fw_cache_entry *fce;
1628
1629         fce = alloc_fw_cache_entry(fw_name);
1630         if (fce)
1631                 list_add(&fce->list, head);
1632 }
1633
1634 static int devm_name_match(struct device *dev, void *res,
1635                            void *match_data)
1636 {
1637         struct fw_name_devm *fwn = res;
1638         return (fwn->magic == (unsigned long)match_data);
1639 }
1640
/*
 * Collect the firmware names recorded in @dev's devres list and queue an
 * async caching job for each name not already tracked in fw_cache.
 */
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	/* Build a private 'todo' list from the device's fw_name_devm entries. */
	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			free_fw_cache_entry(fce);
			fce = NULL;	/* duplicate: nothing to schedule */
		}
		spin_unlock(&fwc->name_lock);

		/* Schedule outside the spinlock; the entry is now tracked. */
		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}
1671
/*
 * Drain fw_cache.fw_names, uncaching every image.  The name_lock is
 * dropped around uncache_firmware() — presumably because it can sleep
 * (TODO confirm) — and list_del() before the drop keeps each entry
 * private to this thread.
 */
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
1691
/**
 * device_cache_fw_images - cache devices' firmware
 *
 * If one device called request_firmware or its nowait version
 * successfully before, the firmware names are recored into the
 * device's devres link list, so device_cache_fw_images can call
 * cache_firmware() to cache these firmwares for the device,
 * then the device driver can load its firmwares easily at
 * time when system is not ready to complete loading firmware.
 */
static void device_cache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	int old_timeout;
	DEFINE_WAIT(wait);

	pr_debug("%s\n", __func__);

	/* cancel uncache work */
	cancel_delayed_work_sync(&fwc->work);

	/*
	 * use small loading timeout for caching devices' firmware
	 * because all these firmware images have been loaded
	 * successfully at lease once, also system is ready for
	 * completing firmware loading now. The maximum size of
	 * firmware in current distributions is about 2M bytes,
	 * so 10 secs should be enough.
	 */
	old_timeout = loading_timeout;	/* restored below */
	loading_timeout = 10;

	/* fw_lock serializes the state change against active requests. */
	mutex_lock(&fw_lock);
	fwc->state = FW_LOADER_START_CACHE;
	dpm_for_each_dev(NULL, dev_cache_fw_image);
	mutex_unlock(&fw_lock);

	/* wait for completion of caching firmware for all devices */
	async_synchronize_full_domain(&fw_cache_domain);

	loading_timeout = old_timeout;
}
1734
/**
 * device_uncache_fw_images - uncache devices' firmware
 *
 * uncache all firmwares which have been cached successfully
 * by device_cache_fw_images earlier
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}
1746
/* Delayed-work wrapper for device_uncache_fw_images(). */
static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}
1751
/**
 * device_uncache_fw_images_delay - uncache devices firmwares
 * @delay: number of milliseconds to delay uncache device firmwares
 *
 * uncache all devices's firmwares which has been cached successfully
 * by device_cache_fw_images after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	/* Power-efficient queue: the uncache is not latency sensitive. */
	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
			   msecs_to_jiffies(delay));
}
1764
1765 /**
1766  * fw_pm_notify - notifier for suspend/resume
1767  * @notify_block: unused
1768  * @mode: mode we are switching to
1769  * @unused: unused
1770  *
1771  * Used to modify the firmware_class state as we move in between states.
1772  * The firmware_class implements a firmware cache to enable device driver
1773  * to fetch firmware upon resume before the root filesystem is ready. We
1774  * disable API calls which do not use the built-in firmware or the firmware
1775  * cache when we know these calls will not work.
1776  *
1777  * The inner logic behind all this is a bit complex so it is worth summarizing
1778  * the kernel's own suspend/resume process with context and focus on how this
1779  * can impact the firmware API.
1780  *
1781  * First a review on how we go to suspend::
1782  *
1783  *      pm_suspend() --> enter_state() -->
1784  *      sys_sync()
1785  *      suspend_prepare() -->
1786  *              __pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...);
1787  *              suspend_freeze_processes() -->
1788  *                      freeze_processes() -->
1789  *                              __usermodehelper_set_disable_depth(UMH_DISABLED);
1790  *                              freeze all tasks ...
1791  *                      freeze_kernel_threads()
1792  *      suspend_devices_and_enter() -->
1793  *              dpm_suspend_start() -->
1794  *                              dpm_prepare()
1795  *                              dpm_suspend()
1796  *              suspend_enter()  -->
1797  *                      platform_suspend_prepare()
1798  *                      dpm_suspend_late()
1799  *                      freeze_enter()
1800  *                      syscore_suspend()
1801  *
1802  * When we resume we bail out of a loop from suspend_devices_and_enter() and
1803  * unwind back out to the caller enter_state() where we were before as follows::
1804  *
1805  *      enter_state() -->
1806  *      suspend_devices_and_enter() --> (bail from loop)
1807  *              dpm_resume_end() -->
1808  *                      dpm_resume()
1809  *                      dpm_complete()
1810  *      suspend_finish() -->
1811  *              suspend_thaw_processes() -->
1812  *                      thaw_processes() -->
1813  *                              __usermodehelper_set_disable_depth(UMH_FREEZING);
1814  *                              thaw_workqueues();
1815  *                              thaw all processes ...
1816  *                              usermodehelper_enable();
1817  *              pm_notifier_call_chain(PM_POST_SUSPEND);
1818  *
1819  * fw_pm_notify() works through pm_notifier_call_chain().
1820  */
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	/* Going down: cache images now, then refuse new API calls. */
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		/*
		 * kill pending fallback requests with a custom fallback
		 * to avoid stalling suspend.
		 */
		kill_pending_fw_fallback_reqs(true);
		device_cache_fw_images();
		disable_firmware();
		break;

	/* Coming back up: re-enable the API and schedule the uncache. */
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case that system sleep failed and syscore_suspend is
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);
		enable_firmware();

		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}
1855
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	/*
	 * By this point user space is frozen, so no more firmware can be
	 * loaded into the cache; returning 0 lets the suspend proceed.
	 */
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}
1862
/*
 * Only ->suspend is needed; the resume-side cache reset is handled by
 * fw_pm_notify() on the PM_POST_* events.
 */
static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
#else
/*
 * Stub for configurations where the suspend-time firmware cache is
 * compiled out (the matching #ifdef is above this view — presumably
 * CONFIG_PM_SLEEP): nothing is cached, so there is nothing to piggyback.
 */
static int fw_cache_piggyback_on_request(const char *name)
{
	return 0;
}
#endif
1872
/*
 * One-time init of the global firmware cache.  The basic lock/list/state
 * setup is unconditional; the name-tracking list, delayed uncache work,
 * PM notifier and syscore hook exist only with CONFIG_PM_SLEEP.
 */
static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;

#ifdef CONFIG_PM_SLEEP
	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	/* Register callbacks only after all cache state above is set up. */
	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	register_pm_notifier(&fw_cache.pm_notify);

	register_syscore_ops(&fw_syscore_ops);
#endif
}
1892
/*
 * Reboot notifier: stop accepting new firmware requests, then abort any
 * in-flight fallback requests so shutdown cannot stall waiting on them.
 */
static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	disable_firmware();
	/*
	 * Kill all pending fallback requests to avoid both stalling shutdown,
	 * and avoid a deadlock with the usermode_lock.
	 */
	kill_pending_fw_fallback_reqs(false);

	return NOTIFY_DONE;
}
1905
/* Registered on the reboot notifier chain in firmware_class_init(). */
static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};
1909
/*
 * Module init: enable firmware loading, set up the cache, and hook the
 * reboot chain so pending requests are killed at shutdown.  The sysfs
 * class only exists when the user-mode helper fallback is built in.
 */
static int __init firmware_class_init(void)
{
	enable_firmware();
	fw_cache_init();
	register_reboot_notifier(&fw_shutdown_nb);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	return class_register(&firmware_class);
#else
	return 0;
#endif
}
1921
/*
 * Module exit: tear down in roughly the reverse order of
 * firmware_class_init()/fw_cache_init() — disable loading first, then
 * unregister the PM/syscore hooks, reboot notifier, and sysfs class.
 */
static void __exit firmware_class_exit(void)
{
	disable_firmware();
#ifdef CONFIG_PM_SLEEP
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
#endif
	unregister_reboot_notifier(&fw_shutdown_nb);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	class_unregister(&firmware_class);
#endif
}
1934
/*
 * fs_initcall runs before device_initcall, so the loader is ready
 * before most drivers can probe and request firmware.
 */
fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);