mm: swap: clean up swap readahead
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
bool swap_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)                            \
        (((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))
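
/*
 * For example, assuming PAGE_SHIFT == 12 (so SWAP_RA_WIN_SHIFT == 6),
 * the per-vma readahead state packs into one long as
 *
 *   | addr (page-aligned bits) | win (6 bits) | hits (6 bits) |
 *
 * and SWAP_RA_VAL(0x7f1234567000, 4, 2) == 0x7f1234567000 | (4 << 6) | 2
 * == 0x7f1234567102; SWAP_RA_ADDR()/SWAP_RA_WIN()/SWAP_RA_HITS() recover
 * the three fields.
 */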

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)                                    \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr)   do { swap_cache_info.x += (nr); } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
        unsigned int i, j, nr;
        unsigned long ret = 0;
        struct address_space *spaces;

        rcu_read_lock();
        for (i = 0; i < MAX_SWAPFILES; i++) {
                /*
                 * The corresponding entries in nr_swapper_spaces and
                 * swapper_spaces will be reused only after at least
                 * one grace period.  So it is impossible for them
                 * to belong to different usages.
                 */
                nr = nr_swapper_spaces[i];
                spaces = rcu_dereference(swapper_spaces[i]);
                if (!nr || !spaces)
                        continue;
                for (j = 0; j < nr; j++)
                        ret += spaces[j].nrpages;
        }
        rcu_read_unlock();
        return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error, i, nr = hpage_nr_pages(page);
        struct address_space *address_space;
        pgoff_t idx = swp_offset(entry);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_ref_add(page, nr);
        SetPageSwapCache(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        for (i = 0; i < nr; i++) {
                set_page_private(page + i, entry.val + i);
                error = radix_tree_insert(&address_space->page_tree,
                                          idx + i, page + i);
                if (unlikely(error))
                        break;
        }
        if (likely(!error)) {
                address_space->nrpages += nr;
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                ADD_CACHE_INFO(add_total, nr);
        } else {
                /*
                 * Only the context which has set SWAP_HAS_CACHE flag
                 * would call add_to_swap_cache().
                 * So add_to_swap_cache() doesn't return -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page + i, 0UL);
                while (i--) {
                        radix_tree_delete(&address_space->page_tree, idx + i);
                        set_page_private(page + i, 0UL);
                }
                ClearPageSwapCache(page);
                page_ref_sub(page, nr);
        }
        spin_unlock_irq(&address_space->tree_lock);

        return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        struct address_space *address_space;
        int i, nr = hpage_nr_pages(page);
        swp_entry_t entry;
        pgoff_t idx;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        idx = swp_offset(entry);
        for (i = 0; i < nr; i++) {
                radix_tree_delete(&address_space->page_tree, idx + i);
                set_page_private(page + i, 0);
        }
        ClearPageSwapCache(page);
        address_space->nrpages -= nr;
        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
        ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page(page);
        if (!entry.val)
                return 0;

        if (mem_cgroup_try_charge_swap(page, entry))
                goto fail;

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
        /* -ENOMEM radix-tree allocation failure */
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the page will be dirtied in unmap because its pte should be
         * dirty. A special case is MADV_FREE page. The page's pte could have
         * dirty bit cleared but the page's SwapBacked bit is still set because
         * clearing the dirty bit and SwapBacked bit is not done under a lock.
         * For such a page, unmap will not set the dirty bit, so page reclaim
         * will not write the page out. This can cause data corruption when the
         * page is swapped in later. Always setting the dirty bit for the page
         * solves the problem.
         */
        set_page_dirty(page);

        return 1;

fail:
        put_swap_page(page, entry);
        return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        entry.val = page_private(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&address_space->tree_lock);

        put_swap_page(page, entry);
        page_ref_sub(page, hpage_nr_pages(page));
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
                               unsigned long addr)
{
        struct page *page;

        page = find_get_page(swap_address_space(entry), swp_offset(entry));

        INC_CACHE_INFO(find_total);
        if (page) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                INC_CACHE_INFO(find_success);
                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(PageTransCompound(page)))
                        return page;

                readahead = TestClearPageReadahead(page);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        }

        return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct page *found_page, *new_page = NULL;
        struct address_space *swapper_space = swap_address_space(entry);
        int err;
        *new_page_allocated = false;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(swapper_space, swp_offset(entry));
                if (found_page)
                        break;

                /*
                 * Just skip readahead for an unused swap slot.
                 * During swap_off, when swap_slot_cache is disabled,
                 * we have to handle the race between putting a
                 * swap entry in the swap cache and marking the swap slot
                 * as SWAP_HAS_CACHE.  That's handled later in this code;
                 * otherwise swap_off will be aborted if we return NULL.
                 */
                if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        radix_tree_preload_end();
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet.
                         */
                        cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __SetPageLocked(new_page);
                __SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        *new_page_allocated = true;
                        return new_page;
                }
                radix_tree_preload_end();
                __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                put_swap_page(new_page, entry);
        } while (err != -ENOMEM);

        if (new_page)
                put_page(new_page);
        return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll);

        return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}
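
/*
 * For example, with max_pages == 8: hits == 2 gives pages == 4; hits == 5
 * gives 7, rounded up to 8.  With no hits, pages drops to 1 unless the
 * fault is adjacent to prev_offset; the prev_win / 2 floor then lets the
 * window shrink by at most half per refault, e.g. 8 -> 4 -> 2 -> 1.
 */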

static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                prev_offset = offset;
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        bool do_poll = true, page_allocated;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;
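
        /*
         * For example, a window of 8 pages gives mask == 7, so a fault at
         * offset 0x123 reads the aligned cluster of offsets 0x120-0x127.
         */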

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = __read_swap_cache_async(
                        swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (offset != entry_offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
                spin_lock_init(&space->tree_lock);
        }
        nr_swapper_spaces[type] = nr;
        rcu_assign_pointer(swapper_spaces[type], spaces);

        return 0;
}

void exit_swap_address_space(unsigned int type)
{
        struct address_space *spaces;

        spaces = swapper_spaces[type];
        nr_swapper_spaces[type] = 0;
        rcu_assign_pointer(swapper_spaces[type], NULL);
        synchronize_rcu();
        kvfree(spaces);
}

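/*
 * Clamp the readahead pfn range [lpfn, rpfn) to stay inside both the vma
 * and the PMD-sized page table page containing the fault address, since
 * only those PTEs can be examined safely by the caller.
 */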
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
                                     unsigned long faddr,
                                     unsigned long lpfn,
                                     unsigned long rpfn,
                                     unsigned long *start,
                                     unsigned long *end)
{
        *start = max3(lpfn, PFN_DOWN(vma->vm_start),
                      PFN_DOWN(faddr & PMD_MASK));
        *end = min3(rpfn, PFN_DOWN(vma->vm_end),
                    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

static void swap_ra_info(struct vm_fault *vmf,
                        struct vma_swap_readahead *ra_info)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        swp_entry_t entry;
        unsigned long faddr, pfn, fpfn;
        unsigned long start, end;
        pte_t *pte, *orig_pte;
        unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
        pte_t *tpte;
#endif

        max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
                             SWAP_RA_ORDER_CEILING);
        if (max_win == 1) {
                ra_info->win = 1;
                return;
        }

        faddr = vmf->address;
        orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
        entry = pte_to_swp_entry(*pte);
        if (unlikely(non_swap_entry(entry))) {
                pte_unmap(orig_pte);
                return;
        }

        fpfn = PFN_DOWN(faddr);
        ra_val = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
                                               max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info,
                        SWAP_RA_VAL(faddr, win, 0));

        if (win == 1) {
                pte_unmap(orig_pte);
                return;
        }

        /* Copy the PTEs because the page table may be unmapped */
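        /*
         * Place the window around the fault: ahead of it for a forward
         * sequential pattern (fpfn == pfn + 1), behind it for a backward
         * sequential pattern (pfn == fpfn + 1), and roughly centered on
         * the fault otherwise.
         */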
        if (fpfn == pfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
        else if (pfn == fpfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
                                  &start, &end);
        else {
                left = (win - 1) / 2;
                swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
                                  &start, &end);
        }
        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
        pte -= ra_info->offset;
#ifdef CONFIG_64BIT
        ra_info->ptes = pte;
#else
        tpte = ra_info->ptes;
        for (pfn = start; pfn != end; pfn++)
                *tpte++ = *pte++;
#endif
        pte_unmap(orig_pte);
}

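/*
 * Read in pages around the fault address based on the per-vma readahead
 * state computed by swap_ra_info(), then read the faulting entry itself.
 * The faulting page is read with polling only when no readahead window
 * is used (ra_info.win == 1).
 */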
struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                    struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        pte_t *pte, pentry;
        swp_entry_t entry;
        unsigned int i;
        bool page_allocated;
        struct vma_swap_readahead ra_info = {0,};

        swap_ra_info(vmf, &ra_info);
        if (ra_info.win == 1)
                goto skip;

        blk_start_plug(&plug);
        for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
             i++, pte++) {
                pentry = *pte;
                if (pte_none(pentry))
                        continue;
                if (pte_present(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                page = __read_swap_cache_async(entry, gfp_mask, vma,
                                               vmf->address, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (i != ra_info.offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        lru_add_drain();
skip:
        return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
                                     ra_info.win == 1);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", swap_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                      struct kobj_attribute *attr,
                                      const char *buf, size_t count)
{
        if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
                swap_vma_readahead = true;
        else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
                swap_vma_readahead = false;
        else
                return -EINVAL;

        return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
        __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
               vma_ra_enabled_store);
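
/*
 * The knob is exposed as /sys/kernel/mm/swap/vma_ra_enabled, e.g.:
 *
 *   echo false > /sys/kernel/mm/swap/vma_ra_enabled
 *
 * which falls back to the cluster-based swapin_readahead() heuristic.
 */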

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}
subsys_initcall(swap_init_sysfs);
#endif