z3fold: limit use of stale list for allocation
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
        int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
        HEADLESS = 0,
        FIRST,
        MIDDLE,
        LAST,
        BUDDIES_MAX
};

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *                      z3fold page, except for HEADLESS pages
 * @buddy:              links the z3fold page into the relevant list in the
 *                      pool
 * @page_lock:          per-page lock
 * @refcount:           reference count for the z3fold page
 * @work:               work_struct for page layout optimization
 * @pool:               pointer to the pool which this page belongs to
 * @cpu:                CPU which this page "belongs" to
 * @first_chunks:       the size of the first buddy in chunks, 0 if free
 * @middle_chunks:      the size of the middle buddy in chunks, 0 if free
 * @last_chunks:        the size of the last buddy in chunks, 0 if free
 * @start_middle:       index of the starting chunk of the middle buddy
 * @first_num:          the starting number (for the first handle)
 */
struct z3fold_header {
        struct list_head buddy;
        spinlock_t page_lock;
        struct kref refcount;
        struct work_struct work;
        struct z3fold_pool *pool;
        short cpu;
        unsigned short first_chunks;
        unsigned short middle_chunks;
        unsigned short last_chunks;
        unsigned short start_middle;
        unsigned short first_num:2;
};

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; accordingly,
 * there will be 63 or 62 freelists per pool, respectively.
 */
#define NCHUNKS_ORDER   6

#define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE      (1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS     (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS    (PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS         ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK      (0x3)

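/*
 * As a worked example (assuming a 4 KiB page, i.e. PAGE_SHIFT == 12, and a
 * z3fold header that fits in a single chunk):
 *
 *   CHUNK_SHIFT       = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *   ZHDR_SIZE_ALIGNED = 64 (sizeof(struct z3fold_header) rounded up)
 *   ZHDR_CHUNKS       = 1
 *   TOTAL_CHUNKS      = 4096 / 64 = 64
 *   NCHUNKS           = (4096 - 64) / 64 = 63
 *
 * So each page holds 64 chunks, the first of which is reserved for the
 * header, leaving at most 63 chunks for the three buddies.
 */
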
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:       pool name
 * @lock:       protects pool unbuddied/lru lists
 * @stale_lock: protects pool stale page list
 * @unbuddied:  per-cpu array of lists tracking z3fold pages that contain two
 *              or fewer buddies; the list each z3fold page is added to
 *              depends on the size of its free region.
 * @lru:        list tracking the z3fold pages in LRU order by most recently
 *              added buddy.
 * @stale:      list of pages marked for freeing
 * @pages_nr:   number of z3fold pages in the pool.
 * @ops:        pointer to a structure of user defined operations specified at
 *              pool creation time.
 * @zpool:      zpool driver structure this pool was created through, if any
 * @zpool_ops:  zpool operations structure with an evict callback, if any
 * @compact_wq: workqueue for page layout background optimization
 * @release_wq: workqueue for safe page release
 * @work:       work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
        const char *name;
        spinlock_t lock;
        spinlock_t stale_lock;
        struct list_head *unbuddied;
        struct list_head lru;
        struct list_head stale;
        atomic64_t pages_nr;
        const struct z3fold_ops *ops;
        struct zpool *zpool;
        const struct zpool_ops *zpool_ops;
        struct workqueue_struct *compact_wq;
        struct workqueue_struct *release_wq;
        struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
        PAGE_HEADLESS = 0,
        MIDDLE_CHUNK_MAPPED,
        NEEDS_COMPACTING,
        PAGE_STALE
};

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

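/*
 * For instance, with 64-byte chunks (the 4 KiB page case sketched above),
 * a 100-byte allocation needs (100 + 63) >> 6 = 2 chunks and a 64-byte
 * allocation needs exactly 1 chunk; this rounding up is what creates the
 * internal fragmentation that NCHUNKS_ORDER trades off.
 */
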
#define for_each_unbuddied_list(_iter, _begin) \
        for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
                                        struct z3fold_pool *pool)
{
        struct z3fold_header *zhdr = page_address(page);

        INIT_LIST_HEAD(&page->lru);
        clear_bit(PAGE_HEADLESS, &page->private);
        clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        clear_bit(PAGE_STALE, &page->private);

        spin_lock_init(&zhdr->page_lock);
        kref_init(&zhdr->refcount);
        zhdr->first_chunks = 0;
        zhdr->middle_chunks = 0;
        zhdr->last_chunks = 0;
        zhdr->first_num = 0;
        zhdr->start_middle = 0;
        zhdr->cpu = -1;
        zhdr->pool = pool;
        INIT_LIST_HEAD(&zhdr->buddy);
        INIT_WORK(&zhdr->work, compact_page_work);
        return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
        __free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
        spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
        return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
        spin_unlock(&zhdr->page_lock);
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
        unsigned long handle;

        handle = (unsigned long)zhdr;
        if (bud != HEADLESS)
                handle += (bud + zhdr->first_num) & BUDDY_MASK;
        return handle;
}

/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
        return (struct z3fold_header *)(handle & PAGE_MASK);
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
        struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
        return (handle - zhdr->first_num) & BUDDY_MASK;
}

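/*
 * A short worked example of the handle encoding (hypothetical address,
 * assuming a 4 KiB page): for a z3fold header at 0xffff888012345000 with
 * first_num == 1, encode_handle(zhdr, MIDDLE) yields
 * 0xffff888012345000 + ((2 + 1) & 3) == 0xffff888012345003. Decoding masks
 * the low bits away to recover the header (handle & PAGE_MASK) and computes
 * (3 - 1) & 3 == 2 == MIDDLE to recover the buddy. first_num is bumped when
 * compaction turns a middle buddy into a first buddy, so that existing
 * handles to the moved object keep decoding to the right slot. HEADLESS
 * handles carry no offset at all: the handle is just the page address.
 */
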
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
        struct page *page = virt_to_page(zhdr);
        struct z3fold_pool *pool = zhdr->pool;

        WARN_ON(!list_empty(&zhdr->buddy));
        set_bit(PAGE_STALE, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        spin_lock(&pool->lock);
        if (!list_empty(&page->lru))
                list_del(&page->lru);
        spin_unlock(&pool->lock);
        if (locked)
                z3fold_page_unlock(zhdr);
        spin_lock(&pool->stale_lock);
        list_add(&zhdr->buddy, &pool->stale);
        queue_work(pool->release_wq, &pool->work);
        spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
                        release_z3fold_page(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        __release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                               refcount);
        spin_lock(&zhdr->pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&zhdr->pool->lock);

        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
        struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

        spin_lock(&pool->stale_lock);
        while (!list_empty(&pool->stale)) {
                struct z3fold_header *zhdr = list_first_entry(&pool->stale,
                                                struct z3fold_header, buddy);
                struct page *page = virt_to_page(zhdr);

                list_del(&zhdr->buddy);
                if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
                        continue;
                spin_unlock(&pool->stale_lock);
                cancel_work_sync(&zhdr->work);
                free_z3fold_page(page);
                cond_resched();
                spin_lock(&pool->stale_lock);
        }
        spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
        int nfree;
        /*
         * If there is a middle object, pick up the bigger free space
         * either before or after it. Otherwise just subtract the number
         * of chunks occupied by the first and the last objects.
         */
        if (zhdr->middle_chunks != 0) {
                int nfree_before = zhdr->first_chunks ?
                        0 : zhdr->start_middle - ZHDR_CHUNKS;
                int nfree_after = zhdr->last_chunks ?
                        0 : TOTAL_CHUNKS -
                                (zhdr->start_middle + zhdr->middle_chunks);
                nfree = max(nfree_before, nfree_after);
        } else
                nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
        return nfree;
}

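/*
 * Worked example (hypothetical layout, TOTAL_CHUNKS == 64, ZHDR_CHUNKS == 1):
 * with first_chunks == 0, a middle buddy of 5 chunks at start_middle == 20
 * and last_chunks == 0, the gap before the middle buddy is 20 - 1 = 19
 * chunks and the gap after it is 64 - (20 + 5) = 39 chunks, so
 * num_free_chunks() reports 39. Only the larger gap counts, because a new
 * buddy must fit into a single contiguous region.
 */
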
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
                                unsigned short dst_chunk)
{
        void *beg = zhdr;
        return memmove(beg + (dst_chunk << CHUNK_SHIFT),
                       beg + (zhdr->start_middle << CHUNK_SHIFT),
                       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP   3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
        struct page *page = virt_to_page(zhdr);

        if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
                return 0; /* can't move middle chunk, it's used */

        if (zhdr->middle_chunks == 0)
                return 0; /* nothing to compact */

        if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
                /* move to the beginning */
                mchunk_memmove(zhdr, ZHDR_CHUNKS);
                zhdr->first_chunks = zhdr->middle_chunks;
                zhdr->middle_chunks = 0;
                zhdr->start_middle = 0;
                zhdr->first_num++;
                return 1;
        }

        /*
         * moving data is expensive, so let's only do that if
         * there's substantial gain (at least BIG_CHUNK_GAP chunks)
         */
        if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
            zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
                        BIG_CHUNK_GAP) {
                mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
                return 1;
        } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
                   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
                                        + zhdr->middle_chunks) >=
                        BIG_CHUNK_GAP) {
                unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
                        zhdr->middle_chunks;
                mchunk_memmove(zhdr, new_start);
                zhdr->start_middle = new_start;
                return 1;
        }

        return 0;
}

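/*
 * For example (hypothetical layout, ZHDR_CHUNKS == 1): with first_chunks == 4
 * and a middle buddy at start_middle == 10, the gap between the first and
 * middle buddies is 10 - (4 + 1) = 5 chunks, which is >= BIG_CHUNK_GAP, so
 * the middle buddy is memmove'd down to chunk 5 and start_middle becomes 5.
 * Had the gap been only 2 chunks, the page would be left as is, since the
 * memmove would cost more than the space it recovers.
 */
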
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
        struct z3fold_pool *pool = zhdr->pool;
        struct page *page;
        struct list_head *unbuddied;
        int fchunks;

        page = virt_to_page(zhdr);
        if (locked)
                WARN_ON(z3fold_page_trylock(zhdr));
        else
                z3fold_page_lock(zhdr);
        if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
                z3fold_page_unlock(zhdr);
                return;
        }
        spin_lock(&pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
                atomic64_dec(&pool->pages_nr);
                return;
        }

        z3fold_compact_page(zhdr);
        unbuddied = get_cpu_ptr(pool->unbuddied);
        fchunks = num_free_chunks(zhdr);
        if (fchunks < NCHUNKS &&
            (!zhdr->first_chunks || !zhdr->middle_chunks ||
                        !zhdr->last_chunks)) {
                /* the page's not completely free and it's unbuddied */
                spin_lock(&pool->lock);
                list_add(&zhdr->buddy, &unbuddied[fchunks]);
                spin_unlock(&pool->lock);
                zhdr->cpu = smp_processor_id();
        }
        put_cpu_ptr(pool->unbuddied);
        z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
        struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
                                                work);

        do_compact_page(zhdr, false);
}


/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:       pool name
 * @gfp:        gfp flags when allocating the z3fold pool structure
 * @ops:        user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
                const struct z3fold_ops *ops)
{
        struct z3fold_pool *pool = NULL;
        int i, cpu;

        pool = kzalloc(sizeof(struct z3fold_pool), gfp);
        if (!pool)
                goto out;
        spin_lock_init(&pool->lock);
        spin_lock_init(&pool->stale_lock);
        pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
        if (!pool->unbuddied)
                goto out_pool;
        for_each_possible_cpu(cpu) {
                struct list_head *unbuddied =
                                per_cpu_ptr(pool->unbuddied, cpu);
                for_each_unbuddied_list(i, 0)
                        INIT_LIST_HEAD(&unbuddied[i]);
        }
        INIT_LIST_HEAD(&pool->lru);
        INIT_LIST_HEAD(&pool->stale);
        atomic64_set(&pool->pages_nr, 0);
        pool->name = name;
        pool->compact_wq = create_singlethread_workqueue(pool->name);
        if (!pool->compact_wq)
                goto out_unbuddied;
        pool->release_wq = create_singlethread_workqueue(pool->name);
        if (!pool->release_wq)
                goto out_wq;
        INIT_WORK(&pool->work, free_pages_work);
        pool->ops = ops;
        return pool;

out_wq:
        destroy_workqueue(pool->compact_wq);
out_unbuddied:
        free_percpu(pool->unbuddied);
out_pool:
        kfree(pool);
out:
        return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:       the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
        destroy_workqueue(pool->release_wq);
        destroy_workqueue(pool->compact_wq);
        free_percpu(pool->unbuddied);
        kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:       z3fold pool from which to allocate
 * @size:       size in bytes of the desired allocation
 * @gfp:        gfp flags used if the pool needs to grow
 * @handle:     handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        int chunks = 0, i, freechunks;
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        enum buddy bud;
        bool can_sleep = (gfp & __GFP_RECLAIM) == __GFP_RECLAIM;

        if (!size || (gfp & __GFP_HIGHMEM))
                return -EINVAL;

        if (size > PAGE_SIZE)
                return -ENOSPC;

        if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
                bud = HEADLESS;
        else {
                struct list_head *unbuddied;
                chunks = size_to_chunks(size);

lookup:
                /* First, try to find an unbuddied z3fold page. */
                unbuddied = get_cpu_ptr(pool->unbuddied);
                for_each_unbuddied_list(i, chunks) {
                        struct list_head *l = &unbuddied[i];

                        zhdr = list_first_entry_or_null(READ_ONCE(l),
                                                struct z3fold_header, buddy);

                        if (!zhdr)
                                continue;

                        /* Re-check under lock. */
                        spin_lock(&pool->lock);
                        l = &unbuddied[i];
                        if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
                                        struct z3fold_header, buddy)) ||
                            !z3fold_page_trylock(zhdr)) {
                                spin_unlock(&pool->lock);
                                put_cpu_ptr(pool->unbuddied);
                                goto lookup;
                        }
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
                        spin_unlock(&pool->lock);

                        page = virt_to_page(zhdr);
                        if (test_bit(NEEDS_COMPACTING, &page->private)) {
                                z3fold_page_unlock(zhdr);
                                zhdr = NULL;
                                put_cpu_ptr(pool->unbuddied);
                                if (can_sleep)
                                        cond_resched();
                                goto lookup;
                        }

                        /*
                         * this page could not be removed from its unbuddied
                         * list while pool lock was held, and then we've taken
                         * page lock so kref_put could not be called before
                         * we got here, so it's safe to just call kref_get()
                         */
                        kref_get(&zhdr->refcount);
                        break;
                }
                put_cpu_ptr(pool->unbuddied);

                if (zhdr) {
                        if (zhdr->first_chunks == 0) {
                                if (zhdr->middle_chunks != 0 &&
                                    chunks >= zhdr->start_middle)
                                        bud = LAST;
                                else
                                        bud = FIRST;
                        } else if (zhdr->last_chunks == 0)
                                bud = LAST;
                        else if (zhdr->middle_chunks == 0)
                                bud = MIDDLE;
                        else {
                                if (kref_put(&zhdr->refcount,
                                             release_z3fold_page_locked))
                                        atomic64_dec(&pool->pages_nr);
                                else
                                        z3fold_page_unlock(zhdr);
                                pr_err("No free chunks in unbuddied\n");
                                WARN_ON(1);
                                goto lookup;
                        }
                        goto found;
                }
                bud = FIRST;
        }

        page = NULL;
        if (can_sleep) {
                spin_lock(&pool->stale_lock);
                zhdr = list_first_entry_or_null(&pool->stale,
                                                struct z3fold_header, buddy);
                /*
                 * Before allocating a page, let's see if we can take one from
                 * the stale pages list. cancel_work_sync() can sleep so we
                 * limit this case to the contexts where we can sleep
                 */
                if (zhdr) {
                        list_del(&zhdr->buddy);
                        spin_unlock(&pool->stale_lock);
                        cancel_work_sync(&zhdr->work);
                        page = virt_to_page(zhdr);
                } else {
                        spin_unlock(&pool->stale_lock);
                }
        }
        if (!page)
                page = alloc_page(gfp);

        if (!page)
                return -ENOMEM;

        atomic64_inc(&pool->pages_nr);
        zhdr = init_z3fold_page(page, pool);

        if (bud == HEADLESS) {
                set_bit(PAGE_HEADLESS, &page->private);
                goto headless;
        }
        z3fold_page_lock(zhdr);

found:
        if (bud == FIRST)
                zhdr->first_chunks = chunks;
        else if (bud == LAST)
                zhdr->last_chunks = chunks;
        else {
                zhdr->middle_chunks = chunks;
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
        }

        if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
                        zhdr->middle_chunks == 0) {
                struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

                /* Add to unbuddied list */
                freechunks = num_free_chunks(zhdr);
                spin_lock(&pool->lock);
                list_add(&zhdr->buddy, &unbuddied[freechunks]);
                spin_unlock(&pool->lock);
                zhdr->cpu = smp_processor_id();
                put_cpu_ptr(pool->unbuddied);
        }

headless:
        spin_lock(&pool->lock);
        /* Add/move z3fold page to beginning of LRU */
        if (!list_empty(&page->lru))
                list_del(&page->lru);

        list_add(&page->lru, &pool->lru);

        *handle = encode_handle(zhdr, bud);
        spin_unlock(&pool->lock);
        if (bud != HEADLESS)
                z3fold_page_unlock(zhdr);

        return 0;
}

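/*
 * Note on the HEADLESS path above: an allocation bigger than
 * PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE leaves no room for the header
 * plus another buddy, so the page is handed out whole, with no header at
 * all. In the 4 KiB example above that threshold is 4096 - 64 - 64 = 3968
 * bytes: a 4000-byte allocation gets a headless page, while a 3900-byte one
 * gets a FIRST buddy of size_to_chunks(3900) == 61 chunks.
 */
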
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:       pool in which the allocation resided
 * @handle:     handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0.  The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy bud;

        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private)) {
                /* HEADLESS page stored */
                bud = HEADLESS;
        } else {
                z3fold_page_lock(zhdr);
                bud = handle_to_buddy(handle);

                switch (bud) {
                case FIRST:
                        zhdr->first_chunks = 0;
                        break;
                case MIDDLE:
                        zhdr->middle_chunks = 0;
                        zhdr->start_middle = 0;
                        break;
                case LAST:
                        zhdr->last_chunks = 0;
                        break;
                default:
                        pr_err("%s: unknown bud %d\n", __func__, bud);
                        WARN_ON(1);
                        z3fold_page_unlock(zhdr);
                        return;
                }
        }

        if (bud == HEADLESS) {
                spin_lock(&pool->lock);
                list_del(&page->lru);
                spin_unlock(&pool->lock);
                free_z3fold_page(page);
                atomic64_dec(&pool->pages_nr);
                return;
        }

        if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
                atomic64_dec(&pool->pages_nr);
                return;
        }
        if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
                z3fold_page_unlock(zhdr);
                return;
        }
        if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
                spin_lock(&pool->lock);
                list_del_init(&zhdr->buddy);
                spin_unlock(&pool->lock);
                zhdr->cpu = -1;
                kref_get(&zhdr->refcount);
                do_compact_page(zhdr, true);
                return;
        }
        kref_get(&zhdr->refcount);
        queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
        z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:       pool from which a page will attempt to be evicted
 * @retries:    number of pages on the LRU list for which eviction will
 *              be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
        int i, ret = 0;
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        struct list_head *pos;
        unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

        spin_lock(&pool->lock);
        if (!pool->ops || !pool->ops->evict || retries == 0) {
                spin_unlock(&pool->lock);
                return -EINVAL;
        }
        for (i = 0; i < retries; i++) {
                if (list_empty(&pool->lru)) {
                        spin_unlock(&pool->lock);
                        return -EINVAL;
                }
                list_for_each_prev(pos, &pool->lru) {
                        page = list_entry(pos, struct page, lru);
                        if (test_bit(PAGE_HEADLESS, &page->private))
                                /* candidate found */
                                break;

                        zhdr = page_address(page);
                        if (!z3fold_page_trylock(zhdr))
                                continue; /* can't evict at this point */
                        kref_get(&zhdr->refcount);
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
                }

                list_del_init(&page->lru);
                spin_unlock(&pool->lock);

                if (!test_bit(PAGE_HEADLESS, &page->private)) {
                        /*
                         * We need to encode the handles before unlocking,
                         * since we can race with free that will set
                         * (first|last)_chunks to 0
                         */
                        first_handle = 0;
                        last_handle = 0;
                        middle_handle = 0;
                        if (zhdr->first_chunks)
                                first_handle = encode_handle(zhdr, FIRST);
                        if (zhdr->middle_chunks)
                                middle_handle = encode_handle(zhdr, MIDDLE);
                        if (zhdr->last_chunks)
                                last_handle = encode_handle(zhdr, LAST);
                        /*
                         * it's safe to unlock here because we hold a
                         * reference to this page
                         */
                        z3fold_page_unlock(zhdr);
                } else {
                        first_handle = encode_handle(zhdr, HEADLESS);
                        last_handle = middle_handle = 0;
                }

                /* Issue the eviction callback(s) */
                if (middle_handle) {
                        ret = pool->ops->evict(pool, middle_handle);
                        if (ret)
                                goto next;
                }
                if (first_handle) {
                        ret = pool->ops->evict(pool, first_handle);
                        if (ret)
                                goto next;
                }
                if (last_handle) {
                        ret = pool->ops->evict(pool, last_handle);
                        if (ret)
                                goto next;
                }
next:
                spin_lock(&pool->lock);
                if (test_bit(PAGE_HEADLESS, &page->private)) {
                        if (ret == 0) {
                                spin_unlock(&pool->lock);
                                free_z3fold_page(page);
                                return 0;
                        }
                } else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
                        atomic64_dec(&pool->pages_nr);
                        spin_unlock(&pool->lock);
                        return 0;
                }

                /*
                 * Add to the beginning of LRU.
                 * Pool lock has to be kept here to ensure the page has
                 * not already been released
                 */
                list_add(&page->lru, &pool->lru);
        }
        spin_unlock(&pool->lock);
        return -EAGAIN;
}
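/*
 * A minimal sketch of an eviction handler as z3fold_reclaim_page() expects
 * it (hypothetical user; a real handler, such as zswap's, would also write
 * the object back to its backing store before freeing):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		// ... write back or discard the object behind handle ...
 *		z3fold_free(pool, handle);	// mandatory on success
 *		return 0;	// 0 == evicted; non-zero == retry later
 *	}
 */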

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:       pool in which the allocation resides
 * @handle:     handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        void *addr;
        enum buddy buddy;

        zhdr = handle_to_z3fold_header(handle);
        addr = zhdr;
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private))
                goto out;

        z3fold_page_lock(zhdr);
        buddy = handle_to_buddy(handle);
        switch (buddy) {
        case FIRST:
                addr += ZHDR_SIZE_ALIGNED;
                break;
        case MIDDLE:
                addr += zhdr->start_middle << CHUNK_SHIFT;
                set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
                break;
        case LAST:
                addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
                break;
        default:
                pr_err("unknown buddy id %d\n", buddy);
                WARN_ON(1);
                addr = NULL;
                break;
        }

        z3fold_page_unlock(zhdr);
out:
        return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:       pool in which the allocation resides
 * @handle:     handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy buddy;

        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private))
                return;

        z3fold_page_lock(zhdr);
        buddy = handle_to_buddy(handle);
        if (buddy == MIDDLE)
                clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:       pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
        return atomic64_read(&pool->pages_nr);
}

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
        if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
                return pool->zpool_ops->evict(pool->zpool, handle);
        else
                return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
        .evict =        z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
                               const struct zpool_ops *zpool_ops,
                               struct zpool *zpool)
{
        struct z3fold_pool *pool;

        pool = z3fold_create_pool(name, gfp,
                                zpool_ops ? &z3fold_zpool_ops : NULL);
        if (pool) {
                pool->zpool = zpool;
                pool->zpool_ops = zpool_ops;
        }
        return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
        z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
        z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
                        unsigned int *reclaimed)
{
        unsigned int total = 0;
        int ret = -EINVAL;

        while (total < pages) {
                ret = z3fold_reclaim_page(pool, 8);
                if (ret < 0)
                        break;
                total++;
        }

        if (reclaimed)
                *reclaimed = total;

        return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
                        enum zpool_mapmode mm)
{
        return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
        z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
        return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
        .type =         "z3fold",
        .owner =        THIS_MODULE,
        .create =       z3fold_zpool_create,
        .destroy =      z3fold_zpool_destroy,
        .malloc =       z3fold_zpool_malloc,
        .free =         z3fold_zpool_free,
        .shrink =       z3fold_zpool_shrink,
        .map =          z3fold_zpool_map,
        .unmap =        z3fold_zpool_unmap,
        .total_size =   z3fold_zpool_total_size,
};
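
/*
 * Since z3fold exports no API of its own, a kernel user reaches it through
 * the generic zpool layer. A minimal sketch (error handling elided; pool
 * name, sizes and the compressed_data buffer are made up for illustration):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "demo", GFP_KERNEL,
 *					     NULL);
 *	unsigned long handle;
 *	void *obj;
 *
 *	zpool_malloc(zp, 1000, GFP_KERNEL, &handle);
 *	obj = zpool_map_handle(zp, handle, ZPOOL_MM_RW);
 *	memcpy(obj, compressed_data, 1000);	// hypothetical source buffer
 *	zpool_unmap_handle(zp, handle);
 *	...
 *	zpool_free(zp, handle);
 *	zpool_destroy_pool(zp);
 *
 * This mirrors the path zswap takes when configured with zpool=z3fold.
 */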

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
        /* Make sure the z3fold header is not larger than the page size */
        BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
        zpool_register_driver(&z3fold_zpool_driver);

        return 0;
}

static void __exit exit_z3fold(void)
{
        zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");