/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */
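
/*
 * Illustrative consumer sketch (not part of this file): since z3fold is
 * reachable only through the zpool API, a user would create and populate
 * a pool roughly as follows. The names "my_evict", "src" and "len" are
 * hypothetical placeholders.
 *
 *	static int my_evict(struct zpool *pool, unsigned long handle)
 *	{
 *		...write the object back to storage, then release it...
 *		zpool_free(pool, handle);
 *		return 0;
 *	}
 *
 *	static const struct zpool_ops my_zpool_ops = { .evict = my_evict };
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool", GFP_KERNEL,
 *					     &my_zpool_ops);
 *	unsigned long handle;
 *
 *	if (zp && zpool_malloc(zp, len, GFP_KERNEL, &handle) == 0) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memcpy(dst, src, len);
 *		zpool_unmap_handle(zp, handle);
 *	}
 */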

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
        int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
        HEADLESS = 0,
        FIRST,
        MIDDLE,
        LAST,
        BUDDIES_MAX
};

/*
 * struct z3fold_header - z3fold page metadata occupying the first chunks of
 *                      each z3fold page, except for HEADLESS pages
 * @buddy:              links the z3fold page into the relevant list in the
 *                      pool
 * @page_lock:          per-page lock
 * @refcount:           reference count for the z3fold page
 * @work:               work_struct for page layout optimization
 * @pool:               pointer to the pool which this page belongs to
 * @cpu:                CPU which this page "belongs" to
 * @first_chunks:       the size of the first buddy in chunks, 0 if free
 * @middle_chunks:      the size of the middle buddy in chunks, 0 if free
 * @last_chunks:        the size of the last buddy in chunks, 0 if free
 * @start_middle:       the starting chunk of the middle buddy
 * @first_num:          the starting number (for the first handle)
 */
struct z3fold_header {
        struct list_head buddy;
        spinlock_t page_lock;
        struct kref refcount;
        struct work_struct work;
        struct z3fold_pool *pool;
        short cpu;
        unsigned short first_chunks;
        unsigned short middle_chunks;
        unsigned short last_chunks;
        unsigned short start_middle;
        unsigned short first_num:2;
};

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which is
 * the maximum number of free chunks in a z3fold page; there will likewise be
 * 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER   6

#define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE      (1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS     (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS    (PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS         ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK      (0x3)

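/*
 * Worked example (illustrative): with PAGE_SIZE == 4096 and
 * NCHUNKS_ORDER == 6,
 *
 *	CHUNK_SHIFT       = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *	TOTAL_CHUNKS      = 4096 >> 6 = 64
 *	ZHDR_SIZE_ALIGNED = round_up(sizeof(struct z3fold_header), 64),
 *	                    typically 64, so ZHDR_CHUNKS = 1
 *	NCHUNKS           = (4096 - 64) >> 6 = 63
 *
 * and size_to_chunks(100) below evaluates to (100 + 63) >> 6 = 2 chunks.
 */
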
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:       pool name
 * @lock:       protects pool unbuddied/lru lists
 * @stale_lock: protects pool stale page list
 * @unbuddied:  per-cpu array of lists tracking z3fold pages that contain
 *              two or fewer buddies; the list each z3fold page is added to
 *              depends on the size of its free region.
 * @lru:        list tracking the z3fold pages in LRU order by most recently
 *              added buddy.
 * @stale:      list of pages marked for freeing
 * @pages_nr:   number of z3fold pages in the pool.
 * @ops:        pointer to a structure of user-defined operations specified at
 *              pool creation time.
 * @zpool:      zpool driving this pool
 * @zpool_ops:  zpool operations structure with an evict callback
 * @compact_wq: workqueue for page layout background optimization
 * @release_wq: workqueue for safe page release
 * @work:       work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
        const char *name;
        spinlock_t lock;
        spinlock_t stale_lock;
        struct list_head *unbuddied;
        struct list_head lru;
        struct list_head stale;
        atomic64_t pages_nr;
        const struct z3fold_ops *ops;
        struct zpool *zpool;
        const struct zpool_ops *zpool_ops;
        struct workqueue_struct *compact_wq;
        struct workqueue_struct *release_wq;
        struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
        PAGE_HEADLESS = 0,
        MIDDLE_CHUNK_MAPPED,
        NEEDS_COMPACTING,
        PAGE_STALE
};

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
        for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
                                        struct z3fold_pool *pool)
{
        struct z3fold_header *zhdr = page_address(page);

        INIT_LIST_HEAD(&page->lru);
        clear_bit(PAGE_HEADLESS, &page->private);
        clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        clear_bit(PAGE_STALE, &page->private);

        spin_lock_init(&zhdr->page_lock);
        kref_init(&zhdr->refcount);
        zhdr->first_chunks = 0;
        zhdr->middle_chunks = 0;
        zhdr->last_chunks = 0;
        zhdr->first_num = 0;
        zhdr->start_middle = 0;
        zhdr->cpu = -1;
        zhdr->pool = pool;
        INIT_LIST_HEAD(&zhdr->buddy);
        INIT_WORK(&zhdr->work, compact_page_work);
        return zhdr;
}

/* Frees the underlying page of a z3fold page */
static void free_z3fold_page(struct page *page)
{
        __free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
        spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
        return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
        spin_unlock(&zhdr->page_lock);
}

/*
 * Encodes the handle of a particular buddy within a z3fold page.
 * The pool lock should be held as this function accesses first_num.
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
        unsigned long handle;

        handle = (unsigned long)zhdr;
        if (bud != HEADLESS)
                handle += (bud + zhdr->first_num) & BUDDY_MASK;
        return handle;
}

/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
        return (struct z3fold_header *)(handle & PAGE_MASK);
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
        struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
        return (handle - zhdr->first_num) & BUDDY_MASK;
}
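
/*
 * Worked example (illustrative, with a made-up page address): for a
 * z3fold page at 0xffff880012340000 with first_num == 1,
 * encode_handle(zhdr, LAST) gives 0xffff880012340000 + ((3 + 1) & 0x3),
 * i.e. the page address itself, and handle_to_buddy() then recovers
 * (0 - 1) & 0x3 == 3 == LAST. This is the wrap-around case described
 * above: (handle & BUDDY_MASK) may be smaller than first_num, yet the
 * masking still yields the correct buddy number.
 */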

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
        struct page *page = virt_to_page(zhdr);
        struct z3fold_pool *pool = zhdr->pool;

        WARN_ON(!list_empty(&zhdr->buddy));
        set_bit(PAGE_STALE, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        spin_lock(&pool->lock);
        if (!list_empty(&page->lru))
                list_del(&page->lru);
        spin_unlock(&pool->lock);
        if (locked)
                z3fold_page_unlock(zhdr);
        spin_lock(&pool->stale_lock);
        list_add(&zhdr->buddy, &pool->stale);
        queue_work(pool->release_wq, &pool->work);
        spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
                        release_z3fold_page(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        __release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        spin_lock(&zhdr->pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&zhdr->pool->lock);

        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
        struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

        spin_lock(&pool->stale_lock);
        while (!list_empty(&pool->stale)) {
                struct z3fold_header *zhdr = list_first_entry(&pool->stale,
                                                struct z3fold_header, buddy);
                struct page *page = virt_to_page(zhdr);

                list_del(&zhdr->buddy);
                if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
                        continue;
                spin_unlock(&pool->stale_lock);
                cancel_work_sync(&zhdr->work);
                free_z3fold_page(page);
                cond_resched();
                spin_lock(&pool->stale_lock);
        }
        spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
        int nfree;
        /*
         * If there is a middle object, pick up the bigger free space
         * either before or after it. Otherwise just subtract the number
         * of chunks occupied by the first and the last objects.
         */
        if (zhdr->middle_chunks != 0) {
                int nfree_before = zhdr->first_chunks ?
                        0 : zhdr->start_middle - ZHDR_CHUNKS;
                int nfree_after = zhdr->last_chunks ?
                        0 : TOTAL_CHUNKS -
                                (zhdr->start_middle + zhdr->middle_chunks);
                nfree = max(nfree_before, nfree_after);
        } else
                nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
        return nfree;
}
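
/*
 * Example (illustrative): with TOTAL_CHUNKS == 64, first_chunks == 10,
 * a middle buddy of 8 chunks at start_middle == 20 and a free last buddy,
 * nfree_before is 0 (the first buddy is in use) and nfree_after is
 * 64 - (20 + 8) = 36, so num_free_chunks() returns 36.
 */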

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
                                unsigned short dst_chunk)
{
        void *beg = zhdr;
        return memmove(beg + (dst_chunk << CHUNK_SHIFT),
                       beg + (zhdr->start_middle << CHUNK_SHIFT),
                       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP   3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
        struct page *page = virt_to_page(zhdr);

        if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
                return 0; /* can't move middle chunk, it's used */

        if (zhdr->middle_chunks == 0)
                return 0; /* nothing to compact */

        if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
                /* move to the beginning */
                mchunk_memmove(zhdr, ZHDR_CHUNKS);
                zhdr->first_chunks = zhdr->middle_chunks;
                zhdr->middle_chunks = 0;
                zhdr->start_middle = 0;
                zhdr->first_num++;
                return 1;
        }

        /*
         * moving data is expensive, so let's only do that if
         * there's substantial gain (at least BIG_CHUNK_GAP chunks)
         */
        if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
            zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
                        BIG_CHUNK_GAP) {
                mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
                return 1;
        } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
                   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
                                        + zhdr->middle_chunks) >=
                        BIG_CHUNK_GAP) {
                unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
                        zhdr->middle_chunks;
                mchunk_memmove(zhdr, new_start);
                zhdr->start_middle = new_start;
                return 1;
        }

        return 0;
}
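
/*
 * Example (illustrative): continuing the layout above (first_chunks == 10,
 * middle buddy of 8 chunks at start_middle == 20, last buddy free, and
 * ZHDR_CHUNKS == 1), the gap below the middle buddy is
 * 20 - (10 + 1) = 9 >= BIG_CHUNK_GAP, so z3fold_compact_page() moves the
 * middle buddy down to start_middle == 11, growing the free region after
 * it to 64 - (11 + 8) = 45 chunks.
 */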

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
        struct z3fold_pool *pool = zhdr->pool;
        struct page *page;
        struct list_head *unbuddied;
        int fchunks;

        page = virt_to_page(zhdr);
        if (locked)
                WARN_ON(z3fold_page_trylock(zhdr));
        else
                z3fold_page_lock(zhdr);
        if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
                z3fold_page_unlock(zhdr);
                return;
        }
        spin_lock(&pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
                atomic64_dec(&pool->pages_nr);
                return;
        }

        z3fold_compact_page(zhdr);
        unbuddied = get_cpu_ptr(pool->unbuddied);
        fchunks = num_free_chunks(zhdr);
        if (fchunks < NCHUNKS &&
            (!zhdr->first_chunks || !zhdr->middle_chunks ||
                        !zhdr->last_chunks)) {
                /* the page's not completely free and it's unbuddied */
                spin_lock(&pool->lock);
                list_add(&zhdr->buddy, &unbuddied[fchunks]);
                spin_unlock(&pool->lock);
                zhdr->cpu = smp_processor_id();
        }
        put_cpu_ptr(pool->unbuddied);
        z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
        struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
                                                work);

        do_compact_page(zhdr, false);
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:       pool name
 * @gfp:        gfp flags when allocating the z3fold pool structure
 * @ops:        user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
                const struct z3fold_ops *ops)
{
        struct z3fold_pool *pool = NULL;
        int i, cpu;

        pool = kzalloc(sizeof(struct z3fold_pool), gfp);
        if (!pool)
                goto out;
        spin_lock_init(&pool->lock);
        spin_lock_init(&pool->stale_lock);
        pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
        if (!pool->unbuddied)
                goto out_pool;
        for_each_possible_cpu(cpu) {
                struct list_head *unbuddied =
                                per_cpu_ptr(pool->unbuddied, cpu);
                for_each_unbuddied_list(i, 0)
                        INIT_LIST_HEAD(&unbuddied[i]);
        }
        INIT_LIST_HEAD(&pool->lru);
        INIT_LIST_HEAD(&pool->stale);
        atomic64_set(&pool->pages_nr, 0);
        pool->name = name;
        pool->compact_wq = create_singlethread_workqueue(pool->name);
        if (!pool->compact_wq)
                goto out_unbuddied;
        pool->release_wq = create_singlethread_workqueue(pool->name);
        if (!pool->release_wq)
                goto out_wq;
        INIT_WORK(&pool->work, free_pages_work);
        pool->ops = ops;
        return pool;

out_wq:
        destroy_workqueue(pool->compact_wq);
out_unbuddied:
        free_percpu(pool->unbuddied);
out_pool:
        kfree(pool);
out:
        return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:       the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
        destroy_workqueue(pool->release_wq);
        destroy_workqueue(pool->compact_wq);
        free_percpu(pool->unbuddied);
        kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:       z3fold pool from which to allocate
 * @size:       size in bytes of the desired allocation
 * @gfp:        gfp flags used if the pool needs to grow
 * @handle:     handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the requested size is larger than
 * what a z3fold page can accommodate, or -ENOMEM if the pool was unable to
 * allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        int chunks = 0, i, freechunks;
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        enum buddy bud;
        bool can_sleep = (gfp & __GFP_RECLAIM) == __GFP_RECLAIM;

        if (!size || (gfp & __GFP_HIGHMEM))
                return -EINVAL;

        if (size > PAGE_SIZE)
                return -ENOSPC;

        if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
                bud = HEADLESS;
        else {
                struct list_head *unbuddied;
                chunks = size_to_chunks(size);

lookup:
                /* First, try to find an unbuddied z3fold page. */
                unbuddied = get_cpu_ptr(pool->unbuddied);
                for_each_unbuddied_list(i, chunks) {
                        struct list_head *l = &unbuddied[i];

                        zhdr = list_first_entry_or_null(READ_ONCE(l),
                                                struct z3fold_header, buddy);

                        if (!zhdr)
                                continue;

                        /* Re-check under lock. */
                        spin_lock(&pool->lock);
                        l = &unbuddied[i];
                        if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
                                        struct z3fold_header, buddy)) ||
                            !z3fold_page_trylock(zhdr)) {
                                spin_unlock(&pool->lock);
                                put_cpu_ptr(pool->unbuddied);
                                goto lookup;
                        }
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
                        spin_unlock(&pool->lock);

                        page = virt_to_page(zhdr);
                        if (test_bit(NEEDS_COMPACTING, &page->private)) {
                                z3fold_page_unlock(zhdr);
                                zhdr = NULL;
                                put_cpu_ptr(pool->unbuddied);
                                if (can_sleep)
                                        cond_resched();
                                goto lookup;
                        }

                        /*
                         * this page could not be removed from its unbuddied
                         * list while pool lock was held, and then we've taken
                         * page lock so kref_put could not be called before
                         * we got here, so it's safe to just call kref_get()
                         */
                        kref_get(&zhdr->refcount);
                        break;
                }
                put_cpu_ptr(pool->unbuddied);

                if (zhdr) {
                        if (zhdr->first_chunks == 0) {
                                if (zhdr->middle_chunks != 0 &&
                                    chunks >= zhdr->start_middle)
                                        bud = LAST;
                                else
                                        bud = FIRST;
                        } else if (zhdr->last_chunks == 0)
                                bud = LAST;
                        else if (zhdr->middle_chunks == 0)
                                bud = MIDDLE;
                        else {
                                if (kref_put(&zhdr->refcount,
                                             release_z3fold_page_locked))
                                        atomic64_dec(&pool->pages_nr);
                                else
                                        z3fold_page_unlock(zhdr);
                                pr_err("No free chunks in unbuddied\n");
                                WARN_ON(1);
                                goto lookup;
                        }
                        goto found;
                }
                bud = FIRST;
        }

        spin_lock(&pool->stale_lock);
        zhdr = list_first_entry_or_null(&pool->stale,
                                        struct z3fold_header, buddy);
        /*
         * Before allocating a page, let's see if we can take one from the
         * stale pages list. cancel_work_sync() can sleep so we must make
         * sure it won't be called in case we're in atomic context.
         */
        if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
                list_del(&zhdr->buddy);
                spin_unlock(&pool->stale_lock);
                if (can_sleep)
                        cancel_work_sync(&zhdr->work);
                page = virt_to_page(zhdr);
        } else {
                spin_unlock(&pool->stale_lock);
                page = alloc_page(gfp);
        }

        if (!page)
                return -ENOMEM;

        atomic64_inc(&pool->pages_nr);
        zhdr = init_z3fold_page(page, pool);

        if (bud == HEADLESS) {
                set_bit(PAGE_HEADLESS, &page->private);
                goto headless;
        }
        z3fold_page_lock(zhdr);

found:
        if (bud == FIRST)
                zhdr->first_chunks = chunks;
        else if (bud == LAST)
                zhdr->last_chunks = chunks;
        else {
                zhdr->middle_chunks = chunks;
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
        }

        if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
                        zhdr->middle_chunks == 0) {
                struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

                /* Add to unbuddied list */
                freechunks = num_free_chunks(zhdr);
                spin_lock(&pool->lock);
                list_add(&zhdr->buddy, &unbuddied[freechunks]);
                spin_unlock(&pool->lock);
                zhdr->cpu = smp_processor_id();
                put_cpu_ptr(pool->unbuddied);
        }

headless:
        spin_lock(&pool->lock);
        /* Add/move z3fold page to beginning of LRU */
        if (!list_empty(&page->lru))
                list_del(&page->lru);

        list_add(&page->lru, &pool->lru);

        *handle = encode_handle(zhdr, bud);
        spin_unlock(&pool->lock);
        if (bud != HEADLESS)
                z3fold_page_unlock(zhdr);

        return 0;
}
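
/*
 * Example (illustrative): allocating 100 bytes (2 chunks) when the lookup
 * above finds an unbuddied page whose first buddy is in use and whose
 * middle and last buddies are free: bud becomes LAST, zhdr->last_chunks is
 * set to 2, the page is requeued on the unbuddied list indexed by its new
 * free-chunk count, and the returned handle encodes the page address plus
 * ((LAST + first_num) & BUDDY_MASK).
 */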

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:       pool in which the allocation resided
 * @handle:     handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by a reference to the page still being held by the
 * reclaim code, this function only sets the relevant chunk count to 0.  The
 * page is actually freed once all buddies are evicted (see
 * z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy bud;

        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private)) {
                /* HEADLESS page stored */
                bud = HEADLESS;
        } else {
                z3fold_page_lock(zhdr);
                bud = handle_to_buddy(handle);

                switch (bud) {
                case FIRST:
                        zhdr->first_chunks = 0;
                        break;
                case MIDDLE:
                        zhdr->middle_chunks = 0;
                        zhdr->start_middle = 0;
                        break;
                case LAST:
                        zhdr->last_chunks = 0;
                        break;
                default:
                        pr_err("%s: unknown bud %d\n", __func__, bud);
                        WARN_ON(1);
                        z3fold_page_unlock(zhdr);
                        return;
                }
        }

        if (bud == HEADLESS) {
                spin_lock(&pool->lock);
                list_del(&page->lru);
                spin_unlock(&pool->lock);
                free_z3fold_page(page);
                atomic64_dec(&pool->pages_nr);
                return;
        }

        if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
                atomic64_dec(&pool->pages_nr);
                return;
        }
        if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
                z3fold_page_unlock(zhdr);
                return;
        }
        if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
                spin_lock(&pool->lock);
                list_del_init(&zhdr->buddy);
                spin_unlock(&pool->lock);
                zhdr->cpu = -1;
                kref_get(&zhdr->refcount);
                do_compact_page(zhdr, true);
                return;
        }
        kref_get(&zhdr->refcount);
        queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
        z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:       pool from which a page will attempt to be evicted
 * @retries:    number of pages on the LRU list for which eviction will
 *              be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user-defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by a reference to the page still being held by the reclaim
 * code.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
        int i, ret = 0;
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        struct list_head *pos;
        unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

        spin_lock(&pool->lock);
        if (!pool->ops || !pool->ops->evict || retries == 0) {
                spin_unlock(&pool->lock);
                return -EINVAL;
        }
        for (i = 0; i < retries; i++) {
                if (list_empty(&pool->lru)) {
                        spin_unlock(&pool->lock);
                        return -EINVAL;
                }
                list_for_each_prev(pos, &pool->lru) {
                        page = list_entry(pos, struct page, lru);
                        zhdr = page_address(page);
                        if (test_bit(PAGE_HEADLESS, &page->private))
                                /* candidate found */
                                break;

                        if (!z3fold_page_trylock(zhdr))
                                continue; /* can't evict at this point */
                        kref_get(&zhdr->refcount);
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
                        break;
                }

                list_del_init(&page->lru);
                spin_unlock(&pool->lock);

                if (!test_bit(PAGE_HEADLESS, &page->private)) {
                        /*
                         * We need to encode the handles before unlocking,
                         * since we can race with free that will set
                         * (first|last)_chunks to 0
                         */
                        first_handle = 0;
                        last_handle = 0;
                        middle_handle = 0;
                        if (zhdr->first_chunks)
                                first_handle = encode_handle(zhdr, FIRST);
                        if (zhdr->middle_chunks)
                                middle_handle = encode_handle(zhdr, MIDDLE);
                        if (zhdr->last_chunks)
                                last_handle = encode_handle(zhdr, LAST);
                        /*
                         * it's safe to unlock here because we hold a
                         * reference to this page
                         */
                        z3fold_page_unlock(zhdr);
                } else {
                        first_handle = encode_handle(zhdr, HEADLESS);
                        last_handle = middle_handle = 0;
                }

                /* Issue the eviction callback(s) */
                if (middle_handle) {
                        ret = pool->ops->evict(pool, middle_handle);
                        if (ret)
                                goto next;
                }
                if (first_handle) {
                        ret = pool->ops->evict(pool, first_handle);
                        if (ret)
                                goto next;
                }
                if (last_handle) {
                        ret = pool->ops->evict(pool, last_handle);
                        if (ret)
                                goto next;
                }
next:
                spin_lock(&pool->lock);
                if (test_bit(PAGE_HEADLESS, &page->private)) {
                        if (ret == 0) {
                                spin_unlock(&pool->lock);
                                free_z3fold_page(page);
                                return 0;
                        }
                } else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
                        atomic64_dec(&pool->pages_nr);
                        spin_unlock(&pool->lock);
                        return 0;
                }

                /*
                 * Add to the beginning of LRU.
                 * Pool lock has to be kept here to ensure the page has
                 * not already been released
                 */
                list_add(&page->lru, &pool->lru);
        }
        spin_unlock(&pool->lock);
        return -EAGAIN;
}
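
/*
 * Sketch of an eviction handler obeying the contract above (hypothetical;
 * the only in-tree consumer goes through the zpool evict bridge further
 * down). On success the handler must have called z3fold_free() on the
 * handle; on failure it returns non-zero and z3fold_reclaim_page() moves
 * on to the next LRU page. "my_write_back" is a made-up helper:
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		int err = my_write_back(pool, handle);
 *
 *		if (err)
 *			return err;
 *		z3fold_free(pool, handle);
 *		return 0;
 *	}
 */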

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:       pool in which the allocation resides
 * @handle:     handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        void *addr;
        enum buddy buddy;

        zhdr = handle_to_z3fold_header(handle);
        addr = zhdr;
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private))
                goto out;

        z3fold_page_lock(zhdr);
        buddy = handle_to_buddy(handle);
        switch (buddy) {
        case FIRST:
                addr += ZHDR_SIZE_ALIGNED;
                break;
        case MIDDLE:
                addr += zhdr->start_middle << CHUNK_SHIFT;
                set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
                break;
        case LAST:
                addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
                break;
        default:
                pr_err("unknown buddy id %d\n", buddy);
                WARN_ON(1);
                addr = NULL;
                break;
        }

        z3fold_page_unlock(zhdr);
out:
        return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:       pool in which the allocation resides
 * @handle:     handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy buddy;

        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private))
                return;

        z3fold_page_lock(zhdr);
        buddy = handle_to_buddy(handle);
        if (buddy == MIDDLE)
                clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:       pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
        return atomic64_read(&pool->pages_nr);
}

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
        if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
                return pool->zpool_ops->evict(pool->zpool, handle);
        else
                return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
        .evict =        z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
                               const struct zpool_ops *zpool_ops,
                               struct zpool *zpool)
{
        struct z3fold_pool *pool;

        pool = z3fold_create_pool(name, gfp,
                                zpool_ops ? &z3fold_zpool_ops : NULL);
        if (pool) {
                pool->zpool = zpool;
                pool->zpool_ops = zpool_ops;
        }
        return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
        z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
        z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
                        unsigned int *reclaimed)
{
        unsigned int total = 0;
        int ret = -EINVAL;

        while (total < pages) {
                ret = z3fold_reclaim_page(pool, 8);
                if (ret < 0)
                        break;
                total++;
        }

        if (reclaimed)
                *reclaimed = total;

        return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
                        enum zpool_mapmode mm)
{
        return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
        z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
        return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
        .type =         "z3fold",
        .owner =        THIS_MODULE,
        .create =       z3fold_zpool_create,
        .destroy =      z3fold_zpool_destroy,
        .malloc =       z3fold_zpool_malloc,
        .free =         z3fold_zpool_free,
        .shrink =       z3fold_zpool_shrink,
        .map =          z3fold_zpool_map,
        .unmap =        z3fold_zpool_unmap,
        .total_size =   z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
        /* Make sure the z3fold header is not larger than the page size */
        BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
        zpool_register_driver(&z3fold_zpool_driver);

        return 0;
}

static void __exit exit_z3fold(void)
{
        zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");