/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
static int zswap_enabled_param_set(const char *,
                                   const struct kernel_param *);
static struct kernel_param_ops zswap_enabled_param_ops = {
        .set = zswap_enabled_param_set,
        .get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
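
/*
 * Example: because the parameter is registered with mode 0644, zswap can
 * be toggled at runtime through the standard module parameter sysfs
 * interface:
 *
 *   echo 1 > /sys/module/zswap/parameters/enabled
 */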

/* Crypto compressor to use */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
                                      const struct kernel_param *);
static struct kernel_param_ops zswap_compressor_param_ops = {
        .set = zswap_compressor_param_set,
        .get = param_get_charp,
        .free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
                &zswap_compressor, 0644);

/* Compressed storage zpool to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static struct kernel_param_ops zswap_zpool_param_ops = {
        .set = zswap_zpool_param_set,
        .get = param_get_charp,
        .free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
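
/*
 * Example configuration at boot, using the standard module parameter
 * syntax (the values shown are the module defaults, except enabled):
 *
 *   zswap.enabled=1 zswap.compressor=lzo zswap.zpool=zbud
 *   zswap.max_pool_percent=20
 */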

/*********************************
* data structures
**********************************/

struct zswap_pool {
        struct zpool *zpool;
        struct crypto_comp * __percpu *tfm;
        struct kref kref;
        struct list_head list;
        struct work_struct work;
        struct hlist_node node;
        char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 */
struct zswap_entry {
        struct rb_node rbnode;
        pgoff_t offset;
        int refcount;
        unsigned int length;
        struct zswap_pool *pool;
        unsigned long handle;
};

struct zswap_header {
        swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
        struct rb_root rbroot;
        spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

/* used by param callback function */
static bool zswap_init_started;

/* fatal error during init */
static bool zswap_init_failed;

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)                                \
        pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
                 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
        .evict = zswap_writeback_entry
};

static bool zswap_is_full(void)
{
        return totalram_pages * zswap_max_pool_percent / 100 <
                        DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
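
/*
 * Worked example for zswap_is_full(): with 4 GiB of RAM (totalram_pages
 * = 1048576 with 4 KiB pages) and the default max_pool_percent of 20,
 * the cap is 1048576 * 20 / 100 = 209715 pages; the pool counts as full
 * once the compressed storage occupies more than that (~819 MiB).
 */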

static void zswap_update_total_size(void)
{
        struct zswap_pool *pool;
        u64 total = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(pool, &zswap_pools, list)
                total += zpool_get_total_size(pool->zpool);
        rcu_read_unlock();

        zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
        zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
        return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
        kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
        struct zswap_entry *entry;
        entry = kmem_cache_alloc(zswap_entry_cache, gfp);
        if (!entry)
                return NULL;
        entry->refcount = 1;
        RB_CLEAR_NODE(&entry->rbnode);
        return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
        kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
        struct rb_node *node = root->rb_node;
        struct zswap_entry *entry;

        while (node) {
                entry = rb_entry(node, struct zswap_entry, rbnode);
                if (entry->offset > offset)
                        node = node->rb_left;
                else if (entry->offset < offset)
                        node = node->rb_right;
                else
                        return entry;
        }
        return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
                        struct zswap_entry **dupentry)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;
        struct zswap_entry *myentry;

        while (*link) {
                parent = *link;
                myentry = rb_entry(parent, struct zswap_entry, rbnode);
                if (myentry->offset > entry->offset)
                        link = &(*link)->rb_left;
                else if (myentry->offset < entry->offset)
                        link = &(*link)->rb_right;
                else {
                        *dupentry = myentry;
                        return -EEXIST;
                }
        }
        rb_link_node(&entry->rbnode, parent, link);
        rb_insert_color(&entry->rbnode, root);
        return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
        if (!RB_EMPTY_NODE(&entry->rbnode)) {
                rb_erase(&entry->rbnode, root);
                RB_CLEAR_NODE(&entry->rbnode);
        }
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
        zpool_free(entry->pool->zpool, entry->handle);
        zswap_pool_put(entry->pool);
        zswap_entry_cache_free(entry);
        atomic_dec(&zswap_stored_pages);
        zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
        entry->refcount++;
}

/*
 * caller must hold the tree lock
 * remove from the tree and free it, if nobody references the entry
 */
static void zswap_entry_put(struct zswap_tree *tree,
                        struct zswap_entry *entry)
{
        int refcount = --entry->refcount;

        BUG_ON(refcount < 0);
        if (refcount == 0) {
                zswap_rb_erase(&tree->rbroot, entry);
                zswap_free_entry(entry);
        }
}
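
/*
 * Reference lifecycle, for illustration: zswap_entry_cache_alloc()
 * returns an entry with refcount 1; that reference belongs to the rbtree
 * once the entry is inserted. Load and writeback take a temporary extra
 * reference (zswap_entry_find_get()) around their work; invalidate drops
 * the tree's initial reference. Whichever path drops the count to zero,
 * under the tree lock, frees the entry via zswap_free_entry().
 */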

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
                                pgoff_t offset)
{
        struct zswap_entry *entry;

        entry = zswap_rb_search(root, offset);
        if (entry)
                zswap_entry_get(entry);

        return entry;
}

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

static int zswap_dstmem_prepare(unsigned int cpu)
{
        u8 *dst;

        dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
        if (!dst) {
                pr_err("can't allocate compressor buffer\n");
                return -ENOMEM;
        }
        per_cpu(zswap_dstmem, cpu) = dst;
        return 0;
}

static int zswap_dstmem_dead(unsigned int cpu)
{
        u8 *dst;

        dst = per_cpu(zswap_dstmem, cpu);
        kfree(dst);
        per_cpu(zswap_dstmem, cpu) = NULL;
        return 0;
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
        struct crypto_comp *tfm;

        if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
                return 0;

        tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
        if (IS_ERR_OR_NULL(tfm)) {
                pr_err("could not alloc crypto comp %s : %ld\n",
                       pool->tfm_name, PTR_ERR(tfm));
                return -ENOMEM;
        }
        *per_cpu_ptr(pool->tfm, cpu) = tfm;
        return 0;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
        struct crypto_comp *tfm;

        tfm = *per_cpu_ptr(pool->tfm, cpu);
        if (!IS_ERR_OR_NULL(tfm))
                crypto_free_comp(tfm);
        *per_cpu_ptr(pool->tfm, cpu) = NULL;
        return 0;
}
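
/*
 * A note on the per-cpu design, as this file uses it: each pool keeps one
 * crypto_comp transform per CPU so a transform is never used from two
 * CPUs at once, and callers pin themselves to a CPU with
 * get_cpu_ptr()/put_cpu_ptr() around each (de)compression. The two-page
 * zswap_dstmem buffer gives the compressor room for output that expands
 * beyond PAGE_SIZE on incompressible data.
 */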

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
        struct zswap_pool *pool;

        pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
        WARN_ONCE(!pool && zswap_has_pool,
                  "%s: no page storage pool!\n", __func__);
        return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
        assert_spin_locked(&zswap_pools_lock);
        return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
        struct zswap_pool *pool;

        rcu_read_lock();
        pool = __zswap_pool_current();
        if (!zswap_pool_get(pool))
                pool = NULL;
        rcu_read_unlock();

        return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
        struct zswap_pool *pool, *last = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(pool, &zswap_pools, list)
                last = pool;
        WARN_ONCE(!last && zswap_has_pool,
                  "%s: no page storage pool!\n", __func__);
        if (!zswap_pool_get(last))
                last = NULL;
        rcu_read_unlock();

        return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
        struct zswap_pool *pool;

        assert_spin_locked(&zswap_pools_lock);

        list_for_each_entry_rcu(pool, &zswap_pools, list) {
                if (strcmp(pool->tfm_name, compressor))
                        continue;
                if (strcmp(zpool_get_type(pool->zpool), type))
                        continue;
                /* if we can't get it, it's about to be destroyed */
                if (!zswap_pool_get(pool))
                        continue;
                return pool;
        }

        return NULL;
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
        struct zswap_pool *pool;
        char name[38]; /* 'zswap' + 32 char (max) num + \0 */
        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
        int ret;

        if (!zswap_has_pool) {
                /* if either are unset, pool initialization failed, and we
                 * need both params to be set correctly before trying to
                 * create a pool.
                 */
                if (!strcmp(type, ZSWAP_PARAM_UNSET))
                        return NULL;
                if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
                        return NULL;
        }

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;

        /* unique name for each pool specifically required by zsmalloc */
        snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

        pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
        if (!pool->zpool) {
                pr_err("%s zpool not available\n", type);
                goto error;
        }
        pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

        strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
        pool->tfm = alloc_percpu(struct crypto_comp *);
        if (!pool->tfm) {
                pr_err("percpu alloc failed\n");
                goto error;
        }

        ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
                                       &pool->node);
        if (ret)
                goto error;
        pr_debug("using %s compressor\n", pool->tfm_name);

        /* being the current pool takes 1 ref; this func expects the
         * caller to always add the new pool as the current pool
         */
        kref_init(&pool->kref);
        INIT_LIST_HEAD(&pool->list);

        zswap_pool_debug("created", pool);

        return pool;

error:
        free_percpu(pool->tfm);
        if (pool->zpool)
                zpool_destroy_pool(pool->zpool);
        kfree(pool);
        return NULL;
}

static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
        bool has_comp, has_zpool;

        has_comp = crypto_has_comp(zswap_compressor, 0, 0);
        if (!has_comp && strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
                pr_err("compressor %s not available, using default %s\n",
                       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
                param_free_charp(&zswap_compressor);
                zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
                has_comp = crypto_has_comp(zswap_compressor, 0, 0);
        }
        if (!has_comp) {
                pr_err("default compressor %s not available\n",
                       zswap_compressor);
                param_free_charp(&zswap_compressor);
                zswap_compressor = ZSWAP_PARAM_UNSET;
        }

        has_zpool = zpool_has_pool(zswap_zpool_type);
        if (!has_zpool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
                pr_err("zpool %s not available, using default %s\n",
                       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
                param_free_charp(&zswap_zpool_type);
                zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
                has_zpool = zpool_has_pool(zswap_zpool_type);
        }
        if (!has_zpool) {
                pr_err("default zpool %s not available\n",
                       zswap_zpool_type);
                param_free_charp(&zswap_zpool_type);
                zswap_zpool_type = ZSWAP_PARAM_UNSET;
        }

        if (!has_comp || !has_zpool)
                return NULL;

        return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
        zswap_pool_debug("destroying", pool);

        cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
        free_percpu(pool->tfm);
        zpool_destroy_pool(pool->zpool);
        kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
        if (!pool)
                return 0;
        return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
        struct zswap_pool *pool = container_of(work, typeof(*pool), work);

        synchronize_rcu();

        /* nobody should have been able to get a kref... */
        WARN_ON(kref_get_unless_zero(&pool->kref));

        /* pool is now off zswap_pools list and has no references. */
        zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
        struct zswap_pool *pool;

        pool = container_of(kref, typeof(*pool), kref);

        spin_lock(&zswap_pools_lock);

        WARN_ON(pool == zswap_pool_current());

        list_del_rcu(&pool->list);

        INIT_WORK(&pool->work, __zswap_pool_release);
        schedule_work(&pool->work);

        spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
        kref_put(&pool->kref, __zswap_pool_empty);
}
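
/*
 * Pool lifecycle, in short: zswap_pool_create() returns a pool holding
 * one reference, which the caller hands to the current-pool slot. Each
 * stored entry takes its own reference (dropped in zswap_free_entry()).
 * When the last reference goes away, __zswap_pool_empty() unlinks the
 * pool from zswap_pools and defers teardown to a workqueue, where
 * __zswap_pool_release() waits out an RCU grace period so concurrent
 * list walkers are finished with the pool before it is destroyed.
 */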

/*********************************
* param callbacks
**********************************/

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
                             char *type, char *compressor)
{
        struct zswap_pool *pool, *put_pool = NULL;
        char *s = strstrip((char *)val);
        int ret;

        if (zswap_init_failed) {
                pr_err("can't set param, initialization failed\n");
                return -ENODEV;
        }

        /* no change required */
        if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
                return 0;

        /* if this is load-time (pre-init) param setting,
         * don't create a pool; that's done during init.
         */
        if (!zswap_init_started)
                return param_set_charp(s, kp);

        if (!type) {
                if (!zpool_has_pool(s)) {
                        pr_err("zpool %s not available\n", s);
                        return -ENOENT;
                }
                type = s;
        } else if (!compressor) {
                if (!crypto_has_comp(s, 0, 0)) {
                        pr_err("compressor %s not available\n", s);
                        return -ENOENT;
                }
                compressor = s;
        } else {
                WARN_ON(1);
                return -EINVAL;
        }

        spin_lock(&zswap_pools_lock);
        pool = zswap_pool_find_get(type, compressor);
        if (pool) {
                zswap_pool_debug("using existing", pool);
                WARN_ON(pool == zswap_pool_current());
                list_del_rcu(&pool->list);
        }
        spin_unlock(&zswap_pools_lock);

        if (!pool)
                pool = zswap_pool_create(type, compressor);

        if (pool)
                ret = param_set_charp(s, kp);
        else
                ret = -EINVAL;

        spin_lock(&zswap_pools_lock);
        if (!ret) {
                put_pool = zswap_pool_current();
                list_add_rcu(&pool->list, &zswap_pools);
                zswap_has_pool = true;
        } else if (pool) {
                /* add the possibly pre-existing pool to the end of the pools
                 * list; if it's new (and empty) then it'll be removed and
                 * destroyed by the put after we drop the lock
                 */
                list_add_tail_rcu(&pool->list, &zswap_pools);
                put_pool = pool;
        }
        spin_unlock(&zswap_pools_lock);

        if (!zswap_has_pool && !pool) {
                /* if initial pool creation failed, and this pool creation also
                 * failed, maybe both compressor and zpool params were bad.
                 * Allow changing this param, so pool creation will succeed
                 * when the other param is changed. We already verified this
                 * param is ok in the zpool_has_pool() or crypto_has_comp()
                 * checks above.
                 */
                ret = param_set_charp(s, kp);
        }

        /* drop the ref from either the old current pool,
         * or the new pool we failed to add
         */
        if (put_pool)
                zswap_pool_put(put_pool);

        return ret;
}

static int zswap_compressor_param_set(const char *val,
                                      const struct kernel_param *kp)
{
        return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
                                 const struct kernel_param *kp)
{
        return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
                                   const struct kernel_param *kp)
{
        if (zswap_init_failed) {
                pr_err("can't enable, initialization failed\n");
                return -ENODEV;
        }
        if (!zswap_has_pool && zswap_init_started) {
                pr_err("can't enable, no pool configured\n");
                return -ENODEV;
        }

        return param_set_bool(val, kp);
}
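
/*
 * Example: switching the compressor at runtime (assuming the lz4 crypto
 * module is available),
 *
 *   echo lz4 > /sys/module/zswap/parameters/compressor
 *
 * goes through zswap_compressor_param_set() -> __zswap_param_set(),
 * which finds or creates an lz4 pool and makes it current. The old pool
 * stays on zswap_pools so its entries can still be loaded, and is
 * destroyed only after its last stored page is freed or written back.
 */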

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
        ZSWAP_SWAPCACHE_NEW,
        ZSWAP_SWAPCACHE_EXIST,
        ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
                                struct page **retpage)
{
        bool page_was_allocated;

        *retpage = __read_swap_cache_async(entry, GFP_KERNEL,
                        NULL, 0, &page_was_allocated);
        if (page_was_allocated)
                return ZSWAP_SWAPCACHE_NEW;
        if (!*retpage)
                return ZSWAP_SWAPCACHE_FAIL;
        return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
        struct zswap_header *zhdr;
        swp_entry_t swpentry;
        struct zswap_tree *tree;
        pgoff_t offset;
        struct zswap_entry *entry;
        struct page *page;
        struct crypto_comp *tfm;
        u8 *src, *dst;
        unsigned int dlen;
        int ret;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
        };

        /* extract swpentry from data */
        zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
        swpentry = zhdr->swpentry; /* here */
        zpool_unmap_handle(pool, handle);
        tree = zswap_trees[swp_type(swpentry)];
        offset = swp_offset(swpentry);

        /* find and ref zswap entry */
        spin_lock(&tree->lock);
        entry = zswap_entry_find_get(&tree->rbroot, offset);
        if (!entry) {
                /* entry was invalidated */
                spin_unlock(&tree->lock);
                return 0;
        }
        spin_unlock(&tree->lock);
        BUG_ON(offset != entry->offset);

        /* try to allocate swap cache page */
        switch (zswap_get_swap_cache_page(swpentry, &page)) {
        case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
                ret = -ENOMEM;
                goto fail;

        case ZSWAP_SWAPCACHE_EXIST:
                /* page is already in the swap cache, ignore for now */
                put_page(page);
                ret = -EEXIST;
                goto fail;

        case ZSWAP_SWAPCACHE_NEW: /* page is locked */
                /* decompress */
                dlen = PAGE_SIZE;
                src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
                                ZPOOL_MM_RO) + sizeof(struct zswap_header);
                dst = kmap_atomic(page);
                tfm = *get_cpu_ptr(entry->pool->tfm);
                ret = crypto_comp_decompress(tfm, src, entry->length,
                                             dst, &dlen);
                put_cpu_ptr(entry->pool->tfm);
                kunmap_atomic(dst);
                zpool_unmap_handle(entry->pool->zpool, entry->handle);
                BUG_ON(ret);
                BUG_ON(dlen != PAGE_SIZE);

                /* page is up to date */
                SetPageUptodate(page);
        }

        /* move it to the tail of the inactive list after end_writeback */
        SetPageReclaim(page);

        /* start writeback */
        __swap_writepage(page, &wbc, end_swap_bio_write);
        put_page(page);
        zswap_written_back_pages++;

        spin_lock(&tree->lock);
        /* drop local reference */
        zswap_entry_put(tree, entry);

        /*
         * There are two possible situations for entry here:
         * (1) refcount is 1 (normal case), entry is valid and on the tree
         * (2) refcount is 0, entry is freed and not on the tree
         *     because invalidate happened during writeback;
         *     search the tree and free the entry if found
         */
        if (entry == zswap_rb_search(&tree->rbroot, offset))
                zswap_entry_put(tree, entry);
        spin_unlock(&tree->lock);

        goto end;

        /*
         * If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
         * happening concurrently; it is safe and okay to not free the
         * entry here, and it is also okay to return !0.
         */
fail:
        spin_lock(&tree->lock);
        zswap_entry_put(tree, entry);
        spin_unlock(&tree->lock);

end:
        return ret;
}

static int zswap_shrink(void)
{
        struct zswap_pool *pool;
        int ret;

        pool = zswap_pool_last_get();
        if (!pool)
                return -ENOENT;

        ret = zpool_shrink(pool->zpool, 1, NULL);

        zswap_pool_put(pool);

        return ret;
}
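
/*
 * The reclaim chain, end to end: zswap_frontswap_store() sees
 * zswap_is_full(), calls zswap_shrink(), which asks the zpool to evict
 * one allocation via zpool_shrink(); the zpool then invokes our
 * zswap_zpool_ops.evict callback, zswap_writeback_entry(), on a handle
 * of its choosing.
 */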

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                                struct page *page)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry, *dupentry;
        struct crypto_comp *tfm;
        int ret;
        unsigned int dlen = PAGE_SIZE, len;
        unsigned long handle;
        u8 *buf;
        u8 *src, *dst;
        struct zswap_header *zhdr;

        if (!zswap_enabled || !tree) {
                ret = -ENODEV;
                goto reject;
        }

        /* reclaim space if needed */
        if (zswap_is_full()) {
                zswap_pool_limit_hit++;
                if (zswap_shrink()) {
                        zswap_reject_reclaim_fail++;
                        ret = -ENOMEM;
                        goto reject;
                }
        }

        /* allocate entry */
        entry = zswap_entry_cache_alloc(GFP_KERNEL);
        if (!entry) {
                zswap_reject_kmemcache_fail++;
                ret = -ENOMEM;
                goto reject;
        }

        /* if entry is successfully added, it keeps the reference */
        entry->pool = zswap_pool_current_get();
        if (!entry->pool) {
                ret = -EINVAL;
                goto freepage;
        }

        /* compress */
        dst = get_cpu_var(zswap_dstmem);
        tfm = *get_cpu_ptr(entry->pool->tfm);
        src = kmap_atomic(page);
        ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
        kunmap_atomic(src);
        put_cpu_ptr(entry->pool->tfm);
        if (ret) {
                ret = -EINVAL;
                goto put_dstmem;
        }

        /* store */
        len = dlen + sizeof(struct zswap_header);
        ret = zpool_malloc(entry->pool->zpool, len,
                           __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
                           &handle);
        if (ret == -ENOSPC) {
                zswap_reject_compress_poor++;
                goto put_dstmem;
        }
        if (ret) {
                zswap_reject_alloc_fail++;
                goto put_dstmem;
        }
        zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
        zhdr->swpentry = swp_entry(type, offset);
        buf = (u8 *)(zhdr + 1);
        memcpy(buf, dst, dlen);
        zpool_unmap_handle(entry->pool->zpool, handle);
        put_cpu_var(zswap_dstmem);

        /* populate entry */
        entry->offset = offset;
        entry->handle = handle;
        entry->length = dlen;

        /* map */
        spin_lock(&tree->lock);
        do {
                ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
                if (ret == -EEXIST) {
                        zswap_duplicate_entry++;
                        /* remove from rbtree */
                        zswap_rb_erase(&tree->rbroot, dupentry);
                        zswap_entry_put(tree, dupentry);
                }
        } while (ret == -EEXIST);
        spin_unlock(&tree->lock);

        /* update stats */
        atomic_inc(&zswap_stored_pages);
        zswap_update_total_size();

        return 0;

put_dstmem:
        put_cpu_var(zswap_dstmem);
        zswap_pool_put(entry->pool);
freepage:
        zswap_entry_cache_free(entry);
reject:
        return ret;
}

/*
 * returns 0 if the page was successfully decompressed
 * returns -1 on entry not found or error
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
                                struct page *page)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry;
        struct crypto_comp *tfm;
        u8 *src, *dst;
        unsigned int dlen;
        int ret;

        /* find */
        spin_lock(&tree->lock);
        entry = zswap_entry_find_get(&tree->rbroot, offset);
        if (!entry) {
                /* entry was written back */
                spin_unlock(&tree->lock);
                return -1;
        }
        spin_unlock(&tree->lock);

        /* decompress */
        dlen = PAGE_SIZE;
        src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
                        ZPOOL_MM_RO) + sizeof(struct zswap_header);
        dst = kmap_atomic(page);
        tfm = *get_cpu_ptr(entry->pool->tfm);
        ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
        put_cpu_ptr(entry->pool->tfm);
        kunmap_atomic(dst);
        zpool_unmap_handle(entry->pool->zpool, entry->handle);
        BUG_ON(ret);

        spin_lock(&tree->lock);
        zswap_entry_put(tree, entry);
        spin_unlock(&tree->lock);

        return ret;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry;

        /* find */
        spin_lock(&tree->lock);
        entry = zswap_rb_search(&tree->rbroot, offset);
        if (!entry) {
                /* entry was written back */
                spin_unlock(&tree->lock);
                return;
        }

        /* remove from rbtree */
        zswap_rb_erase(&tree->rbroot, entry);

        /* drop the initial reference from entry creation */
        zswap_entry_put(tree, entry);

        spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry, *n;

        if (!tree)
                return;

        /* walk the tree and free everything */
        spin_lock(&tree->lock);
        rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
                zswap_free_entry(entry);
        tree->rbroot = RB_ROOT;
        spin_unlock(&tree->lock);
        kfree(tree);
        zswap_trees[type] = NULL;
}

static void zswap_frontswap_init(unsigned type)
{
        struct zswap_tree *tree;

        tree = kzalloc(sizeof(*tree), GFP_KERNEL);
        if (!tree) {
                pr_err("alloc failed, zswap disabled for swap type %d\n", type);
                return;
        }

        tree->rbroot = RB_ROOT;
        spin_lock_init(&tree->lock);
        zswap_trees[type] = tree;
}

static struct frontswap_ops zswap_frontswap_ops = {
        .store = zswap_frontswap_store,
        .load = zswap_frontswap_load,
        .invalidate_page = zswap_frontswap_invalidate_page,
        .invalidate_area = zswap_frontswap_invalidate_area,
        .init = zswap_frontswap_init
};
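
/*
 * These hooks are invoked by frontswap: init when a swap area is enabled
 * (swapon), store when a page is about to go to the swap device, load
 * when a stored page is swapped back in, and the invalidate hooks when
 * swap slots or a whole swap area are freed. A non-zero return from
 * store makes frontswap fall through to the regular swap path for that
 * page.
 */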

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
        if (!debugfs_initialized())
                return -ENODEV;

        zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
        if (!zswap_debugfs_root)
                return -ENOMEM;

        debugfs_create_u64("pool_limit_hit", S_IRUGO,
                           zswap_debugfs_root, &zswap_pool_limit_hit);
        debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
                           zswap_debugfs_root, &zswap_reject_reclaim_fail);
        debugfs_create_u64("reject_alloc_fail", S_IRUGO,
                           zswap_debugfs_root, &zswap_reject_alloc_fail);
        debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
                           zswap_debugfs_root, &zswap_reject_kmemcache_fail);
        debugfs_create_u64("reject_compress_poor", S_IRUGO,
                           zswap_debugfs_root, &zswap_reject_compress_poor);
        debugfs_create_u64("written_back_pages", S_IRUGO,
                           zswap_debugfs_root, &zswap_written_back_pages);
        debugfs_create_u64("duplicate_entry", S_IRUGO,
                           zswap_debugfs_root, &zswap_duplicate_entry);
        debugfs_create_u64("pool_total_size", S_IRUGO,
                           zswap_debugfs_root, &zswap_pool_total_size);
        debugfs_create_atomic_t("stored_pages", S_IRUGO,
                                zswap_debugfs_root, &zswap_stored_pages);

        return 0;
}

static void __exit zswap_debugfs_exit(void)
{
        debugfs_remove_recursive(zswap_debugfs_root);
}
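
/*
 * With debugfs mounted in the usual place, the statistics above appear
 * as read-only files, e.g.:
 *
 *   cat /sys/kernel/debug/zswap/pool_total_size
 *   cat /sys/kernel/debug/zswap/stored_pages
 */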

#else
static int __init zswap_debugfs_init(void)
{
        return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
        struct zswap_pool *pool;
        int ret;

        zswap_init_started = true;

        if (zswap_entry_cache_create()) {
                pr_err("entry cache creation failed\n");
                goto cache_fail;
        }

        ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
                                zswap_dstmem_prepare, zswap_dstmem_dead);
        if (ret) {
                pr_err("dstmem alloc failed\n");
                goto dstmem_fail;
        }

        ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
                                      "mm/zswap_pool:prepare",
                                      zswap_cpu_comp_prepare,
                                      zswap_cpu_comp_dead);
        if (ret)
                goto hp_fail;

        pool = __zswap_pool_create_fallback();
        if (pool) {
                pr_info("loaded using pool %s/%s\n", pool->tfm_name,
                        zpool_get_type(pool->zpool));
                list_add(&pool->list, &zswap_pools);
                zswap_has_pool = true;
        } else {
                pr_err("pool creation failed\n");
                zswap_enabled = false;
        }

        frontswap_register_ops(&zswap_frontswap_ops);
        if (zswap_debugfs_init())
                pr_warn("debugfs initialization failed\n");
        return 0;

hp_fail:
        cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
        zswap_entry_cache_destroy();
cache_fail:
        /* if built-in, we aren't unloaded on failure; don't allow use */
        zswap_init_failed = true;
        zswap_enabled = false;
        return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");