mm: rename and change semantics of nr_indirectly_reclaimable_bytes
[muen/linux.git] / drivers / staging / android / ion / ion_page_pool.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
 * drivers/staging/android/ion/ion_page_pool.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  */
7
8 #include <linux/list.h>
9 #include <linux/slab.h>
10 #include <linux/swap.h>
11
12 #include "ion.h"
13
/* Refill path: get a fresh allocation of pool->order pages straight from
 * the page allocator using the pool's gfp flags (which include __GFP_COMP,
 * set in ion_page_pool_create()).  May return NULL on allocation failure.
 */
static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	return alloc_pages(pool->gfp_mask, pool->order);
}
18
/* Give an allocation back to the system (not to the pool) at the pool's
 * order.  Used by the shrinker once a page has been removed from the lists.
 */
static void ion_page_pool_free_pages(struct ion_page_pool *pool,
				     struct page *page)
{
	__free_pages(page, pool->order);
}
24
25 static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
26 {
27         mutex_lock(&pool->mutex);
28         if (PageHighMem(page)) {
29                 list_add_tail(&page->lru, &pool->high_items);
30                 pool->high_count++;
31         } else {
32                 list_add_tail(&page->lru, &pool->low_items);
33                 pool->low_count++;
34         }
35
36         mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
37                                                         1 << pool->order);
38         mutex_unlock(&pool->mutex);
39 }
40
41 static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
42 {
43         struct page *page;
44
45         if (high) {
46                 BUG_ON(!pool->high_count);
47                 page = list_first_entry(&pool->high_items, struct page, lru);
48                 pool->high_count--;
49         } else {
50                 BUG_ON(!pool->low_count);
51                 page = list_first_entry(&pool->low_items, struct page, lru);
52                 pool->low_count--;
53         }
54
55         list_del(&page->lru);
56         mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
57                                                         -(1 << pool->order));
58         return page;
59 }
60
61 struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
62 {
63         struct page *page = NULL;
64
65         BUG_ON(!pool);
66
67         mutex_lock(&pool->mutex);
68         if (pool->high_count)
69                 page = ion_page_pool_remove(pool, true);
70         else if (pool->low_count)
71                 page = ion_page_pool_remove(pool, false);
72         mutex_unlock(&pool->mutex);
73
74         if (!page)
75                 page = ion_page_pool_alloc_pages(pool);
76
77         return page;
78 }
79
80 void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
81 {
82         BUG_ON(pool->order != compound_order(page));
83
84         ion_page_pool_add(pool, page);
85 }
86
87 static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
88 {
89         int count = pool->low_count;
90
91         if (high)
92                 count += pool->high_count;
93
94         return count << pool->order;
95 }
96
/* Shrinker helper: free up to @nr_to_scan 0-order pages from the pool,
 * or, when @nr_to_scan is 0, just report how many pages could be freed.
 * Returns the number of 0-order pages freed (or freeable).
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
			 int nr_to_scan)
{
	int freed = 0;
	bool high;

	/* kswapd may always reclaim highmem entries; direct reclaimers
	 * may only do so when their allocation can use highmem.
	 */
	if (current_is_kswapd())
		high = true;
	else
		high = !!(gfp_mask & __GFP_HIGHMEM);

	/* nr_to_scan == 0 is a query (shrinker count path), not a scan. */
	if (nr_to_scan == 0)
		return ion_page_pool_total(pool, high);

	while (freed < nr_to_scan) {
		struct page *page;

		/* Hold the mutex only for list manipulation; lowmem
		 * entries are drained before highmem ones.
		 */
		mutex_lock(&pool->mutex);
		if (pool->low_count) {
			page = ion_page_pool_remove(pool, false);
		} else if (high && pool->high_count) {
			page = ion_page_pool_remove(pool, true);
		} else {
			/* Pool exhausted (for what we may touch). */
			mutex_unlock(&pool->mutex);
			break;
		}
		mutex_unlock(&pool->mutex);
		/* Free outside the lock; each entry is 2^order pages. */
		ion_page_pool_free_pages(pool, page);
		freed += (1 << pool->order);
	}

	return freed;
}
130
131 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
132 {
133         struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
134
135         if (!pool)
136                 return NULL;
137         pool->high_count = 0;
138         pool->low_count = 0;
139         INIT_LIST_HEAD(&pool->low_items);
140         INIT_LIST_HEAD(&pool->high_items);
141         pool->gfp_mask = gfp_mask | __GFP_COMP;
142         pool->order = order;
143         mutex_init(&pool->mutex);
144         plist_node_init(&pool->list, order);
145
146         return pool;
147 }
148
/* Free the pool bookkeeping itself.
 * NOTE(review): pages still sitting on the pool lists are not freed
 * here — presumably callers drain the pool (via the shrinker) before
 * destroying it; confirm against the ion heap teardown path.
 */
void ion_page_pool_destroy(struct ion_page_pool *pool)
{
	kfree(pool);
}