arch/powerpc/mm/mmu_context_iommu.c
/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>

static DEFINE_MUTEX(mem_list_mutex);

struct mm_iommu_table_group_mem_t {
        struct list_head next;
        struct rcu_head rcu;
        unsigned long used;
        atomic64_t mapped;
        u64 ua;                 /* userspace address */
        u64 entries;            /* number of entries in hpas[] */
        u64 *hpas;              /* vmalloc'ed */
};

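/*
 * Account pages that are about to be pinned (or have just been unpinned)
 * against the mm's locked_vm. On increment the RLIMIT_MEMLOCK limit is
 * enforced unless the caller has CAP_IPC_LOCK; mmap_sem is taken for
 * writing while locked_vm is updated.
 */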
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
                unsigned long npages, bool incr)
{
        long ret = 0, locked, lock_limit;

        if (!npages)
                return 0;

        down_write(&mm->mmap_sem);

        if (incr) {
                locked = mm->locked_vm + npages;
                lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        ret = -ENOMEM;
                else
                        mm->locked_vm += npages;
        } else {
                if (WARN_ON_ONCE(npages > mm->locked_vm))
                        npages = mm->locked_vm;
                mm->locked_vm -= npages;
        }

        pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
                        current ? current->pid : 0,
                        incr ? '+' : '-',
                        npages << PAGE_SHIFT,
                        mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK));
        up_write(&mm->mmap_sem);

        return ret;
}

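/*
 * Returns true if this mm has at least one preregistered memory region.
 */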
bool mm_iommu_preregistered(struct mm_struct *mm)
{
        return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
                                        int **resultp)
{
        gfp_t gfp_mask = GFP_USER;
        struct page *new_page;

        if (PageCompound(page))
                return NULL;

        if (PageHighMem(page))
                gfp_mask |= __GFP_HIGHMEM;

        /*
         * We don't want the allocation to force an OOM if possible
         */
        new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
        return new_page;
}

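/*
 * Try to migrate a single page out of the CMA area before it is pinned
 * long-term: isolate it from the LRU, drop the gup reference, and let
 * migrate_pages() allocate a replacement via new_iommu_non_cma_page().
 * Compound (huge) pages are not handled here and are rejected with -EBUSY.
 */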
static int mm_iommu_move_page_from_cma(struct page *page)
{
        int ret = 0;
        LIST_HEAD(cma_migrate_pages);

        /* Ignore huge pages for now */
        if (PageCompound(page))
                return -EBUSY;

        lru_add_drain();
        ret = isolate_lru_page(page);
        if (ret)
                return ret;

        list_add(&page->lru, &cma_migrate_pages);
        put_page(page); /* Drop the gup reference */

        ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
                                NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
        if (ret) {
                if (!list_empty(&cma_migrate_pages))
                        putback_movable_pages(&cma_migrate_pages);
        }

        return 0;
}

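/*
 * Preregister a chunk of userspace memory for IOMMU (TCE) use: pin every
 * page with get_user_pages_fast(), record its host physical address in
 * hpas[] and account the pages against RLIMIT_MEMLOCK. Pages that sit in
 * a CMA area are migrated out first so that long-term pinning does not
 * fragment CMA. An existing region with the same @ua and @entries is
 * reused and reference counted via @used; overlapping regions are
 * rejected with -EINVAL. A typical user is the SPAPR TCE VFIO driver,
 * which preregisters guest memory before mapping it into TCE tables.
 */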
long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
                struct mm_iommu_table_group_mem_t **pmem)
{
        struct mm_iommu_table_group_mem_t *mem;
        long i, j, ret = 0, locked_entries = 0;
        struct page *page = NULL;

        mutex_lock(&mem_list_mutex);

        list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
                        next) {
                if ((mem->ua == ua) && (mem->entries == entries)) {
                        ++mem->used;
                        *pmem = mem;
                        goto unlock_exit;
                }

                /* Overlap? */
                if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
                                (ua < (mem->ua +
                                       (mem->entries << PAGE_SHIFT)))) {
                        ret = -EINVAL;
                        goto unlock_exit;
                }

        }

        ret = mm_iommu_adjust_locked_vm(mm, entries, true);
        if (ret)
                goto unlock_exit;

        locked_entries = entries;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem) {
                ret = -ENOMEM;
                goto unlock_exit;
        }

        mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
        if (!mem->hpas) {
                kfree(mem);
                ret = -ENOMEM;
                goto unlock_exit;
        }

        for (i = 0; i < entries; ++i) {
                if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
                                        1/* pages */, 1/* iswrite */, &page)) {
                        ret = -EFAULT;
                        for (j = 0; j < i; ++j)
                                put_page(pfn_to_page(mem->hpas[j] >>
                                                PAGE_SHIFT));
                        vfree(mem->hpas);
                        kfree(mem);
                        goto unlock_exit;
                }
                /*
                 * If we get a page from the CMA zone, since we are going to
                 * be pinning these entries, we might as well move them out
                 * of the CMA zone if possible. NOTE: faulting in + migration
                 * can be expensive. Batching can be considered later
                 */
                if (is_migrate_cma_page(page)) {
                        if (mm_iommu_move_page_from_cma(page))
                                goto populate;
                        if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
                                                1/* pages */, 1/* iswrite */,
                                                &page)) {
                                ret = -EFAULT;
                                for (j = 0; j < i; ++j)
                                        put_page(pfn_to_page(mem->hpas[j] >>
                                                                PAGE_SHIFT));
                                vfree(mem->hpas);
                                kfree(mem);
                                goto unlock_exit;
                        }
                }
populate:
                mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
        }

        atomic64_set(&mem->mapped, 1);
        mem->used = 1;
        mem->ua = ua;
        mem->entries = entries;
        *pmem = mem;

        list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

unlock_exit:
        if (locked_entries && ret)
                mm_iommu_adjust_locked_vm(mm, locked_entries, false);

        mutex_unlock(&mem_list_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

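/*
 * Drop the page references taken by mm_iommu_get() and clear hpas[].
 */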
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
        long i;
        struct page *page = NULL;

        for (i = 0; i < mem->entries; ++i) {
                if (!mem->hpas[i])
                        continue;

                page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
                if (!page)
                        continue;

                put_page(page);
                mem->hpas[i] = 0;
        }
}

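/*
 * Teardown path: mm_iommu_release() unlinks the region from the per-mm
 * list and defers the actual unpin and free to an RCU callback, so that
 * lockless readers such as the real mode lookup can finish walking the
 * list safely.
 */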
static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
        mm_iommu_unpin(mem);
        vfree(mem->hpas);
        kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
        struct mm_iommu_table_group_mem_t *mem = container_of(head,
                        struct mm_iommu_table_group_mem_t, rcu);

        mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
        list_del_rcu(&mem->next);
        call_rcu(&mem->rcu, mm_iommu_free);
}

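/*
 * Drop a reference to a preregistered region. The region is only torn
 * down once the last user is gone and no active mappings remain; if
 * @mapped cannot be brought from 1 back to 0, the reference is restored
 * and -EBUSY is returned.
 */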
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
        long ret = 0;

        mutex_lock(&mem_list_mutex);

        if (mem->used == 0) {
                ret = -ENOENT;
                goto unlock_exit;
        }

        --mem->used;
        /* There are still users, exit */
        if (mem->used)
                goto unlock_exit;

        /* Are there still mappings? */
        if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
                ++mem->used;
                ret = -EBUSY;
                goto unlock_exit;
        }

        /* @mapped became 0 so now mappings are disabled, release the region */
        mm_iommu_release(mem);

        mm_iommu_adjust_locked_vm(mm, mem->entries, false);

unlock_exit:
        mutex_unlock(&mem_list_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

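/*
 * Find the preregistered region that fully covers [ua, ua + size).
 */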
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
                unsigned long ua, unsigned long size)
{
        struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

        list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
                if ((mem->ua <= ua) &&
                                (ua + size <= mem->ua +
                                 (mem->entries << PAGE_SHIFT))) {
                        ret = mem;
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

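/*
 * Real mode variant of mm_iommu_lookup(): walks the list with
 * list_for_each_entry_lockless() so it can be called from real mode
 * (for example by KVM's real mode TCE handlers), where the usual RCU
 * read-side helpers are not usable.
 */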
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
                unsigned long ua, unsigned long size)
{
        struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

        list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
                        next) {
                if ((mem->ua <= ua) &&
                                (ua + size <= mem->ua +
                                 (mem->entries << PAGE_SHIFT))) {
                        ret = mem;
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);

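/*
 * Find a region by its exact userspace address and size in entries, as
 * opposed to mm_iommu_lookup() which matches any region containing the
 * requested range.
 */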
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
                unsigned long ua, unsigned long entries)
{
        struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

        list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
                if ((mem->ua == ua) && (mem->entries == entries)) {
                        ret = mem;
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

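/*
 * Translate a userspace address within a preregistered region into the
 * host physical address recorded at pin time.
 */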
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
                unsigned long ua, unsigned long *hpa)
{
        const long entry = (ua - mem->ua) >> PAGE_SHIFT;
        u64 *va = &mem->hpas[entry];

        if (entry >= mem->entries)
                return -EFAULT;

        *hpa = *va | (ua & ~PAGE_MASK);

        return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

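/*
 * Real mode variant of mm_iommu_ua_to_hpa(): hpas[] lives in vmalloc
 * space, which cannot be dereferenced through its virtual address in
 * real mode, so the entry is read via its physical address obtained
 * from vmalloc_to_phys().
 */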
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
                unsigned long ua, unsigned long *hpa)
{
        const long entry = (ua - mem->ua) >> PAGE_SHIFT;
        void *va = &mem->hpas[entry];
        unsigned long *pa;

        if (entry >= mem->entries)
                return -EFAULT;

        pa = (void *) vmalloc_to_phys(va);
        if (!pa)
                return -EFAULT;

        *hpa = *pa | (ua & ~PAGE_MASK);

        return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);

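/*
 * mm_iommu_mapped_inc()/_dec() count active TCE mappings of a region.
 * @mapped starts at 1; mm_iommu_put() can only drop it from 1 to 0 once
 * all mappings are gone, after which further increments fail with -ENXIO.
 */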
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
        if (atomic64_inc_not_zero(&mem->mapped))
                return 0;

        /* Last mm_iommu_put() has been called, no more mappings allowed */
        return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
        atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

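/*
 * Initialise the per-mm list of preregistered regions; called when a new
 * mm context is set up.
 */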
void mm_iommu_init(struct mm_struct *mm)
{
        INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}