/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/list.h>
#include <drm/drmP.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"

/* Special VM and GART address alignment needed for VI pre-Fiji due to
 * a HW bug.
 */
#define VI_BO_SIZE_ALIGN (0x8000)

/* Impose limit on how much memory KFD can use */
static struct {
        uint64_t max_system_mem_limit;
        int64_t system_mem_used;
        spinlock_t mem_limit_lock;
} kfd_mem_limit;

/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
        uint32_t        domain;
        bool            wait;
};

static const char * const domain_bit_to_string[] = {
                "CPU",
                "GTT",
                "VRAM",
                "GDS",
                "GWS",
                "OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
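
/* Example: domain_string(AMDGPU_GEM_DOMAIN_VRAM) evaluates to "VRAM"
 * (ffs(0x4) - 1 == 2). The macro assumes exactly one domain bit is set.
 */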

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
        return (struct amdgpu_device *)kgd;
}

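/* check_if_add_bo_to_vm - Return true if the BO is not yet added to the
 * given VM (i.e. no bo_va entry for that VM exists), false otherwise.
 */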
static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
                struct kgd_mem *mem)
{
        struct kfd_bo_va_list *entry;

        list_for_each_entry(entry, &mem->bo_va_list, bo_list)
                if (entry->bo_va->base.vm == avm)
                        return false;

        return true;
}

/* Set memory usage limits. Currently, the only limit is:
 *  System (kernel) memory - 3/8th of System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
        struct sysinfo si;
        uint64_t mem;

        si_meminfo(&si);
        mem = si.totalram - si.totalhigh;
        mem *= si.mem_unit;

        spin_lock_init(&kfd_mem_limit.mem_limit_lock);
        kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
        pr_debug("Kernel memory limit %lluM\n",
                (kfd_mem_limit.max_system_mem_limit >> 20));
}
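
/* Worked example for the limit above (illustrative): with 16 GiB of
 * usable system RAM, max_system_mem_limit = (mem >> 1) - (mem >> 3)
 * = 8 GiB - 2 GiB = 6 GiB, i.e. 3/8 of RAM.
 */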

static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
                                              uint64_t size, u32 domain)
{
        size_t acc_size;
        int ret = 0;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        spin_lock(&kfd_mem_limit.mem_limit_lock);
        if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                if (kfd_mem_limit.system_mem_used + (acc_size + size) >
                        kfd_mem_limit.max_system_mem_limit) {
                        ret = -ENOMEM;
                        goto err_no_mem;
                }
                kfd_mem_limit.system_mem_used += (acc_size + size);
        }
err_no_mem:
        spin_unlock(&kfd_mem_limit.mem_limit_lock);
        return ret;
}

static void unreserve_system_mem_limit(struct amdgpu_device *adev,
                                       uint64_t size, u32 domain)
{
        size_t acc_size;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        spin_lock(&kfd_mem_limit.mem_limit_lock);
        if (domain == AMDGPU_GEM_DOMAIN_GTT)
                kfd_mem_limit.system_mem_used -= (acc_size + size);
        WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
                  "kfd system memory accounting unbalanced");

        spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
{
        spin_lock(&kfd_mem_limit.mem_limit_lock);

        if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
                kfd_mem_limit.system_mem_used -=
                        (bo->tbo.acc_size + amdgpu_bo_size(bo));
        }
        WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
                  "kfd system memory accounting unbalanced");

        spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] If ef is specified, then this eviction fence is removed if it
 *  is present in the shared list.
 * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
 *  from BO's reservation object shared list.
 * @ef_count: [OUT] Number of fences in ef_list.
 *
 * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
 *  called to restore the eviction fences and to avoid a memory leak. This is
 *  useful for shared BOs.
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
                                        struct amdgpu_amdkfd_fence *ef,
                                        struct amdgpu_amdkfd_fence ***ef_list,
                                        unsigned int *ef_count)
{
        struct reservation_object_list *fobj;
        struct reservation_object *resv;
        unsigned int i = 0, j = 0, k = 0, shared_count;
        unsigned int count = 0;
        struct amdgpu_amdkfd_fence **fence_list;

        if (!ef && !ef_list)
                return -EINVAL;

        if (ef_list) {
                *ef_list = NULL;
                *ef_count = 0;
        }

        resv = bo->tbo.resv;
        fobj = reservation_object_get_list(resv);

        if (!fobj)
                return 0;

        preempt_disable();
        write_seqcount_begin(&resv->seq);

        /* Go through all the shared fences in the reservation object. If
         * ef is specified and it exists in the list, remove it and reduce the
         * count. If ef is not specified, then get the count of eviction fences
         * present.
         */
        shared_count = fobj->shared_count;
        for (i = 0; i < shared_count; ++i) {
                struct dma_fence *f;

                f = rcu_dereference_protected(fobj->shared[i],
                                              reservation_object_held(resv));

                if (ef) {
                        if (f->context == ef->base.context) {
                                dma_fence_put(f);
                                fobj->shared_count--;
                        } else {
                                RCU_INIT_POINTER(fobj->shared[j++], f);
                        }
                } else if (to_amdgpu_amdkfd_fence(f))
                        count++;
        }
        write_seqcount_end(&resv->seq);
        preempt_enable();

        if (ef || !count)
                return 0;

        /* Alloc memory for count number of eviction fence pointers. Fill the
         * ef_list array and ef_count
         */
        fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
                             GFP_KERNEL);
        if (!fence_list)
                return -ENOMEM;

        preempt_disable();
        write_seqcount_begin(&resv->seq);

        j = 0;
        for (i = 0; i < shared_count; ++i) {
                struct dma_fence *f;
                struct amdgpu_amdkfd_fence *efence;

                f = rcu_dereference_protected(fobj->shared[i],
                        reservation_object_held(resv));

                efence = to_amdgpu_amdkfd_fence(f);
                if (efence) {
                        fence_list[k++] = efence;
                        fobj->shared_count--;
                } else {
                        RCU_INIT_POINTER(fobj->shared[j++], f);
                }
        }

        write_seqcount_end(&resv->seq);
        preempt_enable();

        *ef_list = fence_list;
        *ef_count = k;

        return 0;
}

/* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
 *  reservation object.
 *
 * @bo: [IN] Add eviction fences to this BO
 * @ef_list: [IN] List of eviction fences to be added
 * @ef_count: [IN] Number of fences in ef_list.
 *
 * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
 *  function.
 */
static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
                                struct amdgpu_amdkfd_fence **ef_list,
                                unsigned int ef_count)
{
        int i;

        if (!ef_list || !ef_count)
                return;

        for (i = 0; i < ef_count; i++) {
                amdgpu_bo_fence(bo, &ef_list[i]->base, true);
                /* Re-adding the fence takes an additional reference. Drop that
                 * reference.
                 */
                dma_fence_put(&ef_list[i]->base);
        }

        kfree(ef_list);
}
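
/* Typical pairing of the two helpers above (sketch; this mirrors the
 * wait path in amdgpu_amdkfd_bo_validate() below):
 *
 *      amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list, &ef_count);
 *      ttm_bo_wait(&bo->tbo, false, false);
 *      amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
 */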

static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
                                     bool wait)
{
        struct ttm_operation_ctx ctx = { false, false };
        int ret;

        if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
                 "Called with userptr BO"))
                return -EINVAL;

        amdgpu_ttm_placement_from_domain(bo, domain);

        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (ret)
                goto validate_fail;
        if (wait) {
                struct amdgpu_amdkfd_fence **ef_list;
                unsigned int ef_count;

                ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
                                                          &ef_count);
                if (ret)
                        goto validate_fail;

                ttm_bo_wait(&bo->tbo, false, false);
                amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
        }

validate_fail:
        return ret;
}

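/* Validation callback for amdgpu_vm_validate_pt_bos(); param points to a
 * struct amdgpu_vm_parser carrying the target domain and wait flag.
 */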
static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
        struct amdgpu_vm_parser *p = param;

        return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
        struct amdgpu_bo *pd = vm->root.base.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
        struct amdgpu_vm_parser param;
        uint64_t addr, flags = AMDGPU_PTE_VALID;
        int ret;

        param.domain = AMDGPU_GEM_DOMAIN_VRAM;
        param.wait = false;

        ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
                                        &param);
        if (ret) {
                pr_err("amdgpu: failed to validate PT BOs\n");
                return ret;
        }

        ret = amdgpu_amdkfd_validate(&param, pd);
        if (ret) {
                pr_err("amdgpu: failed to validate PD\n");
                return ret;
        }

        addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
        amdgpu_gmc_get_vm_pde(adev, -1, &addr, &flags);
        vm->pd_phys_addr = addr;

        if (vm->use_cpu_for_update) {
                ret = amdgpu_bo_kmap(pd, NULL);
                if (ret) {
                        pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
                        return ret;
                }
        }

        return 0;
}

static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                         struct dma_fence *f)
{
        int ret = amdgpu_sync_fence(adev, sync, f, false);

        /* Sync objects can't handle multiple GPUs (contexts) updating
         * sync->last_vm_update. Fortunately we don't need it for
         * KFD's purposes, so we can just drop that fence.
         */
        if (sync->last_vm_update) {
                dma_fence_put(sync->last_vm_update);
                sync->last_vm_update = NULL;
        }

        return ret;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
        struct amdgpu_bo *pd = vm->root.base.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
        int ret;

        ret = amdgpu_vm_update_directories(adev, vm);
        if (ret)
                return ret;

        return sync_vm_fence(adev, sync, vm->last_update);
}

/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a.  Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
                struct amdgpu_vm *vm, bool is_aql,
                struct kfd_bo_va_list **p_bo_va_entry)
{
        int ret;
        struct kfd_bo_va_list *bo_va_entry;
        struct amdgpu_bo *pd = vm->root.base.bo;
        struct amdgpu_bo *bo = mem->bo;
        uint64_t va = mem->va;
        struct list_head *list_bo_va = &mem->bo_va_list;
        unsigned long bo_size = bo->tbo.mem.size;

        if (!va) {
                pr_err("Invalid VA when adding BO to VM\n");
                return -EINVAL;
        }

        if (is_aql)
                va += bo_size;

        bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
        if (!bo_va_entry)
                return -ENOMEM;

        pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
                        va + bo_size, vm);

        /* Add BO to VM internal data structures */
        bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (!bo_va_entry->bo_va) {
                ret = -EINVAL;
                pr_err("Failed to add BO to VM. ret == %d\n",
                                ret);
                goto err_vmadd;
        }

        bo_va_entry->va = va;
        bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
                                                         mem->mapping_flags);
        bo_va_entry->kgd_dev = (void *)adev;
        list_add(&bo_va_entry->bo_list, list_bo_va);

        if (p_bo_va_entry)
                *p_bo_va_entry = bo_va_entry;

        /* Allocate new page tables if needed and validate
         * them. Clearing of new page tables and validation need to wait
         * on move fences. We don't want that to trigger the eviction
         * fence, so remove it temporarily.
         */
        amdgpu_amdkfd_remove_eviction_fence(pd,
                                        vm->process_info->eviction_fence,
                                        NULL, NULL);

        ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
        if (ret) {
                pr_err("Failed to allocate pts, err=%d\n", ret);
                goto err_alloc_pts;
        }

        ret = vm_validate_pt_pd_bos(vm);
        if (ret) {
                pr_err("validate_pt_pd_bos() failed\n");
                goto err_alloc_pts;
        }

        /* Add the eviction fence back */
        amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);

        return 0;

err_alloc_pts:
        amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
        amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
        list_del(&bo_va_entry->bo_list);
err_vmadd:
        kfree(bo_va_entry);
        return ret;
}

static void remove_bo_from_vm(struct amdgpu_device *adev,
                struct kfd_bo_va_list *entry, unsigned long size)
{
        pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
                        entry->va,
                        entry->va + size, entry);
        amdgpu_vm_bo_rmv(adev, entry->bo_va);
        list_del(&entry->bo_list);
        kfree(entry);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
                                struct amdkfd_process_info *process_info)
{
        struct ttm_validate_buffer *entry = &mem->validate_list;
        struct amdgpu_bo *bo = mem->bo;

        INIT_LIST_HEAD(&entry->head);
        entry->shared = true;
        entry->bo = &bo->tbo;
        mutex_lock(&process_info->lock);
        list_add_tail(&entry->head, &process_info->kfd_bo_list);
        mutex_unlock(&process_info->lock);
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
        struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
        unsigned int n_vms;                 /* Number of VMs reserved       */
        struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
        struct ww_acquire_ctx ticket;       /* Reservation ticket           */
        struct list_head list, duplicates;  /* BO lists                     */
        struct amdgpu_sync *sync;           /* Pointer to sync object       */
        bool reserved;                      /* Whether BOs are reserved     */
};

enum bo_vm_match {
        BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
        BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
        BO_VM_ALL,              /* Match all VMs a BO was added to    */
};

/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
                              struct amdgpu_vm *vm,
                              struct bo_vm_reservation_context *ctx)
{
        struct amdgpu_bo *bo = mem->bo;
        int ret;

        WARN_ON(!vm);

        ctx->reserved = false;
        ctx->n_vms = 1;
        ctx->sync = &mem->sync;

        INIT_LIST_HEAD(&ctx->list);
        INIT_LIST_HEAD(&ctx->duplicates);

        ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
        if (!ctx->vm_pd)
                return -ENOMEM;

        ctx->kfd_bo.robj = bo;
        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
        ctx->kfd_bo.tv.shared = true;
        ctx->kfd_bo.user_pages = NULL;
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);

        amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
                                     false, &ctx->duplicates);
        if (!ret) {
                ctx->reserved = true;
        } else {
                pr_err("Failed to reserve buffers in ttm\n");
                kfree(ctx->vm_pd);
                ctx->vm_pd = NULL;
        }

        return ret;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are reserved. Otherwise, only the single given VM is.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
                                struct amdgpu_vm *vm, enum bo_vm_match map_type,
                                struct bo_vm_reservation_context *ctx)
{
        struct amdgpu_bo *bo = mem->bo;
        struct kfd_bo_va_list *entry;
        unsigned int i;
        int ret;

        ctx->reserved = false;
        ctx->n_vms = 0;
        ctx->vm_pd = NULL;
        ctx->sync = &mem->sync;

        INIT_LIST_HEAD(&ctx->list);
        INIT_LIST_HEAD(&ctx->duplicates);

        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if ((vm && vm != entry->bo_va->base.vm) ||
                        (entry->is_mapped != map_type
                        && map_type != BO_VM_ALL))
                        continue;

                ctx->n_vms++;
        }

        if (ctx->n_vms != 0) {
                ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
                                     GFP_KERNEL);
                if (!ctx->vm_pd)
                        return -ENOMEM;
        }

        ctx->kfd_bo.robj = bo;
        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
        ctx->kfd_bo.tv.shared = true;
        ctx->kfd_bo.user_pages = NULL;
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);

        i = 0;
        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if ((vm && vm != entry->bo_va->base.vm) ||
                        (entry->is_mapped != map_type
                        && map_type != BO_VM_ALL))
                        continue;

                amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
                                &ctx->vm_pd[i]);
                i++;
        }

        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
                                     false, &ctx->duplicates);
        if (!ret) {
                ctx->reserved = true;
        } else {
                pr_err("Failed to reserve buffers in ttm.\n");
                kfree(ctx->vm_pd);
                ctx->vm_pd = NULL;
        }

        return ret;
}

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
                                 bool wait, bool intr)
{
        int ret = 0;

        if (wait)
                ret = amdgpu_sync_wait(ctx->sync, intr);

        if (ctx->reserved)
                ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
        kfree(ctx->vm_pd);

        ctx->sync = NULL;

        ctx->reserved = false;
        ctx->vm_pd = NULL;

        return ret;
}
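
/* Typical use of the reservation helpers above (sketch based on the
 * callers below, e.g. amdgpu_amdkfd_gpuvm_map_memory_to_gpu()):
 *
 *      struct bo_vm_reservation_context ctx;
 *
 *      ret = reserve_bo_and_vm(mem, vm, &ctx);
 *      if (ret)
 *              return ret;
 *      ... update mappings and page tables ...
 *      unreserve_bo_and_vms(&ctx, false, false);
 */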

static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
                                struct kfd_bo_va_list *entry,
                                struct amdgpu_sync *sync)
{
        struct amdgpu_bo_va *bo_va = entry->bo_va;
        struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo *pd = vm->root.base.bo;

        /* Remove eviction fence from PD (and thereby from PTs too as
         * they share the resv. object). Otherwise during PT update
         * job (see amdgpu_vm_bo_update_mapping), eviction fence would
         * get added to job->sync object and job execution would
         * trigger the eviction fence.
         */
        amdgpu_amdkfd_remove_eviction_fence(pd,
                                            vm->process_info->eviction_fence,
                                            NULL, NULL);
        amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

        amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

        /* Add the eviction fence back */
        amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);

        sync_vm_fence(adev, sync, bo_va->last_pt_update);

        return 0;
}

static int update_gpuvm_pte(struct amdgpu_device *adev,
                struct kfd_bo_va_list *entry,
                struct amdgpu_sync *sync)
{
        int ret;
        struct amdgpu_vm *vm;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo *bo;

        bo_va = entry->bo_va;
        vm = bo_va->base.vm;
        bo = bo_va->base.bo;

        /* Update the page tables */
        ret = amdgpu_vm_bo_update(adev, bo_va, false);
        if (ret) {
                pr_err("amdgpu_vm_bo_update failed\n");
                return ret;
        }

        return sync_vm_fence(adev, sync, bo_va->last_pt_update);
}

static int map_bo_to_gpuvm(struct amdgpu_device *adev,
                struct kfd_bo_va_list *entry, struct amdgpu_sync *sync)
{
        int ret;

        /* Set virtual address for the allocation */
        ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
                               amdgpu_bo_size(entry->bo_va->base.bo),
                               entry->pte_flags);
        if (ret) {
                pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
                                entry->va, ret);
                return ret;
        }

        ret = update_gpuvm_pte(adev, entry, sync);
        if (ret) {
                pr_err("update_gpuvm_pte() failed\n");
                goto update_gpuvm_pte_failed;
        }

        return 0;

update_gpuvm_pte_failed:
        unmap_bo_from_gpuvm(adev, entry, sync);
        return ret;
}

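/* Validate the PD and PT BOs of every VM belonging to one KFD process */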
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
        struct amdgpu_vm *peer_vm;
        int ret;

        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
                ret = vm_validate_pt_pd_bos(peer_vm);
                if (ret)
                        return ret;
        }

        return 0;
}

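/* Update the page directories of every VM belonging to one KFD process */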
static int process_update_pds(struct amdkfd_process_info *process_info,
                              struct amdgpu_sync *sync)
{
        struct amdgpu_vm *peer_vm;
        int ret;

        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
                ret = vm_update_pds(peer_vm, sync);
                if (ret)
                        return ret;
        }

        return 0;
}

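/* init_kfd_vm - Initialize the KFD part of a VM: create or reuse the
 * per-process info structure, validate the page directory and attach the
 * process eviction fence to it, then link the VM into the process list.
 */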
static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
                       struct dma_fence **ef)
{
        struct amdkfd_process_info *info = NULL;
        int ret;

        if (!*process_info) {
                info = kzalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;

                mutex_init(&info->lock);
                INIT_LIST_HEAD(&info->vm_list_head);
                INIT_LIST_HEAD(&info->kfd_bo_list);

                info->eviction_fence =
                        amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
                                                   current->mm);
                if (!info->eviction_fence) {
                        pr_err("Failed to create eviction fence\n");
                        ret = -ENOMEM;
                        goto create_evict_fence_fail;
                }

                *process_info = info;
                *ef = dma_fence_get(&info->eviction_fence->base);
        }

        vm->process_info = *process_info;

        /* Validate page directory and attach eviction fence */
        ret = amdgpu_bo_reserve(vm->root.base.bo, true);
        if (ret)
                goto reserve_pd_fail;
        ret = vm_validate_pt_pd_bos(vm);
        if (ret) {
                pr_err("validate_pt_pd_bos() failed\n");
                goto validate_pd_fail;
        }
        ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
        if (ret)
                goto wait_pd_fail;
        amdgpu_bo_fence(vm->root.base.bo,
                        &vm->process_info->eviction_fence->base, true);
        amdgpu_bo_unreserve(vm->root.base.bo);

        /* Update process info */
        mutex_lock(&vm->process_info->lock);
        list_add_tail(&vm->vm_list_node,
                        &(vm->process_info->vm_list_head));
        vm->process_info->n_vms++;
        mutex_unlock(&vm->process_info->lock);

        return 0;

wait_pd_fail:
validate_pd_fail:
        amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
        vm->process_info = NULL;
        if (info) {
                /* Two fence references: one in info and one in *ef */
                dma_fence_put(&info->eviction_fence->base);
                dma_fence_put(*ef);
                *ef = NULL;
                *process_info = NULL;
create_evict_fence_fail:
                mutex_destroy(&info->lock);
                kfree(info);
        }
        return ret;
}

int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
                                          void **process_info,
                                          struct dma_fence **ef)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *new_vm;
        int ret;

        new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
        if (!new_vm)
                return -ENOMEM;

        /* Initialize AMDGPU part of the VM */
        ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, 0);
        if (ret) {
                pr_err("Failed init vm ret %d\n", ret);
                goto amdgpu_vm_init_fail;
        }

        /* Initialize KFD part of the VM and process info */
        ret = init_kfd_vm(new_vm, process_info, ef);
        if (ret)
                goto init_kfd_vm_fail;

        *vm = (void *) new_vm;

        return 0;

init_kfd_vm_fail:
        amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
        kfree(new_vm);
        return ret;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
                                           struct file *filp,
                                           void **vm, void **process_info,
                                           struct dma_fence **ef)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct drm_file *drm_priv = filp->private_data;
        struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
        struct amdgpu_vm *avm = &drv_priv->vm;
        int ret;

        /* Already a compute VM? */
        if (avm->process_info)
                return -EINVAL;

        /* Convert VM into a compute VM */
        ret = amdgpu_vm_make_compute(adev, avm);
        if (ret)
                return ret;

        /* Initialize KFD part of the VM and process info */
        ret = init_kfd_vm(avm, process_info, ef);
        if (ret)
                return ret;

        *vm = (void *)avm;

        return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
{
        struct amdkfd_process_info *process_info = vm->process_info;
        struct amdgpu_bo *pd = vm->root.base.bo;

        if (!process_info)
                return;

        /* Release eviction fence from PD */
        amdgpu_bo_reserve(pd, false);
        amdgpu_bo_fence(pd, NULL, false);
        amdgpu_bo_unreserve(pd);

        /* Update process info */
        mutex_lock(&process_info->lock);
        process_info->n_vms--;
        list_del(&vm->vm_list_node);
        mutex_unlock(&process_info->lock);

        /* Release per-process resources when last compute VM is destroyed */
        if (!process_info->n_vms) {
                WARN_ON(!list_empty(&process_info->kfd_bo_list));

                dma_fence_put(&process_info->eviction_fence->base);
                mutex_destroy(&process_info->lock);
                kfree(process_info);
        }
}

void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

        if (WARN_ON(!kgd || !vm))
                return;

        pr_debug("Destroying process vm %p\n", vm);

        /* Release the VM context */
        amdgpu_vm_fini(adev, avm);
        kfree(vm);
}

uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

        return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
}
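
/* Example (illustrative): with AMDGPU_GPU_PAGE_SHIFT == 12, a page
 * directory at physical address 0x123000 is reported to KFD as page
 * frame number 0x123.
 */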

int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                struct kgd_dev *kgd, uint64_t va, uint64_t size,
                void *vm, struct kgd_mem **mem,
                uint64_t *offset, uint32_t flags)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
        struct amdgpu_bo *bo;
        int byte_align;
        u32 alloc_domain;
        u64 alloc_flags;
        uint32_t mapping_flags;
        int ret;

        /*
         * Check on which domain to allocate BO
         */
        if (flags & ALLOC_MEM_FLAGS_VRAM) {
                alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
                alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
                alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
                        AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
                        AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        } else if (flags & ALLOC_MEM_FLAGS_GTT) {
                alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_flags = 0;
        } else {
                return -EINVAL;
        }

        *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
        if (!*mem)
                return -ENOMEM;
        INIT_LIST_HEAD(&(*mem)->bo_va_list);
        mutex_init(&(*mem)->lock);
        (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

        /* Workaround for AQL queue wraparound bug. Map the same
         * memory twice. That means we only actually allocate half
         * the memory.
         */
        if ((*mem)->aql_queue)
                size = size >> 1;

        /* Workaround for TLB bug on older VI chips */
        byte_align = (adev->family == AMDGPU_FAMILY_VI &&
                        adev->asic_type != CHIP_FIJI &&
                        adev->asic_type != CHIP_POLARIS10 &&
                        adev->asic_type != CHIP_POLARIS11) ?
                        VI_BO_SIZE_ALIGN : 1;

        mapping_flags = AMDGPU_VM_PAGE_READABLE;
        if (flags & ALLOC_MEM_FLAGS_WRITABLE)
                mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
        if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
                mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
        if (flags & ALLOC_MEM_FLAGS_COHERENT)
                mapping_flags |= AMDGPU_VM_MTYPE_UC;
        else
                mapping_flags |= AMDGPU_VM_MTYPE_NC;
        (*mem)->mapping_flags = mapping_flags;

        amdgpu_sync_create(&(*mem)->sync);

        ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
        if (ret) {
                pr_debug("Insufficient system memory\n");
                goto err_reserve_system_mem;
        }

        pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
                        va, size, domain_string(alloc_domain));

        ret = amdgpu_bo_create(adev, size, byte_align, alloc_domain,
                               alloc_flags, ttm_bo_type_device, NULL, &bo);
        if (ret) {
                pr_debug("Failed to create BO on domain %s. ret %d\n",
                                domain_string(alloc_domain), ret);
                goto err_bo_create;
        }
        bo->kfd_bo = *mem;
        (*mem)->bo = bo;

        (*mem)->va = va;
        (*mem)->domain = alloc_domain;
        (*mem)->mapped_to_gpu_memory = 0;
        (*mem)->process_info = avm->process_info;
        add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info);

        if (offset)
                *offset = amdgpu_bo_mmap_offset(bo);

        return 0;

err_bo_create:
        unreserve_system_mem_limit(adev, size, alloc_domain);
err_reserve_system_mem:
        mutex_destroy(&(*mem)->lock);
        kfree(*mem);
        return ret;
}
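
/* Sketch of the overall allocation API flow from a KFD caller's point of
 * view (illustrative only; error handling omitted):
 *
 *      struct kgd_mem *mem;
 *      uint64_t offset;
 *
 *      amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, vm, &mem,
 *                                              &offset, flags);
 *      amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
 *      amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
 *      ...
 *      amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, vm);
 *      amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem);
 */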

int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem)
{
        struct amdkfd_process_info *process_info = mem->process_info;
        unsigned long bo_size = mem->bo->tbo.mem.size;
        struct kfd_bo_va_list *entry, *tmp;
        struct bo_vm_reservation_context ctx;
        struct ttm_validate_buffer *bo_list_entry;
        int ret;

        mutex_lock(&mem->lock);

        if (mem->mapped_to_gpu_memory > 0) {
                pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
                                mem->va, bo_size);
                mutex_unlock(&mem->lock);
                return -EBUSY;
        }

        mutex_unlock(&mem->lock);
        /* lock is not needed after this, since mem is unused and will
         * be freed anyway
         */

        /* Make sure restore workers don't access the BO any more */
        bo_list_entry = &mem->validate_list;
        mutex_lock(&process_info->lock);
        list_del(&bo_list_entry->head);
        mutex_unlock(&process_info->lock);

        ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
        if (unlikely(ret))
                return ret;

        /* The eviction fence should be removed by the last unmap.
         * TODO: Log an error condition if the bo still has the eviction fence
         * attached
         */
        amdgpu_amdkfd_remove_eviction_fence(mem->bo,
                                        process_info->eviction_fence,
                                        NULL, NULL);
        pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
                mem->va + bo_size * (1 + mem->aql_queue));

        /* Remove from VM internal data structures */
        list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
                remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
                                entry, bo_size);

        ret = unreserve_bo_and_vms(&ctx, false, false);

        /* Free the sync object */
        amdgpu_sync_free(&mem->sync);

        /* Free the BO */
        amdgpu_bo_unref(&mem->bo);
        mutex_destroy(&mem->lock);
        kfree(mem);

        return ret;
}

int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
        int ret;
        struct amdgpu_bo *bo;
        uint32_t domain;
        struct kfd_bo_va_list *entry;
        struct bo_vm_reservation_context ctx;
        struct kfd_bo_va_list *bo_va_entry = NULL;
        struct kfd_bo_va_list *bo_va_entry_aql = NULL;
        unsigned long bo_size;

        /* Make sure restore is not running concurrently. */
        mutex_lock(&mem->process_info->lock);

        mutex_lock(&mem->lock);

        bo = mem->bo;

        if (!bo) {
                pr_err("Invalid BO when mapping memory to GPU\n");
                ret = -EINVAL;
                goto out;
        }

        domain = mem->domain;
        bo_size = bo->tbo.mem.size;

        pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
                        mem->va,
                        mem->va + bo_size * (1 + mem->aql_queue),
                        vm, domain_string(domain));

        ret = reserve_bo_and_vm(mem, vm, &ctx);
        if (unlikely(ret))
                goto out;

        if (check_if_add_bo_to_vm(avm, mem)) {
                ret = add_bo_to_vm(adev, mem, avm, false,
                                &bo_va_entry);
                if (ret)
                        goto add_bo_to_vm_failed;
                if (mem->aql_queue) {
                        ret = add_bo_to_vm(adev, mem, avm,
                                        true, &bo_va_entry_aql);
                        if (ret)
                                goto add_bo_to_vm_failed_aql;
                }
        } else {
                ret = vm_validate_pt_pd_bos(avm);
                if (unlikely(ret))
                        goto add_bo_to_vm_failed;
        }

        if (mem->mapped_to_gpu_memory == 0) {
                /* Validate BO only once. The eviction fence gets added to BO
                 * the first time it is mapped. Validate will wait for all
                 * background evictions to complete.
                 */
                ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
                if (ret) {
                        pr_debug("Validate failed\n");
                        goto map_bo_to_gpuvm_failed;
                }
        }

        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
                        pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
                                        entry->va, entry->va + bo_size,
                                        entry);

                        ret = map_bo_to_gpuvm(adev, entry, ctx.sync);
                        if (ret) {
                                pr_err("Failed to map bo to gpuvm\n");
                                goto map_bo_to_gpuvm_failed;
                        }

                        ret = vm_update_pds(vm, ctx.sync);
                        if (ret) {
                                pr_err("Failed to update page directories\n");
                                goto map_bo_to_gpuvm_failed;
                        }

                        entry->is_mapped = true;
                        mem->mapped_to_gpu_memory++;
                        pr_debug("\t INC mapping count %d\n",
                                        mem->mapped_to_gpu_memory);
                }
        }

        if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
                amdgpu_bo_fence(bo,
                                &avm->process_info->eviction_fence->base,
                                true);
        ret = unreserve_bo_and_vms(&ctx, false, false);

        goto out;

map_bo_to_gpuvm_failed:
        if (bo_va_entry_aql)
                remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
        if (bo_va_entry)
                remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
        unreserve_bo_and_vms(&ctx, false, false);
out:
        mutex_unlock(&mem->process_info->lock);
        mutex_unlock(&mem->lock);
        return ret;
}

int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdkfd_process_info *process_info =
                ((struct amdgpu_vm *)vm)->process_info;
        unsigned long bo_size = mem->bo->tbo.mem.size;
        struct kfd_bo_va_list *entry;
        struct bo_vm_reservation_context ctx;
        int ret;

        mutex_lock(&mem->lock);

        ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
        if (unlikely(ret))
                goto out;
        /* If no VMs were reserved, it means the BO wasn't actually mapped */
        if (ctx.n_vms == 0) {
                ret = -EINVAL;
                goto unreserve_out;
        }

        ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
        if (unlikely(ret))
                goto unreserve_out;

        pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
                mem->va,
                mem->va + bo_size * (1 + mem->aql_queue),
                vm);

        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if (entry->bo_va->base.vm == vm && entry->is_mapped) {
                        pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
                                        entry->va,
                                        entry->va + bo_size,
                                        entry);

                        ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
                        if (ret == 0) {
                                entry->is_mapped = false;
                        } else {
                                pr_err("failed to unmap VA 0x%llx\n",
                                                mem->va);
                                goto unreserve_out;
                        }

                        mem->mapped_to_gpu_memory--;
                        pr_debug("\t DEC mapping count %d\n",
                                        mem->mapped_to_gpu_memory);
                }
        }

        /* If BO is unmapped from all VMs, unfence it. It can be evicted if
         * required.
         */
        if (mem->mapped_to_gpu_memory == 0 &&
            !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
                amdgpu_amdkfd_remove_eviction_fence(mem->bo,
                                                process_info->eviction_fence,
                                                NULL, NULL);

unreserve_out:
        unreserve_bo_and_vms(&ctx, false, false);
out:
        mutex_unlock(&mem->lock);
        return ret;
}

int amdgpu_amdkfd_gpuvm_sync_memory(
                struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
        struct amdgpu_sync sync;
        int ret;

        amdgpu_sync_create(&sync);

        mutex_lock(&mem->lock);
        amdgpu_sync_clone(&mem->sync, &sync);
        mutex_unlock(&mem->lock);

        ret = amdgpu_sync_wait(&sync, intr);
        amdgpu_sync_free(&sync);
        return ret;
}

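/* Pin a GTT BO and map it into kernel address space. The BO is taken off
 * the KFD BO list and its eviction fence removed, so the restore worker
 * will not touch it while it is mapped.
 */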
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
                struct kgd_mem *mem, void **kptr, uint64_t *size)
{
        int ret;
        struct amdgpu_bo *bo = mem->bo;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                pr_err("userptr can't be mapped to kernel\n");
                return -EINVAL;
        }

        /* delete kgd_mem from kfd_bo_list to avoid re-validating
         * this BO in BO's restoring after eviction.
         */
        mutex_lock(&mem->process_info->lock);

        ret = amdgpu_bo_reserve(bo, true);
        if (ret) {
                pr_err("Failed to reserve bo. ret %d\n", ret);
                goto bo_reserve_failed;
        }

        ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
        if (ret) {
                pr_err("Failed to pin bo. ret %d\n", ret);
                goto pin_failed;
        }

        ret = amdgpu_bo_kmap(bo, kptr);
        if (ret) {
                pr_err("Failed to map bo to kernel. ret %d\n", ret);
                goto kmap_failed;
        }

        amdgpu_amdkfd_remove_eviction_fence(
                bo, mem->process_info->eviction_fence, NULL, NULL);
        list_del_init(&mem->validate_list.head);

        if (size)
                *size = amdgpu_bo_size(bo);

        amdgpu_bo_unreserve(bo);

        mutex_unlock(&mem->process_info->lock);
        return 0;

kmap_failed:
        amdgpu_bo_unpin(bo);
pin_failed:
        amdgpu_bo_unreserve(bo);
bo_reserve_failed:
        mutex_unlock(&mem->process_info->lock);

        return ret;
}

/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, the restore thread calls this function. The function
 * should be called when the process is still valid. BO restore involves:
 *
 * 1.  Release old eviction fence and create new one
 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add new fence
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
        struct amdgpu_bo_list_entry *pd_bo_list;
        struct amdkfd_process_info *process_info = info;
        struct amdgpu_vm *peer_vm;
        struct kgd_mem *mem;
        struct bo_vm_reservation_context ctx;
        struct amdgpu_amdkfd_fence *new_fence;
        int ret = 0, i;
        struct list_head duplicate_save;
        struct amdgpu_sync sync_obj;

        INIT_LIST_HEAD(&duplicate_save);
        INIT_LIST_HEAD(&ctx.list);
        INIT_LIST_HEAD(&ctx.duplicates);

        pd_bo_list = kcalloc(process_info->n_vms,
                             sizeof(struct amdgpu_bo_list_entry),
                             GFP_KERNEL);
        if (!pd_bo_list)
                return -ENOMEM;

        i = 0;
        mutex_lock(&process_info->lock);
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                        vm_list_node)
                amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

        /* Reserve all BOs and page tables/directory. Add all BOs from
         * kfd_bo_list to ctx.list
         */
        list_for_each_entry(mem, &process_info->kfd_bo_list,
                            validate_list.head) {

                list_add_tail(&mem->resv_list.head, &ctx.list);
                mem->resv_list.bo = mem->validate_list.bo;
                mem->resv_list.shared = mem->validate_list.shared;
        }

        ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
                                     false, &duplicate_save);
        if (ret) {
                pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
                goto ttm_reserve_fail;
        }

        amdgpu_sync_create(&sync_obj);

        /* Validate PDs and PTs */
        ret = process_validate_vms(process_info);
        if (ret)
                goto validate_map_fail;

        /* Wait for PD/PTs validate to finish */
        /* FIXME: I think this isn't needed */
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
                struct amdgpu_bo *bo = peer_vm->root.base.bo;

                ttm_bo_wait(&bo->tbo, false, false);
        }

        /* Validate BOs and map them to GPUVM (update VM page tables). */
        list_for_each_entry(mem, &process_info->kfd_bo_list,
                            validate_list.head) {

                struct amdgpu_bo *bo = mem->bo;
                uint32_t domain = mem->domain;
                struct kfd_bo_va_list *bo_va_entry;

                ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
                if (ret) {
                        pr_debug("Memory eviction: Validate BOs failed. Try again\n");
                        goto validate_map_fail;
                }

                list_for_each_entry(bo_va_entry, &mem->bo_va_list,
                                    bo_list) {
                        ret = update_gpuvm_pte((struct amdgpu_device *)
                                              bo_va_entry->kgd_dev,
                                              bo_va_entry,
                                              &sync_obj);
                        if (ret) {
                                pr_debug("Memory eviction: update PTE failed. Try again\n");
                                goto validate_map_fail;
                        }
                }
        }

        /* Update page directories */
        ret = process_update_pds(process_info, &sync_obj);
        if (ret) {
                pr_debug("Memory eviction: update PDs failed. Try again\n");
                goto validate_map_fail;
        }

        amdgpu_sync_wait(&sync_obj, false);

        /* Release the old eviction fence and create a new one, because a
         * fence only goes from unsignaled to signaled and cannot be reused.
         * Use the context and mm from the old fence.
         */
        new_fence = amdgpu_amdkfd_fence_create(
                                process_info->eviction_fence->base.context,
                                process_info->eviction_fence->mm);
        if (!new_fence) {
                pr_err("Failed to create eviction fence\n");
                ret = -ENOMEM;
                goto validate_map_fail;
        }
        dma_fence_put(&process_info->eviction_fence->base);
        process_info->eviction_fence = new_fence;
        *ef = dma_fence_get(&new_fence->base);

        /* Wait for validate to finish and attach new eviction fence */
        list_for_each_entry(mem, &process_info->kfd_bo_list,
                validate_list.head)
                ttm_bo_wait(&mem->bo->tbo, false, false);
        list_for_each_entry(mem, &process_info->kfd_bo_list,
                validate_list.head)
                amdgpu_bo_fence(mem->bo,
                        &process_info->eviction_fence->base, true);

        /* Attach eviction fence to PD / PT BOs */
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
                struct amdgpu_bo *bo = peer_vm->root.base.bo;

                amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
        }

validate_map_fail:
        ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
        amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
        mutex_unlock(&process_info->lock);
        kfree(pd_bo_list);
        return ret;
}