/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        struct amdgpu_bo *bo;

        bo = container_of(tbo, struct amdgpu_bo, tbo);

        amdgpu_bo_kunmap(bo);

        drm_gem_object_release(&bo->gem_base);
        amdgpu_bo_unref(&bo->parent);
        if (!list_empty(&bo->shadow_list)) {
                mutex_lock(&adev->shadow_list_lock);
                list_del_init(&bo->shadow_list);
                mutex_unlock(&adev->shadow_list_lock);
        }
        kfree(bo->metadata);
        kfree(bo);
}

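/**
 * amdgpu_ttm_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns true if the object belongs to &amdgpu_bo, false if not.
 */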
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &amdgpu_ttm_bo_destroy)
                return true;
        return false;
}

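/**
 * amdgpu_ttm_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain (AMDGPU_GEM_DOMAIN_*)
 *
 * Sets the buffer's TTM placement according to the requested domain and the
 * buffer's creation flags.
 */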
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
        struct ttm_placement *placement = &abo->placement;
        struct ttm_place *places = abo->placements;
        u64 flags = abo->flags;
        u32 c = 0;

        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;

                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;

                if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        places[c].lpfn = visible_pfn;
                else
                        places[c].flags |= TTM_PL_FLAG_TOPDOWN;

                if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
                        places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                places[c].fpfn = 0;
                if (flags & AMDGPU_GEM_CREATE_SHADOW)
                        places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT;
                else
                        places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_TT;
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                        places[c].flags |= TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED;
                else
                        places[c].flags |= TTM_PL_FLAG_CACHED;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_CPU) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_SYSTEM;
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                        places[c].flags |= TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED;
                else
                        places[c].flags |= TTM_PL_FLAG_CACHED;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GDS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GWS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_OA) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
                c++;
        }

        if (!c) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
                c++;
        }

        placement->num_placement = c;
        placement->placement = places;

        placement->num_busy_placement = c;
        placement->busy_placement = places;
}

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                              unsigned long size, int align,
                              u32 domain, struct amdgpu_bo **bo_ptr,
                              u64 *gpu_addr, void **cpu_addr)
{
        bool free = false;
        int r;

        if (!*bo_ptr) {
                r = amdgpu_bo_create(adev, size, align, true, domain,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                                     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                                     NULL, NULL, 0, bo_ptr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
                                r);
                        return r;
                }
                free = true;
        }

        r = amdgpu_bo_reserve(*bo_ptr, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
                goto error_free;
        }

        r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
                goto error_unreserve;
        }

        if (cpu_addr) {
                r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
                if (r) {
                        dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
                        goto error_unreserve;
                }
        }

        return 0;

error_unreserve:
        amdgpu_bo_unreserve(*bo_ptr);

error_free:
        if (free)
                amdgpu_bo_unref(bo_ptr);

        return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr)
{
        int r;

        r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
                                      gpu_addr, cpu_addr);

        if (r)
                return r;

        amdgpu_bo_unreserve(*bo_ptr);

        return 0;
}

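/*
 * Example usage (illustrative sketch only, not code from this file; it
 * assumes the caller already has a valid struct amdgpu_device *adev):
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	(use cpu_ptr and gpu_addr here)
 *
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */
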
/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: GPU address to be cleared, may be NULL
 * @cpu_addr: CPU address mapping to be torn down, may be NULL
 *
 * Unmaps, unpins and frees a BO that was allocated for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                           void **cpu_addr)
{
        if (*bo == NULL)
                return;

        if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
                if (cpu_addr)
                        amdgpu_bo_kunmap(*bo);

                amdgpu_bo_unpin(*bo);
                amdgpu_bo_unreserve(*bo);
        }
        amdgpu_bo_unref(bo);

        if (gpu_addr)
                *gpu_addr = 0;

        if (cpu_addr)
                *cpu_addr = NULL;
}

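/**
 * amdgpu_bo_do_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @size: size of the buffer in bytes
 * @byte_align: alignment in bytes
 * @kernel: true if the buffer is for kernel internal use
 * @domain: allowed placement domains (AMDGPU_GEM_DOMAIN_*)
 * @flags: creation flags (AMDGPU_GEM_CREATE_*)
 * @sg: optional scatter/gather table for imported buffers
 * @resv: optional reservation object to use instead of a private one
 * @init_value: fill value used when AMDGPU_GEM_CREATE_VRAM_CLEARED is set
 * @bo_ptr: resulting BO
 *
 * Allocates the TTM object and backing storage for a buffer object; called
 * by amdgpu_bo_create() and amdgpu_bo_create_shadow().
 *
 * Returns 0 on success, negative error code otherwise.
 */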
static int amdgpu_bo_do_create(struct amdgpu_device *adev,
                               unsigned long size, int byte_align,
                               bool kernel, u32 domain, u64 flags,
                               struct sg_table *sg,
                               struct reservation_object *resv,
                               uint64_t init_value,
                               struct amdgpu_bo **bo_ptr)
{
        struct amdgpu_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align;
        u64 initial_bytes_moved, bytes_moved;
        size_t acc_size;
        int r;

        page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size = ALIGN(size, PAGE_SIZE);

        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        INIT_LIST_HEAD(&bo->shadow_list);
        INIT_LIST_HEAD(&bo->va);
        bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
                                         AMDGPU_GEM_DOMAIN_GTT |
                                         AMDGPU_GEM_DOMAIN_CPU |
                                         AMDGPU_GEM_DOMAIN_GDS |
                                         AMDGPU_GEM_DOMAIN_GWS |
                                         AMDGPU_GEM_DOMAIN_OA);
        bo->allowed_domains = bo->preferred_domains;
        if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

        bo->flags = flags;

#ifdef CONFIG_X86_32
        /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
         * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
         */
        bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
        /* Don't try to enable write-combining when it can't work, or things
         * may be slow
         * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
         */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining
#endif

        if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
        /* For architectures that don't support WC memory,
         * mask out the WC flag from the BO
         */
        if (!drm_arch_can_wc_memory())
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

        bo->tbo.bdev = &adev->mman.bdev;
        amdgpu_ttm_placement_from_domain(bo, domain);

        initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
        /* Kernel allocations are uninterruptible */
        r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
                                 &bo->placement, page_align, !kernel, NULL,
                                 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
        bytes_moved = atomic64_read(&adev->num_bytes_moved) -
                      initial_bytes_moved;
        if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
            bo->tbo.mem.mem_type == TTM_PL_VRAM &&
            bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
                amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
        else
                amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);

        if (unlikely(r != 0))
                return r;

        if (kernel)
                bo->tbo.priority = 1;

        if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
            bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
                struct dma_fence *fence;

                r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
                if (unlikely(r))
                        goto fail_unreserve;

                amdgpu_bo_fence(bo, fence, false);
                dma_fence_put(bo->tbo.moving);
                bo->tbo.moving = dma_fence_get(fence);
                dma_fence_put(fence);
        }
        if (!resv)
                amdgpu_bo_unreserve(bo);
        *bo_ptr = bo;

        trace_amdgpu_bo_create(bo);

        /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
        if (type == ttm_bo_type_device)
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        return 0;

fail_unreserve:
        if (!resv)
                ww_mutex_unlock(&bo->tbo.resv->lock);
        amdgpu_bo_unref(&bo);
        return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
                                   unsigned long size, int byte_align,
                                   struct amdgpu_bo *bo)
{
        int r;

        if (bo->shadow)
                return 0;

        r = amdgpu_bo_do_create(adev, size, byte_align, true,
                                AMDGPU_GEM_DOMAIN_GTT,
                                AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                                AMDGPU_GEM_CREATE_SHADOW,
                                NULL, bo->tbo.resv, 0,
                                &bo->shadow);
        if (!r) {
                bo->shadow->parent = amdgpu_bo_ref(bo);
                mutex_lock(&adev->shadow_list_lock);
                list_add_tail(&bo->shadow_list, &adev->shadow_list);
                mutex_unlock(&adev->shadow_list_lock);
        }

        return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @size: size of the buffer in bytes
 * @byte_align: alignment in bytes
 * @kernel: true if the buffer is for kernel internal use
 * @domain: allowed placement domains (AMDGPU_GEM_DOMAIN_*)
 * @flags: creation flags (AMDGPU_GEM_CREATE_*)
 * @sg: optional scatter/gather table for imported buffers
 * @resv: optional reservation object to use instead of a private one
 * @init_value: fill value; only takes effect when @flags contains
 *              AMDGPU_GEM_CREATE_VRAM_CLEARED
 * @bo_ptr: resulting BO
 *
 * Creates the buffer object and, when AMDGPU_GEM_CREATE_SHADOW is requested
 * and a backup is needed, a GTT shadow BO attached to it.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
                     unsigned long size, int byte_align,
                     bool kernel, u32 domain, u64 flags,
                     struct sg_table *sg,
                     struct reservation_object *resv,
                     uint64_t init_value,
                     struct amdgpu_bo **bo_ptr)
{
        uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
        int r;

        r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
                                parent_flags, sg, resv, init_value, bo_ptr);
        if (r)
                return r;

        if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
                if (!resv)
                        WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
                                                        NULL));

                r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));

                if (!resv)
                        reservation_object_unlock((*bo_ptr)->tbo.resv);

                if (r)
                        amdgpu_bo_unref(bo_ptr);
        }

        return r;
}

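/**
 * amdgpu_bo_backup_to_shadow - copy a BO's content to its shadow BO
 * @adev: amdgpu device object
 * @ring: ring used for the copy
 * @bo: BO to back up (must have a shadow BO)
 * @resv: reservation object to synchronize the copy with
 * @fence: returned fence of the copy operation
 * @direct: whether to submit the copy directly on the ring
 *
 * Copies the buffer content into its GTT shadow, e.g. so that it can be
 * restored after a GPU reset.
 *
 * Returns 0 on success, negative error code otherwise.
 */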
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
                               struct amdgpu_ring *ring,
                               struct amdgpu_bo *bo,
                               struct reservation_object *resv,
                               struct dma_fence **fence,
                               bool direct)

{
        struct amdgpu_bo *shadow = bo->shadow;
        uint64_t bo_addr, shadow_addr;
        int r;

        if (!shadow)
                return -EINVAL;

        bo_addr = amdgpu_bo_gpu_offset(bo);
        shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                goto err;

        r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
                               amdgpu_bo_size(bo), resv, fence,
                               direct, false);
        if (!r)
                amdgpu_bo_fence(bo, *fence, true);

err:
        return r;
}

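/**
 * amdgpu_bo_validate - validate an un-pinned BO
 * @bo: pointer to the buffer object
 *
 * Re-validates the buffer into its preferred domains, falling back to the
 * allowed domains when there is not enough memory. Pinned BOs are left
 * untouched.
 *
 * Returns 0 for success or a negative error code on failure.
 */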
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
        uint32_t domain;
        int r;

        if (bo->pin_count)
                return 0;

        domain = bo->preferred_domains;

retry:
        amdgpu_ttm_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
        }

        return r;
}

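/**
 * amdgpu_bo_restore_from_shadow - restore a BO's content from its shadow BO
 * @adev: amdgpu device object
 * @ring: ring used for the copy
 * @bo: BO to restore (must have a shadow BO)
 * @resv: reservation object to synchronize the copy with
 * @fence: returned fence of the copy operation
 * @direct: whether to submit the copy directly on the ring
 *
 * Copies the shadow content back into the buffer; the reverse of
 * amdgpu_bo_backup_to_shadow().
 *
 * Returns 0 on success, negative error code otherwise.
 */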
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring,
                                  struct amdgpu_bo *bo,
                                  struct reservation_object *resv,
                                  struct dma_fence **fence,
                                  bool direct)

{
        struct amdgpu_bo *shadow = bo->shadow;
        uint64_t bo_addr, shadow_addr;
        int r;

        if (!shadow)
                return -EINVAL;

        bo_addr = amdgpu_bo_gpu_offset(bo);
        shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                goto err;

        r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
                               amdgpu_bo_size(bo), resv, fence,
                               direct, false);
        if (!r)
                amdgpu_bo_fence(bo, *fence, true);

err:
        return r;
}

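/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object into kernel address space
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: returned kernel virtual address, may be NULL
 *
 * Waits for the buffer's exclusive fence and calls ttm_bo_kmap() to set up
 * the kernel virtual mapping; an already existing mapping is reused.
 *
 * Returns 0 for success or a negative error code on failure.
 */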
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
        void *kptr;
        long r;

        if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                return -EPERM;

        kptr = amdgpu_bo_kptr(bo);
        if (kptr) {
                if (ptr)
                        *ptr = kptr;
                return 0;
        }

        r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
                                                MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;

        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r)
                return r;

        if (ptr)
                *ptr = amdgpu_bo_kptr(bo);

        return 0;
}

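/**
 * amdgpu_bo_kptr - return the kernel virtual address of a buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Returns the virtual address of a previously created kernel mapping, or
 * NULL if the buffer is not mapped.
 */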
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
        bool is_iomem;

        return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

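/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Tears down the kernel virtual mapping created by amdgpu_bo_kmap(), if any.
 */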
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
        if (bo->kmap.bo)
                ttm_bo_kunmap(&bo->kmap);
}

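/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Returns a refcounted pointer to the BO, or NULL if @bo is NULL.
 */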
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_reference(&bo->tbo);
        return bo;
}

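/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Drops a reference to the BO and clears the pointer.
 */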
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
        struct ttm_buffer_object *tbo;

        if ((*bo) == NULL)
                return;

        tbo = &((*bo)->tbo);
        ttm_bo_unref(&tbo);
        if (tbo == NULL)
                *bo = NULL;
}

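/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo within an offset range
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of the requested address range
 * @max_offset: the end of the requested address range, 0 for any location
 * @gpu_addr: optional returned GPU address of the pinned BO
 *
 * Pins the buffer object into the given domain, restricted to the
 * [@min_offset, @max_offset] range, and protects it from eviction. Nested
 * pinning is refcounted via bo->pin_count.
 *
 * Returns 0 for success or a negative error code on failure.
 */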
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset,
                             u64 *gpu_addr)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int r, i;
        unsigned fpfn, lpfn;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
                return -EPERM;

        if (WARN_ON_ONCE(min_offset > max_offset))
                return -EINVAL;

        /* A shared bo cannot be migrated to VRAM */
        if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
                return -EINVAL;

        if (bo->pin_count) {
                uint32_t mem_type = bo->tbo.mem.mem_type;

                if (domain != amdgpu_mem_type_to_domain(mem_type))
                        return -EINVAL;

                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = amdgpu_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
                        WARN_ON_ONCE(max_offset <
                                     (amdgpu_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }

        bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        amdgpu_ttm_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                /* force pinning into visible video RAM */
                if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
                    (!max_offset || max_offset >
                     adev->mc.visible_vram_size)) {
                        if (WARN_ON_ONCE(min_offset >
                                         adev->mc.visible_vram_size))
                                return -EINVAL;
                        fpfn = min_offset >> PAGE_SHIFT;
                        lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
                } else {
                        fpfn = min_offset >> PAGE_SHIFT;
                        lpfn = max_offset >> PAGE_SHIFT;
                }
                if (fpfn > bo->placements[i].fpfn)
                        bo->placements[i].fpfn = fpfn;
                if (!bo->placements[i].lpfn ||
                    (lpfn && lpfn < bo->placements[i].lpfn))
                        bo->placements[i].lpfn = lpfn;
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p pin failed\n", bo);
                goto error;
        }

        bo->pin_count = 1;
        if (gpu_addr != NULL) {
                r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
                if (unlikely(r)) {
                        dev_err(adev->dev, "%p bind failed\n", bo);
                        goto error;
                }
                *gpu_addr = amdgpu_bo_gpu_offset(bo);
        }
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                adev->vram_pin_size += amdgpu_bo_size(bo);
                if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                        adev->invisible_pin_size += amdgpu_bo_size(bo);
        } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                adev->gart_pin_size += amdgpu_bo_size(bo);
        }

error:
        return r;
}

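/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @gpu_addr: optional returned GPU address of the pinned BO
 *
 * A simple wrapper around amdgpu_bo_pin_restricted() without any offset
 * restriction.
 *
 * Returns 0 for success or a negative error code on failure.
 */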
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
        return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

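/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases pin_count; once it reaches zero, the NO_EVICT placement flag is
 * cleared, the buffer is re-validated and the pinned size accounting is
 * updated.
 *
 * Returns 0 for success or a negative error code on failure.
 */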
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int r, i;

        if (!bo->pin_count) {
                dev_warn(adev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++) {
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p validate failed for unpin\n", bo);
                goto error;
        }

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
                adev->vram_pin_size -= amdgpu_bo_size(bo);
                if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                        adev->invisible_pin_size -= amdgpu_bo_size(bo);
        } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
                adev->gart_pin_size -= amdgpu_bo_size(bo);
        }

error:
        return r;
}

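/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the LRU list of the memory type, mainly used
 * before suspend or hibernation.
 *
 * Returns 0 for success or a negative error code on failure.
 */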
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (adev->flags & AMD_IS_APU)) {
                /* Useless to evict on IGP chips */
                return 0;
        }
        return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
        "UNKNOWN",
        "GDDR1",
        "DDR2",
        "GDDR3",
        "GDDR4",
        "GDDR5",
        "HBM",
        "DDR3"
};

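/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Reserves the VRAM aperture as write-combined and calls amdgpu_ttm_init()
 * to set up the memory manager.
 *
 * Returns 0 for success or a negative error code on failure.
 */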
int amdgpu_bo_init(struct amdgpu_device *adev)
{
        /* reserve PAT memory space to WC for VRAM */
        arch_io_reserve_memtype_wc(adev->mc.aper_base,
                                   adev->mc.aper_size);

        /* Add an MTRR for the VRAM */
        adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
                                              adev->mc.aper_size);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                adev->mc.mc_vram_size >> 20,
                (unsigned long long)adev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %s\n",
                 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
        return amdgpu_ttm_init(adev);
}

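/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down the memory manager and release the
 * write-combined VRAM aperture mapping.
 */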
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
        amdgpu_ttm_fini(adev);
        arch_phys_wc_del(adev->mc.vram_mtrr);
        arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}

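/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object backing the fbdev
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_fbdev_mmap() to map the fbdev memory backed by this BO.
 *
 * Returns 0 for success or a negative error code on failure.
 */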
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
                             struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}

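/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets the buffer object's tiling flags, rejecting invalid TILE_SPLIT values
 * on pre-AI families.
 *
 * Returns 0 for success or a negative error code on failure.
 */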
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

        if (adev->family <= AMDGPU_FAMILY_CZ &&
            AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
                return -EINVAL;

        bo->tiling_flags = tiling_flags;
        return 0;
}

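/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags, may be NULL
 *
 * Returns the buffer object's current tiling flags; the BO must be reserved
 * by the caller.
 */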
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
}

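/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets the buffer object's metadata, its size and flags; a size of zero
 * frees any previously attached metadata.
 *
 * Returns 0 for success or a negative error code on failure.
 */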
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
                            uint32_t metadata_size, uint64_t flags)
{
        void *buffer;

        if (!metadata_size) {
                if (bo->metadata_size) {
                        kfree(bo->metadata);
                        bo->metadata = NULL;
                        bo->metadata_size = 0;
                }
                return 0;
        }

        if (metadata == NULL)
                return -EINVAL;

        buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
        if (buffer == NULL)
                return -ENOMEM;

        kfree(bo->metadata);
        bo->metadata_flags = flags;
        bo->metadata = buffer;
        bo->metadata_size = metadata_size;

        return 0;
}

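/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata, may be NULL
 * @buffer_size: size of the buffer, must not be less than the metadata size
 * @metadata_size: returned size of the metadata, may be NULL
 * @flags: returned flags of the metadata, may be NULL
 *
 * Gets the buffer object's metadata, its size and flags.
 *
 * Returns 0 for success or a negative error code on failure.
 */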
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags)
{
        if (!buffer && !metadata_size)
                return -EINVAL;

        if (buffer) {
                if (buffer_size < bo->metadata_size)
                        return -EINVAL;

                if (bo->metadata_size)
                        memcpy(buffer, bo->metadata, bo->metadata_size);
        }

        if (metadata_size)
                *metadata_size = bo->metadata_size;
        if (flags)
                *flags = bo->metadata_flags;

        return 0;
}

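/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to the buffer object
 * @evict: whether this move is evicting the buffer from graphics memory
 * @new_mem: new information of the buffer object
 *
 * TTM driver callback invoked before TTM moves a buffer; invalidates the VM
 * mappings of the corresponding &amdgpu_bo and updates eviction statistics.
 */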
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_mem_reg *new_mem)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo;
        struct ttm_mem_reg *old_mem = &bo->mem;

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return;

        abo = container_of(bo, struct amdgpu_bo, tbo);
        amdgpu_vm_bo_invalidate(adev, abo, evict);

        amdgpu_bo_kunmap(abo);

        /* remember the eviction */
        if (evict)
                atomic64_inc(&adev->num_evictions);

        /* update statistics */
        if (!new_mem)
                return;

        /* move_notify is called before move happens */
        trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

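/**
 * amdgpu_bo_fault_reserve_notify - notification about a buffer fault
 * @bo: pointer to the buffer object
 *
 * TTM driver callback for dealing with VM faults: notifies the driver that
 * the buffer is about to be accessed by the CPU and, if it currently lives
 * in CPU-invisible VRAM, moves it so it becomes CPU accessible.
 *
 * Returns 0 for success or a negative error code on failure.
 */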
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo;
        unsigned long offset, size;
        int r;

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return 0;

        abo = container_of(bo, struct amdgpu_bo, tbo);

        /* Remember that this BO was accessed by the CPU */
        abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= adev->mc.visible_vram_size)
                return 0;

        /* Can't move a pinned BO to visible VRAM */
        if (abo->pin_count > 0)
                return -EINVAL;

        /* hurrah the memory is not visible ! */
        atomic64_inc(&adev->num_vram_cpu_page_faults);
        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
                                         AMDGPU_GEM_DOMAIN_GTT);

        /* Avoid costly evictions; only set GTT as a busy placement */
        abo->placement.num_busy_placement = 1;
        abo->placement.busy_placement = &abo->placements[1];

        r = ttm_bo_validate(bo, &abo->placement, false, false);
        if (unlikely(r != 0))
                return r;

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            (offset + size) > adev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared)
{
        struct reservation_object *resv = bo->tbo.resv;

        if (shared)
                reservation_object_add_shared_fence(resv, fence);
        else
                reservation_object_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add check for this for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
                     !amdgpu_ttm_is_bound(bo->tbo.ttm));
        WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
                     !bo->pin_count);
        WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
                     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

        return bo->tbo.offset;
}