Merge branch 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux into drm...
author Dave Airlie <airlied@redhat.com>
Mon, 9 Oct 2017 01:00:16 +0000 (11:00 +1000)
committer Dave Airlie <airlied@redhat.com>
Mon, 9 Oct 2017 01:00:16 +0000 (11:00 +1000)
More new stuff for 4.15. Highlights:
- Add clock query interface for Raven
- Add new FENCE_TO_HANDLE ioctl (see the usage sketch after this list)
- UVD video encode ring support on Polaris
- transparent huge page DMA support
- deadlock fixes
- compute pipe lru tweaks
- powerplay cleanups and regression fixes
- fix duplicate symbol issue with radeon and amdgpu
- misc bug fixes
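
The FENCE_TO_HANDLE ioctl named above lets userspace turn an amdgpu
command-submission fence into a shareable object. A minimal userspace
sketch follows; the union layout and the GET_SYNCOBJ_FD mode reflect my
reading of the 4.15-era amdgpu uapi and should be treated as assumptions,
to be verified against include/uapi/drm/amdgpu_drm.h, not as content of
this pull:

/*
 * Hedged sketch: export an amdgpu CS fence as a syncobj fd via the
 * new FENCE_TO_HANDLE ioctl. The drm_amdgpu_fence_to_handle layout
 * and AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD are assumptions based on
 * the 4.15 uapi headers.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <libdrm/amdgpu_drm.h>

static int fence_to_syncobj_fd(int drm_fd, const struct drm_amdgpu_fence *fence)
{
	union drm_amdgpu_fence_to_handle args;

	memset(&args, 0, sizeof(args));
	args.in.fence = *fence;		/* fence from a prior CS ioctl */
	args.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD;

	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &args))
		return -1;		/* errno holds the kernel error */

	return args.out.handle;		/* a pollable syncobj fd */
}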

* 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux: (72 commits)
  drm/radeon/dp: make radeon_dp_get_dp_link_config static
  drm/radeon: move ci_send_msg_to_smc to where it's used
  drm/amd/sched: fix deadlock caused by unsignaled fences of deleted jobs
  drm/amd/sched: NULL out the s_fence field after run_job
  drm/amd/sched: move adding finish callback to amd_sched_job_begin
  drm/amd/sched: fix an outdated comment
  drm/amd/sched: rename amd_sched_entity_pop_job
  drm/amdgpu: minor coding style fix
  drm/ttm: add transparent huge page support for DMA allocations v2
  drm/ttm: add support for different pool sizes
  drm/ttm: remove unused options from ttm_mem_global_alloc_page
  drm/amdgpu: add uvd enc irq
  drm/amdgpu: add uvd enc ib test
  drm/amdgpu: add uvd enc ring test
  drm/amdgpu: add uvd enc vm functions (v2)
  drm/amdgpu: add uvd enc into run queue
  drm/amdgpu: add uvd enc rings
  drm/amdgpu: add new uvd enc ring methods
  drm/amdgpu: add uvd enc command in header
  drm/amdgpu: add uvd enc registers in header
  ...

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/drm_syncobj.c

index bbcc670382035b9c9a1c9416fe8d48054c68b0cf,bca9eebb6947109545e223de5aed29dd0e39c34b..fee0a32ac56f65b32f4e15a966afbca1baced4b1
@@@ -2541,7 -2541,8 +2541,8 @@@ static uint32_t amdgpu_vm_get_block_siz
   * @adev: amdgpu_device pointer
   * @fragment_size_default: the default fragment size if it's set auto
   */
- void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
+ void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
+                                uint32_t fragment_size_default)
  {
        if (amdgpu_vm_fragment_size == -1)
                adev->vm_manager.fragment_size = fragment_size_default;
   * @adev: amdgpu_device pointer
   * @vm_size: the default vm size if it's set auto
   */
- void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
+ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
+                          uint32_t fragment_size_default)
  {
        /* adjust vm size firstly */
        if (amdgpu_vm_size == -1)
@@@ -2598,7 -2600,7 +2600,7 @@@ int amdgpu_vm_init(struct amdgpu_devic
        u64 flags;
        uint64_t init_pde_value = 0;
  
 -      vm->va = RB_ROOT;
 +      vm->va = RB_ROOT_CACHED;
        vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
                vm->reserved_vmid[i] = NULL;
        }
  
        INIT_KFIFO(vm->faults);
+       vm->fault_credit = 16;
  
        return 0;
  
@@@ -2751,11 -2754,10 +2754,11 @@@ void amdgpu_vm_fini(struct amdgpu_devic
  
        amd_sched_entity_fini(vm->entity.sched, &vm->entity);
  
 -      if (!RB_EMPTY_ROOT(&vm->va)) {
 +      if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
                dev_err(adev->dev, "still active bo inside vm\n");
        }
 -      rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
 +      rbtree_postorder_for_each_entry_safe(mapping, tmp,
 +                                           &vm->va.rb_root, rb) {
                list_del(&mapping->list);
                amdgpu_vm_it_remove(mapping, &vm->va);
                kfree(mapping);
                amdgpu_vm_free_reserved_vmid(adev, vm, i);
  }
  
+ /**
+  * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
+  *
+  * @adev: amdgpu_device pointer
+  * @pasid: PASID to identify the VM
+  *
+  * This function is expected to be called in interrupt context. Returns
+  * true if there was fault credit available, false otherwise.
+  */
+ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+                                 unsigned int pasid)
+ {
+       struct amdgpu_vm *vm;
+       spin_lock(&adev->vm_manager.pasid_lock);
+       vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+       spin_unlock(&adev->vm_manager.pasid_lock);
+       if (!vm)
+               /* VM not found, can't track fault credit */
+               return true;
+       /* No lock needed. Only accessed by the IRQ handler */
+       if (!vm->fault_credit)
+               /* Too many faults in this VM */
+               return false;
+       vm->fault_credit--;
+       return true;
+ }
  /**
   * amdgpu_vm_manager_init - init the VM manager
   *
index 0af090667dfcdfbcbef0f2c30ee8043ab2a54a0d,66efbc2e43af1f1924ae9d29113dce8523722cdd..d68f39b4e5e71770f9d5622cf1aada5d8053bae1
@@@ -126,7 -126,7 +126,7 @@@ struct amdgpu_vm_pt 
  
  struct amdgpu_vm {
        /* tree of virtual addresses mapped */
 -      struct rb_root          va;
 +      struct rb_root_cached   va;
  
        /* protecting invalidated */
        spinlock_t              status_lock;
        /* Flag to indicate ATS support from PTE for GFX9 */
        bool                    pte_support_ats;
  
-       /* Up to 128 pending page faults */
+       /* Up to 128 pending retry page faults */
        DECLARE_KFIFO(faults, u64, 128);
+       /* Limit non-retry fault storms */
+       unsigned int            fault_credit;
  };
  
  struct amdgpu_vm_id {
@@@ -244,6 -247,8 +247,8 @@@ void amdgpu_vm_manager_fini(struct amdg
  int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                   int vm_context, unsigned int pasid);
  void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+                                 unsigned int pasid);
  void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry);
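
The new fault_credit counter and amdgpu_vm_pasid_fault_credit() above
exist to throttle fault storms: each VM starts with 16 credits (set in
amdgpu_vm_init), and the interrupt path is expected to stop forwarding
faults for a VM once it has spent them. A minimal sketch of a caller
follows, assuming a hypothetical prescreen hook in the interrupt-handler
code; only amdgpu_vm_pasid_fault_credit() itself comes from this pull:

/*
 * Hedged sketch of an interrupt-handler caller. my_ih_prescreen_fault()
 * is hypothetical; amdgpu_vm_pasid_fault_credit() is the helper added
 * in this pull and is documented as safe to call in interrupt context.
 */
static bool my_ih_prescreen_fault(struct amdgpu_device *adev,
				  unsigned int pasid)
{
	/* Spend one fault credit for this PASID's VM, if any is left. */
	if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
		return false;	/* credit exhausted: drop this fault */

	return true;		/* still under the limit: handle the fault */
}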
index 26d60615b4d422b56899dea572cfd811bb2394a1,62adc7acafcc3d230f90da46bd1092e818dda8a9..80b6151da9aebaec8fcd9a7e03aa3652b3af6e53
@@@ -262,8 -262,14 +262,14 @@@ void drm_syncobj_free(struct kref *kref
  }
  EXPORT_SYMBOL(drm_syncobj_free);
  
- static int drm_syncobj_create(struct drm_file *file_private,
-                             u32 *handle, uint32_t flags)
+ /**
+  * drm_syncobj_create - create a new syncobj
+  * @out_syncobj: returned syncobj
+  * @flags: DRM_SYNCOBJ_* flags
+  * @fence: if non-NULL, the syncobj will represent this fence
+  */
+ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
+                      struct dma_fence *fence)
  {
        int ret;
        struct drm_syncobj *syncobj;
                }
        }
  
+       if (fence)
+               drm_syncobj_replace_fence(syncobj, fence);
+       *out_syncobj = syncobj;
+       return 0;
+ }
+ EXPORT_SYMBOL(drm_syncobj_create);
+ /**
+  * drm_syncobj_get_handle - get a handle from a syncobj
+  */
+ int drm_syncobj_get_handle(struct drm_file *file_private,
+                          struct drm_syncobj *syncobj, u32 *handle)
+ {
+       int ret;
+       /* take a reference to put in the idr */
+       drm_syncobj_get(syncobj);
        idr_preload(GFP_KERNEL);
        spin_lock(&file_private->syncobj_table_lock);
        ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
        *handle = ret;
        return 0;
  }
+ EXPORT_SYMBOL(drm_syncobj_get_handle);
+ static int drm_syncobj_create_as_handle(struct drm_file *file_private,
+                                       u32 *handle, uint32_t flags)
+ {
+       int ret;
+       struct drm_syncobj *syncobj;
+       ret = drm_syncobj_create(&syncobj, flags, NULL);
+       if (ret)
+               return ret;
+       ret = drm_syncobj_get_handle(file_private, syncobj, handle);
+       drm_syncobj_put(syncobj);
+       return ret;
+ }
  
  static int drm_syncobj_destroy(struct drm_file *file_private,
                               u32 handle)
@@@ -345,33 -386,38 +386,38 @@@ static int drm_syncobj_alloc_file(struc
        return 0;
  }
  
- static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
-                                   u32 handle, int *p_fd)
+ int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
  {
-       struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
        int ret;
        int fd;
  
-       if (!syncobj)
-               return -EINVAL;
        fd = get_unused_fd_flags(O_CLOEXEC);
-       if (fd < 0) {
-               drm_syncobj_put(syncobj);
+       if (fd < 0)
                return fd;
-       }
  
        if (!syncobj->file) {
                ret = drm_syncobj_alloc_file(syncobj);
-               if (ret)
-                       goto out_put_fd;
+               if (ret) {
+                       put_unused_fd(fd);
+                       return ret;
+               }
        }
        fd_install(fd, syncobj->file);
-       drm_syncobj_put(syncobj);
        *p_fd = fd;
        return 0;
- out_put_fd:
-       put_unused_fd(fd);
+ }
+ EXPORT_SYMBOL(drm_syncobj_get_fd);
+ static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
+                                   u32 handle, int *p_fd)
+ {
+       struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
+       int ret;
+       if (!syncobj)
+               return -EINVAL;
+       ret = drm_syncobj_get_fd(syncobj, p_fd);
        drm_syncobj_put(syncobj);
        return ret;
  }
@@@ -417,8 -463,8 +463,8 @@@ static int drm_syncobj_fd_to_handle(str
        return 0;
  }
  
 -int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
 -                                     int fd, int handle)
 +static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
 +                                            int fd, int handle)
  {
        struct dma_fence *fence = sync_file_get_fence(fd);
        struct drm_syncobj *syncobj;
        return 0;
  }
  
 -int drm_syncobj_export_sync_file(struct drm_file *file_private,
 -                               int handle, int *p_fd)
 +static int drm_syncobj_export_sync_file(struct drm_file *file_private,
 +                                      int handle, int *p_fd)
  {
        int ret;
        struct dma_fence *fence;
@@@ -522,8 -568,8 +568,8 @@@ drm_syncobj_create_ioctl(struct drm_dev
        if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
                return -EINVAL;
  
-       return drm_syncobj_create(file_private,
-                                 &args->handle, args->flags);
+       return drm_syncobj_create_as_handle(file_private,
+                                           &args->handle, args->flags);
  }
  
  int
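
Taken together, the newly exported helpers split syncobj creation from
handle publication. A short driver-side sketch follows, assuming a
hypothetical my_driver_create_syncobj_handle(); it mirrors
drm_syncobj_create_as_handle() from the diff above and uses only the
signatures shown there:

/*
 * Hedged sketch: create a syncobj carrying @fence and publish it as a
 * userspace handle. my_driver_create_syncobj_handle() is hypothetical;
 * the drm_syncobj_* calls match the signatures exported in this diff.
 */
static int my_driver_create_syncobj_handle(struct drm_file *file_private,
					   struct dma_fence *fence,
					   u32 *handle)
{
	struct drm_syncobj *syncobj;
	int ret;

	/* The new third parameter seeds the syncobj with a fence. */
	ret = drm_syncobj_create(&syncobj, 0, fence);
	if (ret)
		return ret;

	/* Publish to userspace; this takes its own idr reference. */
	ret = drm_syncobj_get_handle(file_private, syncobj, handle);

	/* Drop the creation reference whether or not get_handle worked. */
	drm_syncobj_put(syncobj);
	return ret;
}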