Merge tag 'gvt-next-2018-09-04' of https://github.com/intel/gvt-linux into drm-intel...
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b0e566956b8d5ce1609c0f19f31ffa45e610adee..1a88c1972416ddc99fafba74b5d86513a154b703 100644
@@ -132,35 +132,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
        unsigned long context_gpa, context_page_num;
        int i;
 
-       gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
-                       workload->ctx_desc.lrca);
-
-       context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
-
-       context_page_num = context_page_num >> PAGE_SHIFT;
-
-       if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
-               context_page_num = 19;
-
-       i = 2;
-
-       while (i < context_page_num) {
-               context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-                               (u32)((workload->ctx_desc.lrca + i) <<
-                               I915_GTT_PAGE_SHIFT));
-               if (context_gpa == INTEL_GVT_INVALID_ADDR) {
-                       gvt_vgpu_err("Invalid guest context descriptor\n");
-                       return -EFAULT;
-               }
-
-               page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
-               dst = kmap(page);
-               intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
-                               I915_GTT_PAGE_SIZE);
-               kunmap(page);
-               i++;
-       }
-
        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);
 
@@ -195,6 +166,37 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
        sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
        kunmap(page);
+
+       if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
+               return 0;
+
+       gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+                       workload->ctx_desc.lrca);
+
+       context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+
+       context_page_num = context_page_num >> PAGE_SHIFT;
+
+       if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+               context_page_num = 19;
+
+       i = 2;
+       while (i < context_page_num) {
+               context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+                               (u32)((workload->ctx_desc.lrca + i) <<
+                               I915_GTT_PAGE_SHIFT));
+               if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+                       gvt_vgpu_err("Invalid guest context descriptor\n");
+                       return -EFAULT;
+               }
+
+               page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+               dst = kmap(page);
+               intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+                               I915_GTT_PAGE_SIZE);
+               kunmap(page);
+               i++;
+       }
        return 0;
 }
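Note on the reordering above: the guest context pages are now copied only after the
ring context has been read back, and the copy is skipped entirely for a
restore-inhibited context, since the hardware will not load register state from the
context image in that case and the extra page reads would be wasted work. A minimal
sketch of the predicate, assuming IS_RESTORE_INHIBIT follows the usual i915
masked-register pattern (not quoted from this patch):

/*
 * Sketch (assumption): CTX_CONTEXT_CONTROL is a masked register, so the
 * inhibit only takes effect when both the enable mask bit and the value
 * bit are set in the saved ctx_ctrl value.
 */
#define IS_RESTORE_INHIBIT(a)	\
	(_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
	((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))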
 
@@ -1137,6 +1139,7 @@ out_shadow_ctx:
 /**
  * intel_vgpu_select_submission_ops - select virtual submission interface
  * @vgpu: a vGPU
+ * @engine_mask: either ALL_ENGINES or target engine mask
  * @interface: expected vGPU virtual submission interface
  *
  * This function is called when the guest configures the submission interface.
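A hedged usage sketch for the newly documented @engine_mask parameter; the call
site below is illustrative, not taken from this patch. Per the doc text,
ALL_ENGINES applies the change to every engine, while a narrower mask limits it
to the engines it covers:

/*
 * Illustrative only (assumption): a vGPU reset path dropping the current
 * submission interface on all engines; interface 0 is assumed to mean
 * "no interface selected".
 */
static void example_drop_submission(struct intel_vgpu *vgpu)
{
	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
}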
@@ -1189,7 +1192,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 
 /**
  * intel_vgpu_destroy_workload - destroy a vGPU workload
- * @vgpu: a vGPU
+ * @workload: workload to destroy
  *
  * This function is called when destroying a vGPU workload.
  *
@@ -1281,6 +1284,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 /**
  * intel_vgpu_create_workload - create a vGPU workload
  * @vgpu: a vGPU
+ * @ring_id: ring index
  * @desc: a guest context descriptor
  *
  * This function is called when creating a vGPU workload.
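For reference, the prototypes these kernel-doc updates describe, reconstructed as
a sketch from the hunk headers and doc text above; treat the exact parameter types
as assumptions rather than quotes from gvt/scheduler.h:

/* Assumed prototypes matching the kernel-doc above. */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     unsigned long engine_mask,
				     unsigned int interface);

void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);

struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc);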