Merge tag 'drm-intel-next-fixes-2018-03-27' of git://anongit.freedesktop.org/drm/drm-intel
author Dave Airlie <airlied@redhat.com>
Wed, 28 Mar 2018 04:47:26 +0000 (14:47 +1000)
committer Dave Airlie <airlied@redhat.com>
Wed, 28 Mar 2018 04:47:26 +0000 (14:47 +1000)
- Display fixes for booting with the lid closed while connected to an MST hub,
  and for the display freezing after hibernation (fd.o bugs 105470 & 105196)
- Fix for a very rare interrupt handling race resulting in GPU hang

* tag 'drm-intel-next-fixes-2018-03-27' of git://anongit.freedesktop.org/drm/drm-intel:
  drm/i915: Fix hibernation with ACPI S0 target state
  drm/i915/execlists: Use a locked clear_bit() for synchronisation with interrupt
  drm/i915: Specify which engines to reset following semaphore/event lockups
  drm/i915/dp: Write to SET_POWER dpcd to enable MST hub.

drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_lrc.c

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d7c4de45644dff066fc47dc1bc05476f668694ce..07c07d55398bdfcff72e2a4c40852479d2c3c157 100644
@@ -1611,15 +1611,12 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;
-       bool fw_csr;
        int ret;
 
        disable_rpm_wakeref_asserts(dev_priv);
 
        intel_display_set_init_power(dev_priv, false);
 
-       fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
-               suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
        /*
         * In case of firmware assisted context save/restore don't manually
         * deinit the power domains. This also means the CSR/DMC firmware will
@@ -1627,8 +1624,11 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
         * also enable deeper system power states that would be blocked if the
         * firmware was inactive.
         */
-       if (!fw_csr)
+       if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) ||
+           dev_priv->csr.dmc_payload == NULL) {
                intel_power_domains_suspend(dev_priv);
+               dev_priv->power_domains_suspended = true;
+       }
 
        ret = 0;
        if (IS_GEN9_LP(dev_priv))
@@ -1640,8 +1640,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
        if (ret) {
                DRM_ERROR("Suspend complete failed: %d\n", ret);
-               if (!fw_csr)
+               if (dev_priv->power_domains_suspended) {
                        intel_power_domains_init_hw(dev_priv, true);
+                       dev_priv->power_domains_suspended = false;
+               }
 
                goto out;
        }
@@ -1662,8 +1664,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
        if (!(hibernation && INTEL_GEN(dev_priv) < 6))
                pci_set_power_state(pdev, PCI_D3hot);
 
-       dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
-
 out:
        enable_rpm_wakeref_asserts(dev_priv);
 
@@ -1830,8 +1830,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
        intel_uncore_resume_early(dev_priv);
 
        if (IS_GEN9_LP(dev_priv)) {
-               if (!dev_priv->suspended_to_idle)
-                       gen9_sanitize_dc_state(dev_priv);
+               gen9_sanitize_dc_state(dev_priv);
                bxt_disable_dc9(dev_priv);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                hsw_disable_pc8(dev_priv);
@@ -1839,8 +1838,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
        intel_uncore_sanitize(dev_priv);
 
-       if (IS_GEN9_LP(dev_priv) ||
-           !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+       if (dev_priv->power_domains_suspended)
                intel_power_domains_init_hw(dev_priv, true);
        else
                intel_display_set_init_power(dev_priv, true);
@@ -1850,7 +1848,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
        enable_rpm_wakeref_asserts(dev_priv);
 
 out:
-       dev_priv->suspended_to_idle = false;
+       dev_priv->power_domains_suspended = false;
 
        return ret;
 }
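
The i915_drv.c change above replaces the recomputed fw_csr/suspended_to_idle
heuristics with a single power_domains_suspended flag: it is set only when the
power domains are actually suspended, and resume re-initialises them only if
the flag is set. Below is a minimal stand-alone C sketch of that bookkeeping
pattern; the struct and function names are simplified stand-ins, not real i915
symbols.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for the relevant part of drm_i915_private. */
    struct dev_state {
            bool fw_assisted_save;         /* CSR/DMC firmware can save/restore */
            bool power_domains_suspended;  /* set only when we suspended them */
    };

    /* Suspend path: record the flag only when the domains are really suspended. */
    static void suspend_late(struct dev_state *dev, bool hibernation)
    {
            if (hibernation || !dev->fw_assisted_save) {
                    printf("power domains suspended\n");
                    dev->power_domains_suspended = true;
            }
    }

    /* Resume path: the recorded flag, not a re-derived heuristic, decides
     * whether the power domains need a full re-init. */
    static void resume_early(struct dev_state *dev)
    {
            if (dev->power_domains_suspended)
                    printf("power domains re-initialised\n");
            else
                    printf("display init power only\n");
            dev->power_domains_suspended = false;
    }

    int main(void)
    {
            struct dev_state dev = { .fw_assisted_save = true };

            suspend_late(&dev, true);  /* hibernation: domains get suspended */
            resume_early(&dev);        /* so resume re-initialises them */
            return 0;
    }
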
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6e740f6fe33fce97a91f12082043a5da019b85ff..ce18b6cf6e683306320f372c218e9523315b763b 100644
@@ -2119,7 +2119,7 @@ struct drm_i915_private {
        u32 bxt_phy_grc;
 
        u32 suspend_count;
-       bool suspended_to_idle;
+       bool power_domains_suspended;
        struct i915_suspend_saved_registers regfile;
        struct vlv_s0ix_state vlv_s0ix_state;
 
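
The intel_lrc.c change below swaps the unlocked __clear_bit() for clear_bit()
plus smp_mb__after_atomic(), so the irq_posted bit is cleared before the CSB
is read and an interrupt arriving after the clear re-arms the loop instead of
being lost. A minimal C11 sketch of that clear-before-read pattern follows;
pending, csb_value and the function names are illustrative stand-ins, not i915
symbols.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_bool pending;    /* stands in for the irq_posted bit */
    static atomic_int csb_value;   /* stands in for a CSB entry */

    /* "Interrupt" side: publish the data, then set the pending flag. */
    static void irq_side(int v)
    {
            atomic_store_explicit(&csb_value, v, memory_order_release);
            atomic_store_explicit(&pending, true, memory_order_release);
    }

    /* "Tasklet" side: clear the flag before reading, so an event that lands
     * after the clear sets the flag again and the loop runs once more. */
    static void tasklet_side(void)
    {
            while (atomic_load_explicit(&pending, memory_order_acquire)) {
                    /* The seq_cst exchange plays the role of clear_bit() plus
                     * smp_mb__after_atomic(): the clear is ordered before the
                     * read of the data it guards. */
                    atomic_exchange(&pending, false);
                    printf("processed %d\n",
                           atomic_load_explicit(&csb_value, memory_order_acquire));
            }
    }

    int main(void)
    {
            irq_side(42);
            tasklet_side();
            return 0;
    }
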
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3a69b367e56552114a1048f41204ab03d8692705..697af5add78bac2668987809fdbd94bc08aa0dc7 100644
@@ -831,7 +831,8 @@ static void execlists_submission_tasklet(unsigned long data)
        struct drm_i915_private *dev_priv = engine->i915;
        bool fw = false;
 
-       /* We can skip acquiring intel_runtime_pm_get() here as it was taken
+       /*
+        * We can skip acquiring intel_runtime_pm_get() here as it was taken
         * on our behalf by the request (see i915_gem_mark_busy()) and it will
         * not be relinquished until the device is idle (see
         * i915_gem_idle_work_handler()). As a precaution, we make sure
@@ -840,7 +841,8 @@ static void execlists_submission_tasklet(unsigned long data)
         */
        GEM_BUG_ON(!dev_priv->gt.awake);
 
-       /* Prefer doing test_and_clear_bit() as a two stage operation to avoid
+       /*
+        * Prefer doing test_and_clear_bit() as a two stage operation to avoid
         * imposing the cost of a locked atomic transaction when submitting a
         * new request (outside of the context-switch interrupt).
         */
@@ -856,17 +858,10 @@ static void execlists_submission_tasklet(unsigned long data)
                        execlists->csb_head = -1; /* force mmio read of CSB ptrs */
                }
 
-               /* The write will be ordered by the uncached read (itself
-                * a memory barrier), so we do not need another in the form
-                * of a locked instruction. The race between the interrupt
-                * handler and the split test/clear is harmless as we order
-                * our clear before the CSB read. If the interrupt arrived
-                * first between the test and the clear, we read the updated
-                * CSB and clear the bit. If the interrupt arrives as we read
-                * the CSB or later (i.e. after we had cleared the bit) the bit
-                * is set and we do a new loop.
-                */
-               __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+               /* Clear before reading to catch new interrupts */
+               clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+               smp_mb__after_atomic();
+
                if (unlikely(execlists->csb_head == -1)) { /* following a reset */
                        if (!fw) {
                                intel_uncore_forcewake_get(dev_priv,