Merge branch 'pm-cpuidle'
author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 13 Nov 2017 00:34:14 +0000 (01:34 +0100)
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 13 Nov 2017 00:34:14 +0000 (01:34 +0100)
* pm-cpuidle:
  intel_idle: Graceful probe failure when MWAIT is disabled
  cpuidle: Avoid assignment in if () argument
  cpuidle: Clean up cpuidle_enable_device() error handling a bit
  cpuidle: ladder: Add per CPU PM QoS resume latency support
  ARM: cpuidle: Refactor rollback operations if init fails
  ARM: cpuidle: Correct driver unregistration if init fails
  intel_idle: replace conditionals with static_cpu_has(X86_FEATURE_ARAT)
  cpuidle: fix broadcast control when broadcast can not be entered

 Conflicts:
drivers/idle/intel_idle.c

1  2 
drivers/idle/intel_idle.c

index f0b06b14e782b5b926b5ba7876d827551ce4cfa9,9c93abdf635fe84ff3f64b4d60ac7a0203c34514..b2ccce5fb0718303971dec46581482ff1d2e7e76
@@@ -913,23 -913,28 +913,29 @@@ static __cpuidle int intel_idle(struct 
        struct cpuidle_state *state = &drv->states[index];
        unsigned long eax = flg2MWAIT(state->flags);
        unsigned int cstate;
+       bool uninitialized_var(tick);
 +      int cpu = smp_processor_id();
  
-       cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
        /*
 -       * NB: if CPUIDLE_FLAG_TLB_FLUSHED is set, this idle transition
 -       * will probably flush the TLB.  It's not guaranteed to flush
 -       * the TLB, though, so it's not clear that we can do anything
 -       * useful with this knowledge.
 +       * leave_mm() to avoid costly and often unnecessary wakeups
 +       * for flushing the user TLB's associated with the active mm.
         */
 +      if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
 +              leave_mm(cpu);
  
-       if (!(lapic_timer_reliable_states & (1 << (cstate))))
-               tick_broadcast_enter();
+       if (!static_cpu_has(X86_FEATURE_ARAT)) {
+               cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) &
+                               MWAIT_CSTATE_MASK) + 1;
+               tick = false;
+               if (!(lapic_timer_reliable_states & (1 << (cstate)))) {
+                       tick = true;
+                       tick_broadcast_enter();
+               }
+       }
  
        mwait_idle_with_hints(eax, ecx);
  
-       if (!(lapic_timer_reliable_states & (1 << (cstate))))
+       if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
                tick_broadcast_exit();
  
        return index;
@@@ -1061,7 -1066,7 +1067,7 @@@ static const struct idle_cpu idle_cpu_d
  };
  
  #define ICPU(model, cpu) \
-       { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
+       { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }
  
  static const struct x86_cpu_id intel_idle_ids[] __initconst = {
        ICPU(INTEL_FAM6_NEHALEM_EP,             idle_cpu_nehalem),
@@@ -1125,6 -1130,11 +1131,11 @@@ static int __init intel_idle_probe(void
                return -ENODEV;
        }
  
+       if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+               pr_debug("Please enable MWAIT in BIOS SETUP\n");
+               return -ENODEV;
+       }
        if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
                return -ENODEV;