Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 18 Sep 2015 16:23:08 +0000 (09:23 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 18 Sep 2015 16:23:08 +0000 (09:23 -0700)
Pull KVM fixes from Paolo Bonzini:
 "Mostly stable material, a lot of ARM fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (22 commits)
  sched: access local runqueue directly in single_task_running
  arm/arm64: KVM: Remove 'config KVM_ARM_MAX_VCPUS'
  arm64: KVM: Remove all traces of the ThumbEE registers
  arm: KVM: Disable virtual timer even if the guest is not using it
  arm64: KVM: Disable virtual timer even if the guest is not using it
  arm/arm64: KVM: vgic: Check for !irqchip_in_kernel() when mapping resources
  KVM: s390: Replace incorrect atomic_or with atomic_andnot
  arm: KVM: Fix incorrect device to IPA mapping
  arm64: KVM: Fix user access for debug registers
  KVM: vmx: fix VPID is 0000H in non-root operation
  KVM: add halt_attempted_poll to VCPU stats
  kvm: fix zero length mmio searching
  kvm: fix double free for fast mmio eventfd
  kvm: factor out core eventfd assign/deassign logic
  kvm: don't try to register to KVM_FAST_MMIO_BUS for non mmio eventfd
  KVM: make the declaration of functions within 80 characters
  KVM: arm64: add workaround for Cortex-A57 erratum #852523
  KVM: fix polling for guest halt continued even if disable it
  arm/arm64: KVM: Fix PSCI affinity info return value for non valid cores
  arm64: KVM: set {v,}TCR_EL2 RES1 bits
  ...

kernel/sched/core.c

diff --combined kernel/sched/core.c
index 97d276ff1edb1225f0ad894cb66b052be36b2104,4064f794ab8c81756ca7fce06f46119e8f56eb69..2f9c9288481779c309f31c19dc3b74831ef70b3d
@@@ -621,21 -621,18 +621,21 @@@ int get_nohz_timer_target(void
        int i, cpu = smp_processor_id();
        struct sched_domain *sd;
  
 -      if (!idle_cpu(cpu))
 +      if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
                return cpu;
  
        rcu_read_lock();
        for_each_domain(cpu, sd) {
                for_each_cpu(i, sched_domain_span(sd)) {
 -                      if (!idle_cpu(i)) {
 +                      if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
                                cpu = i;
                                goto unlock;
                        }
                }
        }
 +
 +      if (!is_housekeeping_cpu(cpu))
 +              cpu = housekeeping_any_cpu();
  unlock:
        rcu_read_unlock();
        return cpu;
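
The scheduler hunk above makes get_nohz_timer_target() steer unpinned timers onto busy housekeeping CPUs only, falling back to housekeeping_any_cpu() when the chosen CPU is not a housekeeping one. A condensed sketch of the resulting selection logic, with the sched-domain walk elided; is_housekeeping_cpu() and housekeeping_any_cpu() are the real helpers from <linux/tick.h>, but pick_unpinned_timer_cpu is an illustrative name, not the kernel function:

	#include <linux/sched.h>	/* idle_cpu() */
	#include <linux/smp.h>		/* smp_processor_id() */
	#include <linux/tick.h>		/* is_housekeeping_cpu(), housekeeping_any_cpu() */

	/* Illustrative condensation of get_nohz_timer_target() after this merge. */
	static int pick_unpinned_timer_cpu(void)
	{
		int cpu = smp_processor_id();

		/* Fast path: the local CPU is busy and may take unpinned timers. */
		if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
			return cpu;

		/* ... otherwise walk the sched domains for a busy housekeeping CPU ... */

		/* Last resort: any housekeeping CPU, even an idle one. */
		if (!is_housekeeping_cpu(cpu))
			cpu = housekeeping_any_cpu();
		return cpu;
	}
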
@@@ -2669,13 -2666,20 +2669,20 @@@ unsigned long nr_running(void
  
  /*
   * Check if only the current task is running on the cpu.
+  *
+  * Caution: this function does not check that the caller has disabled
+  * preemption, thus the result might have a time-of-check-to-time-of-use
+  * race.  The caller is responsible for using it correctly, for example:
+  *
+  * - from a non-preemptible section (of course)
+  *
+  * - from a thread that is bound to a single CPU
+  *
+  * - in a loop with very short iterations (e.g. a polling loop)
   */
  bool single_task_running(void)
  {
-       if (cpu_rq(smp_processor_id())->nr_running == 1)
-               return true;
-       else
-               return false;
+       return raw_rq()->nr_running == 1;
  }
  EXPORT_SYMBOL(single_task_running);
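
The comment added above licenses racy use of single_task_running() only where a stale answer is harmless; the in-tree user motivating this change is KVM's halt-polling loop in kvm_vcpu_block(), which re-checks the condition every iteration. A hypothetical poll loop in the same spirit; poll_for_work() is an illustrative stand-in for the real work check, not a kernel API:

	/*
	 * Hypothetical poll loop: single_task_running() can race with a
	 * wakeup on this CPU, but each iteration re-checks it, so a stale
	 * result costs at most one extra pass.
	 */
	while (single_task_running()) {
		if (poll_for_work())	/* illustrative helper, not a kernel API */
			break;
		cpu_relax();
	}
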
  
@@@ -5181,47 -5185,24 +5188,47 @@@ static void migrate_tasks(struct rq *de
                        break;
  
                /*
 -               * Ensure rq->lock covers the entire task selection
 -               * until the migration.
 +               * pick_next_task assumes pinned rq->lock.
                 */
                lockdep_pin_lock(&rq->lock);
                next = pick_next_task(rq, &fake_task);
                BUG_ON(!next);
                next->sched_class->put_prev_task(rq, next);
  
 +              /*
 +               * Rules for changing task_struct::cpus_allowed are holding
 +               * both pi_lock and rq->lock, such that holding either
 +               * stabilizes the mask.
 +               *
 +              * Dropping rq->lock is not quite as disastrous as it usually is
 +               * because !cpu_active at this point, which means load-balance
 +               * will not interfere. Also, stop-machine.
 +               */
 +              lockdep_unpin_lock(&rq->lock);
 +              raw_spin_unlock(&rq->lock);
 +              raw_spin_lock(&next->pi_lock);
 +              raw_spin_lock(&rq->lock);
 +
 +              /*
 +               * Since we're inside stop-machine, _nothing_ should have
 +               * changed the task, WARN if weird stuff happened, because in
 +               * that case the above rq->lock drop is a fail too.
 +               */
 +              if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
 +                      raw_spin_unlock(&next->pi_lock);
 +                      continue;
 +              }
 +
                /* Find suitable destination for @next, with force if needed. */
                dest_cpu = select_fallback_rq(dead_rq->cpu, next);
  
 -              lockdep_unpin_lock(&rq->lock);
                rq = __migrate_task(rq, next, dest_cpu);
                if (rq != dead_rq) {
                        raw_spin_unlock(&rq->lock);
                        rq = dead_rq;
                        raw_spin_lock(&rq->lock);
                }
 +              raw_spin_unlock(&next->pi_lock);
        }
  
        rq->stop = stop;
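
The new comment in migrate_tasks() states the rule: task_struct::cpus_allowed only changes while both p->pi_lock and rq->lock are held, so holding either stabilizes the mask; and since pi_lock nests outside rq->lock, rq->lock has to be dropped and retaken to acquire both in the right order. A minimal sketch of that ordering; lock_task_and_rq() is a hypothetical name, not a kernel helper:

	/*
	 * Hypothetical helper mirroring the sequence in migrate_tasks():
	 * p->pi_lock nests outside rq->lock, so the rq lock is dropped,
	 * the outer lock taken first, and the rq lock re-acquired.
	 */
	static void lock_task_and_rq(struct task_struct *p, struct rq *rq)
	{
		raw_spin_unlock(&rq->lock);	/* give up the inner lock */
		raw_spin_lock(&p->pi_lock);	/* take the outer lock first */
		raw_spin_lock(&rq->lock);	/* retake the inner lock */
		/* Both held: p->cpus_allowed is now stable until unlock. */
	}

Because rq->lock is dropped in that window, the hunk re-validates the task afterwards: the WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next)) check catches anything that moved despite stop-machine.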