kvm: x86: Set highest physical address bits in non-present/reserved SPTEs
[muen/linux.git] / arch/x86/kernel/kvm.c
/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int __init parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
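
/*
 * All three parameters above can be given on the guest kernel command
 * line, e.g. booting with "no-kvmapf no-steal-acc no-kvmclock-vsyscall"
 * turns off async page faults, steal time accounting in the scheduler,
 * and the kvmclock vsyscall time info page, respectively.
 */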

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
        struct hlist_node link;
        struct swait_queue_head wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        raw_spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
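
/*
 * Async #PF tokens are hashed with hash_32() into the 256-bucket
 * (1 << KVM_TASK_SLEEP_HASHBITS) table above; each bucket carries its
 * own raw spinlock, so wait and wake paths for different tokens rarely
 * contend on the same lock.
 */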

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 *                    (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DECLARE_SWAITQUEUE(wait);

        rcu_irq_enter();

        raw_spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* A dummy entry exists -> the wakeup was delivered ahead of the PF */
                hlist_del(&e->link);
                kfree(e);
                raw_spin_unlock(&b->lock);

                rcu_irq_exit();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) ||
                   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
                    ? preempt_count() > 1 || rcu_preempt_depth()
                    : interrupt_kernel);
        init_swait_queue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        raw_spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                rcu_irq_exit();

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule, so halt.
                         */
                        native_safe_halt();
                        local_irq_disable();
                }

                rcu_irq_enter();
        }
        if (!n.halted)
                finish_swait(&n.wq, &wait);

        rcu_irq_exit();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
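
/*
 * Sleep-vs-halt choice in kvm_async_pf_task_wait(): a context that must
 * not reschedule (the idle task, a nested preempt_count, an RCU read
 * side, or a non-preemptible kernel interrupted in kernel mode) sets
 * n.halted and waits in native_safe_halt() until the PAGE_READY wakeup
 * sends a reschedule IPI; everything else parks on the swait queue so
 * another task can run while the host pages the memory back in.
 */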

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (swq_has_sleeper(&n->wq))
                swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                raw_spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                raw_spin_unlock(&b->lock);
        }
}

void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * The async PF was not yet handled.
                 * Add a dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while another CPU
                         * handles the async PF.
                         */
                        raw_spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_swait_queue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        raw_spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__this_cpu_read(apf_reason.enabled)) {
                reason = __this_cpu_read(apf_reason.reason);
                __this_cpu_write(apf_reason.reason, 0);
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* The page was swapped out by the host. */
                prev_state = exception_enter();
                kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}
NOKPROBE_SYMBOL(do_async_page_fault);
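
/*
 * do_async_page_fault() is installed in place of the stock #PF handler
 * (see kvm_apf_trap_init() below). The host communicates through the
 * per-cpu apf_reason slot registered via MSR_KVM_ASYNC_PF_EN: a zero
 * reason means an ordinary page fault, PAGE_NOT_PRESENT means the host
 * has started paging the memory in and the token delivered in CR2
 * identifies the outstanding request, and PAGE_READY means that token's
 * request has completed and any waiter can be woken.
 */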

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}
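
/*
 * MSR_KVM_STEAL_TIME takes the guest-physical address of the per-cpu
 * kvm_steal_time area (64-byte aligned, hence the __aligned(64) on the
 * DEFINE_PER_CPU_DECRYPTED above) with bit 0 (KVM_MSR_ENABLED) set;
 * writing 0 disables it again, as kvm_disable_steal_time() does below.
 */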

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /*
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU, so
         * there's no need for locks or memory barriers.
         * An optimization barrier is implied in the apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
        apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

static void kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                pa |= KVM_ASYNC_PF_ENABLED;

                if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
                        pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
                __this_cpu_write(apf_reason.enabled, 1);
                printk(KERN_INFO "KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __this_cpu_write(kvm_apic_eoi, 0);
                pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__this_cpu_read(apf_reason.enabled))
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);

        printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
         * memory. The new kernel can re-enable it when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                virt_rmb();
                steal = src->steal;
                virt_rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}
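
/*
 * The steal_time record is published by the host with a seqcount-style
 * version field: the host bumps ->version to an odd value before
 * updating the record and to an even value afterwards. The loop above
 * therefore retries whenever it observes an odd version, or a version
 * that changed while ->steal was being read, so a torn update is never
 * returned.
 */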

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
        early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed
 * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged later will have their per-cpu variables already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
        int cpu;

        if (!sev_active())
                return;

        for_each_possible_cpu(cpu) {
                __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
                __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
                __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
        }
}

#ifdef CONFIG_SMP
#define KVM_IPI_CLUSTER_SIZE    (2 * BITS_PER_LONG)

static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
        unsigned long flags;
        int cpu, apic_id, icr;
        int min = 0, max = 0;
#ifdef CONFIG_X86_64
        __uint128_t ipi_bitmap = 0;
#else
        u64 ipi_bitmap = 0;
#endif

        if (cpumask_empty(mask))
                return;

        local_irq_save(flags);

        switch (vector) {
        default:
                icr = APIC_DM_FIXED | vector;
                break;
        case NMI_VECTOR:
                icr = APIC_DM_NMI;
                break;
        }

        for_each_cpu(cpu, mask) {
                apic_id = per_cpu(x86_cpu_to_apicid, cpu);
                if (!ipi_bitmap) {
                        min = max = apic_id;
                } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
                        ipi_bitmap <<= min - apic_id;
                        min = apic_id;
                } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
                        max = apic_id < max ? max : apic_id;
                } else {
                        kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
                                (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
                        min = max = apic_id;
                        ipi_bitmap = 0;
                }
                __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
        }

        if (ipi_bitmap) {
                kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
                        (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
        }

        local_irq_restore(flags);
}
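
/*
 * KVM_HC_SEND_IPI takes the destinations as a bitmap of APIC IDs
 * relative to @min: the low word in the first argument and the high
 * word in the second, so one hypercall covers a cluster of up to
 * KVM_IPI_CLUSTER_SIZE (128 on x86-64) consecutive APIC IDs. For
 * example, a mask resolving to APIC IDs 4, 6 and 9 is sent as min = 4
 * with bits 0, 2 and 5 set. An ID that does not fit the current
 * cluster flushes it with a hypercall and starts a new one.
 */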

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
        __send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
        unsigned int this_cpu = smp_processor_id();
        struct cpumask new_mask;
        const struct cpumask *local_mask;

        cpumask_copy(&new_mask, mask);
        cpumask_clear_cpu(this_cpu, &new_mask);
        local_mask = &new_mask;
        __send_ipi_mask(local_mask, vector);
}

static void kvm_send_ipi_allbutself(int vector)
{
        kvm_send_ipi_mask_allbutself(cpu_online_mask, vector);
}

static void kvm_send_ipi_all(int vector)
{
        __send_ipi_mask(cpu_online_mask, vector);
}

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
        apic->send_IPI_mask = kvm_send_ipi_mask;
        apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
        apic->send_IPI_allbutself = kvm_send_ipi_allbutself;
        apic->send_IPI_all = kvm_send_ipi_all;
        pr_info("KVM setup pv IPIs\n");
}

static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        if (kvm_para_has_hint(KVM_HINTS_REALTIME))
                static_branch_disable(&virt_spin_lock_key);
}
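
/*
 * KVM_HINTS_REALTIME indicates that the host does not overcommit, i.e.
 * vCPUs are expected to keep running. In that case the unfair
 * virt_spin_lock fallback only adds overhead, so the static branch is
 * turned off and the guest spins as it would on bare metal.
 */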

static void __init kvm_smp_prepare_boot_cpu(void)
{
        /*
         * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
         * shares the guest physical address with the hypervisor.
         */
        sev_map_percpu_data();

        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_init();
        local_irq_enable();
        return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_offline();
        local_irq_enable();
        return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
        update_intr_gate(X86_TRAP_PF, async_page_fault);
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);

static void kvm_flush_tlb_others(const struct cpumask *cpumask,
                        const struct flush_tlb_info *info)
{
        u8 state;
        int cpu;
        struct kvm_steal_time *src;
        struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

        cpumask_copy(flushmask, cpumask);
        /*
         * Flush only vCPUs that are currently running; for a preempted
         * vCPU, queue the flush to happen when the host re-enters it.
         */
        for_each_cpu(cpu, flushmask) {
                src = &per_cpu(steal_time, cpu);
                state = READ_ONCE(src->preempted);
                if ((state & KVM_VCPU_PREEMPTED)) {
                        if (try_cmpxchg(&src->preempted, &state,
                                        state | KVM_VCPU_FLUSH_TLB))
                                __cpumask_clear_cpu(cpu, flushmask);
                }
        }

        native_flush_tlb_others(flushmask, info);
}
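
/*
 * This saves an IPI (and the resulting VM exit) for every target vCPU
 * that the host has preempted: try_cmpxchg() atomically sets
 * KVM_VCPU_FLUSH_TLB in the shared steal_time.preempted byte, and if
 * that succeeds while the vCPU is still marked KVM_VCPU_PREEMPTED, the
 * host flushes the TLB before running that vCPU again, so it can be
 * dropped from the IPI mask.
 */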

static void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                raw_spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
            !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
            kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
                pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvmclock_vsyscall)
                kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
                                      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
                pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
        sev_map_percpu_data();
        kvm_guest_cpu_init();
#endif

        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
         * overcommitted.
         */
        hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
        return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
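
/*
 * The hypervisor is detected by scanning the hypervisor CPUID range for
 * the "KVMKVMKVM\0\0\0" signature; the returned base leaf is cached in
 * kvm_cpuid_base(). Feature bits (KVM_FEATURE_*) live in EAX and hint
 * bits (KVM_HINTS_*) in EDX of the KVM_CPUID_FEATURES leaf relative to
 * that base, which is what the two accessors above read.
 */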

static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#if defined(CONFIG_SMP)
        if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
                kvm_setup_pv_ipi();
#endif
}

static void __init kvm_init_platform(void)
{
        x86_platform.apic_post_init = kvm_apic_init;
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .type                   = X86_HYPER_KVM,
        .init.guest_late_init   = kvm_guest_init,
        .init.x2apic_available  = kvm_para_available,
        .init.init_platform     = kvm_init_platform,
};

static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);

static __init int kvm_setup_pv_tlb_flush(void)
{
        int cpu;

        if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
            !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
            kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                for_each_possible_cpu(cpu) {
                        zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
                                GFP_KERNEL, cpu_to_node(cpu));
                }
                pr_info("KVM setup pv remote TLB flush\n");
        }

        return 0;
}
arch_initcall(kvm_setup_pv_tlb_flush);
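
/*
 * Note that PV TLB flushing requires KVM_FEATURE_STEAL_TIME as well:
 * the preempted state that kvm_flush_tlb_others() inspects lives in the
 * shared kvm_steal_time record. It is also skipped under
 * KVM_HINTS_REALTIME, where vCPUs are expected never to be preempted.
 */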

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a CPU by its APIC ID. Used to wake up a halted vCPU. */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
        unsigned long flags;

        if (in_nmi())
                return;

        local_irq_save(flags);

        if (READ_ONCE(*ptr) != val)
                goto out;

        /*
         * Halt until it's our turn and we are kicked. Note that we do a safe
         * halt in the irq-enabled case to avoid hanging when the lock info is
         * overwritten in the irq spinlock slowpath and no spurious interrupt
         * occurs to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
        struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

        return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 to avoid saving and restoring 8
 * 64-bit registers to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq   __per_cpu_offset(,%rdi,8), %rax;"
"cmpb   $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne  %al;"
"ret;"
".popsection");
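
/*
 * This is effectively the same check as the C version above: the cpu
 * argument arrives in %rdi, __per_cpu_offset(,%rdi,8) yields that CPU's
 * per-cpu base, and the cmpb/setne pair returns true iff the
 * ->preempted byte of its steal_time record (located via the
 * KVM_STEAL_TIME_preempted asm-offset) is non-zero.
 */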

#endif

/*
 * Set up pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_available())
                return;
        /* Does the host kernel support KVM_FEATURE_PV_UNHALT? */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

        if (kvm_para_has_hint(KVM_HINTS_REALTIME))
                return;

        /* Don't use the pvqspinlock code if there is only 1 vCPU. */
        if (num_possible_cpus() == 1)
                return;

        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                pv_lock_ops.vcpu_is_preempted =
                        PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
        }
}

#endif  /* CONFIG_PARAVIRT_SPINLOCKS */