2 * Kernel-based Virtual Machine driver for Linux
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11 * Avi Kivity <avi@qumranet.com>
12 * Yaniv Kamay <yaniv@qumranet.com>
14 * This work is licensed under the terms of the GNU GPL, version 2. See
15 * the COPYING file in the top-level directory.
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include <linux/nospec.h>
38 #include "kvm_cache_regs.h"
46 #include <asm/virtext.h>
48 #include <asm/fpu/internal.h>
49 #include <asm/perf_event.h>
50 #include <asm/debugreg.h>
51 #include <asm/kexec.h>
53 #include <asm/irq_remapping.h>
54 #include <asm/mmu_context.h>
55 #include <asm/spec-ctrl.h>
56 #include <asm/mshyperv.h>
60 #include "vmx_evmcs.h"
62 #define __ex(x) __kvm_handle_fault_on_reboot(x)
63 #define __ex_clear(x, reg) \
64 ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
66 MODULE_AUTHOR("Qumranet");
67 MODULE_LICENSE("GPL");
69 static const struct x86_cpu_id vmx_cpu_id[] = {
70 X86_FEATURE_MATCH(X86_FEATURE_VMX),
73 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
75 static bool __read_mostly enable_vpid = 1;
76 module_param_named(vpid, enable_vpid, bool, 0444);
78 static bool __read_mostly enable_vnmi = 1;
79 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
81 static bool __read_mostly flexpriority_enabled = 1;
82 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
84 static bool __read_mostly enable_ept = 1;
85 module_param_named(ept, enable_ept, bool, S_IRUGO);
87 static bool __read_mostly enable_unrestricted_guest = 1;
88 module_param_named(unrestricted_guest,
89 enable_unrestricted_guest, bool, S_IRUGO);
91 static bool __read_mostly enable_ept_ad_bits = 1;
92 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
94 static bool __read_mostly emulate_invalid_guest_state = true;
95 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
97 static bool __read_mostly fasteoi = 1;
98 module_param(fasteoi, bool, S_IRUGO);
100 static bool __read_mostly enable_apicv = 1;
101 module_param(enable_apicv, bool, S_IRUGO);
103 static bool __read_mostly enable_shadow_vmcs = 1;
104 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
106 * If nested=1, nested virtualization is supported, i.e., guests may use
107 * VMX and act as hypervisors for their own guests. If nested=0, guests may not
108 * use VMX instructions.
110 static bool __read_mostly nested = 0;
111 module_param(nested, bool, S_IRUGO);
113 static u64 __read_mostly host_xss;
115 static bool __read_mostly enable_pml = 1;
116 module_param_named(pml, enable_pml, bool, S_IRUGO);
120 #define MSR_TYPE_RW 3
122 #define MSR_BITMAP_MODE_X2APIC 1
123 #define MSR_BITMAP_MODE_X2APIC_APICV 2
124 #define MSR_BITMAP_MODE_LM 4
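/*
 * The MSR bitmap "mode" is a combination of the flags above.  A minimal
 * sketch of how such a mode word might be assembled (illustrative only;
 * x2apic_enabled and apicv_active stand in for the real predicates, and
 * the actual logic lives in the msr bitmap update helpers later in this
 * file):
 *
 *	u8 mode = 0;
 *	if (is_long_mode(vcpu))
 *		mode |= MSR_BITMAP_MODE_LM;
 *	if (x2apic_enabled)
 *		mode |= MSR_BITMAP_MODE_X2APIC;
 *	if (x2apic_enabled && apicv_active)
 *		mode |= MSR_BITMAP_MODE_X2APIC_APICV;
 */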
126 #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
128 /* Guest_tsc -> host_tsc conversion requires 64-bit division. */
129 static int __read_mostly cpu_preemption_timer_multi;
130 static bool __read_mostly enable_preemption_timer = 1;
132 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
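/*
 * The VMX preemption timer counts down at the TSC rate divided by
 * 2^cpu_preemption_timer_multi (the divider is advertised in the low
 * bits of MSR_IA32_VMX_MISC), so converting a guest TSC delta into
 * timer ticks is roughly:
 *
 *	ticks = delta_tsc >> cpu_preemption_timer_multi;
 */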
135 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
136 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
137 #define KVM_VM_CR0_ALWAYS_ON \
138 (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \
139 X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
140 #define KVM_CR4_GUEST_OWNED_BITS \
141 (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
142 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
144 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
145 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
146 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
148 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
150 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
153 * Hyper-V requires all of these, so mark them as supported even though
154 * they are just treated the same as all-context.
156 #define VMX_VPID_EXTENT_SUPPORTED_MASK \
157 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
158 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
159 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
160 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
163 * These two parameters are used to configure the controls for Pause-Loop Exiting:
164 * ple_gap: upper bound on the amount of time between two successive
165 * executions of PAUSE in a loop. Also indicates whether PLE is enabled.
166 * According to tests, this time is usually smaller than 128 cycles.
167 * ple_window: upper bound on the amount of time a guest is allowed to execute
168 * in a PAUSE loop. Tests indicate that most spinlocks are held for
169 * less than 2^12 cycles.
170 * Time is measured based on a counter that runs at the same rate as the TSC,
171 * refer to SDM volume 3b, sections 21.6.13 & 22.1.3.
173 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
175 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
176 module_param(ple_window, uint, 0444);
178 /* By default, the per-vcpu window is doubled on every PLE exit. */
179 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
180 module_param(ple_window_grow, uint, 0444);
182 /* By default, shrinking resets the per-vcpu window back to ple_window. */
183 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
184 module_param(ple_window_shrink, uint, 0444);
186 /* Default is to compute the maximum so we can never overflow. */
187 static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
188 module_param(ple_window_max, uint, 0444);
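/*
 * With the defaults above the per-vcpu window evolves roughly as
 * (illustrative only; the real helpers also guard against overflow):
 *
 *	new_window = min(old_window * ple_window_grow, ple_window_max);
 *
 * on every PAUSE-loop exit, while a ple_window_shrink of 0 simply resets
 * the window back to ple_window.
 */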
190 extern const ulong vmx_return;
192 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
193 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
194 static DEFINE_MUTEX(vmx_l1d_flush_mutex);
196 /* Storage for pre-module-init parameter parsing */
197 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
199 static const struct {
201 enum vmx_l1d_flush_state cmd;
202 } vmentry_l1d_param[] = {
203 {"auto", VMENTER_L1D_FLUSH_AUTO},
204 {"never", VMENTER_L1D_FLUSH_NEVER},
205 {"cond", VMENTER_L1D_FLUSH_COND},
206 {"always", VMENTER_L1D_FLUSH_ALWAYS},
209 #define L1D_CACHE_ORDER 4
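/*
 * 2^4 = 16 pages, i.e. 64KB with 4K pages: enough for the software L1D
 * flush to read through and displace a (typically 32KB) L1D cache.
 */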
210 static void *vmx_l1d_flush_pages;
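/*
 * Resolve the requested L1D flush mode (honouring the global l1tf
 * mitigation setting and ARCH_CAP_SKIP_VMENTRY_L1DFLUSH), allocate the
 * software flush buffer when the CPU lacks X86_FEATURE_FLUSH_L1D, and
 * flip the static keys that gate the flush on VM entry.
 */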
212 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
218 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
222 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
225 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
226 if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
227 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
232 /* If set to auto, use the default L1TF mitigation method */
233 if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
234 switch (l1tf_mitigation) {
235 case L1TF_MITIGATION_OFF:
236 l1tf = VMENTER_L1D_FLUSH_NEVER;
238 case L1TF_MITIGATION_FLUSH_NOWARN:
239 case L1TF_MITIGATION_FLUSH:
240 case L1TF_MITIGATION_FLUSH_NOSMT:
241 l1tf = VMENTER_L1D_FLUSH_COND;
243 case L1TF_MITIGATION_FULL:
244 case L1TF_MITIGATION_FULL_FORCE:
245 l1tf = VMENTER_L1D_FLUSH_ALWAYS;
248 } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
249 l1tf = VMENTER_L1D_FLUSH_ALWAYS;
252 if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
253 !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
254 page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
257 vmx_l1d_flush_pages = page_address(page);
260 * Initialize each page with a different pattern in
261 * order to protect against KSM in the nested
262 * virtualization case.
264 for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
265 memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
270 l1tf_vmx_mitigation = l1tf;
272 if (l1tf != VMENTER_L1D_FLUSH_NEVER)
273 static_branch_enable(&vmx_l1d_should_flush);
275 static_branch_disable(&vmx_l1d_should_flush);
277 if (l1tf == VMENTER_L1D_FLUSH_COND)
278 static_branch_enable(&vmx_l1d_flush_cond);
280 static_branch_disable(&vmx_l1d_flush_cond);
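/* Map a vmentry_l1d_flush= string to its enum vmx_l1d_flush_state value. */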
284 static int vmentry_l1d_flush_parse(const char *s)
289 for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
290 if (sysfs_streq(s, vmentry_l1d_param[i].option))
291 return vmentry_l1d_param[i].cmd;
297 static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
301 if (!boot_cpu_has(X86_BUG_L1TF))
304 l1tf = vmentry_l1d_flush_parse(s);
309 * Has vmx_init() run already? If not, this is the pre-init
310 * parameter parsing. In that case just store the value and let
311 * vmx_init() do the proper setup after enable_ept has been established.
314 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
315 vmentry_l1d_flush_param = l1tf;
319 mutex_lock(&vmx_l1d_flush_mutex);
320 ret = vmx_setup_l1d_flush(l1tf);
321 mutex_unlock(&vmx_l1d_flush_mutex);
325 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
327 return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
330 static const struct kernel_param_ops vmentry_l1d_flush_ops = {
331 .set = vmentry_l1d_flush_set,
332 .get = vmentry_l1d_flush_get,
334 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
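/*
 * The parameter above is normally exposed through sysfs, e.g.
 * (illustrative):
 *
 *	# echo cond > /sys/module/kvm_intel/parameters/vmentry_l1d_flush
 *	# cat /sys/module/kvm_intel/parameters/vmentry_l1d_flush
 *	cond
 */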
336 enum ept_pointers_status {
337 EPT_POINTERS_CHECK = 0,
338 EPT_POINTERS_MATCH = 1,
339 EPT_POINTERS_MISMATCH = 2
345 unsigned int tss_addr;
346 bool ept_identity_pagetable_done;
347 gpa_t ept_identity_map_addr;
349 enum ept_pointers_status ept_pointers_match;
350 spinlock_t ept_pointer_lock;
353 #define NR_AUTOLOAD_MSRS 8
367 * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
368 * and whose values change infrequently, but are not constant. I.e. this is
369 * used as a write-through cache of the corresponding VMCS fields.
371 struct vmcs_host_state {
372 unsigned long cr3; /* May not match real cr3 */
373 unsigned long cr4; /* May not match real cr4 */
374 unsigned long gs_base;
375 unsigned long fs_base;
377 u16 fs_sel, gs_sel, ldt_sel;
384 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
385 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
386 * loaded on this CPU (so we can clear them if the CPU goes down).
390 struct vmcs *shadow_vmcs;
393 bool nmi_known_unmasked;
394 /* Support for vnmi-less CPUs */
395 int soft_vnmi_blocked;
397 s64 vnmi_blocked_time;
398 unsigned long *msr_bitmap;
399 struct list_head loaded_vmcss_on_cpu_link;
400 struct vmcs_host_state host_state;
403 struct shared_msr_entry {
410 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
411 * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
412 * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
413 * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
414 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
415 * More than one of these structures may exist, if L1 runs multiple L2 guests.
416 * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
417 * underlying hardware which will be used to run L2.
418 * This structure is packed to ensure that its layout is identical across
419 * machines (necessary for live migration).
421 * IMPORTANT: Changing the layout of existing fields in this structure
422 * will break save/restore compatibility with older kvm releases. When
423 * adding new fields, either use space in the reserved padding* arrays
424 * or add the new fields to the end of the structure.
426 typedef u64 natural_width;
427 struct __packed vmcs12 {
428 /* According to the Intel spec, a VMCS region must start with the
429 * following two fields. Then follow implementation-specific data.
434 u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
435 u32 padding[7]; /* room for future expansion */
440 u64 vm_exit_msr_store_addr;
441 u64 vm_exit_msr_load_addr;
442 u64 vm_entry_msr_load_addr;
444 u64 virtual_apic_page_addr;
445 u64 apic_access_addr;
446 u64 posted_intr_desc_addr;
448 u64 eoi_exit_bitmap0;
449 u64 eoi_exit_bitmap1;
450 u64 eoi_exit_bitmap2;
451 u64 eoi_exit_bitmap3;
453 u64 guest_physical_address;
454 u64 vmcs_link_pointer;
455 u64 guest_ia32_debugctl;
458 u64 guest_ia32_perf_global_ctrl;
466 u64 host_ia32_perf_global_ctrl;
469 u64 vm_function_control;
470 u64 eptp_list_address;
472 u64 padding64[3]; /* room for future expansion */
474 * To allow migration of L1 (complete with its L2 guests) between
475 * machines of different natural widths (32 or 64 bit), we cannot have
476 * unsigned long fields with no explicit size. We use u64 (aliased
477 * natural_width) instead. Luckily, x86 is little-endian.
479 natural_width cr0_guest_host_mask;
480 natural_width cr4_guest_host_mask;
481 natural_width cr0_read_shadow;
482 natural_width cr4_read_shadow;
483 natural_width cr3_target_value0;
484 natural_width cr3_target_value1;
485 natural_width cr3_target_value2;
486 natural_width cr3_target_value3;
487 natural_width exit_qualification;
488 natural_width guest_linear_address;
489 natural_width guest_cr0;
490 natural_width guest_cr3;
491 natural_width guest_cr4;
492 natural_width guest_es_base;
493 natural_width guest_cs_base;
494 natural_width guest_ss_base;
495 natural_width guest_ds_base;
496 natural_width guest_fs_base;
497 natural_width guest_gs_base;
498 natural_width guest_ldtr_base;
499 natural_width guest_tr_base;
500 natural_width guest_gdtr_base;
501 natural_width guest_idtr_base;
502 natural_width guest_dr7;
503 natural_width guest_rsp;
504 natural_width guest_rip;
505 natural_width guest_rflags;
506 natural_width guest_pending_dbg_exceptions;
507 natural_width guest_sysenter_esp;
508 natural_width guest_sysenter_eip;
509 natural_width host_cr0;
510 natural_width host_cr3;
511 natural_width host_cr4;
512 natural_width host_fs_base;
513 natural_width host_gs_base;
514 natural_width host_tr_base;
515 natural_width host_gdtr_base;
516 natural_width host_idtr_base;
517 natural_width host_ia32_sysenter_esp;
518 natural_width host_ia32_sysenter_eip;
519 natural_width host_rsp;
520 natural_width host_rip;
521 natural_width paddingl[8]; /* room for future expansion */
522 u32 pin_based_vm_exec_control;
523 u32 cpu_based_vm_exec_control;
524 u32 exception_bitmap;
525 u32 page_fault_error_code_mask;
526 u32 page_fault_error_code_match;
527 u32 cr3_target_count;
528 u32 vm_exit_controls;
529 u32 vm_exit_msr_store_count;
530 u32 vm_exit_msr_load_count;
531 u32 vm_entry_controls;
532 u32 vm_entry_msr_load_count;
533 u32 vm_entry_intr_info_field;
534 u32 vm_entry_exception_error_code;
535 u32 vm_entry_instruction_len;
537 u32 secondary_vm_exec_control;
538 u32 vm_instruction_error;
540 u32 vm_exit_intr_info;
541 u32 vm_exit_intr_error_code;
542 u32 idt_vectoring_info_field;
543 u32 idt_vectoring_error_code;
544 u32 vm_exit_instruction_len;
545 u32 vmx_instruction_info;
552 u32 guest_ldtr_limit;
554 u32 guest_gdtr_limit;
555 u32 guest_idtr_limit;
556 u32 guest_es_ar_bytes;
557 u32 guest_cs_ar_bytes;
558 u32 guest_ss_ar_bytes;
559 u32 guest_ds_ar_bytes;
560 u32 guest_fs_ar_bytes;
561 u32 guest_gs_ar_bytes;
562 u32 guest_ldtr_ar_bytes;
563 u32 guest_tr_ar_bytes;
564 u32 guest_interruptibility_info;
565 u32 guest_activity_state;
566 u32 guest_sysenter_cs;
567 u32 host_ia32_sysenter_cs;
568 u32 vmx_preemption_timer_value;
569 u32 padding32[7]; /* room for future expansion */
570 u16 virtual_processor_id;
572 u16 guest_es_selector;
573 u16 guest_cs_selector;
574 u16 guest_ss_selector;
575 u16 guest_ds_selector;
576 u16 guest_fs_selector;
577 u16 guest_gs_selector;
578 u16 guest_ldtr_selector;
579 u16 guest_tr_selector;
580 u16 guest_intr_status;
581 u16 host_es_selector;
582 u16 host_cs_selector;
583 u16 host_ss_selector;
584 u16 host_ds_selector;
585 u16 host_fs_selector;
586 u16 host_gs_selector;
587 u16 host_tr_selector;
592 * For save/restore compatibility, the vmcs12 field offsets must not change.
594 #define CHECK_OFFSET(field, loc) \
595 BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc), \
596 "Offset of " #field " in struct vmcs12 has changed.")
598 static inline void vmx_check_vmcs12_offsets(void) {
599 CHECK_OFFSET(hdr, 0);
600 CHECK_OFFSET(abort, 4);
601 CHECK_OFFSET(launch_state, 8);
602 CHECK_OFFSET(io_bitmap_a, 40);
603 CHECK_OFFSET(io_bitmap_b, 48);
604 CHECK_OFFSET(msr_bitmap, 56);
605 CHECK_OFFSET(vm_exit_msr_store_addr, 64);
606 CHECK_OFFSET(vm_exit_msr_load_addr, 72);
607 CHECK_OFFSET(vm_entry_msr_load_addr, 80);
608 CHECK_OFFSET(tsc_offset, 88);
609 CHECK_OFFSET(virtual_apic_page_addr, 96);
610 CHECK_OFFSET(apic_access_addr, 104);
611 CHECK_OFFSET(posted_intr_desc_addr, 112);
612 CHECK_OFFSET(ept_pointer, 120);
613 CHECK_OFFSET(eoi_exit_bitmap0, 128);
614 CHECK_OFFSET(eoi_exit_bitmap1, 136);
615 CHECK_OFFSET(eoi_exit_bitmap2, 144);
616 CHECK_OFFSET(eoi_exit_bitmap3, 152);
617 CHECK_OFFSET(xss_exit_bitmap, 160);
618 CHECK_OFFSET(guest_physical_address, 168);
619 CHECK_OFFSET(vmcs_link_pointer, 176);
620 CHECK_OFFSET(guest_ia32_debugctl, 184);
621 CHECK_OFFSET(guest_ia32_pat, 192);
622 CHECK_OFFSET(guest_ia32_efer, 200);
623 CHECK_OFFSET(guest_ia32_perf_global_ctrl, 208);
624 CHECK_OFFSET(guest_pdptr0, 216);
625 CHECK_OFFSET(guest_pdptr1, 224);
626 CHECK_OFFSET(guest_pdptr2, 232);
627 CHECK_OFFSET(guest_pdptr3, 240);
628 CHECK_OFFSET(guest_bndcfgs, 248);
629 CHECK_OFFSET(host_ia32_pat, 256);
630 CHECK_OFFSET(host_ia32_efer, 264);
631 CHECK_OFFSET(host_ia32_perf_global_ctrl, 272);
632 CHECK_OFFSET(vmread_bitmap, 280);
633 CHECK_OFFSET(vmwrite_bitmap, 288);
634 CHECK_OFFSET(vm_function_control, 296);
635 CHECK_OFFSET(eptp_list_address, 304);
636 CHECK_OFFSET(pml_address, 312);
637 CHECK_OFFSET(cr0_guest_host_mask, 344);
638 CHECK_OFFSET(cr4_guest_host_mask, 352);
639 CHECK_OFFSET(cr0_read_shadow, 360);
640 CHECK_OFFSET(cr4_read_shadow, 368);
641 CHECK_OFFSET(cr3_target_value0, 376);
642 CHECK_OFFSET(cr3_target_value1, 384);
643 CHECK_OFFSET(cr3_target_value2, 392);
644 CHECK_OFFSET(cr3_target_value3, 400);
645 CHECK_OFFSET(exit_qualification, 408);
646 CHECK_OFFSET(guest_linear_address, 416);
647 CHECK_OFFSET(guest_cr0, 424);
648 CHECK_OFFSET(guest_cr3, 432);
649 CHECK_OFFSET(guest_cr4, 440);
650 CHECK_OFFSET(guest_es_base, 448);
651 CHECK_OFFSET(guest_cs_base, 456);
652 CHECK_OFFSET(guest_ss_base, 464);
653 CHECK_OFFSET(guest_ds_base, 472);
654 CHECK_OFFSET(guest_fs_base, 480);
655 CHECK_OFFSET(guest_gs_base, 488);
656 CHECK_OFFSET(guest_ldtr_base, 496);
657 CHECK_OFFSET(guest_tr_base, 504);
658 CHECK_OFFSET(guest_gdtr_base, 512);
659 CHECK_OFFSET(guest_idtr_base, 520);
660 CHECK_OFFSET(guest_dr7, 528);
661 CHECK_OFFSET(guest_rsp, 536);
662 CHECK_OFFSET(guest_rip, 544);
663 CHECK_OFFSET(guest_rflags, 552);
664 CHECK_OFFSET(guest_pending_dbg_exceptions, 560);
665 CHECK_OFFSET(guest_sysenter_esp, 568);
666 CHECK_OFFSET(guest_sysenter_eip, 576);
667 CHECK_OFFSET(host_cr0, 584);
668 CHECK_OFFSET(host_cr3, 592);
669 CHECK_OFFSET(host_cr4, 600);
670 CHECK_OFFSET(host_fs_base, 608);
671 CHECK_OFFSET(host_gs_base, 616);
672 CHECK_OFFSET(host_tr_base, 624);
673 CHECK_OFFSET(host_gdtr_base, 632);
674 CHECK_OFFSET(host_idtr_base, 640);
675 CHECK_OFFSET(host_ia32_sysenter_esp, 648);
676 CHECK_OFFSET(host_ia32_sysenter_eip, 656);
677 CHECK_OFFSET(host_rsp, 664);
678 CHECK_OFFSET(host_rip, 672);
679 CHECK_OFFSET(pin_based_vm_exec_control, 744);
680 CHECK_OFFSET(cpu_based_vm_exec_control, 748);
681 CHECK_OFFSET(exception_bitmap, 752);
682 CHECK_OFFSET(page_fault_error_code_mask, 756);
683 CHECK_OFFSET(page_fault_error_code_match, 760);
684 CHECK_OFFSET(cr3_target_count, 764);
685 CHECK_OFFSET(vm_exit_controls, 768);
686 CHECK_OFFSET(vm_exit_msr_store_count, 772);
687 CHECK_OFFSET(vm_exit_msr_load_count, 776);
688 CHECK_OFFSET(vm_entry_controls, 780);
689 CHECK_OFFSET(vm_entry_msr_load_count, 784);
690 CHECK_OFFSET(vm_entry_intr_info_field, 788);
691 CHECK_OFFSET(vm_entry_exception_error_code, 792);
692 CHECK_OFFSET(vm_entry_instruction_len, 796);
693 CHECK_OFFSET(tpr_threshold, 800);
694 CHECK_OFFSET(secondary_vm_exec_control, 804);
695 CHECK_OFFSET(vm_instruction_error, 808);
696 CHECK_OFFSET(vm_exit_reason, 812);
697 CHECK_OFFSET(vm_exit_intr_info, 816);
698 CHECK_OFFSET(vm_exit_intr_error_code, 820);
699 CHECK_OFFSET(idt_vectoring_info_field, 824);
700 CHECK_OFFSET(idt_vectoring_error_code, 828);
701 CHECK_OFFSET(vm_exit_instruction_len, 832);
702 CHECK_OFFSET(vmx_instruction_info, 836);
703 CHECK_OFFSET(guest_es_limit, 840);
704 CHECK_OFFSET(guest_cs_limit, 844);
705 CHECK_OFFSET(guest_ss_limit, 848);
706 CHECK_OFFSET(guest_ds_limit, 852);
707 CHECK_OFFSET(guest_fs_limit, 856);
708 CHECK_OFFSET(guest_gs_limit, 860);
709 CHECK_OFFSET(guest_ldtr_limit, 864);
710 CHECK_OFFSET(guest_tr_limit, 868);
711 CHECK_OFFSET(guest_gdtr_limit, 872);
712 CHECK_OFFSET(guest_idtr_limit, 876);
713 CHECK_OFFSET(guest_es_ar_bytes, 880);
714 CHECK_OFFSET(guest_cs_ar_bytes, 884);
715 CHECK_OFFSET(guest_ss_ar_bytes, 888);
716 CHECK_OFFSET(guest_ds_ar_bytes, 892);
717 CHECK_OFFSET(guest_fs_ar_bytes, 896);
718 CHECK_OFFSET(guest_gs_ar_bytes, 900);
719 CHECK_OFFSET(guest_ldtr_ar_bytes, 904);
720 CHECK_OFFSET(guest_tr_ar_bytes, 908);
721 CHECK_OFFSET(guest_interruptibility_info, 912);
722 CHECK_OFFSET(guest_activity_state, 916);
723 CHECK_OFFSET(guest_sysenter_cs, 920);
724 CHECK_OFFSET(host_ia32_sysenter_cs, 924);
725 CHECK_OFFSET(vmx_preemption_timer_value, 928);
726 CHECK_OFFSET(virtual_processor_id, 960);
727 CHECK_OFFSET(posted_intr_nv, 962);
728 CHECK_OFFSET(guest_es_selector, 964);
729 CHECK_OFFSET(guest_cs_selector, 966);
730 CHECK_OFFSET(guest_ss_selector, 968);
731 CHECK_OFFSET(guest_ds_selector, 970);
732 CHECK_OFFSET(guest_fs_selector, 972);
733 CHECK_OFFSET(guest_gs_selector, 974);
734 CHECK_OFFSET(guest_ldtr_selector, 976);
735 CHECK_OFFSET(guest_tr_selector, 978);
736 CHECK_OFFSET(guest_intr_status, 980);
737 CHECK_OFFSET(host_es_selector, 982);
738 CHECK_OFFSET(host_cs_selector, 984);
739 CHECK_OFFSET(host_ss_selector, 986);
740 CHECK_OFFSET(host_ds_selector, 988);
741 CHECK_OFFSET(host_fs_selector, 990);
742 CHECK_OFFSET(host_gs_selector, 992);
743 CHECK_OFFSET(host_tr_selector, 994);
744 CHECK_OFFSET(guest_pml_index, 996);
748 * VMCS12_REVISION is an arbitrary id that should be changed if the content or
749 * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
750 * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
752 * IMPORTANT: Changing this value will break save/restore compatibility with
753 * older kvm releases.
755 #define VMCS12_REVISION 0x11e57ed0
758 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
759 * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used
760 * by the current implementation, 4K is reserved to avoid future complications.
762 #define VMCS12_SIZE 0x1000
765 * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
766 * supported VMCS12 field encoding.
768 #define VMCS12_MAX_FIELD_INDEX 0x17
770 struct nested_vmx_msrs {
772 * We only store the "true" versions of the VMX capability MSRs. We
773 * generate the "non-true" versions by setting the must-be-1 bits
774 * according to the SDM.
776 u32 procbased_ctls_low;
777 u32 procbased_ctls_high;
778 u32 secondary_ctls_low;
779 u32 secondary_ctls_high;
780 u32 pinbased_ctls_low;
781 u32 pinbased_ctls_high;
800 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
801 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
804 /* Has the level-1 guest done VMXON? */
809 /* The guest-physical address of the current VMCS L1 keeps for L2 */
812 * Cache of the guest's VMCS, existing outside of guest memory.
813 * Loaded from guest memory during VMPTRLD. Flushed to guest
814 * memory during VMCLEAR and VMPTRLD.
816 struct vmcs12 *cached_vmcs12;
818 * Cache of the guest's shadow VMCS, existing outside of guest
819 * memory. Loaded from guest memory during VM entry. Flushed
820 * to guest memory during VM exit.
822 struct vmcs12 *cached_shadow_vmcs12;
824 * Indicates if the shadow vmcs must be updated with the
825 * data held by vmcs12
827 bool sync_shadow_vmcs;
830 bool change_vmcs01_virtual_apic_mode;
832 /* L2 must run next, and mustn't decide to exit to L1. */
833 bool nested_run_pending;
835 struct loaded_vmcs vmcs02;
838 * Guest pages referred to in the vmcs02 with host-physical
839 * pointers, so we must keep them pinned while L2 runs.
841 struct page *apic_access_page;
842 struct page *virtual_apic_page;
843 struct page *pi_desc_page;
844 struct pi_desc *pi_desc;
848 struct hrtimer preemption_timer;
849 bool preemption_timer_expired;
851 /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
857 struct nested_vmx_msrs msrs;
859 /* SMM related state */
861 /* in VMX operation on SMM entry? */
863 /* in guest mode on SMM entry? */
868 #define POSTED_INTR_ON 0
869 #define POSTED_INTR_SN 1
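/*
 * Bit positions of the outstanding-notification (ON) and
 * suppress-notification (SN) flags in the posted-interrupt descriptor's
 * control word.  The pi_* helpers below toggle them with atomic bitops,
 * since the descriptor is also updated by hardware while the vCPU runs.
 */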
871 /* Posted-Interrupt Descriptor */
873 u32 pir[8]; /* Posted interrupt requested */
876 /* bit 256 - Outstanding Notification */
878 /* bit 257 - Suppress Notification */
880 /* bit 271:258 - Reserved */
882 /* bit 279:272 - Notification Vector */
884 /* bit 287:280 - Reserved */
886 /* bit 319:288 - Notification Destination */
894 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
896 return test_and_set_bit(POSTED_INTR_ON,
897 (unsigned long *)&pi_desc->control);
900 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
902 return test_and_clear_bit(POSTED_INTR_ON,
903 (unsigned long *)&pi_desc->control);
906 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
908 return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
911 static inline void pi_clear_sn(struct pi_desc *pi_desc)
913 clear_bit(POSTED_INTR_SN,
914 (unsigned long *)&pi_desc->control);
917 static inline void pi_set_sn(struct pi_desc *pi_desc)
919 set_bit(POSTED_INTR_SN,
920 (unsigned long *)&pi_desc->control);
923 static inline void pi_clear_on(struct pi_desc *pi_desc)
925 clear_bit(POSTED_INTR_ON,
926 (unsigned long *)&pi_desc->control);
929 static inline int pi_test_on(struct pi_desc *pi_desc)
931 return test_bit(POSTED_INTR_ON,
932 (unsigned long *)&pi_desc->control);
935 static inline int pi_test_sn(struct pi_desc *pi_desc)
937 return test_bit(POSTED_INTR_SN,
938 (unsigned long *)&pi_desc->control);
943 struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
947 struct kvm_vcpu vcpu;
948 unsigned long host_rsp;
952 u32 idt_vectoring_info;
954 struct shared_msr_entry *guest_msrs;
957 unsigned long host_idt_base;
959 u64 msr_host_kernel_gs_base;
960 u64 msr_guest_kernel_gs_base;
963 u64 arch_capabilities;
966 u32 vm_entry_controls_shadow;
967 u32 vm_exit_controls_shadow;
968 u32 secondary_exec_control;
971 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
972 * non-nested (L1) guest, it always points to vmcs01. For a nested
973 * guest (L2), it points to a different VMCS. loaded_cpu_state points
974 * to the VMCS whose state is loaded into the CPU registers that only
975 * need to be switched when transitioning to/from the kernel; a NULL
976 * value indicates that host state is loaded.
978 struct loaded_vmcs vmcs01;
979 struct loaded_vmcs *loaded_vmcs;
980 struct loaded_vmcs *loaded_cpu_state;
981 bool __launched; /* temporary, used in vmx_vcpu_run */
982 struct msr_autoload {
983 struct vmx_msrs guest;
984 struct vmx_msrs host;
990 struct kvm_segment segs[8];
993 u32 bitmask; /* 4 bits per segment (1 bit per field) */
994 struct kvm_save_segment {
1002 bool emulation_required;
1006 /* Posted interrupt descriptor */
1007 struct pi_desc pi_desc;
1009 /* Support for a guest hypervisor (nested VMX) */
1010 struct nested_vmx nested;
1012 /* Dynamic PLE window. */
1014 bool ple_window_dirty;
1016 /* Support for PML */
1017 #define PML_ENTITY_NUM 512
1018 struct page *pml_pg;
1020 /* apic deadline value in host tsc */
1021 u64 hv_deadline_tsc;
1023 u64 current_tsc_ratio;
1027 unsigned long host_debugctlmsr;
1030 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
1031 * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
1032 * in msr_ia32_feature_control_valid_bits.
1034 u64 msr_ia32_feature_control;
1035 u64 msr_ia32_feature_control_valid_bits;
1039 enum segment_cache_field {
1042 SEG_FIELD_LIMIT = 2,
1048 static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
1050 return container_of(kvm, struct kvm_vmx, kvm);
1053 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
1055 return container_of(vcpu, struct vcpu_vmx, vcpu);
1058 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
1060 return &(to_vmx(vcpu)->pi_desc);
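/*
 * ROL16(encoding, 6) folds the sparse VMCS field encoding (width and
 * type in the upper bits, index in the lower bits) into a denser value
 * used to index vmcs_field_to_offset_table[] below.  FIELD64()
 * additionally maps the _HIGH encoding of a 64-bit field onto the upper
 * 32 bits of the same vmcs12 member.
 */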
1063 #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
1064 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
1065 #define FIELD(number, name) [ROL16(number, 6)] = VMCS12_OFFSET(name)
1066 #define FIELD64(number, name) \
1067 FIELD(number, name), \
1068 [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
1071 static u16 shadow_read_only_fields[] = {
1072 #define SHADOW_FIELD_RO(x) x,
1073 #include "vmx_shadow_fields.h"
1075 static int max_shadow_read_only_fields =
1076 ARRAY_SIZE(shadow_read_only_fields);
1078 static u16 shadow_read_write_fields[] = {
1079 #define SHADOW_FIELD_RW(x) x,
1080 #include "vmx_shadow_fields.h"
1082 static int max_shadow_read_write_fields =
1083 ARRAY_SIZE(shadow_read_write_fields);
1085 static const unsigned short vmcs_field_to_offset_table[] = {
1086 FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
1087 FIELD(POSTED_INTR_NV, posted_intr_nv),
1088 FIELD(GUEST_ES_SELECTOR, guest_es_selector),
1089 FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
1090 FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
1091 FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
1092 FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
1093 FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
1094 FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
1095 FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
1096 FIELD(GUEST_INTR_STATUS, guest_intr_status),
1097 FIELD(GUEST_PML_INDEX, guest_pml_index),
1098 FIELD(HOST_ES_SELECTOR, host_es_selector),
1099 FIELD(HOST_CS_SELECTOR, host_cs_selector),
1100 FIELD(HOST_SS_SELECTOR, host_ss_selector),
1101 FIELD(HOST_DS_SELECTOR, host_ds_selector),
1102 FIELD(HOST_FS_SELECTOR, host_fs_selector),
1103 FIELD(HOST_GS_SELECTOR, host_gs_selector),
1104 FIELD(HOST_TR_SELECTOR, host_tr_selector),
1105 FIELD64(IO_BITMAP_A, io_bitmap_a),
1106 FIELD64(IO_BITMAP_B, io_bitmap_b),
1107 FIELD64(MSR_BITMAP, msr_bitmap),
1108 FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
1109 FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
1110 FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
1111 FIELD64(PML_ADDRESS, pml_address),
1112 FIELD64(TSC_OFFSET, tsc_offset),
1113 FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
1114 FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
1115 FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
1116 FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
1117 FIELD64(EPT_POINTER, ept_pointer),
1118 FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
1119 FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
1120 FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
1121 FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
1122 FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
1123 FIELD64(VMREAD_BITMAP, vmread_bitmap),
1124 FIELD64(VMWRITE_BITMAP, vmwrite_bitmap),
1125 FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
1126 FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
1127 FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
1128 FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
1129 FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
1130 FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
1131 FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
1132 FIELD64(GUEST_PDPTR0, guest_pdptr0),
1133 FIELD64(GUEST_PDPTR1, guest_pdptr1),
1134 FIELD64(GUEST_PDPTR2, guest_pdptr2),
1135 FIELD64(GUEST_PDPTR3, guest_pdptr3),
1136 FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
1137 FIELD64(HOST_IA32_PAT, host_ia32_pat),
1138 FIELD64(HOST_IA32_EFER, host_ia32_efer),
1139 FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
1140 FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
1141 FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
1142 FIELD(EXCEPTION_BITMAP, exception_bitmap),
1143 FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
1144 FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
1145 FIELD(CR3_TARGET_COUNT, cr3_target_count),
1146 FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
1147 FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
1148 FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
1149 FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
1150 FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
1151 FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
1152 FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
1153 FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
1154 FIELD(TPR_THRESHOLD, tpr_threshold),
1155 FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
1156 FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
1157 FIELD(VM_EXIT_REASON, vm_exit_reason),
1158 FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
1159 FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
1160 FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
1161 FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
1162 FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
1163 FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
1164 FIELD(GUEST_ES_LIMIT, guest_es_limit),
1165 FIELD(GUEST_CS_LIMIT, guest_cs_limit),
1166 FIELD(GUEST_SS_LIMIT, guest_ss_limit),
1167 FIELD(GUEST_DS_LIMIT, guest_ds_limit),
1168 FIELD(GUEST_FS_LIMIT, guest_fs_limit),
1169 FIELD(GUEST_GS_LIMIT, guest_gs_limit),
1170 FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
1171 FIELD(GUEST_TR_LIMIT, guest_tr_limit),
1172 FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
1173 FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
1174 FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
1175 FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
1176 FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
1177 FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
1178 FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
1179 FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
1180 FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
1181 FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
1182 FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
1183 FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
1184 FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
1185 FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
1186 FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
1187 FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
1188 FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
1189 FIELD(CR0_READ_SHADOW, cr0_read_shadow),
1190 FIELD(CR4_READ_SHADOW, cr4_read_shadow),
1191 FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
1192 FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
1193 FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
1194 FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
1195 FIELD(EXIT_QUALIFICATION, exit_qualification),
1196 FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
1197 FIELD(GUEST_CR0, guest_cr0),
1198 FIELD(GUEST_CR3, guest_cr3),
1199 FIELD(GUEST_CR4, guest_cr4),
1200 FIELD(GUEST_ES_BASE, guest_es_base),
1201 FIELD(GUEST_CS_BASE, guest_cs_base),
1202 FIELD(GUEST_SS_BASE, guest_ss_base),
1203 FIELD(GUEST_DS_BASE, guest_ds_base),
1204 FIELD(GUEST_FS_BASE, guest_fs_base),
1205 FIELD(GUEST_GS_BASE, guest_gs_base),
1206 FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
1207 FIELD(GUEST_TR_BASE, guest_tr_base),
1208 FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
1209 FIELD(GUEST_IDTR_BASE, guest_idtr_base),
1210 FIELD(GUEST_DR7, guest_dr7),
1211 FIELD(GUEST_RSP, guest_rsp),
1212 FIELD(GUEST_RIP, guest_rip),
1213 FIELD(GUEST_RFLAGS, guest_rflags),
1214 FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
1215 FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
1216 FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
1217 FIELD(HOST_CR0, host_cr0),
1218 FIELD(HOST_CR3, host_cr3),
1219 FIELD(HOST_CR4, host_cr4),
1220 FIELD(HOST_FS_BASE, host_fs_base),
1221 FIELD(HOST_GS_BASE, host_gs_base),
1222 FIELD(HOST_TR_BASE, host_tr_base),
1223 FIELD(HOST_GDTR_BASE, host_gdtr_base),
1224 FIELD(HOST_IDTR_BASE, host_idtr_base),
1225 FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
1226 FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
1227 FIELD(HOST_RSP, host_rsp),
1228 FIELD(HOST_RIP, host_rip),
1231 static inline short vmcs_field_to_offset(unsigned long field)
1233 const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
1234 unsigned short offset;
1240 index = ROL16(field, 6);
1244 index = array_index_nospec(index, size);
1245 offset = vmcs_field_to_offset_table[index];
1251 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
1253 return to_vmx(vcpu)->nested.cached_vmcs12;
1256 static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
1258 return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
1261 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
1262 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
1263 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
1264 static bool vmx_xsaves_supported(void);
1265 static void vmx_set_segment(struct kvm_vcpu *vcpu,
1266 struct kvm_segment *var, int seg);
1267 static void vmx_get_segment(struct kvm_vcpu *vcpu,
1268 struct kvm_segment *var, int seg);
1269 static bool guest_state_valid(struct kvm_vcpu *vcpu);
1270 static u32 vmx_segment_access_rights(struct kvm_segment *var);
1271 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
1272 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
1273 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
1274 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
1276 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
1277 static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
1280 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
1281 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
1283 * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed
1284 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
1286 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
1289 * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
1290 * can find which vCPU should be woken up.
1292 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
1293 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
1301 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
1303 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
1304 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
1306 static bool cpu_has_load_ia32_efer;
1307 static bool cpu_has_load_perf_global_ctrl;
1309 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
1310 static DEFINE_SPINLOCK(vmx_vpid_lock);
1312 static struct vmcs_config {
1317 u32 pin_based_exec_ctrl;
1318 u32 cpu_based_exec_ctrl;
1319 u32 cpu_based_2nd_exec_ctrl;
1322 struct nested_vmx_msrs nested;
1325 static struct vmx_capability {
1330 #define VMX_SEGMENT_FIELD(seg) \
1331 [VCPU_SREG_##seg] = { \
1332 .selector = GUEST_##seg##_SELECTOR, \
1333 .base = GUEST_##seg##_BASE, \
1334 .limit = GUEST_##seg##_LIMIT, \
1335 .ar_bytes = GUEST_##seg##_AR_BYTES, \
1338 static const struct kvm_vmx_segment_field {
1343 } kvm_vmx_segment_fields[] = {
1344 VMX_SEGMENT_FIELD(CS),
1345 VMX_SEGMENT_FIELD(DS),
1346 VMX_SEGMENT_FIELD(ES),
1347 VMX_SEGMENT_FIELD(FS),
1348 VMX_SEGMENT_FIELD(GS),
1349 VMX_SEGMENT_FIELD(SS),
1350 VMX_SEGMENT_FIELD(TR),
1351 VMX_SEGMENT_FIELD(LDTR),
1354 static u64 host_efer;
1356 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1359 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
1360 * away by decrementing the array size.
1362 static const u32 vmx_msr_index[] = {
1363 #ifdef CONFIG_X86_64
1364 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
1366 MSR_EFER, MSR_TSC_AUX, MSR_STAR,
1369 DEFINE_STATIC_KEY_FALSE(enable_evmcs);
1371 #define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
1373 #define KVM_EVMCS_VERSION 1
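/*
 * When running on Hyper-V, KVM can use the "enlightened" VMCS: fields
 * are accessed directly in memory via the evmcs_read{16,32,64}() and
 * evmcs_write{16,32,64}() helpers below instead of VMREAD/VMWRITE, and
 * hv_clean_fields lets the hypervisor reload only the groups that were
 * actually dirtied.
 */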
1375 #if IS_ENABLED(CONFIG_HYPERV)
1376 static bool __read_mostly enlightened_vmcs = true;
1377 module_param(enlightened_vmcs, bool, 0444);
1379 static inline void evmcs_write64(unsigned long field, u64 value)
1382 int offset = get_evmcs_offset(field, &clean_field);
1387 *(u64 *)((char *)current_evmcs + offset) = value;
1389 current_evmcs->hv_clean_fields &= ~clean_field;
1392 static inline void evmcs_write32(unsigned long field, u32 value)
1395 int offset = get_evmcs_offset(field, &clean_field);
1400 *(u32 *)((char *)current_evmcs + offset) = value;
1401 current_evmcs->hv_clean_fields &= ~clean_field;
1404 static inline void evmcs_write16(unsigned long field, u16 value)
1407 int offset = get_evmcs_offset(field, &clean_field);
1412 *(u16 *)((char *)current_evmcs + offset) = value;
1413 current_evmcs->hv_clean_fields &= ~clean_field;
1416 static inline u64 evmcs_read64(unsigned long field)
1418 int offset = get_evmcs_offset(field, NULL);
1423 return *(u64 *)((char *)current_evmcs + offset);
1426 static inline u32 evmcs_read32(unsigned long field)
1428 int offset = get_evmcs_offset(field, NULL);
1433 return *(u32 *)((char *)current_evmcs + offset);
1436 static inline u16 evmcs_read16(unsigned long field)
1438 int offset = get_evmcs_offset(field, NULL);
1443 return *(u16 *)((char *)current_evmcs + offset);
1446 static inline void evmcs_touch_msr_bitmap(void)
1448 if (unlikely(!current_evmcs))
1451 if (current_evmcs->hv_enlightenments_control.msr_bitmap)
1452 current_evmcs->hv_clean_fields &=
1453 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
1456 static void evmcs_load(u64 phys_addr)
1458 struct hv_vp_assist_page *vp_ap =
1459 hv_get_vp_assist_page(smp_processor_id());
1461 vp_ap->current_nested_vmcs = phys_addr;
1462 vp_ap->enlighten_vmentry = 1;
1465 static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
1468 * Enlightened VMCSv1 doesn't support these:
1470 * POSTED_INTR_NV = 0x00000002,
1471 * GUEST_INTR_STATUS = 0x00000810,
1472 * APIC_ACCESS_ADDR = 0x00002014,
1473 * POSTED_INTR_DESC_ADDR = 0x00002016,
1474 * EOI_EXIT_BITMAP0 = 0x0000201c,
1475 * EOI_EXIT_BITMAP1 = 0x0000201e,
1476 * EOI_EXIT_BITMAP2 = 0x00002020,
1477 * EOI_EXIT_BITMAP3 = 0x00002022,
1479 vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
1480 vmcs_conf->cpu_based_2nd_exec_ctrl &=
1481 ~SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1482 vmcs_conf->cpu_based_2nd_exec_ctrl &=
1483 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1484 vmcs_conf->cpu_based_2nd_exec_ctrl &=
1485 ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
1488 * GUEST_PML_INDEX = 0x00000812,
1489 * PML_ADDRESS = 0x0000200e,
1491 vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_PML;
1493 /* VM_FUNCTION_CONTROL = 0x00002018, */
1494 vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
1497 * EPTP_LIST_ADDRESS = 0x00002024,
1498 * VMREAD_BITMAP = 0x00002026,
1499 * VMWRITE_BITMAP = 0x00002028,
1501 vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_SHADOW_VMCS;
1504 * TSC_MULTIPLIER = 0x00002032,
1506 vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_TSC_SCALING;
1509 * PLE_GAP = 0x00004020,
1510 * PLE_WINDOW = 0x00004022,
1512 vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1515 * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
1517 vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
1520 * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
1521 * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
1523 vmcs_conf->vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
1524 vmcs_conf->vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
1527 * Currently unsupported in KVM:
1528 * GUEST_IA32_RTIT_CTL = 0x00002814,
1532 /* check_ept_pointer_match() should be called under protection of ept_pointer_lock. */
1533 static void check_ept_pointer_match(struct kvm *kvm)
1535 struct kvm_vcpu *vcpu;
1536 u64 tmp_eptp = INVALID_PAGE;
1539 kvm_for_each_vcpu(i, vcpu, kvm) {
1540 if (!VALID_PAGE(tmp_eptp)) {
1541 tmp_eptp = to_vmx(vcpu)->ept_pointer;
1542 } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
1543 to_kvm_vmx(kvm)->ept_pointers_match
1544 = EPT_POINTERS_MISMATCH;
1549 to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
1552 static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
1556 spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
1558 if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
1559 check_ept_pointer_match(kvm);
1561 if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
1566 ret = hyperv_flush_guest_mapping(
1567 to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
1570 spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
1573 #else /* !IS_ENABLED(CONFIG_HYPERV) */
1574 static inline void evmcs_write64(unsigned long field, u64 value) {}
1575 static inline void evmcs_write32(unsigned long field, u32 value) {}
1576 static inline void evmcs_write16(unsigned long field, u16 value) {}
1577 static inline u64 evmcs_read64(unsigned long field) { return 0; }
1578 static inline u32 evmcs_read32(unsigned long field) { return 0; }
1579 static inline u16 evmcs_read16(unsigned long field) { return 0; }
1580 static inline void evmcs_load(u64 phys_addr) {}
1581 static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
1582 static inline void evmcs_touch_msr_bitmap(void) {}
1583 #endif /* IS_ENABLED(CONFIG_HYPERV) */
1585 static inline bool is_exception_n(u32 intr_info, u8 vector)
1587 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1588 INTR_INFO_VALID_MASK)) ==
1589 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1592 static inline bool is_debug(u32 intr_info)
1594 return is_exception_n(intr_info, DB_VECTOR);
1597 static inline bool is_breakpoint(u32 intr_info)
1599 return is_exception_n(intr_info, BP_VECTOR);
1602 static inline bool is_page_fault(u32 intr_info)
1604 return is_exception_n(intr_info, PF_VECTOR);
1607 static inline bool is_no_device(u32 intr_info)
1609 return is_exception_n(intr_info, NM_VECTOR);
1612 static inline bool is_invalid_opcode(u32 intr_info)
1614 return is_exception_n(intr_info, UD_VECTOR);
1617 static inline bool is_gp_fault(u32 intr_info)
1619 return is_exception_n(intr_info, GP_VECTOR);
1622 static inline bool is_external_interrupt(u32 intr_info)
1624 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1625 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1628 static inline bool is_machine_check(u32 intr_info)
1630 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1631 INTR_INFO_VALID_MASK)) ==
1632 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1635 /* Undocumented: icebp/int1 */
1636 static inline bool is_icebp(u32 intr_info)
1638 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1639 == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
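/*
 * The is_*() helpers above decode the VM-exit interruption information
 * field: bits 7:0 hold the vector, bits 10:8 the event type and bit 31
 * the valid bit.  A valid page fault, for example, is reported as
 * INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK.
 */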
1642 static inline bool cpu_has_vmx_msr_bitmap(void)
1644 return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1647 static inline bool cpu_has_vmx_tpr_shadow(void)
1649 return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1652 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1654 return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1657 static inline bool cpu_has_secondary_exec_ctrls(void)
1659 return vmcs_config.cpu_based_exec_ctrl &
1660 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1663 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1665 return vmcs_config.cpu_based_2nd_exec_ctrl &
1666 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1669 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1671 return vmcs_config.cpu_based_2nd_exec_ctrl &
1672 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1675 static inline bool cpu_has_vmx_apic_register_virt(void)
1677 return vmcs_config.cpu_based_2nd_exec_ctrl &
1678 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1681 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1683 return vmcs_config.cpu_based_2nd_exec_ctrl &
1684 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1688 * Comment's format: document - errata name - stepping - processor name.
1690 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1692 static u32 vmx_preemption_cpu_tfms[] = {
1693 /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
1695 /* 323056.pdf - AAX65 - C2 - Xeon L3406 */
1696 /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1697 /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1699 /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1701 /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */
1702 /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */
1704 * 320767.pdf - AAP86 - B1 -
1705 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1708 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1710 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1712 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1714 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1715 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1716 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1720 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1722 u32 eax = cpuid_eax(0x00000001), i;
1724 /* Clear the reserved bits */
1725 eax &= ~(0x3U << 14 | 0xfU << 28);
1726 for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1727 if (eax == vmx_preemption_cpu_tfms[i])
1733 static inline bool cpu_has_vmx_preemption_timer(void)
1735 return vmcs_config.pin_based_exec_ctrl &
1736 PIN_BASED_VMX_PREEMPTION_TIMER;
1739 static inline bool cpu_has_vmx_posted_intr(void)
1741 return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1742 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1745 static inline bool cpu_has_vmx_apicv(void)
1747 return cpu_has_vmx_apic_register_virt() &&
1748 cpu_has_vmx_virtual_intr_delivery() &&
1749 cpu_has_vmx_posted_intr();
1752 static inline bool cpu_has_vmx_flexpriority(void)
1754 return cpu_has_vmx_tpr_shadow() &&
1755 cpu_has_vmx_virtualize_apic_accesses();
1758 static inline bool cpu_has_vmx_ept_execute_only(void)
1760 return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1763 static inline bool cpu_has_vmx_ept_2m_page(void)
1765 return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1768 static inline bool cpu_has_vmx_ept_1g_page(void)
1770 return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1773 static inline bool cpu_has_vmx_ept_4levels(void)
1775 return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1778 static inline bool cpu_has_vmx_ept_mt_wb(void)
1780 return vmx_capability.ept & VMX_EPTP_WB_BIT;
1783 static inline bool cpu_has_vmx_ept_5levels(void)
1785 return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1788 static inline bool cpu_has_vmx_ept_ad_bits(void)
1790 return vmx_capability.ept & VMX_EPT_AD_BIT;
1793 static inline bool cpu_has_vmx_invept_context(void)
1795 return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1798 static inline bool cpu_has_vmx_invept_global(void)
1800 return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1803 static inline bool cpu_has_vmx_invvpid_individual_addr(void)
1805 return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;
1808 static inline bool cpu_has_vmx_invvpid_single(void)
1810 return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1813 static inline bool cpu_has_vmx_invvpid_global(void)
1815 return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1818 static inline bool cpu_has_vmx_invvpid(void)
1820 return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1823 static inline bool cpu_has_vmx_ept(void)
1825 return vmcs_config.cpu_based_2nd_exec_ctrl &
1826 SECONDARY_EXEC_ENABLE_EPT;
1829 static inline bool cpu_has_vmx_unrestricted_guest(void)
1831 return vmcs_config.cpu_based_2nd_exec_ctrl &
1832 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1835 static inline bool cpu_has_vmx_ple(void)
1837 return vmcs_config.cpu_based_2nd_exec_ctrl &
1838 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1841 static inline bool cpu_has_vmx_basic_inout(void)
1843 return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1846 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1848 return flexpriority_enabled && lapic_in_kernel(vcpu);
1851 static inline bool cpu_has_vmx_vpid(void)
1853 return vmcs_config.cpu_based_2nd_exec_ctrl &
1854 SECONDARY_EXEC_ENABLE_VPID;
1857 static inline bool cpu_has_vmx_rdtscp(void)
1859 return vmcs_config.cpu_based_2nd_exec_ctrl &
1860 SECONDARY_EXEC_RDTSCP;
1863 static inline bool cpu_has_vmx_invpcid(void)
1865 return vmcs_config.cpu_based_2nd_exec_ctrl &
1866 SECONDARY_EXEC_ENABLE_INVPCID;
1869 static inline bool cpu_has_virtual_nmis(void)
1871 return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1874 static inline bool cpu_has_vmx_wbinvd_exit(void)
1876 return vmcs_config.cpu_based_2nd_exec_ctrl &
1877 SECONDARY_EXEC_WBINVD_EXITING;
1880 static inline bool cpu_has_vmx_shadow_vmcs(void)
1883 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1884 /* check if the cpu supports writing r/o exit information fields */
1885 if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1888 return vmcs_config.cpu_based_2nd_exec_ctrl &
1889 SECONDARY_EXEC_SHADOW_VMCS;
1892 static inline bool cpu_has_vmx_pml(void)
1894 return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1897 static inline bool cpu_has_vmx_tsc_scaling(void)
1899 return vmcs_config.cpu_based_2nd_exec_ctrl &
1900 SECONDARY_EXEC_TSC_SCALING;
1903 static inline bool cpu_has_vmx_vmfunc(void)
1905 return vmcs_config.cpu_based_2nd_exec_ctrl &
1906 SECONDARY_EXEC_ENABLE_VMFUNC;
1909 static bool vmx_umip_emulated(void)
1911 return vmcs_config.cpu_based_2nd_exec_ctrl &
1912 SECONDARY_EXEC_DESC;
1915 static inline bool report_flexpriority(void)
1917 return flexpriority_enabled;
1920 static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1922 return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
1926 * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
1927 * to modify any valid field of the VMCS, or are the VM-exit
1928 * information fields read-only?
1930 static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
1932 return to_vmx(vcpu)->nested.msrs.misc_low &
1933 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
1936 static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
1938 return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
1941 static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
1943 return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
1944 CPU_BASED_MONITOR_TRAP_FLAG;
1947 static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
1949 return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
1950 SECONDARY_EXEC_SHADOW_VMCS;
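/*
 * nested_cpu_has() tests a primary processor-based control in vmcs12;
 * nested_cpu_has2() tests a secondary control, which only counts if L1
 * also enabled CPU_BASED_ACTIVATE_SECONDARY_CONTROLS.
 */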
1953 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1955 return vmcs12->cpu_based_vm_exec_control & bit;
1958 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1960 return (vmcs12->cpu_based_vm_exec_control &
1961 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1962 (vmcs12->secondary_vm_exec_control & bit);
1965 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1967 return vmcs12->pin_based_vm_exec_control &
1968 PIN_BASED_VMX_PREEMPTION_TIMER;
1971 static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
1973 return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
1976 static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1978 return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1981 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1983 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1986 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1988 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
1991 static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
1993 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
1996 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
1998 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
2001 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
2003 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
2006 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
2008 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
2011 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
2013 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2016 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
2018 return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
2021 static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
2023 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
2026 static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
2028 return nested_cpu_has_vmfunc(vmcs12) &&
2029 (vmcs12->vm_function_control &
2030 VMX_VMFUNC_EPTP_SWITCHING);
2033 static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
2035 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
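/*
 * Descriptive note: is_nmi() below decodes a VM-exit interruption-information
 * field: bit 31 is the "valid" bit and bits 10:8 encode the event type, so
 * the event is an NMI only if both the valid bit and the NMI type are set.
 */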
2038 static inline bool is_nmi(u32 intr_info)
2040 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
2041 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
2044 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
2046 unsigned long exit_qualification);
2047 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
2048 struct vmcs12 *vmcs12,
2049 u32 reason, unsigned long qualification);
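/*
 * Descriptive note: __find_msr_index() returns the slot of @msr in
 * vmx->guest_msrs (the slots are described by vmx_msr_index[]); callers
 * treat a negative return value as "not tracked".
 */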
2051 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
2055 for (i = 0; i < vmx->nmsrs; ++i)
2056 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
2061 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
2067 } operand = { vpid, 0, gva };
2070 asm volatile (__ex(ASM_VMX_INVVPID) CC_SET(na)
2071 : CC_OUT(na) (error) : "a"(&operand), "c"(ext)
2076 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
2080 } operand = {eptp, gpa};
2083 asm volatile (__ex(ASM_VMX_INVEPT) CC_SET(na)
2084 : CC_OUT(na) (error) : "a" (&operand), "c" (ext)
2089 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
2093 i = __find_msr_index(vmx, msr);
2095 return &vmx->guest_msrs[i];
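/*
 * Descriptive note: vmcs_clear() issues VMCLEAR on the VMCS at the given
 * physical address, which flushes any cached VMCS state to memory and leaves
 * the region inactive and not-current, so it can later be loaded on another
 * CPU.
 */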
2099 static void vmcs_clear(struct vmcs *vmcs)
2101 u64 phys_addr = __pa(vmcs);
2104 asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) CC_SET(na)
2105 : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
2107 if (unlikely(error))
2108 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
2112 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
2114 vmcs_clear(loaded_vmcs->vmcs);
2115 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
2116 vmcs_clear(loaded_vmcs->shadow_vmcs);
2117 loaded_vmcs->cpu = -1;
2118 loaded_vmcs->launched = 0;
2121 static void vmcs_load(struct vmcs *vmcs)
2123 u64 phys_addr = __pa(vmcs);
2126 if (static_branch_unlikely(&enable_evmcs))
2127 return evmcs_load(phys_addr);
2129 asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) CC_SET(na)
2130 : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
2132 if (unlikely(error))
2133 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
2137 #ifdef CONFIG_KEXEC_CORE
2139 * This bitmap is used to indicate whether the vmclear
2140 * operation is enabled on all cpus. All disabled by default.
2143 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
2145 static inline void crash_enable_local_vmclear(int cpu)
2147 cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
2150 static inline void crash_disable_local_vmclear(int cpu)
2152 cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
2155 static inline int crash_local_vmclear_enabled(int cpu)
2157 return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
2160 static void crash_vmclear_local_loaded_vmcss(void)
2162 int cpu = raw_smp_processor_id();
2163 struct loaded_vmcs *v;
2165 if (!crash_local_vmclear_enabled(cpu))
2168 list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
2169 loaded_vmcss_on_cpu_link)
2170 vmcs_clear(v->vmcs);
2173 static inline void crash_enable_local_vmclear(int cpu) { }
2174 static inline void crash_disable_local_vmclear(int cpu) { }
2175 #endif /* CONFIG_KEXEC_CORE */
2177 static void __loaded_vmcs_clear(void *arg)
2179 struct loaded_vmcs *loaded_vmcs = arg;
2180 int cpu = raw_smp_processor_id();
2182 if (loaded_vmcs->cpu != cpu)
2183 return; /* vcpu migration can race with cpu offline */
2184 if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
2185 per_cpu(current_vmcs, cpu) = NULL;
2186 crash_disable_local_vmclear(cpu);
2187 list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
2190 * we should ensure that updating loaded_vmcs->loaded_vmcss_on_cpu_link
2191 * happens before setting loaded_vmcs->cpu to -1, which is done in
2192 * loaded_vmcs_init. Otherwise, another cpu could see cpu == -1 first
2193 * and then add the vmcs to its percpu list before it is deleted.
2197 loaded_vmcs_init(loaded_vmcs);
2198 crash_enable_local_vmclear(cpu);
2201 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
2203 int cpu = loaded_vmcs->cpu;
2206 smp_call_function_single(cpu,
2207 __loaded_vmcs_clear, loaded_vmcs, 1);
2210 static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
2215 if (cpu_has_vmx_invvpid_individual_addr()) {
2216 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
2223 static inline void vpid_sync_vcpu_single(int vpid)
2228 if (cpu_has_vmx_invvpid_single())
2229 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
2232 static inline void vpid_sync_vcpu_global(void)
2234 if (cpu_has_vmx_invvpid_global())
2235 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
2238 static inline void vpid_sync_context(int vpid)
2240 if (cpu_has_vmx_invvpid_single())
2241 vpid_sync_vcpu_single(vpid);
2242 else
2243 vpid_sync_vcpu_global();
2246 static inline void ept_sync_global(void)
2248 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
2251 static inline void ept_sync_context(u64 eptp)
2253 if (cpu_has_vmx_invept_context())
2254 __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
2255 else
2256 ept_sync_global();
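/*
 * Descriptive note: the vmcs_check*() helpers below rely on the VMCS field
 * encoding: bits 14:13 of a field number select its width (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width) and bit 0 selects the high half
 * of a 64-bit field, hence the 0x6000/0x6001 masks and the
 * 0x0000/0x2000/0x4000/0x6000 comparisons.
 */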
2259 static __always_inline void vmcs_check16(unsigned long field)
2261 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
2262 "16-bit accessor invalid for 64-bit field");
2263 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2264 "16-bit accessor invalid for 64-bit high field");
2265 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2266 "16-bit accessor invalid for 32-bit high field");
2267 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2268 "16-bit accessor invalid for natural width field");
2271 static __always_inline void vmcs_check32(unsigned long field)
2273 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2274 "32-bit accessor invalid for 16-bit field");
2275 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2276 "32-bit accessor invalid for natural width field");
2279 static __always_inline void vmcs_check64(unsigned long field)
2281 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2282 "64-bit accessor invalid for 16-bit field");
2283 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2284 "64-bit accessor invalid for 64-bit high field");
2285 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2286 "64-bit accessor invalid for 32-bit field");
2287 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2288 "64-bit accessor invalid for natural width field");
2291 static __always_inline void vmcs_checkl(unsigned long field)
2293 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2294 "Natural width accessor invalid for 16-bit field");
2295 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
2296 "Natural width accessor invalid for 64-bit field");
2297 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2298 "Natural width accessor invalid for 64-bit high field");
2299 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2300 "Natural width accessor invalid for 32-bit field");
2303 static __always_inline unsigned long __vmcs_readl(unsigned long field)
2305 unsigned long value;
2307 asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
2308 : "=a"(value) : "d"(field) : "cc");
2312 static __always_inline u16 vmcs_read16(unsigned long field)
2314 vmcs_check16(field);
2315 if (static_branch_unlikely(&enable_evmcs))
2316 return evmcs_read16(field);
2317 return __vmcs_readl(field);
2320 static __always_inline u32 vmcs_read32(unsigned long field)
2322 vmcs_check32(field);
2323 if (static_branch_unlikely(&enable_evmcs))
2324 return evmcs_read32(field);
2325 return __vmcs_readl(field);
2328 static __always_inline u64 vmcs_read64(unsigned long field)
2330 vmcs_check64(field);
2331 if (static_branch_unlikely(&enable_evmcs))
2332 return evmcs_read64(field);
2333 #ifdef CONFIG_X86_64
2334 return __vmcs_readl(field);
2336 return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
2340 static __always_inline unsigned long vmcs_readl(unsigned long field)
2343 if (static_branch_unlikely(&enable_evmcs))
2344 return evmcs_read64(field);
2345 return __vmcs_readl(field);
2348 static noinline void vmwrite_error(unsigned long field, unsigned long value)
2350 printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
2351 field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
2355 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
2359 asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) CC_SET(na)
2360 : CC_OUT(na) (error) : "a"(value), "d"(field));
2361 if (unlikely(error))
2362 vmwrite_error(field, value);
2365 static __always_inline void vmcs_write16(unsigned long field, u16 value)
2367 vmcs_check16(field);
2368 if (static_branch_unlikely(&enable_evmcs))
2369 return evmcs_write16(field, value);
2371 __vmcs_writel(field, value);
2374 static __always_inline void vmcs_write32(unsigned long field, u32 value)
2376 vmcs_check32(field);
2377 if (static_branch_unlikely(&enable_evmcs))
2378 return evmcs_write32(field, value);
2380 __vmcs_writel(field, value);
2383 static __always_inline void vmcs_write64(unsigned long field, u64 value)
2385 vmcs_check64(field);
2386 if (static_branch_unlikely(&enable_evmcs))
2387 return evmcs_write64(field, value);
2389 __vmcs_writel(field, value);
2390 #ifndef CONFIG_X86_64
2392 __vmcs_writel(field+1, value >> 32);
2396 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
2399 if (static_branch_unlikely(&enable_evmcs))
2400 return evmcs_write64(field, value);
2402 __vmcs_writel(field, value);
2405 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
2407 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
2408 "vmcs_clear_bits does not support 64-bit fields");
2409 if (static_branch_unlikely(&enable_evmcs))
2410 return evmcs_write32(field, evmcs_read32(field) & ~mask);
2412 __vmcs_writel(field, __vmcs_readl(field) & ~mask);
2415 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
2417 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
2418 "vmcs_set_bits does not support 64-bit fields");
2419 if (static_branch_unlikely(&enable_evmcs))
2420 return evmcs_write32(field, evmcs_read32(field) | mask);
2422 __vmcs_writel(field, __vmcs_readl(field) | mask);
2425 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
2427 vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
2430 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
2432 vmcs_write32(VM_ENTRY_CONTROLS, val);
2433 vmx->vm_entry_controls_shadow = val;
2436 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
2438 if (vmx->vm_entry_controls_shadow != val)
2439 vm_entry_controls_init(vmx, val);
2442 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
2444 return vmx->vm_entry_controls_shadow;
2448 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2450 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
2453 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2455 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
2458 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
2460 vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
2463 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
2465 vmcs_write32(VM_EXIT_CONTROLS, val);
2466 vmx->vm_exit_controls_shadow = val;
2469 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
2471 if (vmx->vm_exit_controls_shadow != val)
2472 vm_exit_controls_init(vmx, val);
2475 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
2477 return vmx->vm_exit_controls_shadow;
2481 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2483 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
2486 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2488 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
2491 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
2493 vmx->segment_cache.bitmask = 0;
2496 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
2500 u32 mask = 1 << (seg * SEG_FIELD_NR + field);
2502 if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
2503 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
2504 vmx->segment_cache.bitmask = 0;
2506 ret = vmx->segment_cache.bitmask & mask;
2507 vmx->segment_cache.bitmask |= mask;
2511 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
2513 u16 *p = &vmx->segment_cache.seg[seg].selector;
2515 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
2516 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
2520 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
2522 ulong *p = &vmx->segment_cache.seg[seg].base;
2524 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
2525 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
2529 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
2531 u32 *p = &vmx->segment_cache.seg[seg].limit;
2533 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
2534 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
2538 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
2540 u32 *p = &vmx->segment_cache.seg[seg].ar;
2542 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
2543 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
2547 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
2551 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
2552 (1u << DB_VECTOR) | (1u << AC_VECTOR);
2554 * Guest access to VMware backdoor ports could legitimately
2555 * trigger #GP because of TSS I/O permission bitmap.
2556 * We intercept those #GPs and allow access to them anyway.
2559 if (enable_vmware_backdoor)
2560 eb |= (1u << GP_VECTOR);
2561 if ((vcpu->guest_debug &
2562 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
2563 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
2564 eb |= 1u << BP_VECTOR;
2565 if (to_vmx(vcpu)->rmode.vm86_active)
2568 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
2570 /* When we are running a nested L2 guest and L1 specified for it a
2571 * certain exception bitmap, we must trap the same exceptions and pass
2572 * them to L1. When running L2, we will only handle the exceptions
2573 * specified above if L1 did not want them.
2575 if (is_guest_mode(vcpu))
2576 eb |= get_vmcs12(vcpu)->exception_bitmap;
2578 vmcs_write32(EXCEPTION_BITMAP, eb);
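/*
 * Descriptive note: the VMX MSR bitmap is a 4K page made of four 1K bitmaps:
 * reads of low MSRs (0x00000000-0x00001fff) at offset 0x000, reads of high
 * MSRs (0xc0000000-0xc0001fff) at 0x400, writes of low MSRs at 0x800 and
 * writes of high MSRs at 0xc00; that is where the 0x800/0xc00 offsets in the
 * helpers below come from.
 */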
2582 * Check if MSR is intercepted for currently loaded MSR bitmap.
2584 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
2586 unsigned long *msr_bitmap;
2587 int f = sizeof(unsigned long);
2589 if (!cpu_has_vmx_msr_bitmap())
2592 msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
2594 if (msr <= 0x1fff) {
2595 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2596 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2597 msr &= 0x1fff;
2598 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2605 * Check if MSR is intercepted for L01 MSR bitmap.
2607 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
2609 unsigned long *msr_bitmap;
2610 int f = sizeof(unsigned long);
2612 if (!cpu_has_vmx_msr_bitmap())
2615 msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
2617 if (msr <= 0x1fff) {
2618 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2619 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2620 msr &= 0x1fff;
2621 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2627 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2628 unsigned long entry, unsigned long exit)
2630 vm_entry_controls_clearbit(vmx, entry);
2631 vm_exit_controls_clearbit(vmx, exit);
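/*
 * Descriptive note: the helpers below manage the VM-entry/VM-exit MSR
 * autoload lists in vmx->msr_autoload; find_msr() looks up the slot used for
 * @msr in one of those lists. MSRs with dedicated VM-entry/VM-exit controls
 * (IA32_EFER, IA32_PERF_GLOBAL_CTRL) are switched through those controls
 * instead when the hardware supports it.
 */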
2634 static int find_msr(struct vmx_msrs *m, unsigned int msr)
2638 for (i = 0; i < m->nr; ++i) {
2639 if (m->val[i].index == msr)
2645 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
2648 struct msr_autoload *m = &vmx->msr_autoload;
2652 if (cpu_has_load_ia32_efer) {
2653 clear_atomic_switch_msr_special(vmx,
2654 VM_ENTRY_LOAD_IA32_EFER,
2655 VM_EXIT_LOAD_IA32_EFER);
2659 case MSR_CORE_PERF_GLOBAL_CTRL:
2660 if (cpu_has_load_perf_global_ctrl) {
2661 clear_atomic_switch_msr_special(vmx,
2662 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2663 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2668 i = find_msr(&m->guest, msr);
2672 m->guest.val[i] = m->guest.val[m->guest.nr];
2673 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
2676 i = find_msr(&m->host, msr);
2681 m->host.val[i] = m->host.val[m->host.nr];
2682 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
2685 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2686 unsigned long entry, unsigned long exit,
2687 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
2688 u64 guest_val, u64 host_val)
2690 vmcs_write64(guest_val_vmcs, guest_val);
2691 vmcs_write64(host_val_vmcs, host_val);
2692 vm_entry_controls_setbit(vmx, entry);
2693 vm_exit_controls_setbit(vmx, exit);
2696 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
2697 u64 guest_val, u64 host_val, bool entry_only)
2700 struct msr_autoload *m = &vmx->msr_autoload;
2704 if (cpu_has_load_ia32_efer) {
2705 add_atomic_switch_msr_special(vmx,
2706 VM_ENTRY_LOAD_IA32_EFER,
2707 VM_EXIT_LOAD_IA32_EFER,
2710 guest_val, host_val);
2714 case MSR_CORE_PERF_GLOBAL_CTRL:
2715 if (cpu_has_load_perf_global_ctrl) {
2716 add_atomic_switch_msr_special(vmx,
2717 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2718 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2719 GUEST_IA32_PERF_GLOBAL_CTRL,
2720 HOST_IA32_PERF_GLOBAL_CTRL,
2721 guest_val, host_val);
2725 case MSR_IA32_PEBS_ENABLE:
2726 /* PEBS needs a quiescent period after being disabled (to write
2727 * a record). Disabling PEBS through VMX MSR swapping doesn't
2728 * provide that period, so a CPU could write host's record into guest's memory.
2731 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
2734 i = find_msr(&m->guest, msr);
2736 j = find_msr(&m->host, msr);
2738 if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
2739 printk_once(KERN_WARNING "Not enough msr switch entries. "
2740 "Can't add msr %x\n", msr);
2745 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
2747 m->guest.val[i].index = msr;
2748 m->guest.val[i].value = guest_val;
2755 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
2757 m->host.val[j].index = msr;
2758 m->host.val[j].value = host_val;
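/*
 * Descriptive note: update_transition_efer() decides how guest EFER is
 * handled across VM entry/exit: either switched atomically (via the
 * "load IA32_EFER" controls or the autoload lists) or lazily through the
 * shared guest_msrs entry at @efer_offset, with bits the guest cannot
 * observe masked off via ignore_bits.
 */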
2761 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
2763 u64 guest_efer = vmx->vcpu.arch.efer;
2764 u64 ignore_bits = 0;
2768 * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
2769 * host CPUID is more efficient than testing guest CPUID
2770 * or CR4. Host SMEP is anyway a requirement for guest SMEP.
2772 if (boot_cpu_has(X86_FEATURE_SMEP))
2773 guest_efer |= EFER_NX;
2774 else if (!(guest_efer & EFER_NX))
2775 ignore_bits |= EFER_NX;
2779 * LMA and LME handled by hardware; SCE meaningless outside long mode.
2781 ignore_bits |= EFER_SCE;
2782 #ifdef CONFIG_X86_64
2783 ignore_bits |= EFER_LMA | EFER_LME;
2784 /* SCE is meaningful only in long mode on Intel */
2785 if (guest_efer & EFER_LMA)
2786 ignore_bits &= ~(u64)EFER_SCE;
2789 clear_atomic_switch_msr(vmx, MSR_EFER);
2792 * On EPT, we can't emulate NX, so we must switch EFER atomically.
2793 * On CPUs that support "load IA32_EFER", always switch EFER
2794 * atomically, since it's faster than switching it manually.
2796 if (cpu_has_load_ia32_efer ||
2797 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
2798 if (!(guest_efer & EFER_LMA))
2799 guest_efer &= ~EFER_LME;
2800 if (guest_efer != host_efer)
2801 add_atomic_switch_msr(vmx, MSR_EFER,
2802 guest_efer, host_efer, false);
2805 guest_efer &= ~ignore_bits;
2806 guest_efer |= host_efer & ignore_bits;
2808 vmx->guest_msrs[efer_offset].data = guest_efer;
2809 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
2815 #ifdef CONFIG_X86_32
2817 * On 32-bit kernels, VM exits still load the FS and GS bases from the
2818 * VMCS rather than the segment table. KVM uses this helper to figure
2819 * out the current bases to poke them into the VMCS before entry.
2821 static unsigned long segment_base(u16 selector)
2823 struct desc_struct *table;
2826 if (!(selector & ~SEGMENT_RPL_MASK))
2829 table = get_current_gdt_ro();
2831 if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2832 u16 ldt_selector = kvm_read_ldt();
2834 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
2837 table = (struct desc_struct *)segment_base(ldt_selector);
2839 v = get_desc_base(&table[selector >> 3]);
2844 static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
2846 struct vcpu_vmx *vmx = to_vmx(vcpu);
2847 struct vmcs_host_state *host_state;
2848 #ifdef CONFIG_X86_64
2849 int cpu = raw_smp_processor_id();
2851 unsigned long fs_base, gs_base;
2855 if (vmx->loaded_cpu_state)
2858 vmx->loaded_cpu_state = vmx->loaded_vmcs;
2859 host_state = &vmx->loaded_cpu_state->host_state;
2862 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
2863 * allow segment selectors with cpl > 0 or ti == 1.
2865 host_state->ldt_sel = kvm_read_ldt();
2867 #ifdef CONFIG_X86_64
2868 savesegment(ds, host_state->ds_sel);
2869 savesegment(es, host_state->es_sel);
2871 gs_base = cpu_kernelmode_gs_base(cpu);
2872 if (likely(is_64bit_mm(current->mm))) {
2873 save_fsgs_for_kvm();
2874 fs_sel = current->thread.fsindex;
2875 gs_sel = current->thread.gsindex;
2876 fs_base = current->thread.fsbase;
2877 vmx->msr_host_kernel_gs_base = current->thread.gsbase;
2879 savesegment(fs, fs_sel);
2880 savesegment(gs, gs_sel);
2881 fs_base = read_msr(MSR_FS_BASE);
2882 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
2885 if (is_long_mode(&vmx->vcpu))
2886 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2888 savesegment(fs, fs_sel);
2889 savesegment(gs, gs_sel);
2890 fs_base = segment_base(fs_sel);
2891 gs_base = segment_base(gs_sel);
2894 if (unlikely(fs_sel != host_state->fs_sel)) {
2896 vmcs_write16(HOST_FS_SELECTOR, fs_sel);
2898 vmcs_write16(HOST_FS_SELECTOR, 0);
2899 host_state->fs_sel = fs_sel;
2901 if (unlikely(gs_sel != host_state->gs_sel)) {
2903 vmcs_write16(HOST_GS_SELECTOR, gs_sel);
2905 vmcs_write16(HOST_GS_SELECTOR, 0);
2906 host_state->gs_sel = gs_sel;
2908 if (unlikely(fs_base != host_state->fs_base)) {
2909 vmcs_writel(HOST_FS_BASE, fs_base);
2910 host_state->fs_base = fs_base;
2912 if (unlikely(gs_base != host_state->gs_base)) {
2913 vmcs_writel(HOST_GS_BASE, gs_base);
2914 host_state->gs_base = gs_base;
2917 for (i = 0; i < vmx->save_nmsrs; ++i)
2918 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2919 vmx->guest_msrs[i].data,
2920 vmx->guest_msrs[i].mask);
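/*
 * Descriptive note: vmx_prepare_switch_to_host() undoes
 * vmx_prepare_switch_to_guest(): it restores the host segment selectors,
 * LDT, KERNEL_GS_BASE and GDT that were saved or clobbered for the guest;
 * it is reached via vmx_vcpu_put().
 */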
2923 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
2925 struct vmcs_host_state *host_state;
2927 if (!vmx->loaded_cpu_state)
2930 WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
2931 host_state = &vmx->loaded_cpu_state->host_state;
2933 ++vmx->vcpu.stat.host_state_reload;
2934 vmx->loaded_cpu_state = NULL;
2936 #ifdef CONFIG_X86_64
2937 if (is_long_mode(&vmx->vcpu))
2938 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2940 if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
2941 kvm_load_ldt(host_state->ldt_sel);
2942 #ifdef CONFIG_X86_64
2943 load_gs_index(host_state->gs_sel);
2945 loadsegment(gs, host_state->gs_sel);
2948 if (host_state->fs_sel & 7)
2949 loadsegment(fs, host_state->fs_sel);
2950 #ifdef CONFIG_X86_64
2951 if (unlikely(host_state->ds_sel | host_state->es_sel)) {
2952 loadsegment(ds, host_state->ds_sel);
2953 loadsegment(es, host_state->es_sel);
2956 invalidate_tss_limit();
2957 #ifdef CONFIG_X86_64
2958 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2960 load_fixmap_gdt(raw_smp_processor_id());
2963 #ifdef CONFIG_X86_64
2964 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
2966 if (is_long_mode(&vmx->vcpu)) {
2968 if (vmx->loaded_cpu_state)
2969 rdmsrl(MSR_KERNEL_GS_BASE,
2970 vmx->msr_guest_kernel_gs_base);
2973 return vmx->msr_guest_kernel_gs_base;
2976 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
2978 if (is_long_mode(&vmx->vcpu)) {
2980 if (vmx->loaded_cpu_state)
2981 wrmsrl(MSR_KERNEL_GS_BASE, data);
2984 vmx->msr_guest_kernel_gs_base = data;
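/*
 * Descriptive note: vmx_vcpu_pi_load() keeps the posted-interrupt descriptor
 * consistent when a vCPU is (re)loaded on a physical CPU: 'ndst' is the
 * notification destination (the APIC ID that posted interrupts are sent to),
 * 'sn' suppresses notifications while the vCPU is not running, and 'nv' is
 * the notification vector.
 */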
2988 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2990 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2991 struct pi_desc old, new;
2995 * In case of hot-plug or hot-unplug, we may have to undo
2996 * vmx_vcpu_pi_put even if there is no assigned device. And we
2997 * always keep PI.NDST up to date for simplicity: it makes the
2998 * code easier, and CPU migration is not a fast path.
3000 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
3004 * First handle the simple case where no cmpxchg is necessary; just
3005 * allow posting non-urgent interrupts.
3007 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
3008 * PI.NDST: pi_post_block will do it for us and the wakeup_handler
3009 * expects the VCPU to be on the blocked_vcpu_list that matches PI.NDST.
3012 if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
3014 pi_clear_sn(pi_desc);
3018 /* The full case. */
3020 old.control = new.control = pi_desc->control;
3022 dest = cpu_physical_id(cpu);
3024 if (x2apic_enabled())
3025 new.ndst = dest;
3026 else
3027 new.ndst = (dest << 8) & 0xFF00;
3030 } while (cmpxchg64(&pi_desc->control, old.control,
3031 new.control) != old.control);
3034 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
3036 vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
3037 vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
3041 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
3042 * vcpu mutex is already taken.
3044 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3046 struct vcpu_vmx *vmx = to_vmx(vcpu);
3047 bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
3049 if (!already_loaded) {
3050 loaded_vmcs_clear(vmx->loaded_vmcs);
3051 local_irq_disable();
3052 crash_disable_local_vmclear(cpu);
3055 * Read loaded_vmcs->cpu should be before fetching
3056 * loaded_vmcs->loaded_vmcss_on_cpu_link.
3057 * See the comments in __loaded_vmcs_clear().
3061 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
3062 &per_cpu(loaded_vmcss_on_cpu, cpu));
3063 crash_enable_local_vmclear(cpu);
3067 if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
3068 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
3069 vmcs_load(vmx->loaded_vmcs->vmcs);
3070 indirect_branch_prediction_barrier();
3073 if (!already_loaded) {
3074 void *gdt = get_current_gdt_ro();
3075 unsigned long sysenter_esp;
3077 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3080 * Linux uses per-cpu TSS and GDT, so set these when switching
3081 * processors. See 22.2.4.
3083 vmcs_writel(HOST_TR_BASE,
3084 (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
3085 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */
3088 * VM exits change the host TR limit to 0x67 after a VM
3089 * exit. This is okay, since 0x67 covers everything except
3090 * the IO bitmap, and we have code to handle the IO bitmap
3091 * being lost after a VM exit.
3093 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
3095 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
3096 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
3098 vmx->loaded_vmcs->cpu = cpu;
3101 /* Setup TSC multiplier */
3102 if (kvm_has_tsc_control &&
3103 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
3104 decache_tsc_multiplier(vmx);
3106 vmx_vcpu_pi_load(vcpu, cpu);
3107 vmx->host_pkru = read_pkru();
3108 vmx->host_debugctlmsr = get_debugctlmsr();
3111 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
3113 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
3115 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
3116 !irq_remapping_cap(IRQ_POSTING_CAP) ||
3117 !kvm_vcpu_apicv_active(vcpu))
3120 /* Set SN when the vCPU is preempted */
3121 if (vcpu->preempted)
3125 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
3127 vmx_vcpu_pi_put(vcpu);
3129 vmx_prepare_switch_to_host(to_vmx(vcpu));
3132 static bool emulation_required(struct kvm_vcpu *vcpu)
3134 return emulate_invalid_guest_state && !guest_state_valid(vcpu);
3137 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
3140 * Return the cr0 value that a nested guest would read. This is a combination
3141 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
3142 * its hypervisor (cr0_read_shadow).
3144 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
3146 return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
3147 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
3149 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
3151 return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
3152 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
3155 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
3157 unsigned long rflags, save_rflags;
3159 if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
3160 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
3161 rflags = vmcs_readl(GUEST_RFLAGS);
3162 if (to_vmx(vcpu)->rmode.vm86_active) {
3163 rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3164 save_rflags = to_vmx(vcpu)->rmode.save_rflags;
3165 rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3167 to_vmx(vcpu)->rflags = rflags;
3169 return to_vmx(vcpu)->rflags;
3172 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
3174 unsigned long old_rflags = vmx_get_rflags(vcpu);
3176 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
3177 to_vmx(vcpu)->rflags = rflags;
3178 if (to_vmx(vcpu)->rmode.vm86_active) {
3179 to_vmx(vcpu)->rmode.save_rflags = rflags;
3180 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3182 vmcs_writel(GUEST_RFLAGS, rflags);
3184 if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
3185 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
3188 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
3190 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3193 if (interruptibility & GUEST_INTR_STATE_STI)
3194 ret |= KVM_X86_SHADOW_INT_STI;
3195 if (interruptibility & GUEST_INTR_STATE_MOV_SS)
3196 ret |= KVM_X86_SHADOW_INT_MOV_SS;
3201 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
3203 u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3204 u32 interruptibility = interruptibility_old;
3206 interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
3208 if (mask & KVM_X86_SHADOW_INT_MOV_SS)
3209 interruptibility |= GUEST_INTR_STATE_MOV_SS;
3210 else if (mask & KVM_X86_SHADOW_INT_STI)
3211 interruptibility |= GUEST_INTR_STATE_STI;
3213 if (interruptibility != interruptibility_old)
3214 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
3217 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
3221 rip = kvm_rip_read(vcpu);
3222 rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3223 kvm_rip_write(vcpu, rip);
3225 /* skipping an emulated instruction also counts */
3226 vmx_set_interrupt_shadow(vcpu, 0);
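/*
 * Descriptive note: nested_vmx_inject_exception_vmexit() reflects an
 * exception that L1 wants to intercept: it builds the VM-exit
 * interruption-information field (vector, type, error-code and
 * NMI-unblocking bits) and performs an EXIT_REASON_EXCEPTION_NMI exit from
 * L2 to L1 with the supplied exit qualification.
 */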
3229 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3230 unsigned long exit_qual)
3232 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3233 unsigned int nr = vcpu->arch.exception.nr;
3234 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3236 if (vcpu->arch.exception.has_error_code) {
3237 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3238 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3241 if (kvm_exception_is_soft(nr))
3242 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3244 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3246 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3247 vmx_get_nmi_mask(vcpu))
3248 intr_info |= INTR_INFO_UNBLOCK_NMI;
3250 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3254 * KVM wants to inject page faults that it received into the guest. This
3255 * function checks whether, in a nested guest, they need to go to L1 or L2.
3257 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
3259 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3260 unsigned int nr = vcpu->arch.exception.nr;
3262 if (nr == PF_VECTOR) {
3263 if (vcpu->arch.exception.nested_apf) {
3264 *exit_qual = vcpu->arch.apf.nested_apf_token;
3268 * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
3269 * The fix is to add the ancillary datum (CR2 or DR6) to structs
3270 * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
3271 * can be written only when inject_pending_event runs. This should be
3272 * conditional on a new capability---if the capability is disabled,
3273 * kvm_multiple_exception would write the ancillary information to
3274 * CR2 or DR6, for backwards ABI-compatibility.
3276 if (nested_vmx_is_page_fault_vmexit(vmcs12,
3277 vcpu->arch.exception.error_code)) {
3278 *exit_qual = vcpu->arch.cr2;
3282 if (vmcs12->exception_bitmap & (1u << nr)) {
3283 if (nr == DB_VECTOR)
3284 *exit_qual = vcpu->arch.dr6;
3294 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
3297 * Ensure that we clear the HLT state in the VMCS. We don't need to
3298 * explicitly skip the instruction because if the HLT state is set,
3299 * then the instruction is already executing and RIP has already been advanced.
3302 if (kvm_hlt_in_guest(vcpu->kvm) &&
3303 vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
3304 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
3307 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
3309 struct vcpu_vmx *vmx = to_vmx(vcpu);
3310 unsigned nr = vcpu->arch.exception.nr;
3311 bool has_error_code = vcpu->arch.exception.has_error_code;
3312 u32 error_code = vcpu->arch.exception.error_code;
3313 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3315 if (has_error_code) {
3316 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
3317 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3320 if (vmx->rmode.vm86_active) {
3322 if (kvm_exception_is_soft(nr))
3323 inc_eip = vcpu->arch.event_exit_inst_len;
3324 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
3325 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3329 WARN_ON_ONCE(vmx->emulation_required);
3331 if (kvm_exception_is_soft(nr)) {
3332 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
3333 vmx->vcpu.arch.event_exit_inst_len);
3334 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3336 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3338 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
3340 vmx_clear_hlt(vcpu);
3343 static bool vmx_rdtscp_supported(void)
3345 return cpu_has_vmx_rdtscp();
3348 static bool vmx_invpcid_supported(void)
3350 return cpu_has_vmx_invpcid();
3354 * Swap MSR entry in host/guest MSR entry array.
3356 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
3358 struct shared_msr_entry tmp;
3360 tmp = vmx->guest_msrs[to];
3361 vmx->guest_msrs[to] = vmx->guest_msrs[from];
3362 vmx->guest_msrs[from] = tmp;
3366 * Set up the vmcs to automatically save and restore system
3367 * msrs. Don't touch the 64-bit msrs if the guest is in legacy
3368 * mode, as fiddling with msrs is very expensive.
3370 static void setup_msrs(struct vcpu_vmx *vmx)
3372 int save_nmsrs, index;
3375 #ifdef CONFIG_X86_64
3376 if (is_long_mode(&vmx->vcpu)) {
3377 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
3379 move_msr_up(vmx, index, save_nmsrs++);
3380 index = __find_msr_index(vmx, MSR_LSTAR);
3382 move_msr_up(vmx, index, save_nmsrs++);
3383 index = __find_msr_index(vmx, MSR_CSTAR);
3385 move_msr_up(vmx, index, save_nmsrs++);
3386 index = __find_msr_index(vmx, MSR_TSC_AUX);
3387 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
3388 move_msr_up(vmx, index, save_nmsrs++);
3390 * MSR_STAR is only needed on long mode guests, and only
3391 * if efer.sce is enabled.
3393 index = __find_msr_index(vmx, MSR_STAR);
3394 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
3395 move_msr_up(vmx, index, save_nmsrs++);
3398 index = __find_msr_index(vmx, MSR_EFER);
3399 if (index >= 0 && update_transition_efer(vmx, index))
3400 move_msr_up(vmx, index, save_nmsrs++);
3402 vmx->save_nmsrs = save_nmsrs;
3404 if (cpu_has_vmx_msr_bitmap())
3405 vmx_update_msr_bitmap(&vmx->vcpu);
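/*
 * Descriptive note: with TSC offsetting the guest observes roughly
 * host_tsc * ratio + offset. While L2 runs, vcpu->arch.tsc_offset holds the
 * combined L1+L2 offset (vmcs02), so L1's own offset is recovered below by
 * subtracting vmcs12->tsc_offset again.
 */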
3408 static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
3410 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3412 if (is_guest_mode(vcpu) &&
3413 (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
3414 return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
3416 return vcpu->arch.tsc_offset;
3420 * writes 'offset' into guest's timestamp counter offset register
3422 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
3424 if (is_guest_mode(vcpu)) {
3426 * We're here if L1 chose not to trap WRMSR to TSC. According
3427 * to the spec, this should set L1's TSC; The offset that L1
3428 * set for L2 remains unchanged, and still needs to be added
3429 * to the newly set TSC to get L2's TSC.
3431 struct vmcs12 *vmcs12;
3432 /* recalculate vmcs02.TSC_OFFSET: */
3433 vmcs12 = get_vmcs12(vcpu);
3434 vmcs_write64(TSC_OFFSET, offset +
3435 (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
3436 vmcs12->tsc_offset : 0));
3438 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
3439 vmcs_read64(TSC_OFFSET), offset);
3440 vmcs_write64(TSC_OFFSET, offset);
3445 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
3446 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
3447 * all guests if the "nested" module option is off, and can also be disabled
3448 * for a single guest by disabling its VMX cpuid bit.
3450 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
3452 return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
3456 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
3457 * returned for the various VMX controls MSRs when nested VMX is enabled.
3458 * The same values should also be used to verify that vmcs12 control fields are
3459 * valid during nested entry from L1 to L2.
3460 * Each of these control msrs has a low and high 32-bit half: A low bit is on
3461 * if the corresponding bit in the (32-bit) control field *must* be on, and a
3462 * bit in the high half is on if the corresponding bit in the control field
3463 * may be on. See also vmx_control_verify().
3465 static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
3468 memset(msrs, 0, sizeof(*msrs));
3473 * Note that as a general rule, the high half of the MSRs (bits in
3474 * the control fields which may be 1) should be initialized by the
3475 * intersection of the underlying hardware's MSR (i.e., features which
3476 * can be supported) and the list of features we want to expose -
3477 * because they are known to be properly supported in our code.
3478 * Also, usually, the low half of the MSRs (bits which must be 1) can
3479 * be set to 0, meaning that L1 may turn off any of these bits. The
3480 * reason is that if one of these bits is necessary, it will appear
3481 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
3482 * fields of vmcs01 and vmcs02, will turn these bits off - and
3483 * nested_vmx_exit_reflected() will not pass related exits to L1.
3484 * These rules have exceptions below.
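 *
 * As a concrete check (see vmx_control_verify()): a 32-bit control value
 * ctl requested by L1 is acceptable roughly when (ctl & low) == low and
 * (ctl & ~high) == 0, i.e. every must-be-1 bit is set and no bit outside
 * the may-be-1 mask is set.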
3487 /* pin-based controls */
3488 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
3489 msrs->pinbased_ctls_low,
3490 msrs->pinbased_ctls_high);
3491 msrs->pinbased_ctls_low |=
3492 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3493 msrs->pinbased_ctls_high &=
3494 PIN_BASED_EXT_INTR_MASK |
3495 PIN_BASED_NMI_EXITING |
3496 PIN_BASED_VIRTUAL_NMIS |
3497 (apicv ? PIN_BASED_POSTED_INTR : 0);
3498 msrs->pinbased_ctls_high |=
3499 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
3500 PIN_BASED_VMX_PREEMPTION_TIMER;
3503 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
3504 msrs->exit_ctls_low,
3505 msrs->exit_ctls_high);
3506 msrs->exit_ctls_low =
3507 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3509 msrs->exit_ctls_high &=
3510 #ifdef CONFIG_X86_64
3511 VM_EXIT_HOST_ADDR_SPACE_SIZE |
3513 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
3514 msrs->exit_ctls_high |=
3515 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
3516 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
3517 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
3519 if (kvm_mpx_supported())
3520 msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
3522 /* We support free control of debug control saving. */
3523 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
3525 /* entry controls */
3526 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
3527 msrs->entry_ctls_low,
3528 msrs->entry_ctls_high);
3529 msrs->entry_ctls_low =
3530 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3531 msrs->entry_ctls_high &=
3532 #ifdef CONFIG_X86_64
3533 VM_ENTRY_IA32E_MODE |
3535 VM_ENTRY_LOAD_IA32_PAT;
3536 msrs->entry_ctls_high |=
3537 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
3538 if (kvm_mpx_supported())
3539 msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
3541 /* We support free control of debug control loading. */
3542 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
3544 /* cpu-based controls */
3545 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
3546 msrs->procbased_ctls_low,
3547 msrs->procbased_ctls_high);
3548 msrs->procbased_ctls_low =
3549 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3550 msrs->procbased_ctls_high &=
3551 CPU_BASED_VIRTUAL_INTR_PENDING |
3552 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
3553 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
3554 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
3555 CPU_BASED_CR3_STORE_EXITING |
3556 #ifdef CONFIG_X86_64
3557 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
3559 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
3560 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
3561 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
3562 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
3563 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
3565 * We can allow some features even when not supported by the
3566 * hardware. For example, L1 can specify an MSR bitmap - and we
3567 * can use it to avoid exits to L1 - even when L0 runs L2
3568 * without MSR bitmaps.
3570 msrs->procbased_ctls_high |=
3571 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
3572 CPU_BASED_USE_MSR_BITMAPS;
3574 /* We support free control of CR3 access interception. */
3575 msrs->procbased_ctls_low &=
3576 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
3579 * secondary cpu-based controls. Do not include those that
3580 * depend on CPUID bits, they are added later by vmx_cpuid_update.
3582 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
3583 msrs->secondary_ctls_low,
3584 msrs->secondary_ctls_high);
3585 msrs->secondary_ctls_low = 0;
3586 msrs->secondary_ctls_high &=
3587 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3588 SECONDARY_EXEC_DESC |
3589 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3590 SECONDARY_EXEC_APIC_REGISTER_VIRT |
3591 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3592 SECONDARY_EXEC_WBINVD_EXITING;
3594 * We can emulate "VMCS shadowing," even if the hardware
3595 * doesn't support it.
3597 msrs->secondary_ctls_high |=
3598 SECONDARY_EXEC_SHADOW_VMCS;
3601 /* nested EPT: emulate EPT also to L1 */
3602 msrs->secondary_ctls_high |=
3603 SECONDARY_EXEC_ENABLE_EPT;
3604 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
3605 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
3606 if (cpu_has_vmx_ept_execute_only())
3608 VMX_EPT_EXECUTE_ONLY_BIT;
3609 msrs->ept_caps &= vmx_capability.ept;
3610 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
3611 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
3612 VMX_EPT_1GB_PAGE_BIT;
3613 if (enable_ept_ad_bits) {
3614 msrs->secondary_ctls_high |=
3615 SECONDARY_EXEC_ENABLE_PML;
3616 msrs->ept_caps |= VMX_EPT_AD_BIT;
3620 if (cpu_has_vmx_vmfunc()) {
3621 msrs->secondary_ctls_high |=
3622 SECONDARY_EXEC_ENABLE_VMFUNC;
3624 * Advertise EPTP switching unconditionally
3625 * since we emulate it
3628 msrs->vmfunc_controls =
3629 VMX_VMFUNC_EPTP_SWITCHING;
3633 * Old versions of KVM use the single-context version without
3634 * checking for support, so declare that it is supported even
3635 * though it is treated as global context. The alternative is
3636 * not failing the single-context invvpid, and it is worse.