2 * Kernel-based Virtual Machine driver for Linux
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11 * Avi Kivity <avi@qumranet.com>
12 * Yaniv Kamay <yaniv@qumranet.com>
14 * This work is licensed under the terms of the GNU GPL, version 2. See
15 * the COPYING file in the top-level directory.
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include <linux/nospec.h>
38 #include "kvm_cache_regs.h"
45 #include <asm/virtext.h>
47 #include <asm/fpu/internal.h>
48 #include <asm/perf_event.h>
49 #include <asm/debugreg.h>
50 #include <asm/kexec.h>
52 #include <asm/irq_remapping.h>
53 #include <asm/mmu_context.h>
54 #include <asm/nospec-branch.h>
59 #define __ex(x) __kvm_handle_fault_on_reboot(x)
60 #define __ex_clear(x, reg) \
61 ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
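/*
 * Note on the wrappers above: they attach an exception fixup to the inlined
 * VMX instruction so that a fault taken after VMX has already been disabled
 * (e.g. on the reboot/kexec path) is swallowed instead of oopsing.
 * __ex_clear() additionally runs "xor reg, reg" on the fault path, which is
 * what lets the VMREAD wrapper further down return 0 in that case.
 */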
63 MODULE_AUTHOR("Qumranet");
64 MODULE_LICENSE("GPL");
66 static const struct x86_cpu_id vmx_cpu_id[] = {
67 X86_FEATURE_MATCH(X86_FEATURE_VMX),
70 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
72 static bool __read_mostly enable_vpid = 1;
73 module_param_named(vpid, enable_vpid, bool, 0444);
75 static bool __read_mostly enable_vnmi = 1;
76 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
78 static bool __read_mostly flexpriority_enabled = 1;
79 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
81 static bool __read_mostly enable_ept = 1;
82 module_param_named(ept, enable_ept, bool, S_IRUGO);
84 static bool __read_mostly enable_unrestricted_guest = 1;
85 module_param_named(unrestricted_guest,
86 enable_unrestricted_guest, bool, S_IRUGO);
88 static bool __read_mostly enable_ept_ad_bits = 1;
89 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
91 static bool __read_mostly emulate_invalid_guest_state = true;
92 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
94 static bool __read_mostly fasteoi = 1;
95 module_param(fasteoi, bool, S_IRUGO);
97 static bool __read_mostly enable_apicv = 1;
98 module_param(enable_apicv, bool, S_IRUGO);
100 static bool __read_mostly enable_shadow_vmcs = 1;
101 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
103 * If nested=1, nested virtualization is supported, i.e., guests may use
104 * VMX and act as hypervisors for their own guests. If nested=0, guests may
105 * not use VMX instructions.
107 static bool __read_mostly nested = 0;
108 module_param(nested, bool, S_IRUGO);
110 static u64 __read_mostly host_xss;
112 static bool __read_mostly enable_pml = 1;
113 module_param_named(pml, enable_pml, bool, S_IRUGO);
117 #define MSR_TYPE_RW 3
119 #define MSR_BITMAP_MODE_X2APIC 1
120 #define MSR_BITMAP_MODE_X2APIC_APICV 2
121 #define MSR_BITMAP_MODE_LM 4
123 #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
125 /* Guest_tsc -> host_tsc conversion requires 64-bit division. */
126 static int __read_mostly cpu_preemption_timer_multi;
127 static bool __read_mostly enable_preemption_timer = 1;
129 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
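/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * preemption timer counts down at the TSC rate divided by
 * 2^cpu_preemption_timer_multi (the rate field of MSR_IA32_VMX_MISC), so a
 * host TSC delta maps to a timer value roughly as below, ignoring overflow
 * handling.
 */
static inline u64 example_tsc_delta_to_preemption_timer(u64 delta_tsc)
{
	return delta_tsc >> cpu_preemption_timer_multi;
}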
132 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
133 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
134 #define KVM_VM_CR0_ALWAYS_ON \
135 (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
136 #define KVM_CR4_GUEST_OWNED_BITS \
137 (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
138 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
140 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
141 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
143 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
145 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
148 * Hyper-V requires all of these, so mark them as supported even though
149 * they are just treated the same as all-context.
151 #define VMX_VPID_EXTENT_SUPPORTED_MASK \
152 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
153 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
154 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
155 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
158 * These two parameters are used to configure the controls for Pause-Loop Exiting:
159 * ple_gap:    upper bound on the amount of time between two successive
160 *             executions of PAUSE in a loop. Also indicates whether PLE is
161 *             enabled. Testing shows this time is usually less than 128 cycles.
162 * ple_window: upper bound on the amount of time a guest is allowed to execute
163 *             in a PAUSE loop. Tests indicate that most spinlocks are held for
164 *             less than 2^12 cycles.
165 * Time is measured with a counter that runs at the same rate as the TSC;
166 * see SDM volume 3b, sections 21.6.13 & 22.1.3.
168 #define KVM_VMX_DEFAULT_PLE_GAP 128
169 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096
170 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW 2
171 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
172 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \
173 INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW
175 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
176 module_param(ple_gap, int, S_IRUGO);
178 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
179 module_param(ple_window, int, S_IRUGO);
181 /* Default doubles per-vcpu window every exit. */
182 static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
183 module_param(ple_window_grow, int, S_IRUGO);
185 /* Default resets per-vcpu window every exit to ple_window. */
186 static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
187 module_param(ple_window_shrink, int, S_IRUGO);
189 /* Default is to compute the maximum so we can never overflow. */
190 static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
191 static int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
192 module_param(ple_window_max, int, S_IRUGO);
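/*
 * Illustrative sketch (hypothetical helper): the parameters above scale a
 * vcpu's PLE window multiplicatively when the grow factor is smaller than
 * ple_window (additively otherwise), with the window clamped to
 * ple_window_actual_max, which is sized so the arithmetic cannot overflow.
 * The real adjustment helpers appear further down in this file.
 */
static inline int example_grow_ple_window(int old)
{
	if (ple_window_grow < 1)
		return ple_window;

	/* Clamp first so the scaling below stays within the allowed range. */
	old = min(old, ple_window_actual_max);

	if (ple_window_grow < ple_window)
		return old * ple_window_grow;

	return old + ple_window_grow;
}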
194 extern const ulong vmx_return;
196 #define NR_AUTOLOAD_MSRS 8
205 * Track a VMCS that may be loaded on a certain CPU. If it is loaded (cpu != -1), also
206 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
207 * loaded on this CPU (so we can clear them if the CPU goes down).
211 struct vmcs *shadow_vmcs;
214 bool nmi_known_unmasked;
215 unsigned long vmcs_host_cr3; /* May not match real cr3 */
216 unsigned long vmcs_host_cr4; /* May not match real cr4 */
217 /* Support for vnmi-less CPUs */
218 int soft_vnmi_blocked;
220 s64 vnmi_blocked_time;
221 unsigned long *msr_bitmap;
222 struct list_head loaded_vmcss_on_cpu_link;
225 struct shared_msr_entry {
232 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
233 * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
234 * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
235 * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
236 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
237 * More than one of these structures may exist, if L1 runs multiple L2 guests.
238 * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
239 * underlying hardware which will be used to run L2.
240 * This structure is packed to ensure that its layout is identical across
241 * machines (necessary for live migration).
242 * If there are changes in this struct, VMCS12_REVISION must be changed.
244 typedef u64 natural_width;
245 struct __packed vmcs12 {
246 /* According to the Intel spec, a VMCS region must start with the
247 * following two fields. Then follow implementation-specific data.
252 u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
253 u32 padding[7]; /* room for future expansion */
258 u64 vm_exit_msr_store_addr;
259 u64 vm_exit_msr_load_addr;
260 u64 vm_entry_msr_load_addr;
262 u64 virtual_apic_page_addr;
263 u64 apic_access_addr;
264 u64 posted_intr_desc_addr;
265 u64 vm_function_control;
267 u64 eoi_exit_bitmap0;
268 u64 eoi_exit_bitmap1;
269 u64 eoi_exit_bitmap2;
270 u64 eoi_exit_bitmap3;
271 u64 eptp_list_address;
273 u64 guest_physical_address;
274 u64 vmcs_link_pointer;
276 u64 guest_ia32_debugctl;
279 u64 guest_ia32_perf_global_ctrl;
287 u64 host_ia32_perf_global_ctrl;
288 u64 padding64[8]; /* room for future expansion */
290 * To allow migration of L1 (complete with its L2 guests) between
291 * machines of different natural widths (32 or 64 bit), we cannot have
292 * unsigned long fields with no explicit size. We use u64 (aliased
293 * natural_width) instead. Luckily, x86 is little-endian.
295 natural_width cr0_guest_host_mask;
296 natural_width cr4_guest_host_mask;
297 natural_width cr0_read_shadow;
298 natural_width cr4_read_shadow;
299 natural_width cr3_target_value0;
300 natural_width cr3_target_value1;
301 natural_width cr3_target_value2;
302 natural_width cr3_target_value3;
303 natural_width exit_qualification;
304 natural_width guest_linear_address;
305 natural_width guest_cr0;
306 natural_width guest_cr3;
307 natural_width guest_cr4;
308 natural_width guest_es_base;
309 natural_width guest_cs_base;
310 natural_width guest_ss_base;
311 natural_width guest_ds_base;
312 natural_width guest_fs_base;
313 natural_width guest_gs_base;
314 natural_width guest_ldtr_base;
315 natural_width guest_tr_base;
316 natural_width guest_gdtr_base;
317 natural_width guest_idtr_base;
318 natural_width guest_dr7;
319 natural_width guest_rsp;
320 natural_width guest_rip;
321 natural_width guest_rflags;
322 natural_width guest_pending_dbg_exceptions;
323 natural_width guest_sysenter_esp;
324 natural_width guest_sysenter_eip;
325 natural_width host_cr0;
326 natural_width host_cr3;
327 natural_width host_cr4;
328 natural_width host_fs_base;
329 natural_width host_gs_base;
330 natural_width host_tr_base;
331 natural_width host_gdtr_base;
332 natural_width host_idtr_base;
333 natural_width host_ia32_sysenter_esp;
334 natural_width host_ia32_sysenter_eip;
335 natural_width host_rsp;
336 natural_width host_rip;
337 natural_width paddingl[8]; /* room for future expansion */
338 u32 pin_based_vm_exec_control;
339 u32 cpu_based_vm_exec_control;
340 u32 exception_bitmap;
341 u32 page_fault_error_code_mask;
342 u32 page_fault_error_code_match;
343 u32 cr3_target_count;
344 u32 vm_exit_controls;
345 u32 vm_exit_msr_store_count;
346 u32 vm_exit_msr_load_count;
347 u32 vm_entry_controls;
348 u32 vm_entry_msr_load_count;
349 u32 vm_entry_intr_info_field;
350 u32 vm_entry_exception_error_code;
351 u32 vm_entry_instruction_len;
353 u32 secondary_vm_exec_control;
354 u32 vm_instruction_error;
356 u32 vm_exit_intr_info;
357 u32 vm_exit_intr_error_code;
358 u32 idt_vectoring_info_field;
359 u32 idt_vectoring_error_code;
360 u32 vm_exit_instruction_len;
361 u32 vmx_instruction_info;
368 u32 guest_ldtr_limit;
370 u32 guest_gdtr_limit;
371 u32 guest_idtr_limit;
372 u32 guest_es_ar_bytes;
373 u32 guest_cs_ar_bytes;
374 u32 guest_ss_ar_bytes;
375 u32 guest_ds_ar_bytes;
376 u32 guest_fs_ar_bytes;
377 u32 guest_gs_ar_bytes;
378 u32 guest_ldtr_ar_bytes;
379 u32 guest_tr_ar_bytes;
380 u32 guest_interruptibility_info;
381 u32 guest_activity_state;
382 u32 guest_sysenter_cs;
383 u32 host_ia32_sysenter_cs;
384 u32 vmx_preemption_timer_value;
385 u32 padding32[7]; /* room for future expansion */
386 u16 virtual_processor_id;
388 u16 guest_es_selector;
389 u16 guest_cs_selector;
390 u16 guest_ss_selector;
391 u16 guest_ds_selector;
392 u16 guest_fs_selector;
393 u16 guest_gs_selector;
394 u16 guest_ldtr_selector;
395 u16 guest_tr_selector;
396 u16 guest_intr_status;
398 u16 host_es_selector;
399 u16 host_cs_selector;
400 u16 host_ss_selector;
401 u16 host_ds_selector;
402 u16 host_fs_selector;
403 u16 host_gs_selector;
404 u16 host_tr_selector;
408 * VMCS12_REVISION is an arbitrary id that should be changed if the content or
409 * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
410 * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
412 #define VMCS12_REVISION 0x11e57ed0
415 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
416 * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
417 * the current implementation, 4K is reserved to avoid future complications.
419 #define VMCS12_SIZE 0x1000
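/*
 * For illustration: since struct vmcs12 must never outgrow the 4K region L1
 * allocates for it, the constraint can be made explicit with a compile-time
 * assertion placed in any init-time function (hypothetical placement), e.g.:
 *
 *	BUILD_BUG_ON(sizeof(struct vmcs12) > VMCS12_SIZE);
 */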
422 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
423 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
426 /* Has the level1 guest done vmxon? */
431 /* The guest-physical address of the current VMCS L1 keeps for L2 */
434 * Cache of the guest's VMCS, existing outside of guest memory.
435 * Loaded from guest memory during VMPTRLD. Flushed to guest
436 * memory during VMCLEAR and VMPTRLD.
438 struct vmcs12 *cached_vmcs12;
440 * Indicates if the shadow vmcs must be updated with the
441 * data held by vmcs12.
443 bool sync_shadow_vmcs;
445 bool change_vmcs01_virtual_x2apic_mode;
446 /* L2 must run next, and mustn't decide to exit to L1. */
447 bool nested_run_pending;
449 struct loaded_vmcs vmcs02;
452 * Guest pages referred to in the vmcs02 with host-physical
453 * pointers, so we must keep them pinned while L2 runs.
455 struct page *apic_access_page;
456 struct page *virtual_apic_page;
457 struct page *pi_desc_page;
458 struct pi_desc *pi_desc;
462 struct hrtimer preemption_timer;
463 bool preemption_timer_expired;
465 /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
472 * We only store the "true" versions of the VMX capability MSRs. We
473 * generate the "non-true" versions by setting the must-be-1 bits
474 * according to the SDM.
476 u32 nested_vmx_procbased_ctls_low;
477 u32 nested_vmx_procbased_ctls_high;
478 u32 nested_vmx_secondary_ctls_low;
479 u32 nested_vmx_secondary_ctls_high;
480 u32 nested_vmx_pinbased_ctls_low;
481 u32 nested_vmx_pinbased_ctls_high;
482 u32 nested_vmx_exit_ctls_low;
483 u32 nested_vmx_exit_ctls_high;
484 u32 nested_vmx_entry_ctls_low;
485 u32 nested_vmx_entry_ctls_high;
486 u32 nested_vmx_misc_low;
487 u32 nested_vmx_misc_high;
488 u32 nested_vmx_ept_caps;
489 u32 nested_vmx_vpid_caps;
490 u64 nested_vmx_basic;
491 u64 nested_vmx_cr0_fixed0;
492 u64 nested_vmx_cr0_fixed1;
493 u64 nested_vmx_cr4_fixed0;
494 u64 nested_vmx_cr4_fixed1;
495 u64 nested_vmx_vmcs_enum;
496 u64 nested_vmx_vmfunc_controls;
498 /* SMM related state */
500 /* in VMX operation on SMM entry? */
502 /* in guest mode on SMM entry? */
507 #define POSTED_INTR_ON 0
508 #define POSTED_INTR_SN 1
510 /* Posted-Interrupt Descriptor */
512 u32 pir[8]; /* Posted interrupt requested */
515 /* bit 256 - Outstanding Notification */
517 /* bit 257 - Suppress Notification */
519 /* bits 271:258 - Reserved */
521 /* bits 279:272 - Notification Vector */
523 /* bits 287:280 - Reserved */
525 /* bits 319:288 - Notification Destination */
533 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
535 return test_and_set_bit(POSTED_INTR_ON,
536 (unsigned long *)&pi_desc->control);
539 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
541 return test_and_clear_bit(POSTED_INTR_ON,
542 (unsigned long *)&pi_desc->control);
545 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
547 return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
550 static inline void pi_clear_sn(struct pi_desc *pi_desc)
552 return clear_bit(POSTED_INTR_SN,
553 (unsigned long *)&pi_desc->control);
556 static inline void pi_set_sn(struct pi_desc *pi_desc)
558 return set_bit(POSTED_INTR_SN,
559 (unsigned long *)&pi_desc->control);
562 static inline void pi_clear_on(struct pi_desc *pi_desc)
564 clear_bit(POSTED_INTR_ON,
565 (unsigned long *)&pi_desc->control);
568 static inline int pi_test_on(struct pi_desc *pi_desc)
570 return test_bit(POSTED_INTR_ON,
571 (unsigned long *)&pi_desc->control);
574 static inline int pi_test_sn(struct pi_desc *pi_desc)
576 return test_bit(POSTED_INTR_SN,
577 (unsigned long *)&pi_desc->control);
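/*
 * Illustrative sketch (hypothetical helper): a typical posted-interrupt
 * delivery sequence built from the accessors above. The vector is first
 * recorded in the PIR and Outstanding Notification is then raised; a
 * notification event only needs to be sent when ON was previously clear.
 */
static inline bool example_post_interrupt(struct pi_desc *pi_desc, int vector)
{
	if (pi_test_and_set_pir(vector, pi_desc))
		return false;	/* already pending, nothing more to do */

	/* true means the caller must notify the target CPU */
	return !pi_test_and_set_on(pi_desc);
}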
581 struct kvm_vcpu vcpu;
582 unsigned long host_rsp;
586 u32 idt_vectoring_info;
588 struct shared_msr_entry *guest_msrs;
591 unsigned long host_idt_base;
593 u64 msr_host_kernel_gs_base;
594 u64 msr_guest_kernel_gs_base;
597 u64 arch_capabilities;
600 u32 vm_entry_controls_shadow;
601 u32 vm_exit_controls_shadow;
602 u32 secondary_exec_control;
605 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
606 * non-nested (L1) guest, it always points to vmcs01. For a nested
607 * guest (L2), it points to a different VMCS.
609 struct loaded_vmcs vmcs01;
610 struct loaded_vmcs *loaded_vmcs;
611 bool __launched; /* temporary, used in vmx_vcpu_run */
612 struct msr_autoload {
614 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
615 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
619 u16 fs_sel, gs_sel, ldt_sel;
623 int gs_ldt_reload_needed;
624 int fs_reload_needed;
625 u64 msr_host_bndcfgs;
630 struct kvm_segment segs[8];
633 u32 bitmask; /* 4 bits per segment (1 bit per field) */
634 struct kvm_save_segment {
642 bool emulation_required;
646 /* Posted interrupt descriptor */
647 struct pi_desc pi_desc;
649 /* Support for a guest hypervisor (nested VMX) */
650 struct nested_vmx nested;
652 /* Dynamic PLE window. */
654 bool ple_window_dirty;
656 /* Support for PML */
657 #define PML_ENTITY_NUM 512
660 /* apic deadline value in host tsc */
663 u64 current_tsc_ratio;
668 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
669 * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
670 * in msr_ia32_feature_control_valid_bits.
672 u64 msr_ia32_feature_control;
673 u64 msr_ia32_feature_control_valid_bits;
676 enum segment_cache_field {
685 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
687 return container_of(vcpu, struct vcpu_vmx, vcpu);
690 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
692 return &(to_vmx(vcpu)->pi_desc);
695 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
696 #define FIELD(number, name) [number] = VMCS12_OFFSET(name)
697 #define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \
698 [number##_HIGH] = VMCS12_OFFSET(name)+4
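/*
 * For illustration, FIELD64(IO_BITMAP_A, io_bitmap_a) expands to
 *
 *	[IO_BITMAP_A]      = offsetof(struct vmcs12, io_bitmap_a),
 *	[IO_BITMAP_A_HIGH] = offsetof(struct vmcs12, io_bitmap_a) + 4
 *
 * i.e. the "_HIGH" encoding of a 64-bit VMCS field maps onto the upper 32
 * bits of the same struct member.
 */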
701 static unsigned long shadow_read_only_fields[] = {
703 * We do NOT shadow fields that are modified when L0
704 * traps and emulates any vmx instruction (e.g. VMPTRLD,
705 * VMXON...) executed by L1.
706 * For example, VM_INSTRUCTION_ERROR is read
707 * by L1 if a vmx instruction fails (part of the error path).
708 * Note the code assumes this logic. If for some reason
709 * we start shadowing these fields then we need to
710 * force a shadow sync when L0 emulates vmx instructions
711 * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
712 * by nested_vmx_failValid)
716 VM_EXIT_INSTRUCTION_LEN,
717 IDT_VECTORING_INFO_FIELD,
718 IDT_VECTORING_ERROR_CODE,
719 VM_EXIT_INTR_ERROR_CODE,
721 GUEST_LINEAR_ADDRESS,
722 GUEST_PHYSICAL_ADDRESS
724 static int max_shadow_read_only_fields =
725 ARRAY_SIZE(shadow_read_only_fields);
727 static unsigned long shadow_read_write_fields[] = {
734 GUEST_INTERRUPTIBILITY_INFO,
747 CPU_BASED_VM_EXEC_CONTROL,
748 VM_ENTRY_EXCEPTION_ERROR_CODE,
749 VM_ENTRY_INTR_INFO_FIELD,
750 VM_ENTRY_INSTRUCTION_LEN,
751 VM_ENTRY_EXCEPTION_ERROR_CODE,
757 static int max_shadow_read_write_fields =
758 ARRAY_SIZE(shadow_read_write_fields);
760 static const unsigned short vmcs_field_to_offset_table[] = {
761 FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
762 FIELD(POSTED_INTR_NV, posted_intr_nv),
763 FIELD(GUEST_ES_SELECTOR, guest_es_selector),
764 FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
765 FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
766 FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
767 FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
768 FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
769 FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
770 FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
771 FIELD(GUEST_INTR_STATUS, guest_intr_status),
772 FIELD(GUEST_PML_INDEX, guest_pml_index),
773 FIELD(HOST_ES_SELECTOR, host_es_selector),
774 FIELD(HOST_CS_SELECTOR, host_cs_selector),
775 FIELD(HOST_SS_SELECTOR, host_ss_selector),
776 FIELD(HOST_DS_SELECTOR, host_ds_selector),
777 FIELD(HOST_FS_SELECTOR, host_fs_selector),
778 FIELD(HOST_GS_SELECTOR, host_gs_selector),
779 FIELD(HOST_TR_SELECTOR, host_tr_selector),
780 FIELD64(IO_BITMAP_A, io_bitmap_a),
781 FIELD64(IO_BITMAP_B, io_bitmap_b),
782 FIELD64(MSR_BITMAP, msr_bitmap),
783 FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
784 FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
785 FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
786 FIELD64(TSC_OFFSET, tsc_offset),
787 FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
788 FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
789 FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
790 FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
791 FIELD64(EPT_POINTER, ept_pointer),
792 FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
793 FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
794 FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
795 FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
796 FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
797 FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
798 FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
799 FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
800 FIELD64(PML_ADDRESS, pml_address),
801 FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
802 FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
803 FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
804 FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
805 FIELD64(GUEST_PDPTR0, guest_pdptr0),
806 FIELD64(GUEST_PDPTR1, guest_pdptr1),
807 FIELD64(GUEST_PDPTR2, guest_pdptr2),
808 FIELD64(GUEST_PDPTR3, guest_pdptr3),
809 FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
810 FIELD64(HOST_IA32_PAT, host_ia32_pat),
811 FIELD64(HOST_IA32_EFER, host_ia32_efer),
812 FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
813 FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
814 FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
815 FIELD(EXCEPTION_BITMAP, exception_bitmap),
816 FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
817 FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
818 FIELD(CR3_TARGET_COUNT, cr3_target_count),
819 FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
820 FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
821 FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
822 FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
823 FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
824 FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
825 FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
826 FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
827 FIELD(TPR_THRESHOLD, tpr_threshold),
828 FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
829 FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
830 FIELD(VM_EXIT_REASON, vm_exit_reason),
831 FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
832 FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
833 FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
834 FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
835 FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
836 FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
837 FIELD(GUEST_ES_LIMIT, guest_es_limit),
838 FIELD(GUEST_CS_LIMIT, guest_cs_limit),
839 FIELD(GUEST_SS_LIMIT, guest_ss_limit),
840 FIELD(GUEST_DS_LIMIT, guest_ds_limit),
841 FIELD(GUEST_FS_LIMIT, guest_fs_limit),
842 FIELD(GUEST_GS_LIMIT, guest_gs_limit),
843 FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
844 FIELD(GUEST_TR_LIMIT, guest_tr_limit),
845 FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
846 FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
847 FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
848 FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
849 FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
850 FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
851 FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
852 FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
853 FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
854 FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
855 FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
856 FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
857 FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
858 FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
859 FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
860 FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
861 FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
862 FIELD(CR0_READ_SHADOW, cr0_read_shadow),
863 FIELD(CR4_READ_SHADOW, cr4_read_shadow),
864 FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
865 FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
866 FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
867 FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
868 FIELD(EXIT_QUALIFICATION, exit_qualification),
869 FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
870 FIELD(GUEST_CR0, guest_cr0),
871 FIELD(GUEST_CR3, guest_cr3),
872 FIELD(GUEST_CR4, guest_cr4),
873 FIELD(GUEST_ES_BASE, guest_es_base),
874 FIELD(GUEST_CS_BASE, guest_cs_base),
875 FIELD(GUEST_SS_BASE, guest_ss_base),
876 FIELD(GUEST_DS_BASE, guest_ds_base),
877 FIELD(GUEST_FS_BASE, guest_fs_base),
878 FIELD(GUEST_GS_BASE, guest_gs_base),
879 FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
880 FIELD(GUEST_TR_BASE, guest_tr_base),
881 FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
882 FIELD(GUEST_IDTR_BASE, guest_idtr_base),
883 FIELD(GUEST_DR7, guest_dr7),
884 FIELD(GUEST_RSP, guest_rsp),
885 FIELD(GUEST_RIP, guest_rip),
886 FIELD(GUEST_RFLAGS, guest_rflags),
887 FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
888 FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
889 FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
890 FIELD(HOST_CR0, host_cr0),
891 FIELD(HOST_CR3, host_cr3),
892 FIELD(HOST_CR4, host_cr4),
893 FIELD(HOST_FS_BASE, host_fs_base),
894 FIELD(HOST_GS_BASE, host_gs_base),
895 FIELD(HOST_TR_BASE, host_tr_base),
896 FIELD(HOST_GDTR_BASE, host_gdtr_base),
897 FIELD(HOST_IDTR_BASE, host_idtr_base),
898 FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
899 FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
900 FIELD(HOST_RSP, host_rsp),
901 FIELD(HOST_RIP, host_rip),
904 static inline short vmcs_field_to_offset(unsigned long field)
906 const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
907 unsigned short offset;
909 BUILD_BUG_ON(size > SHRT_MAX);
913 field = array_index_nospec(field, size);
914 offset = vmcs_field_to_offset_table[field];
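/*
 * array_index_nospec() clamps 'field' under speculative execution, so a
 * mispredicted bounds check cannot be abused to read past
 * vmcs_field_to_offset_table (a Spectre v1 style gadget); the lookup itself
 * is unchanged.
 */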
920 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
922 return to_vmx(vcpu)->nested.cached_vmcs12;
925 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
926 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
927 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
928 static bool vmx_xsaves_supported(void);
929 static void vmx_set_segment(struct kvm_vcpu *vcpu,
930 struct kvm_segment *var, int seg);
931 static void vmx_get_segment(struct kvm_vcpu *vcpu,
932 struct kvm_segment *var, int seg);
933 static bool guest_state_valid(struct kvm_vcpu *vcpu);
934 static u32 vmx_segment_access_rights(struct kvm_segment *var);
935 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
936 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
937 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
938 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
940 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
941 static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
944 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
945 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
947 * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed
948 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
950 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
953 * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
954 * can find which vCPU should be woken up.
956 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
957 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
967 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
969 #define vmx_io_bitmap_a (vmx_bitmap[VMX_IO_BITMAP_A])
970 #define vmx_io_bitmap_b (vmx_bitmap[VMX_IO_BITMAP_B])
971 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
972 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
974 static bool cpu_has_load_ia32_efer;
975 static bool cpu_has_load_perf_global_ctrl;
977 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
978 static DEFINE_SPINLOCK(vmx_vpid_lock);
980 static struct vmcs_config {
985 u32 pin_based_exec_ctrl;
986 u32 cpu_based_exec_ctrl;
987 u32 cpu_based_2nd_exec_ctrl;
992 static struct vmx_capability {
997 #define VMX_SEGMENT_FIELD(seg) \
998 [VCPU_SREG_##seg] = { \
999 .selector = GUEST_##seg##_SELECTOR, \
1000 .base = GUEST_##seg##_BASE, \
1001 .limit = GUEST_##seg##_LIMIT, \
1002 .ar_bytes = GUEST_##seg##_AR_BYTES, \
1005 static const struct kvm_vmx_segment_field {
1010 } kvm_vmx_segment_fields[] = {
1011 VMX_SEGMENT_FIELD(CS),
1012 VMX_SEGMENT_FIELD(DS),
1013 VMX_SEGMENT_FIELD(ES),
1014 VMX_SEGMENT_FIELD(FS),
1015 VMX_SEGMENT_FIELD(GS),
1016 VMX_SEGMENT_FIELD(SS),
1017 VMX_SEGMENT_FIELD(TR),
1018 VMX_SEGMENT_FIELD(LDTR),
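/*
 * For illustration, VMX_SEGMENT_FIELD(CS) expands to
 *
 *	[VCPU_SREG_CS] = {
 *		.selector = GUEST_CS_SELECTOR,
 *		.base     = GUEST_CS_BASE,
 *		.limit    = GUEST_CS_LIMIT,
 *		.ar_bytes = GUEST_CS_AR_BYTES,
 *	},
 *
 * so each entry of kvm_vmx_segment_fields[] names the four VMCS fields that
 * describe one guest segment register.
 */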
1021 static u64 host_efer;
1023 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1026 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
1027 * away by decrementing the array size.
1029 static const u32 vmx_msr_index[] = {
1030 #ifdef CONFIG_X86_64
1031 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
1033 MSR_EFER, MSR_TSC_AUX, MSR_STAR,
1036 static inline bool is_exception_n(u32 intr_info, u8 vector)
1038 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1039 INTR_INFO_VALID_MASK)) ==
1040 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1043 static inline bool is_debug(u32 intr_info)
1045 return is_exception_n(intr_info, DB_VECTOR);
1048 static inline bool is_breakpoint(u32 intr_info)
1050 return is_exception_n(intr_info, BP_VECTOR);
1053 static inline bool is_page_fault(u32 intr_info)
1055 return is_exception_n(intr_info, PF_VECTOR);
1058 static inline bool is_no_device(u32 intr_info)
1060 return is_exception_n(intr_info, NM_VECTOR);
1063 static inline bool is_invalid_opcode(u32 intr_info)
1065 return is_exception_n(intr_info, UD_VECTOR);
1068 static inline bool is_external_interrupt(u32 intr_info)
1070 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1071 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1074 static inline bool is_machine_check(u32 intr_info)
1076 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1077 INTR_INFO_VALID_MASK)) ==
1078 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1081 static inline bool cpu_has_vmx_msr_bitmap(void)
1083 return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1086 static inline bool cpu_has_vmx_tpr_shadow(void)
1088 return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1091 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1093 return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1096 static inline bool cpu_has_secondary_exec_ctrls(void)
1098 return vmcs_config.cpu_based_exec_ctrl &
1099 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1102 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1104 return vmcs_config.cpu_based_2nd_exec_ctrl &
1105 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1108 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1110 return vmcs_config.cpu_based_2nd_exec_ctrl &
1111 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1114 static inline bool cpu_has_vmx_apic_register_virt(void)
1116 return vmcs_config.cpu_based_2nd_exec_ctrl &
1117 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1120 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1122 return vmcs_config.cpu_based_2nd_exec_ctrl &
1123 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1127 * Comment format: document - errata name - stepping - processor name.
1129 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1131 static u32 vmx_preemption_cpu_tfms[] = {
1132 /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
1134 /* 323056.pdf - AAX65 - C2 - Xeon L3406 */
1135 /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1136 /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1138 /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1140 /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */
1141 /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */
1143 * 320767.pdf - AAP86 - B1 -
1144 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1147 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1149 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1151 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1153 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1154 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1155 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1159 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1161 u32 eax = cpuid_eax(0x00000001), i;
1163 /* Clear the reserved bits */
1164 eax &= ~(0x3U << 14 | 0xfU << 28);
1165 for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1166 if (eax == vmx_preemption_cpu_tfms[i])
1172 static inline bool cpu_has_vmx_preemption_timer(void)
1174 return vmcs_config.pin_based_exec_ctrl &
1175 PIN_BASED_VMX_PREEMPTION_TIMER;
1178 static inline bool cpu_has_vmx_posted_intr(void)
1180 return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1181 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1184 static inline bool cpu_has_vmx_apicv(void)
1186 return cpu_has_vmx_apic_register_virt() &&
1187 cpu_has_vmx_virtual_intr_delivery() &&
1188 cpu_has_vmx_posted_intr();
1191 static inline bool cpu_has_vmx_flexpriority(void)
1193 return cpu_has_vmx_tpr_shadow() &&
1194 cpu_has_vmx_virtualize_apic_accesses();
1197 static inline bool cpu_has_vmx_ept_execute_only(void)
1199 return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1202 static inline bool cpu_has_vmx_ept_2m_page(void)
1204 return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1207 static inline bool cpu_has_vmx_ept_1g_page(void)
1209 return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1212 static inline bool cpu_has_vmx_ept_4levels(void)
1214 return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1217 static inline bool cpu_has_vmx_ept_mt_wb(void)
1219 return vmx_capability.ept & VMX_EPTP_WB_BIT;
1222 static inline bool cpu_has_vmx_ept_5levels(void)
1224 return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1227 static inline bool cpu_has_vmx_ept_ad_bits(void)
1229 return vmx_capability.ept & VMX_EPT_AD_BIT;
1232 static inline bool cpu_has_vmx_invept_context(void)
1234 return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1237 static inline bool cpu_has_vmx_invept_global(void)
1239 return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1242 static inline bool cpu_has_vmx_invvpid_single(void)
1244 return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1247 static inline bool cpu_has_vmx_invvpid_global(void)
1249 return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1252 static inline bool cpu_has_vmx_invvpid(void)
1254 return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1257 static inline bool cpu_has_vmx_ept(void)
1259 return vmcs_config.cpu_based_2nd_exec_ctrl &
1260 SECONDARY_EXEC_ENABLE_EPT;
1263 static inline bool cpu_has_vmx_unrestricted_guest(void)
1265 return vmcs_config.cpu_based_2nd_exec_ctrl &
1266 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1269 static inline bool cpu_has_vmx_ple(void)
1271 return vmcs_config.cpu_based_2nd_exec_ctrl &
1272 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1275 static inline bool cpu_has_vmx_basic_inout(void)
1277 return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1280 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1282 return flexpriority_enabled && lapic_in_kernel(vcpu);
1285 static inline bool cpu_has_vmx_vpid(void)
1287 return vmcs_config.cpu_based_2nd_exec_ctrl &
1288 SECONDARY_EXEC_ENABLE_VPID;
1291 static inline bool cpu_has_vmx_rdtscp(void)
1293 return vmcs_config.cpu_based_2nd_exec_ctrl &
1294 SECONDARY_EXEC_RDTSCP;
1297 static inline bool cpu_has_vmx_invpcid(void)
1299 return vmcs_config.cpu_based_2nd_exec_ctrl &
1300 SECONDARY_EXEC_ENABLE_INVPCID;
1303 static inline bool cpu_has_virtual_nmis(void)
1305 return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1308 static inline bool cpu_has_vmx_wbinvd_exit(void)
1310 return vmcs_config.cpu_based_2nd_exec_ctrl &
1311 SECONDARY_EXEC_WBINVD_EXITING;
1314 static inline bool cpu_has_vmx_shadow_vmcs(void)
1317 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1318 /* check if the cpu supports writing r/o exit information fields */
1319 if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1322 return vmcs_config.cpu_based_2nd_exec_ctrl &
1323 SECONDARY_EXEC_SHADOW_VMCS;
1326 static inline bool cpu_has_vmx_pml(void)
1328 return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1331 static inline bool cpu_has_vmx_tsc_scaling(void)
1333 return vmcs_config.cpu_based_2nd_exec_ctrl &
1334 SECONDARY_EXEC_TSC_SCALING;
1337 static inline bool cpu_has_vmx_vmfunc(void)
1339 return vmcs_config.cpu_based_2nd_exec_ctrl &
1340 SECONDARY_EXEC_ENABLE_VMFUNC;
1343 static inline bool report_flexpriority(void)
1345 return flexpriority_enabled;
1348 static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1350 return vmx_misc_cr3_count(to_vmx(vcpu)->nested.nested_vmx_misc_low);
1353 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1355 return vmcs12->cpu_based_vm_exec_control & bit;
1358 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1360 return (vmcs12->cpu_based_vm_exec_control &
1361 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1362 (vmcs12->secondary_vm_exec_control & bit);
1365 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1367 return vmcs12->pin_based_vm_exec_control &
1368 PIN_BASED_VMX_PREEMPTION_TIMER;
1371 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1373 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1376 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1378 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
1381 static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
1383 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
1386 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
1388 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
1391 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
1393 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
1396 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
1398 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
1401 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
1403 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
1406 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
1408 return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
1411 static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
1413 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
1416 static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
1418 return nested_cpu_has_vmfunc(vmcs12) &&
1419 (vmcs12->vm_function_control &
1420 VMX_VMFUNC_EPTP_SWITCHING);
1423 static inline bool is_nmi(u32 intr_info)
1425 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1426 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
1429 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1431 unsigned long exit_qualification);
1432 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1433 struct vmcs12 *vmcs12,
1434 u32 reason, unsigned long qualification);
1436 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1440 for (i = 0; i < vmx->nmsrs; ++i)
1441 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
1446 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1452 } operand = { vpid, 0, gva };
1454 asm volatile (__ex(ASM_VMX_INVVPID)
1455 /* CF==1 or ZF==1 --> rc = -1 */
1456 "; ja 1f ; ud2 ; 1:"
1457 : : "a"(&operand), "c"(ext) : "cc", "memory");
1460 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1464 } operand = {eptp, gpa};
1466 asm volatile (__ex(ASM_VMX_INVEPT)
1467 /* CF==1 or ZF==1 --> rc = -1 */
1468 "; ja 1f ; ud2 ; 1:\n"
1469 : : "a" (&operand), "c" (ext) : "cc", "memory");
1472 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1476 i = __find_msr_index(vmx, msr);
1478 return &vmx->guest_msrs[i];
1482 static void vmcs_clear(struct vmcs *vmcs)
1484 u64 phys_addr = __pa(vmcs);
1487 asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
1488 : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1491 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1495 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1497 vmcs_clear(loaded_vmcs->vmcs);
1498 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
1499 vmcs_clear(loaded_vmcs->shadow_vmcs);
1500 loaded_vmcs->cpu = -1;
1501 loaded_vmcs->launched = 0;
1504 static void vmcs_load(struct vmcs *vmcs)
1506 u64 phys_addr = __pa(vmcs);
1509 asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1510 : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1513 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1517 #ifdef CONFIG_KEXEC_CORE
1519 * This bitmap is used to indicate whether the vmclear
1520 * operation is enabled on all cpus. All disabled by default.
1523 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1525 static inline void crash_enable_local_vmclear(int cpu)
1527 cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1530 static inline void crash_disable_local_vmclear(int cpu)
1532 cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1535 static inline int crash_local_vmclear_enabled(int cpu)
1537 return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1540 static void crash_vmclear_local_loaded_vmcss(void)
1542 int cpu = raw_smp_processor_id();
1543 struct loaded_vmcs *v;
1545 if (!crash_local_vmclear_enabled(cpu))
1548 list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1549 loaded_vmcss_on_cpu_link)
1550 vmcs_clear(v->vmcs);
1553 static inline void crash_enable_local_vmclear(int cpu) { }
1554 static inline void crash_disable_local_vmclear(int cpu) { }
1555 #endif /* CONFIG_KEXEC_CORE */
1557 static void __loaded_vmcs_clear(void *arg)
1559 struct loaded_vmcs *loaded_vmcs = arg;
1560 int cpu = raw_smp_processor_id();
1562 if (loaded_vmcs->cpu != cpu)
1563 return; /* vcpu migration can race with cpu offline */
1564 if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1565 per_cpu(current_vmcs, cpu) = NULL;
1566 crash_disable_local_vmclear(cpu);
1567 list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1570 * We should ensure that loaded_vmcs->loaded_vmcss_on_cpu_link is updated
1571 * before loaded_vmcs->cpu is set to -1, which is done in loaded_vmcs_init.
1572 * Otherwise, another cpu could see cpu == -1 first and then add the vmcs to
1573 * its percpu list before it has been deleted from the old one.
1577 loaded_vmcs_init(loaded_vmcs);
1578 crash_enable_local_vmclear(cpu);
1581 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1583 int cpu = loaded_vmcs->cpu;
1586 smp_call_function_single(cpu,
1587 __loaded_vmcs_clear, loaded_vmcs, 1);
1590 static inline void vpid_sync_vcpu_single(int vpid)
1595 if (cpu_has_vmx_invvpid_single())
1596 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
1599 static inline void vpid_sync_vcpu_global(void)
1601 if (cpu_has_vmx_invvpid_global())
1602 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
1605 static inline void vpid_sync_context(int vpid)
1607 if (cpu_has_vmx_invvpid_single())
1608 vpid_sync_vcpu_single(vpid);
1610 vpid_sync_vcpu_global();
1613 static inline void ept_sync_global(void)
1615 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
1618 static inline void ept_sync_context(u64 eptp)
1620 if (cpu_has_vmx_invept_context())
1621 __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
1626 static __always_inline void vmcs_check16(unsigned long field)
1628 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1629 "16-bit accessor invalid for 64-bit field");
1630 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1631 "16-bit accessor invalid for 64-bit high field");
1632 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1633 "16-bit accessor invalid for 32-bit high field");
1634 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1635 "16-bit accessor invalid for natural width field");
1638 static __always_inline void vmcs_check32(unsigned long field)
1640 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1641 "32-bit accessor invalid for 16-bit field");
1642 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1643 "32-bit accessor invalid for natural width field");
1646 static __always_inline void vmcs_check64(unsigned long field)
1648 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1649 "64-bit accessor invalid for 16-bit field");
1650 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1651 "64-bit accessor invalid for 64-bit high field");
1652 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1653 "64-bit accessor invalid for 32-bit field");
1654 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1655 "64-bit accessor invalid for natural width field");
1658 static __always_inline void vmcs_checkl(unsigned long field)
1660 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1661 "Natural width accessor invalid for 16-bit field");
1662 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1663 "Natural width accessor invalid for 64-bit field");
1664 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1665 "Natural width accessor invalid for 64-bit high field");
1666 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1667 "Natural width accessor invalid for 32-bit field");
1670 static __always_inline unsigned long __vmcs_readl(unsigned long field)
1672 unsigned long value;
1674 asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
1675 : "=a"(value) : "d"(field) : "cc");
1679 static __always_inline u16 vmcs_read16(unsigned long field)
1681 vmcs_check16(field);
1682 return __vmcs_readl(field);
1685 static __always_inline u32 vmcs_read32(unsigned long field)
1687 vmcs_check32(field);
1688 return __vmcs_readl(field);
1691 static __always_inline u64 vmcs_read64(unsigned long field)
1693 vmcs_check64(field);
1694 #ifdef CONFIG_X86_64
1695 return __vmcs_readl(field);
1697 return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
1701 static __always_inline unsigned long vmcs_readl(unsigned long field)
1704 return __vmcs_readl(field);
1707 static noinline void vmwrite_error(unsigned long field, unsigned long value)
1709 printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
1710 field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
1714 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
1718 asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1719 : "=q"(error) : "a"(value), "d"(field) : "cc");
1720 if (unlikely(error))
1721 vmwrite_error(field, value);
1724 static __always_inline void vmcs_write16(unsigned long field, u16 value)
1726 vmcs_check16(field);
1727 __vmcs_writel(field, value);
1730 static __always_inline void vmcs_write32(unsigned long field, u32 value)
1732 vmcs_check32(field);
1733 __vmcs_writel(field, value);
1736 static __always_inline void vmcs_write64(unsigned long field, u64 value)
1738 vmcs_check64(field);
1739 __vmcs_writel(field, value);
1740 #ifndef CONFIG_X86_64
1742 __vmcs_writel(field+1, value >> 32);
1746 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
1749 __vmcs_writel(field, value);
1752 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
1754 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1755 "vmcs_clear_bits does not support 64-bit fields");
1756 __vmcs_writel(field, __vmcs_readl(field) & ~mask);
1759 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
1761 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1762 "vmcs_set_bits does not support 64-bit fields");
1763 __vmcs_writel(field, __vmcs_readl(field) | mask);
1766 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
1768 vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
1771 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
1773 vmcs_write32(VM_ENTRY_CONTROLS, val);
1774 vmx->vm_entry_controls_shadow = val;
1777 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
1779 if (vmx->vm_entry_controls_shadow != val)
1780 vm_entry_controls_init(vmx, val);
1783 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
1785 return vmx->vm_entry_controls_shadow;
1789 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1791 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
1794 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1796 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
1799 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
1801 vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
1804 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
1806 vmcs_write32(VM_EXIT_CONTROLS, val);
1807 vmx->vm_exit_controls_shadow = val;
1810 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
1812 if (vmx->vm_exit_controls_shadow != val)
1813 vm_exit_controls_init(vmx, val);
1816 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
1818 return vmx->vm_exit_controls_shadow;
1822 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1824 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
1827 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1829 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
1832 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
1834 vmx->segment_cache.bitmask = 0;
1837 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
1841 u32 mask = 1 << (seg * SEG_FIELD_NR + field);
1843 if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
1844 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
1845 vmx->segment_cache.bitmask = 0;
1847 ret = vmx->segment_cache.bitmask & mask;
1848 vmx->segment_cache.bitmask |= mask;
1852 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
1854 u16 *p = &vmx->segment_cache.seg[seg].selector;
1856 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
1857 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
1861 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
1863 ulong *p = &vmx->segment_cache.seg[seg].base;
1865 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
1866 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
1870 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
1872 u32 *p = &vmx->segment_cache.seg[seg].limit;
1874 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
1875 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
1879 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
1881 u32 *p = &vmx->segment_cache.seg[seg].ar;
1883 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
1884 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
1888 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1892 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1893 (1u << DB_VECTOR) | (1u << AC_VECTOR);
1894 if ((vcpu->guest_debug &
1895 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
1896 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
1897 eb |= 1u << BP_VECTOR;
1898 if (to_vmx(vcpu)->rmode.vm86_active)
1901 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
1903 /* When we are running a nested L2 guest and L1 specified for it a
1904 * certain exception bitmap, we must trap the same exceptions and pass
1905 * them to L1. When running L2, we will only handle the exceptions
1906 * specified above if L1 did not want them.
1908 if (is_guest_mode(vcpu))
1909 eb |= get_vmcs12(vcpu)->exception_bitmap;
1911 vmcs_write32(EXCEPTION_BITMAP, eb);
1915 * Check if an MSR is intercepted for the currently loaded MSR bitmap.
1917 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
1919 unsigned long *msr_bitmap;
1920 int f = sizeof(unsigned long);
1922 if (!cpu_has_vmx_msr_bitmap())
1925 msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
1927 if (msr <= 0x1fff) {
1928 return !!test_bit(msr, msr_bitmap + 0x800 / f);
1929 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
1931 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
1938 * Check if an MSR is intercepted for the L01 MSR bitmap.
1940 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
1942 unsigned long *msr_bitmap;
1943 int f = sizeof(unsigned long);
1945 if (!cpu_has_vmx_msr_bitmap())
1948 msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
1950 if (msr <= 0x1fff) {
1951 return !!test_bit(msr, msr_bitmap + 0x800 / f);
1952 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
1954 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
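/*
 * The offsets used above reflect the 4K MSR bitmap layout: bytes
 * 0x000-0x3ff cover reads of MSRs 0x00000000-0x00001fff, bytes 0x400-0x7ff
 * cover reads of MSRs 0xc0000000-0xc0001fff, and bytes 0x800-0xbff and
 * 0xc00-0xfff cover writes of the same two ranges, which is why the
 * write-intercept checks index the bitmap at 0x800 and 0xc00.
 */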
1960 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1961 unsigned long entry, unsigned long exit)
1963 vm_entry_controls_clearbit(vmx, entry);
1964 vm_exit_controls_clearbit(vmx, exit);
1967 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1970 struct msr_autoload *m = &vmx->msr_autoload;
1974 if (cpu_has_load_ia32_efer) {
1975 clear_atomic_switch_msr_special(vmx,
1976 VM_ENTRY_LOAD_IA32_EFER,
1977 VM_EXIT_LOAD_IA32_EFER);
1981 case MSR_CORE_PERF_GLOBAL_CTRL:
1982 if (cpu_has_load_perf_global_ctrl) {
1983 clear_atomic_switch_msr_special(vmx,
1984 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1985 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1991 for (i = 0; i < m->nr; ++i)
1992 if (m->guest[i].index == msr)
1998 m->guest[i] = m->guest[m->nr];
1999 m->host[i] = m->host[m->nr];
2000 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
2001 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
2004 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2005 unsigned long entry, unsigned long exit,
2006 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
2007 u64 guest_val, u64 host_val)
2009 vmcs_write64(guest_val_vmcs, guest_val);
2010 vmcs_write64(host_val_vmcs, host_val);
2011 vm_entry_controls_setbit(vmx, entry);
2012 vm_exit_controls_setbit(vmx, exit);
2015 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
2016 u64 guest_val, u64 host_val)
2019 struct msr_autoload *m = &vmx->msr_autoload;
2023 if (cpu_has_load_ia32_efer) {
2024 add_atomic_switch_msr_special(vmx,
2025 VM_ENTRY_LOAD_IA32_EFER,
2026 VM_EXIT_LOAD_IA32_EFER,
2029 guest_val, host_val);
2033 case MSR_CORE_PERF_GLOBAL_CTRL:
2034 if (cpu_has_load_perf_global_ctrl) {
2035 add_atomic_switch_msr_special(vmx,
2036 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2037 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2038 GUEST_IA32_PERF_GLOBAL_CTRL,
2039 HOST_IA32_PERF_GLOBAL_CTRL,
2040 guest_val, host_val);
2044 case MSR_IA32_PEBS_ENABLE:
2045 /* PEBS needs a quiescent period after being disabled (to write
2046 * a record). Disabling PEBS through VMX MSR swapping doesn't
2047 * provide that period, so a CPU could write the host's record into guest memory.
2050 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
2053 for (i = 0; i < m->nr; ++i)
2054 if (m->guest[i].index == msr)
2057 if (i == NR_AUTOLOAD_MSRS) {
2058 printk_once(KERN_WARNING "Not enough msr switch entries. "
2059 "Can't add msr %x\n", msr);
2061 } else if (i == m->nr) {
2063 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
2064 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
2067 m->guest[i].index = msr;
2068 m->guest[i].value = guest_val;
2069 m->host[i].index = msr;
2070 m->host[i].value = host_val;
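/*
 * Illustrative usage (hypothetical values): a caller that needs an MSR
 * loaded with a guest value on every VM entry and restored on every VM exit
 * pairs the two helpers above, e.g.
 *
 *	add_atomic_switch_msr(vmx, MSR_CORE_PERF_GLOBAL_CTRL,
 *			      guest_global_ctrl, host_global_ctrl);
 *	...
 *	clear_atomic_switch_msr(vmx, MSR_CORE_PERF_GLOBAL_CTRL);
 *
 * For MSR_EFER and MSR_CORE_PERF_GLOBAL_CTRL the helpers prefer the
 * dedicated VM-entry/VM-exit controls when the CPU supports them and only
 * fall back to the autoload arrays otherwise.
 */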
2073 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
2075 u64 guest_efer = vmx->vcpu.arch.efer;
2076 u64 ignore_bits = 0;
2080 * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
2081 * host CPUID is more efficient than testing guest CPUID
2082 * or CR4. Host SMEP is anyway a requirement for guest SMEP.
2084 if (boot_cpu_has(X86_FEATURE_SMEP))
2085 guest_efer |= EFER_NX;
2086 else if (!(guest_efer & EFER_NX))
2087 ignore_bits |= EFER_NX;
2091 * LMA and LME handled by hardware; SCE meaningless outside long mode.
2093 ignore_bits |= EFER_SCE;
2094 #ifdef CONFIG_X86_64
2095 ignore_bits |= EFER_LMA | EFER_LME;
2096 /* SCE is meaningful only in long mode on Intel */
2097 if (guest_efer & EFER_LMA)
2098 ignore_bits &= ~(u64)EFER_SCE;
2101 clear_atomic_switch_msr(vmx, MSR_EFER);
2104 * On EPT, we can't emulate NX, so we must switch EFER atomically.
2105 * On CPUs that support "load IA32_EFER", always switch EFER
2106 * atomically, since it's faster than switching it manually.
2108 if (cpu_has_load_ia32_efer ||
2109 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
2110 if (!(guest_efer & EFER_LMA))
2111 guest_efer &= ~EFER_LME;
2112 if (guest_efer != host_efer)
2113 add_atomic_switch_msr(vmx, MSR_EFER,
2114 guest_efer, host_efer);
2115 return false;
2116 } else {
2117 guest_efer &= ~ignore_bits;
2118 guest_efer |= host_efer & ignore_bits;
2120 vmx->guest_msrs[efer_offset].data = guest_efer;
2121 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
2123 return true;
2124 }
2127 #ifdef CONFIG_X86_32
2129 * On 32-bit kernels, VM exits still load the FS and GS bases from the
2130 * VMCS rather than the segment table. KVM uses this helper to figure
2131 * out the current bases to poke them into the VMCS before entry.
2133 static unsigned long segment_base(u16 selector)
2135 struct desc_struct *table;
2138 if (!(selector & ~SEGMENT_RPL_MASK))
2139 return 0;
2141 table = get_current_gdt_ro();
2143 if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2144 u16 ldt_selector = kvm_read_ldt();
2146 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
2147 return 0;
2149 table = (struct desc_struct *)segment_base(ldt_selector);
2150 }
2151 v = get_desc_base(&table[selector >> 3]);
2152 return v;
2153 }
2154 #endif
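/*
 * Record the host segment/LDT selectors, FS/GS bases and the shared
 * MSRs the guest may clobber, then load the guest values of the shared
 * MSRs.  __vmx_load_host_state() below undoes this on the way back.
 */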
2156 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
2158 struct vcpu_vmx *vmx = to_vmx(vcpu);
2161 if (vmx->host_state.loaded)
2164 vmx->host_state.loaded = 1;
2166 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
2167 * allow segment selectors with cpl > 0 or ti == 1.
2169 vmx->host_state.ldt_sel = kvm_read_ldt();
2170 vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
2171 savesegment(fs, vmx->host_state.fs_sel);
2172 if (!(vmx->host_state.fs_sel & 7)) {
2173 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
2174 vmx->host_state.fs_reload_needed = 0;
2176 vmcs_write16(HOST_FS_SELECTOR, 0);
2177 vmx->host_state.fs_reload_needed = 1;
2179 savesegment(gs, vmx->host_state.gs_sel);
2180 if (!(vmx->host_state.gs_sel & 7))
2181 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
2183 vmcs_write16(HOST_GS_SELECTOR, 0);
2184 vmx->host_state.gs_ldt_reload_needed = 1;
2187 #ifdef CONFIG_X86_64
2188 savesegment(ds, vmx->host_state.ds_sel);
2189 savesegment(es, vmx->host_state.es_sel);
2192 #ifdef CONFIG_X86_64
2193 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
2194 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
2196 vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
2197 vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
2200 #ifdef CONFIG_X86_64
2201 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2202 if (is_long_mode(&vmx->vcpu))
2203 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2205 if (boot_cpu_has(X86_FEATURE_MPX))
2206 rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2207 for (i = 0; i < vmx->save_nmsrs; ++i)
2208 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2209 vmx->guest_msrs[i].data,
2210 vmx->guest_msrs[i].mask);
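/*
 * Restore the host state saved by vmx_save_host_state(): segment and
 * LDT selectors, MSR_KERNEL_GS_BASE, BNDCFGS and the fixmap GDT.
 */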
2213 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
2215 if (!vmx->host_state.loaded)
2218 ++vmx->vcpu.stat.host_state_reload;
2219 vmx->host_state.loaded = 0;
2220 #ifdef CONFIG_X86_64
2221 if (is_long_mode(&vmx->vcpu))
2222 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2224 if (vmx->host_state.gs_ldt_reload_needed) {
2225 kvm_load_ldt(vmx->host_state.ldt_sel);
2226 #ifdef CONFIG_X86_64
2227 load_gs_index(vmx->host_state.gs_sel);
2229 loadsegment(gs, vmx->host_state.gs_sel);
2232 if (vmx->host_state.fs_reload_needed)
2233 loadsegment(fs, vmx->host_state.fs_sel);
2234 #ifdef CONFIG_X86_64
2235 if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
2236 loadsegment(ds, vmx->host_state.ds_sel);
2237 loadsegment(es, vmx->host_state.es_sel);
2240 invalidate_tss_limit();
2241 #ifdef CONFIG_X86_64
2242 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2244 if (vmx->host_state.msr_host_bndcfgs)
2245 wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2246 load_fixmap_gdt(raw_smp_processor_id());
2249 static void vmx_load_host_state(struct vcpu_vmx *vmx)
2252 __vmx_load_host_state(vmx);
2256 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2258 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2259 struct pi_desc old, new;
2263 * In case of hot-plug or hot-unplug, we may have to undo
2264 * vmx_vcpu_pi_put even if there is no assigned device. And we
2265 * always keep PI.NDST up to date for simplicity: it makes the
2266 * code easier, and CPU migration is not a fast path.
2268 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
2272 * First handle the simple case where no cmpxchg is necessary; just
2273 * allow posting non-urgent interrupts.
2275 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
2276 * PI.NDST: pi_post_block will do it for us and the wakeup_handler
2277 * expects the VCPU to be on the blocked_vcpu_list that matches
2278 * PI.NDST.
2279 */
2280 if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
2281 vcpu->cpu == cpu) {
2282 pi_clear_sn(pi_desc);
2283 return;
2284 }
2286 /* The full case. */
2287 do {
2288 old.control = new.control = pi_desc->control;
2290 dest = cpu_physical_id(cpu);
2292 if (x2apic_enabled())
2293 new.ndst = dest;
2294 else
2295 new.ndst = (dest << 8) & 0xFF00;
2297 new.sn = 0;
2298 } while (cmpxchg64(&pi_desc->control, old.control,
2299 new.control) != old.control);
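/*
 * Cache the current TSC scaling ratio and propagate it to the VMCS
 * TSC_MULTIPLIER field.
 */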
2302 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
2304 vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
2305 vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2309 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
2310 * vcpu mutex is already taken.
2312 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2314 struct vcpu_vmx *vmx = to_vmx(vcpu);
2315 bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
2317 if (!already_loaded) {
2318 loaded_vmcs_clear(vmx->loaded_vmcs);
2319 local_irq_disable();
2320 crash_disable_local_vmclear(cpu);
2323 * Read loaded_vmcs->cpu should be before fetching
2324 * loaded_vmcs->loaded_vmcss_on_cpu_link.
2325 * See the comments in __loaded_vmcs_clear().
2329 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
2330 &per_cpu(loaded_vmcss_on_cpu, cpu));
2331 crash_enable_local_vmclear(cpu);
2335 if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
2336 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
2337 vmcs_load(vmx->loaded_vmcs->vmcs);
2338 indirect_branch_prediction_barrier();
2341 if (!already_loaded) {
2342 void *gdt = get_current_gdt_ro();
2343 unsigned long sysenter_esp;
2345 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2348 * Linux uses per-cpu TSS and GDT, so set these when switching
2349 * processors. See 22.2.4.
2351 vmcs_writel(HOST_TR_BASE,
2352 (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
2353 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */
2356 * VM exits change the host TR limit to 0x67 after a VM
2357 * exit. This is okay, since 0x67 covers everything except
2358 * the IO bitmap and we have code to handle the IO bitmap
2359 * being lost after a VM exit.
2361 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
2363 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
2364 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
2366 vmx->loaded_vmcs->cpu = cpu;
2369 /* Setup TSC multiplier */
2370 if (kvm_has_tsc_control &&
2371 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
2372 decache_tsc_multiplier(vmx);
2374 vmx_vcpu_pi_load(vcpu, cpu);
2375 vmx->host_pkru = read_pkru();
2378 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
2380 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2382 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2383 !irq_remapping_cap(IRQ_POSTING_CAP) ||
2384 !kvm_vcpu_apicv_active(vcpu))
2387 /* Set SN when the vCPU is preempted */
2388 if (vcpu->preempted)
2389 pi_set_sn(pi_desc);
2392 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
2394 vmx_vcpu_pi_put(vcpu);
2396 __vmx_load_host_state(to_vmx(vcpu));
2399 static bool emulation_required(struct kvm_vcpu *vcpu)
2401 return emulate_invalid_guest_state && !guest_state_valid(vcpu);
2404 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2407 * Return the cr0 value that a nested guest would read. This is a combination
2408 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
2409 * its hypervisor (cr0_read_shadow).
2411 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
2413 return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
2414 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
2416 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
2418 return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
2419 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
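/*
 * RFLAGS is cached in the register cache; while the guest runs in
 * emulated real mode (vm86), the flag bits the guest does not own are
 * kept in rmode.save_rflags and merged back here.
 */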
2422 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
2424 unsigned long rflags, save_rflags;
2426 if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
2427 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2428 rflags = vmcs_readl(GUEST_RFLAGS);
2429 if (to_vmx(vcpu)->rmode.vm86_active) {
2430 rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2431 save_rflags = to_vmx(vcpu)->rmode.save_rflags;
2432 rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2434 to_vmx(vcpu)->rflags = rflags;
2436 return to_vmx(vcpu)->rflags;
2439 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2441 unsigned long old_rflags = vmx_get_rflags(vcpu);
2443 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2444 to_vmx(vcpu)->rflags = rflags;
2445 if (to_vmx(vcpu)->rmode.vm86_active) {
2446 to_vmx(vcpu)->rmode.save_rflags = rflags;
2447 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2449 vmcs_writel(GUEST_RFLAGS, rflags);
2451 if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
2452 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
2455 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
2457 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2460 if (interruptibility & GUEST_INTR_STATE_STI)
2461 ret |= KVM_X86_SHADOW_INT_STI;
2462 if (interruptibility & GUEST_INTR_STATE_MOV_SS)
2463 ret |= KVM_X86_SHADOW_INT_MOV_SS;
2468 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
2470 u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2471 u32 interruptibility = interruptibility_old;
2473 interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
2475 if (mask & KVM_X86_SHADOW_INT_MOV_SS)
2476 interruptibility |= GUEST_INTR_STATE_MOV_SS;
2477 else if (mask & KVM_X86_SHADOW_INT_STI)
2478 interruptibility |= GUEST_INTR_STATE_STI;
2480 if ((interruptibility != interruptibility_old))
2481 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
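/*
 * Advance RIP past the instruction that just trapped, using the
 * hardware-provided VM_EXIT_INSTRUCTION_LEN, and drop any pending
 * interrupt shadow since the instruction is now complete.
 */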
2484 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
2488 rip = kvm_rip_read(vcpu);
2489 rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
2490 kvm_rip_write(vcpu, rip);
2492 /* skipping an emulated instruction also counts */
2493 vmx_set_interrupt_shadow(vcpu, 0);
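/*
 * Reflect a pending exception into L1 by synthesizing an
 * EXIT_REASON_EXCEPTION_NMI vmexit with the appropriate interruption
 * information and exit qualification.
 */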
2496 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
2497 unsigned long exit_qual)
2499 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2500 unsigned int nr = vcpu->arch.exception.nr;
2501 u32 intr_info = nr | INTR_INFO_VALID_MASK;
2503 if (vcpu->arch.exception.has_error_code) {
2504 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
2505 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2508 if (kvm_exception_is_soft(nr))
2509 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2511 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2513 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
2514 vmx_get_nmi_mask(vcpu))
2515 intr_info |= INTR_INFO_UNBLOCK_NMI;
2517 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
2521 * KVM wants to inject page-faults which it got to the guest. This function
2522 * checks whether in a nested guest, we need to inject them to L1 or L2.
2524 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
2526 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2527 unsigned int nr = vcpu->arch.exception.nr;
2529 if (nr == PF_VECTOR) {
2530 if (vcpu->arch.exception.nested_apf) {
2531 *exit_qual = vcpu->arch.apf.nested_apf_token;
2535 * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
2536 * The fix is to add the ancillary datum (CR2 or DR6) to structs
2537 * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
2538 * can be written only when inject_pending_event runs. This should be
2539 * conditional on a new capability---if the capability is disabled,
2540 * kvm_multiple_exception would write the ancillary information to
2541 * CR2 or DR6, for backwards ABI-compatibility.
2543 if (nested_vmx_is_page_fault_vmexit(vmcs12,
2544 vcpu->arch.exception.error_code)) {
2545 *exit_qual = vcpu->arch.cr2;
2549 if (vmcs12->exception_bitmap & (1u << nr)) {
2550 if (nr == DB_VECTOR)
2551 *exit_qual = vcpu->arch.dr6;
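/*
 * Inject the exception queued in vcpu->arch.exception into the guest,
 * either via the VM-entry interruption-information field or, for
 * emulated real mode, through the real-mode interrupt injection path.
 */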
2561 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
2563 struct vcpu_vmx *vmx = to_vmx(vcpu);
2564 unsigned nr = vcpu->arch.exception.nr;
2565 bool has_error_code = vcpu->arch.exception.has_error_code;
2566 u32 error_code = vcpu->arch.exception.error_code;
2567 u32 intr_info = nr | INTR_INFO_VALID_MASK;
2569 if (has_error_code) {
2570 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
2571 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2574 if (vmx->rmode.vm86_active) {
2576 if (kvm_exception_is_soft(nr))
2577 inc_eip = vcpu->arch.event_exit_inst_len;
2578 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
2579 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2583 if (kvm_exception_is_soft(nr)) {
2584 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2585 vmx->vcpu.arch.event_exit_inst_len);
2586 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2588 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2590 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
2593 static bool vmx_rdtscp_supported(void)
2595 return cpu_has_vmx_rdtscp();
2598 static bool vmx_invpcid_supported(void)
2600 return cpu_has_vmx_invpcid() && enable_ept;
2604 * Swap MSR entry in host/guest MSR entry array.
2606 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
2608 struct shared_msr_entry tmp;
2610 tmp = vmx->guest_msrs[to];
2611 vmx->guest_msrs[to] = vmx->guest_msrs[from];
2612 vmx->guest_msrs[from] = tmp;
2616 * Set up the vmcs to automatically save and restore system
2617 * msrs. Don't touch the 64-bit msrs if the guest is in legacy
2618 * mode, as fiddling with msrs is very expensive.
2620 static void setup_msrs(struct vcpu_vmx *vmx)
2622 int save_nmsrs, index;
2624 save_nmsrs = 0;
2625 #ifdef CONFIG_X86_64
2626 if (is_long_mode(&vmx->vcpu)) {
2627 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
2628 if (index >= 0)
2629 move_msr_up(vmx, index, save_nmsrs++);
2630 index = __find_msr_index(vmx, MSR_LSTAR);
2631 if (index >= 0)
2632 move_msr_up(vmx, index, save_nmsrs++);
2633 index = __find_msr_index(vmx, MSR_CSTAR);
2634 if (index >= 0)
2635 move_msr_up(vmx, index, save_nmsrs++);
2636 index = __find_msr_index(vmx, MSR_TSC_AUX);
2637 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
2638 move_msr_up(vmx, index, save_nmsrs++);
2640 * MSR_STAR is only needed on long mode guests, and only
2641 * if efer.sce is enabled.
2643 index = __find_msr_index(vmx, MSR_STAR);
2644 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
2645 move_msr_up(vmx, index, save_nmsrs++);
2648 index = __find_msr_index(vmx, MSR_EFER);
2649 if (index >= 0 && update_transition_efer(vmx, index))
2650 move_msr_up(vmx, index, save_nmsrs++);
2652 vmx->save_nmsrs = save_nmsrs;
2654 if (cpu_has_vmx_msr_bitmap())
2655 vmx_update_msr_bitmap(&vmx->vcpu);
2659 * reads and returns guest's timestamp counter "register"
2660 * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
2661 * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
2663 static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
2665 u64 host_tsc, tsc_offset;
2668 tsc_offset = vmcs_read64(TSC_OFFSET);
2669 return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
2673 * writes 'offset' into guest's timestamp counter offset register
2675 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2677 if (is_guest_mode(vcpu)) {
2679 * We're here if L1 chose not to trap WRMSR to TSC. According
2680 * to the spec, this should set L1's TSC; The offset that L1
2681 * set for L2 remains unchanged, and still needs to be added
2682 * to the newly set TSC to get L2's TSC.
2684 struct vmcs12 *vmcs12;
2685 /* recalculate vmcs02.TSC_OFFSET: */
2686 vmcs12 = get_vmcs12(vcpu);
2687 vmcs_write64(TSC_OFFSET, offset +
2688 (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
2689 vmcs12->tsc_offset : 0));
2691 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2692 vmcs_read64(TSC_OFFSET), offset);
2693 vmcs_write64(TSC_OFFSET, offset);
2698 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
2699 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
2700 * all guests if the "nested" module option is off, and can also be disabled
2701 * for a single guest by disabling its VMX cpuid bit.
2703 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2705 return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
2709 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
2710 * returned for the various VMX controls MSRs when nested VMX is enabled.
2711 * The same values should also be used to verify that vmcs12 control fields are
2712 * valid during nested entry from L1 to L2.
2713 * Each of these control msrs has a low and high 32-bit half: A low bit is on
2714 * if the corresponding bit in the (32-bit) control field *must* be on, and a
2715 * bit in the high half is on if the corresponding bit in the control field
2716 * may be on. See also vmx_control_verify().
2718 static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2721 * Note that as a general rule, the high half of the MSRs (bits in
2722 * the control fields which may be 1) should be initialized by the
2723 * intersection of the underlying hardware's MSR (i.e., features which
2724 * can be supported) and the list of features we want to expose -
2725 * because they are known to be properly supported in our code.
2726 * Also, usually, the low half of the MSRs (bits which must be 1) can
2727 * be set to 0, meaning that L1 may turn off any of these bits. The
2728 * reason is that if one of these bits is necessary, it will appear
2729 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
2730 * fields of vmcs01 and vmcs02, will turn these bits off - and
2731 * nested_vmx_exit_reflected() will not pass related exits to L1.
2732 * These rules have exceptions below.
2735 /* pin-based controls */
2736 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
2737 vmx->nested.nested_vmx_pinbased_ctls_low,
2738 vmx->nested.nested_vmx_pinbased_ctls_high);
2739 vmx->nested.nested_vmx_pinbased_ctls_low |=
2740 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2741 vmx->nested.nested_vmx_pinbased_ctls_high &=
2742 PIN_BASED_EXT_INTR_MASK |
2743 PIN_BASED_NMI_EXITING |
2744 PIN_BASED_VIRTUAL_NMIS;
2745 vmx->nested.nested_vmx_pinbased_ctls_high |=
2746 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2747 PIN_BASED_VMX_PREEMPTION_TIMER;
2748 if (kvm_vcpu_apicv_active(&vmx->vcpu))
2749 vmx->nested.nested_vmx_pinbased_ctls_high |=
2750 PIN_BASED_POSTED_INTR;
2753 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
2754 vmx->nested.nested_vmx_exit_ctls_low,
2755 vmx->nested.nested_vmx_exit_ctls_high);
2756 vmx->nested.nested_vmx_exit_ctls_low =
2757 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
2759 vmx->nested.nested_vmx_exit_ctls_high &=
2760 #ifdef CONFIG_X86_64
2761 VM_EXIT_HOST_ADDR_SPACE_SIZE |
2763 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
2764 vmx->nested.nested_vmx_exit_ctls_high |=
2765 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
2766 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
2767 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
2769 if (kvm_mpx_supported())
2770 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
2772 /* We support free control of debug control saving. */
2773 vmx->nested.nested_vmx_exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
2775 /* entry controls */
2776 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2777 vmx->nested.nested_vmx_entry_ctls_low,
2778 vmx->nested.nested_vmx_entry_ctls_high);
2779 vmx->nested.nested_vmx_entry_ctls_low =
2780 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
2781 vmx->nested.nested_vmx_entry_ctls_high &=
2782 #ifdef CONFIG_X86_64
2783 VM_ENTRY_IA32E_MODE |
2785 VM_ENTRY_LOAD_IA32_PAT;
2786 vmx->nested.nested_vmx_entry_ctls_high |=
2787 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
2788 if (kvm_mpx_supported())
2789 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
2791 /* We support free control of debug control loading. */
2792 vmx->nested.nested_vmx_entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
2794 /* cpu-based controls */
2795 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2796 vmx->nested.nested_vmx_procbased_ctls_low,
2797 vmx->nested.nested_vmx_procbased_ctls_high);
2798 vmx->nested.nested_vmx_procbased_ctls_low =
2799 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2800 vmx->nested.nested_vmx_procbased_ctls_high &=
2801 CPU_BASED_VIRTUAL_INTR_PENDING |
2802 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
2803 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
2804 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
2805 CPU_BASED_CR3_STORE_EXITING |
2806 #ifdef CONFIG_X86_64
2807 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
2809 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
2810 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
2811 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
2812 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
2813 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2815 * We can allow some features even when not supported by the
2816 * hardware. For example, L1 can specify an MSR bitmap - and we
2817 * can use it to avoid exits to L1 - even when L0 runs L2
2818 * without MSR bitmaps.
2820 vmx->nested.nested_vmx_procbased_ctls_high |=
2821 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2822 CPU_BASED_USE_MSR_BITMAPS;
2824 /* We support free control of CR3 access interception. */
2825 vmx->nested.nested_vmx_procbased_ctls_low &=
2826 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
2829 * secondary cpu-based controls. Do not include those that
2830 * depend on CPUID bits, they are added later by vmx_cpuid_update.
2832 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
2833 vmx->nested.nested_vmx_secondary_ctls_low,
2834 vmx->nested.nested_vmx_secondary_ctls_high);
2835 vmx->nested.nested_vmx_secondary_ctls_low = 0;
2836 vmx->nested.nested_vmx_secondary_ctls_high &=
2837 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2838 SECONDARY_EXEC_DESC |
2839 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2840 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2841 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2842 SECONDARY_EXEC_WBINVD_EXITING;
2845 /* nested EPT: emulate EPT also to L1 */
2846 vmx->nested.nested_vmx_secondary_ctls_high |=
2847 SECONDARY_EXEC_ENABLE_EPT;
2848 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2849 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
2850 if (cpu_has_vmx_ept_execute_only())
2851 vmx->nested.nested_vmx_ept_caps |=
2852 VMX_EPT_EXECUTE_ONLY_BIT;
2853 vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
2854 vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
2855 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
2856 VMX_EPT_1GB_PAGE_BIT;
2857 if (enable_ept_ad_bits) {
2858 vmx->nested.nested_vmx_secondary_ctls_high |=
2859 SECONDARY_EXEC_ENABLE_PML;
2860 vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
2864 if (cpu_has_vmx_vmfunc()) {
2865 vmx->nested.nested_vmx_secondary_ctls_high |=
2866 SECONDARY_EXEC_ENABLE_VMFUNC;
2868 * Advertise EPTP switching unconditionally
2869 * since we emulate it
2872 vmx->nested.nested_vmx_vmfunc_controls =
2873 VMX_VMFUNC_EPTP_SWITCHING;
2877 * Old versions of KVM use the single-context version without
2878 * checking for support, so declare that it is supported even
2879 * though it is treated as global context. The alternative is
2880 * not failing the single-context invvpid, and it is worse.
2883 vmx->nested.nested_vmx_secondary_ctls_high |=
2884 SECONDARY_EXEC_ENABLE_VPID;
2885 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
2886 VMX_VPID_EXTENT_SUPPORTED_MASK;
2889 if (enable_unrestricted_guest)
2890 vmx->nested.nested_vmx_secondary_ctls_high |=
2891 SECONDARY_EXEC_UNRESTRICTED_GUEST;
2893 /* miscellaneous data */
2894 rdmsr(MSR_IA32_VMX_MISC,
2895 vmx->nested.nested_vmx_misc_low,
2896 vmx->nested.nested_vmx_misc_high);
2897 vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
2898 vmx->nested.nested_vmx_misc_low |=
2899 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
2900 VMX_MISC_ACTIVITY_HLT;
2901 vmx->nested.nested_vmx_misc_high = 0;
2904 * This MSR reports some information about VMX support. We
2905 * should return information about the VMX we emulate for the
2906 * guest, and the VMCS structure we give it - not about the
2907 * VMX support of the underlying hardware.
2909 vmx->nested.nested_vmx_basic =
2911 VMX_BASIC_TRUE_CTLS |
2912 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
2913 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
2915 if (cpu_has_vmx_basic_inout())
2916 vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT;
2919 * These MSRs specify bits which the guest must keep fixed on
2920 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
2921 * We picked the standard core2 setting.
2923 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
2924 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE
2925 vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON;
2926 vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON;
2928 /* These MSRs specify bits which the guest must keep fixed off. */
2929 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1);
2930 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1);
2932 /* highest index: VMX_PREEMPTION_TIMER_VALUE */
2933 vmx->nested.nested_vmx_vmcs_enum = 0x2e;
2937 * if fixed0[i] == 1: val[i] must be 1
2938 * if fixed1[i] == 0: val[i] must be 0
2940 static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
2942 return ((val & fixed1) | fixed0) == val;
2945 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
2947 return fixed_bits_valid(control, low, high);
2950 static inline u64 vmx_control_msr(u32 low, u32 high)
2952 return low | ((u64)high << 32);
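/*
 * Return true if, within @mask, every bit set in @subset is also set
 * in @superset.  Used to validate userspace-restored VMX capability
 * MSRs below.
 */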
2955 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
2960 return (superset | subset) == superset;
2963 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
2965 const u64 feature_and_reserved =
2966 /* feature (except bit 48; see below) */
2967 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
2968 /* reserved */
2969 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
2970 u64 vmx_basic = vmx->nested.nested_vmx_basic;
2972 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
2976 * KVM does not emulate a version of VMX that constrains physical
2977 * addresses of VMX structures (e.g. VMCS) to 32-bits.
2979 if (data & BIT_ULL(48))
2982 if (vmx_basic_vmcs_revision_id(vmx_basic) !=
2983 vmx_basic_vmcs_revision_id(data))
2986 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
2989 vmx->nested.nested_vmx_basic = data;
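/*
 * Restore one of the "true" VMX control MSRs (or PROCBASED_CTLS2) from
 * userspace, rejecting values that clear must-be-1 bits or advertise
 * allowed-1 bits that KVM does not support.
 */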
2993 static int
2994 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
2999 switch (msr_index) {
3000 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3001 lowp = &vmx->nested.nested_vmx_pinbased_ctls_low;
3002 highp = &vmx->nested.nested_vmx_pinbased_ctls_high;
3004 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3005 lowp = &vmx->nested.nested_vmx_procbased_ctls_low;
3006 highp = &vmx->nested.nested_vmx_procbased_ctls_high;
3008 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3009 lowp = &vmx->nested.nested_vmx_exit_ctls_low;
3010 highp = &vmx->nested.nested_vmx_exit_ctls_high;
3012 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3013 lowp = &vmx->nested.nested_vmx_entry_ctls_low;
3014 highp = &vmx->nested.nested_vmx_entry_ctls_high;
3016 case MSR_IA32_VMX_PROCBASED_CTLS2:
3017 lowp = &vmx->nested.nested_vmx_secondary_ctls_low;
3018 highp = &vmx->nested.nested_vmx_secondary_ctls_high;
3024 supported = vmx_control_msr(*lowp, *highp);
3026 /* Check must-be-1 bits are still 1. */
3027 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
3030 /* Check must-be-0 bits are still 0. */
3031 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
3035 *highp = data >> 32;
3039 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
3041 const u64 feature_and_reserved_bits =
3042 /* feature */
3043 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
3044 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
3045 /* reserved */
3046 GENMASK_ULL(13, 9) | BIT_ULL(31);
3049 vmx_misc = vmx_control_msr(vmx->nested.nested_vmx_misc_low,
3050 vmx->nested.nested_vmx_misc_high);
3052 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
3055 if ((vmx->nested.nested_vmx_pinbased_ctls_high &
3056 PIN_BASED_VMX_PREEMPTION_TIMER) &&
3057 vmx_misc_preemption_timer_rate(data) !=
3058 vmx_misc_preemption_timer_rate(vmx_misc))
3061 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
3064 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
3067 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
3070 vmx->nested.nested_vmx_misc_low = data;
3071 vmx->nested.nested_vmx_misc_high = data >> 32;
3075 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
3077 u64 vmx_ept_vpid_cap;
3079 vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps,
3080 vmx->nested.nested_vmx_vpid_caps);
3082 /* Every bit is either reserved or a feature bit. */
3083 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
3086 vmx->nested.nested_vmx_ept_caps = data;
3087 vmx->nested.nested_vmx_vpid_caps = data >> 32;
3091 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3095 switch (msr_index) {
3096 case MSR_IA32_VMX_CR0_FIXED0:
3097 msr = &vmx->nested.nested_vmx_cr0_fixed0;
3099 case MSR_IA32_VMX_CR4_FIXED0:
3100 msr = &vmx->nested.nested_vmx_cr4_fixed0;
3107 * 1 bits (which indicates bits which "must-be-1" during VMX operation)
3108 * must be 1 in the restored value.
3110 if (!is_bitwise_subset(data, *msr, -1ULL))
3118 * Called when userspace is restoring VMX MSRs.
3120 * Returns 0 on success, non-0 otherwise.
3122 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
3124 struct vcpu_vmx *vmx = to_vmx(vcpu);
3126 switch (msr_index) {
3127 case MSR_IA32_VMX_BASIC:
3128 return vmx_restore_vmx_basic(vmx, data);
3129 case MSR_IA32_VMX_PINBASED_CTLS:
3130 case MSR_IA32_VMX_PROCBASED_CTLS:
3131 case MSR_IA32_VMX_EXIT_CTLS:
3132 case MSR_IA32_VMX_ENTRY_CTLS:
3134 * The "non-true" VMX capability MSRs are generated from the
3135 * "true" MSRs, so we do not support restoring them directly.
3137 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
3138 * should restore the "true" MSRs with the must-be-1 bits
3139 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
3140 * DEFAULT SETTINGS".
3143 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3144 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3145 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3146 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3147 case MSR_IA32_VMX_PROCBASED_CTLS2:
3148 return vmx_restore_control_msr(vmx, msr_index, data);
3149 case MSR_IA32_VMX_MISC:
3150 return vmx_restore_vmx_misc(vmx, data);
3151 case MSR_IA32_VMX_CR0_FIXED0:
3152 case MSR_IA32_VMX_CR4_FIXED0:
3153 return vmx_restore_fixed0_msr(vmx, msr_index, data);
3154 case MSR_IA32_VMX_CR0_FIXED1:
3155 case MSR_IA32_VMX_CR4_FIXED1:
3157 * These MSRs are generated based on the vCPU's CPUID, so we
3158 * do not support restoring them directly.
3161 case MSR_IA32_VMX_EPT_VPID_CAP:
3162 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
3163 case MSR_IA32_VMX_VMCS_ENUM:
3164 vmx->nested.nested_vmx_vmcs_enum = data;
3168 * The rest of the VMX capability MSRs do not support restore.
3174 /* Returns 0 on success, non-0 otherwise. */
3175 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
3177 struct vcpu_vmx *vmx = to_vmx(vcpu);
3179 switch (msr_index) {
3180 case MSR_IA32_VMX_BASIC:
3181 *pdata = vmx->nested.nested_vmx_basic;
3183 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3184 case MSR_IA32_VMX_PINBASED_CTLS:
3185 *pdata = vmx_control_msr(
3186 vmx->nested.nested_vmx_pinbased_ctls_low,
3187 vmx->nested.nested_vmx_pinbased_ctls_high);
3188 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
3189 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3191 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3192 case MSR_IA32_VMX_PROCBASED_CTLS:
3193 *pdata = vmx_control_msr(
3194 vmx->nested.nested_vmx_procbased_ctls_low,
3195 vmx->nested.nested_vmx_procbased_ctls_high);
3196 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
3197 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3199 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3200 case MSR_IA32_VMX_EXIT_CTLS:
3201 *pdata = vmx_control_msr(
3202 vmx->nested.nested_vmx_exit_ctls_low,
3203 vmx->nested.nested_vmx_exit_ctls_high);
3204 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
3205 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3207 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3208 case MSR_IA32_VMX_ENTRY_CTLS:
3209 *pdata = vmx_control_msr(
3210 vmx->nested.nested_vmx_entry_ctls_low,
3211 vmx->nested.nested_vmx_entry_ctls_high);
3212 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
3213 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3215 case MSR_IA32_VMX_MISC:
3216 *pdata = vmx_control_msr(
3217 vmx->nested.nested_vmx_misc_low,
3218 vmx->nested.nested_vmx_misc_high);
3220 case MSR_IA32_VMX_CR0_FIXED0:
3221 *pdata = vmx->nested.nested_vmx_cr0_fixed0;
3223 case MSR_IA32_VMX_CR0_FIXED1:
3224 *pdata = vmx->nested.nested_vmx_cr0_fixed1;
3226 case MSR_IA32_VMX_CR4_FIXED0:
3227 *pdata = vmx->nested.nested_vmx_cr4_fixed0;
3229 case MSR_IA32_VMX_CR4_FIXED1:
3230 *pdata = vmx->nested.nested_vmx_cr4_fixed1;
3232 case MSR_IA32_VMX_VMCS_ENUM:
3233 *pdata = vmx->nested.nested_vmx_vmcs_enum;
3235 case MSR_IA32_VMX_PROCBASED_CTLS2:
3236 *pdata = vmx_control_msr(
3237 vmx->nested.nested_vmx_secondary_ctls_low,
3238 vmx->nested.nested_vmx_secondary_ctls_high);
3240 case MSR_IA32_VMX_EPT_VPID_CAP:
3241 *pdata = vmx->nested.nested_vmx_ept_caps |
3242 ((u64)vmx->nested.nested_vmx_vpid_caps << 32);
3244 case MSR_IA32_VMX_VMFUNC:
3245 *pdata = vmx->nested.nested_vmx_vmfunc_controls;
3254 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
3257 uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
3259 return !(val & ~valid_bits);
3263 * Reads an msr value (of 'msr_index') into 'pdata'.
3264 * Returns 0 on success, non-0 otherwise.
3265 * Assumes vcpu_load() was already called.
3267 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3269 struct shared_msr_entry *msr;
3271 switch (msr_info->index) {
3272 #ifdef CONFIG_X86_64
3274 msr_info->data = vmcs_readl(GUEST_FS_BASE);
3277 msr_info->data = vmcs_readl(GUEST_GS_BASE);
3279 case MSR_KERNEL_GS_BASE:
3280 vmx_load_host_state(to_vmx(vcpu));
3281 msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
3285 return kvm_get_msr_common(vcpu, msr_info);
3287 msr_info->data = guest_read_tsc(vcpu);
3289 case MSR_IA32_SPEC_CTRL:
3290 if (!msr_info->host_initiated &&
3291 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
3292 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
3293 return 1;
3295 msr_info->data = to_vmx(vcpu)->spec_ctrl;
3297 case MSR_IA32_ARCH_CAPABILITIES:
3298 if (!msr_info->host_initiated &&
3299 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
3301 msr_info->data = to_vmx(vcpu)->arch_capabilities;
3303 case MSR_IA32_SYSENTER_CS:
3304 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
3306 case MSR_IA32_SYSENTER_EIP:
3307 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
3309 case MSR_IA32_SYSENTER_ESP:
3310 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
3312 case MSR_IA32_BNDCFGS:
3313 if (!kvm_mpx_supported() ||
3314 (!msr_info->host_initiated &&
3315 !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3317 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
3319 case MSR_IA32_MCG_EXT_CTL:
3320 if (!msr_info->host_initiated &&
3321 !(to_vmx(vcpu)->msr_ia32_feature_control &
3322 FEATURE_CONTROL_LMCE))
3324 msr_info->data = vcpu->arch.mcg_ext_ctl;
3326 case MSR_IA32_FEATURE_CONTROL:
3327 msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
3329 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3330 if (!nested_vmx_allowed(vcpu))
3332 return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
3334 if (!vmx_xsaves_supported())
3336 msr_info->data = vcpu->arch.ia32_xss;
3339 if (!msr_info->host_initiated &&
3340 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
3341 return 1;
3342 /* Otherwise falls through */
3344 msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
3346 msr_info->data = msr->data;
3349 return kvm_get_msr_common(vcpu, msr_info);
3355 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
3358 * Writes msr value into the appropriate "register".
3359 * Returns 0 on success, non-0 otherwise.
3360 * Assumes vcpu_load() was already called.
3362 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3364 struct vcpu_vmx *vmx = to_vmx(vcpu);
3365 struct shared_msr_entry *msr;
3367 u32 msr_index = msr_info->index;
3368 u64 data = msr_info->data;
3370 switch (msr_index) {
3372 ret = kvm_set_msr_common(vcpu, msr_info);
3374 #ifdef CONFIG_X86_64
3376 vmx_segment_cache_clear(vmx);
3377 vmcs_writel(GUEST_FS_BASE, data);
3380 vmx_segment_cache_clear(vmx);
3381 vmcs_writel(GUEST_GS_BASE, data);
3383 case MSR_KERNEL_GS_BASE:
3384 vmx_load_host_state(vmx);
3385 vmx->msr_guest_kernel_gs_base = data;
3388 case MSR_IA32_SYSENTER_CS:
3389 vmcs_write32(GUEST_SYSENTER_CS, data);
3391 case MSR_IA32_SYSENTER_EIP:
3392 vmcs_writel(GUEST_SYSENTER_EIP, data);
3394 case MSR_IA32_SYSENTER_ESP:
3395 vmcs_writel(GUEST_SYSENTER_ESP, data);
3397 case MSR_IA32_BNDCFGS:
3398 if (!kvm_mpx_supported() ||
3399 (!msr_info->host_initiated &&
3400 !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3402 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
3403 (data & MSR_IA32_BNDCFGS_RSVD))
3405 vmcs_write64(GUEST_BNDCFGS, data);
3408 kvm_write_tsc(vcpu, msr_info);
3410 case MSR_IA32_SPEC_CTRL:
3411 if (!msr_info->host_initiated &&
3412 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
3413 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
3414 return 1;
3416 /* The STIBP bit doesn't fault even if it's not advertised */
3417 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
3418 return 1;
3420 vmx->spec_ctrl = data;
3422 if (!data)
3423 break;
3425 /*
3426 * For non-nested:
3427 * When it's written (to non-zero) for the first time, pass
3428 * it through.
3430 * For nested:
3431 * The handling of the MSR bitmap for L2 guests is done in
3432 * nested_vmx_merge_msr_bitmap. We should not touch the
3433 * vmcs02.msr_bitmap here since it gets completely overwritten
3434 * in the merging. We update the vmcs01 here for L1 as well
3435 * since it will end up touching the MSR anyway now.
3437 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
3438 MSR_IA32_SPEC_CTRL,
3439 MSR_TYPE_RW);
3440 break;
3441 case MSR_IA32_PRED_CMD:
3442 if (!msr_info->host_initiated &&
3443 !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) &&
3444 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
3445 return 1;
3447 if (data & ~PRED_CMD_IBPB)
3448 return 1;
3450 if (!data)
3451 break;
3453 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
3455 /*
3456 * For non-nested:
3457 * When it's written (to non-zero) for the first time, pass
3458 * it through.
3460 * For nested:
3461 * The handling of the MSR bitmap for L2 guests is done in
3462 * nested_vmx_merge_msr_bitmap. We should not touch the
3463 * vmcs02.msr_bitmap here since it gets completely overwritten
3464 * in the merging.
3465 */
3466 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
3467 MSR_TYPE_W);
3468 break;
3469 case MSR_IA32_ARCH_CAPABILITIES:
3470 if (!msr_info->host_initiated)
3472 vmx->arch_capabilities = data;
3474 case MSR_IA32_CR_PAT:
3475 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
3476 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3478 vmcs_write64(GUEST_IA32_PAT, data);
3479 vcpu->arch.pat = data;
3482 ret = kvm_set_msr_common(vcpu, msr_info);
3484 case MSR_IA32_TSC_ADJUST:
3485 ret = kvm_set_msr_common(vcpu, msr_info);
3487 case MSR_IA32_MCG_EXT_CTL:
3488 if ((!msr_info->host_initiated &&
3489 !(to_vmx(vcpu)->msr_ia32_feature_control &
3490 FEATURE_CONTROL_LMCE)) ||
3491 (data & ~MCG_EXT_CTL_LMCE_EN))
3493 vcpu->arch.mcg_ext_ctl = data;
3495 case MSR_IA32_FEATURE_CONTROL:
3496 if (!vmx_feature_control_msr_valid(vcpu, data) ||
3497 (to_vmx(vcpu)->msr_ia32_feature_control &
3498 FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
3500 vmx->msr_ia32_feature_control = data;
3501 if (msr_info->host_initiated && data == 0)
3502 vmx_leave_nested(vcpu);
3504 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3505 if (!msr_info->host_initiated)
3506 return 1; /* they are read-only */
3507 if (!nested_vmx_allowed(vcpu))
3509 return vmx_set_vmx_msr(vcpu, msr_index, data);
3511 if (!vmx_xsaves_supported())
3514 * The only supported bit as of Skylake is bit 8, but
3515 * it is not supported on KVM.
3516 */
3517 if (data != 0)
3518 return 1;
3519 vcpu->arch.ia32_xss = data;
3520 if (vcpu->arch.ia32_xss != host_xss)
3521 add_atomic_switch_msr(vmx, MSR_IA32_XSS,
3522 vcpu->arch.ia32_xss, host_xss);
3524 clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
3527 if (!msr_info->host_initiated &&
3528 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
3530 /* Check reserved bit, higher 32 bits should be zero */
3531 if ((data >> 32) != 0)
3532 return 1;
3533 /* Otherwise falls through */
3535 msr = find_msr_entry(vmx, msr_index);
3537 u64 old_msr_data = msr->data;
3539 if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
3541 ret = kvm_set_shared_msr(msr->index, msr->data,
3545 msr->data = old_msr_data;
3549 ret = kvm_set_msr_common(vcpu, msr_info);
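/*
 * Pull a register (such as RSP, RIP, or the PDPTRs) out of the VMCS
 * into the architectural register cache on demand after a VM exit.
 */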
3555 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
3557 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
3560 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
3563 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
3565 case VCPU_EXREG_PDPTR: