1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22 #include "lapic.h"
23
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include <linux/nospec.h>
38 #include "kvm_cache_regs.h"
39 #include "x86.h"
40
41 #include <asm/cpu.h>
42 #include <asm/io.h>
43 #include <asm/desc.h>
44 #include <asm/vmx.h>
45 #include <asm/virtext.h>
46 #include <asm/mce.h>
47 #include <asm/fpu/internal.h>
48 #include <asm/perf_event.h>
49 #include <asm/debugreg.h>
50 #include <asm/kexec.h>
51 #include <asm/apic.h>
52 #include <asm/irq_remapping.h>
53 #include <asm/mmu_context.h>
54 #include <asm/nospec-branch.h>
55 #include <asm/mshyperv.h>
56
57 #include "trace.h"
58 #include "pmu.h"
59 #include "vmx_evmcs.h"
60
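/*
 * The wrappers below add an exception fixup (via __kvm_handle_fault_on_reboot)
 * around a VMX instruction so that a fault taken because VMX was already
 * disabled, e.g. during an emergency reboot or kexec, is swallowed instead of
 * crashing the host; __ex_clear additionally zeroes the named register on the
 * fault path, so a faulting VMREAD simply reads back as 0.
 */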
61 #define __ex(x) __kvm_handle_fault_on_reboot(x)
62 #define __ex_clear(x, reg) \
63         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
64
65 MODULE_AUTHOR("Qumranet");
66 MODULE_LICENSE("GPL");
67
68 static const struct x86_cpu_id vmx_cpu_id[] = {
69         X86_FEATURE_MATCH(X86_FEATURE_VMX),
70         {}
71 };
72 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
73
74 static bool __read_mostly enable_vpid = 1;
75 module_param_named(vpid, enable_vpid, bool, 0444);
76
77 static bool __read_mostly enable_vnmi = 1;
78 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
79
80 static bool __read_mostly flexpriority_enabled = 1;
81 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
82
83 static bool __read_mostly enable_ept = 1;
84 module_param_named(ept, enable_ept, bool, S_IRUGO);
85
86 static bool __read_mostly enable_unrestricted_guest = 1;
87 module_param_named(unrestricted_guest,
88                         enable_unrestricted_guest, bool, S_IRUGO);
89
90 static bool __read_mostly enable_ept_ad_bits = 1;
91 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
92
93 static bool __read_mostly emulate_invalid_guest_state = true;
94 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
95
96 static bool __read_mostly fasteoi = 1;
97 module_param(fasteoi, bool, S_IRUGO);
98
99 static bool __read_mostly enable_apicv = 1;
100 module_param(enable_apicv, bool, S_IRUGO);
101
102 static bool __read_mostly enable_shadow_vmcs = 1;
103 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
104 /*
105  * If nested=1, nested virtualization is supported, i.e., guests may use
106  * VMX and act as hypervisors for their own guests. If nested=0, guests may not
107  * use VMX instructions.
108  */
109 static bool __read_mostly nested = 0;
110 module_param(nested, bool, S_IRUGO);
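/*
 * Example usage (illustrative): "modprobe kvm_intel nested=1", or boot with
 * "kvm-intel.nested=1" on the kernel command line.
 */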
111
112 static u64 __read_mostly host_xss;
113
114 static bool __read_mostly enable_pml = 1;
115 module_param_named(pml, enable_pml, bool, S_IRUGO);
116
117 #define MSR_TYPE_R      1
118 #define MSR_TYPE_W      2
119 #define MSR_TYPE_RW     3
120
121 #define MSR_BITMAP_MODE_X2APIC          1
122 #define MSR_BITMAP_MODE_X2APIC_APICV    2
123 #define MSR_BITMAP_MODE_LM              4
124
125 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
126
127 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
128 static int __read_mostly cpu_preemption_timer_multi;
129 static bool __read_mostly enable_preemption_timer = 1;
130 #ifdef CONFIG_X86_64
131 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
132 #endif
133
134 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
135 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
136 #define KVM_VM_CR0_ALWAYS_ON                            \
137         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |      \
138          X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
139 #define KVM_CR4_GUEST_OWNED_BITS                                      \
140         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
141          | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
142
143 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
144 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
145 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
146
147 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
148
149 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
150
151 /*
152  * Hyper-V requires all of these, so mark them as supported even though
153  * they are just treated the same as all-context.
154  */
155 #define VMX_VPID_EXTENT_SUPPORTED_MASK          \
156         (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
157         VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
158         VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
159         VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
160
161 /*
162  * These parameters are used to configure the controls for Pause-Loop Exiting:
163  * ple_gap:    upper bound on the amount of time between two successive
164  *             executions of PAUSE in a loop. Also indicates whether PLE is enabled.
165  *             According to tests, this time is usually smaller than 128 cycles.
166  * ple_window: upper bound on the amount of time a guest is allowed to execute
167  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
168  *             less than 2^12 cycles
169  * Time is measured based on a counter that runs at the same rate as the TSC,
170  * refer to SDM volume 3B, sections 21.6.13 & 22.1.3.
171  */
172 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
173
174 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
175 module_param(ple_window, uint, 0444);
176
177 /* Default doubles per-vcpu window every exit. */
178 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
179 module_param(ple_window_grow, uint, 0444);
180
181 /* Default resets the per-vcpu window to ple_window on every exit. */
182 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
183 module_param(ple_window_shrink, uint, 0444);
184
185 /* Default is to compute the maximum so we can never overflow. */
186 static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
187 module_param(ple_window_max, uint, 0444);
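/*
 * For illustration, assuming the default grow factor of 2: a vCPU whose
 * current window is 4096 TSC-rate cycles grows to 8192 after one PAUSE-loop
 * exit, and the window is never grown beyond ple_window_max.
 */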
188
189 extern const ulong vmx_return;
190
191 struct kvm_vmx {
192         struct kvm kvm;
193
194         unsigned int tss_addr;
195         bool ept_identity_pagetable_done;
196         gpa_t ept_identity_map_addr;
197 };
198
199 #define NR_AUTOLOAD_MSRS 8
200
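/*
 * Layout of the hardware VMCS region (SDM vol. 3, "Format of the VMCS
 * Region"): a 31-bit revision identifier (bit 31 set marks a shadow VMCS),
 * a 32-bit VMX-abort indicator, and implementation-specific data whose size
 * is reported by the IA32_VMX_BASIC MSR.
 */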
201 struct vmcs {
202         u32 revision_id;
203         u32 abort;
204         char data[0];
205 };
206
207 /*
208  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
209  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
210  * loaded on this CPU (so we can clear them if the CPU goes down).
211  */
212 struct loaded_vmcs {
213         struct vmcs *vmcs;
214         struct vmcs *shadow_vmcs;
215         int cpu;
216         bool launched;
217         bool nmi_known_unmasked;
218         unsigned long vmcs_host_cr3;    /* May not match real cr3 */
219         unsigned long vmcs_host_cr4;    /* May not match real cr4 */
220         /* Support for vnmi-less CPUs */
221         int soft_vnmi_blocked;
222         ktime_t entry_time;
223         s64 vnmi_blocked_time;
224         unsigned long *msr_bitmap;
225         struct list_head loaded_vmcss_on_cpu_link;
226 };
227
228 struct shared_msr_entry {
229         unsigned index;
230         u64 data;
231         u64 mask;
232 };
233
234 /*
235  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
236  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
237  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
238  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
239  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
240  * More than one of these structures may exist, if L1 runs multiple L2 guests.
241  * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
242  * underlying hardware which will be used to run L2.
243  * This structure is packed to ensure that its layout is identical across
244  * machines (necessary for live migration).
245  * If there are changes in this struct, VMCS12_REVISION must be changed.
246  */
247 typedef u64 natural_width;
248 struct __packed vmcs12 {
249         /* According to the Intel spec, a VMCS region must start with the
250          * following two fields. Then follow implementation-specific data.
251          */
252         u32 revision_id;
253         u32 abort;
254
255         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
256         u32 padding[7]; /* room for future expansion */
257
258         u64 io_bitmap_a;
259         u64 io_bitmap_b;
260         u64 msr_bitmap;
261         u64 vm_exit_msr_store_addr;
262         u64 vm_exit_msr_load_addr;
263         u64 vm_entry_msr_load_addr;
264         u64 tsc_offset;
265         u64 virtual_apic_page_addr;
266         u64 apic_access_addr;
267         u64 posted_intr_desc_addr;
268         u64 vm_function_control;
269         u64 ept_pointer;
270         u64 eoi_exit_bitmap0;
271         u64 eoi_exit_bitmap1;
272         u64 eoi_exit_bitmap2;
273         u64 eoi_exit_bitmap3;
274         u64 eptp_list_address;
275         u64 xss_exit_bitmap;
276         u64 guest_physical_address;
277         u64 vmcs_link_pointer;
278         u64 pml_address;
279         u64 guest_ia32_debugctl;
280         u64 guest_ia32_pat;
281         u64 guest_ia32_efer;
282         u64 guest_ia32_perf_global_ctrl;
283         u64 guest_pdptr0;
284         u64 guest_pdptr1;
285         u64 guest_pdptr2;
286         u64 guest_pdptr3;
287         u64 guest_bndcfgs;
288         u64 host_ia32_pat;
289         u64 host_ia32_efer;
290         u64 host_ia32_perf_global_ctrl;
291         u64 padding64[8]; /* room for future expansion */
292         /*
293          * To allow migration of L1 (complete with its L2 guests) between
294          * machines of different natural widths (32 or 64 bit), we cannot have
295          * unsigned long fields with no explicit size. We use u64 (aliased
296          * natural_width) instead. Luckily, x86 is little-endian.
297          */
298         natural_width cr0_guest_host_mask;
299         natural_width cr4_guest_host_mask;
300         natural_width cr0_read_shadow;
301         natural_width cr4_read_shadow;
302         natural_width cr3_target_value0;
303         natural_width cr3_target_value1;
304         natural_width cr3_target_value2;
305         natural_width cr3_target_value3;
306         natural_width exit_qualification;
307         natural_width guest_linear_address;
308         natural_width guest_cr0;
309         natural_width guest_cr3;
310         natural_width guest_cr4;
311         natural_width guest_es_base;
312         natural_width guest_cs_base;
313         natural_width guest_ss_base;
314         natural_width guest_ds_base;
315         natural_width guest_fs_base;
316         natural_width guest_gs_base;
317         natural_width guest_ldtr_base;
318         natural_width guest_tr_base;
319         natural_width guest_gdtr_base;
320         natural_width guest_idtr_base;
321         natural_width guest_dr7;
322         natural_width guest_rsp;
323         natural_width guest_rip;
324         natural_width guest_rflags;
325         natural_width guest_pending_dbg_exceptions;
326         natural_width guest_sysenter_esp;
327         natural_width guest_sysenter_eip;
328         natural_width host_cr0;
329         natural_width host_cr3;
330         natural_width host_cr4;
331         natural_width host_fs_base;
332         natural_width host_gs_base;
333         natural_width host_tr_base;
334         natural_width host_gdtr_base;
335         natural_width host_idtr_base;
336         natural_width host_ia32_sysenter_esp;
337         natural_width host_ia32_sysenter_eip;
338         natural_width host_rsp;
339         natural_width host_rip;
340         natural_width paddingl[8]; /* room for future expansion */
341         u32 pin_based_vm_exec_control;
342         u32 cpu_based_vm_exec_control;
343         u32 exception_bitmap;
344         u32 page_fault_error_code_mask;
345         u32 page_fault_error_code_match;
346         u32 cr3_target_count;
347         u32 vm_exit_controls;
348         u32 vm_exit_msr_store_count;
349         u32 vm_exit_msr_load_count;
350         u32 vm_entry_controls;
351         u32 vm_entry_msr_load_count;
352         u32 vm_entry_intr_info_field;
353         u32 vm_entry_exception_error_code;
354         u32 vm_entry_instruction_len;
355         u32 tpr_threshold;
356         u32 secondary_vm_exec_control;
357         u32 vm_instruction_error;
358         u32 vm_exit_reason;
359         u32 vm_exit_intr_info;
360         u32 vm_exit_intr_error_code;
361         u32 idt_vectoring_info_field;
362         u32 idt_vectoring_error_code;
363         u32 vm_exit_instruction_len;
364         u32 vmx_instruction_info;
365         u32 guest_es_limit;
366         u32 guest_cs_limit;
367         u32 guest_ss_limit;
368         u32 guest_ds_limit;
369         u32 guest_fs_limit;
370         u32 guest_gs_limit;
371         u32 guest_ldtr_limit;
372         u32 guest_tr_limit;
373         u32 guest_gdtr_limit;
374         u32 guest_idtr_limit;
375         u32 guest_es_ar_bytes;
376         u32 guest_cs_ar_bytes;
377         u32 guest_ss_ar_bytes;
378         u32 guest_ds_ar_bytes;
379         u32 guest_fs_ar_bytes;
380         u32 guest_gs_ar_bytes;
381         u32 guest_ldtr_ar_bytes;
382         u32 guest_tr_ar_bytes;
383         u32 guest_interruptibility_info;
384         u32 guest_activity_state;
385         u32 guest_sysenter_cs;
386         u32 host_ia32_sysenter_cs;
387         u32 vmx_preemption_timer_value;
388         u32 padding32[7]; /* room for future expansion */
389         u16 virtual_processor_id;
390         u16 posted_intr_nv;
391         u16 guest_es_selector;
392         u16 guest_cs_selector;
393         u16 guest_ss_selector;
394         u16 guest_ds_selector;
395         u16 guest_fs_selector;
396         u16 guest_gs_selector;
397         u16 guest_ldtr_selector;
398         u16 guest_tr_selector;
399         u16 guest_intr_status;
400         u16 guest_pml_index;
401         u16 host_es_selector;
402         u16 host_cs_selector;
403         u16 host_ss_selector;
404         u16 host_ds_selector;
405         u16 host_fs_selector;
406         u16 host_gs_selector;
407         u16 host_tr_selector;
408 };
409
410 /*
411  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
412  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
413  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
414  */
415 #define VMCS12_REVISION 0x11e57ed0
416
417 /*
418  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
419  * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
420  * the current implementation, 4K is reserved to avoid future complications.
421  */
422 #define VMCS12_SIZE 0x1000
423
424 /*
425  * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
426  * supported VMCS12 field encoding.
427  */
428 #define VMCS12_MAX_FIELD_INDEX 0x17
429
430 struct nested_vmx_msrs {
431         /*
432          * We only store the "true" versions of the VMX capability MSRs. We
433          * generate the "non-true" versions by setting the must-be-1 bits
434          * according to the SDM.
435          */
436         u32 procbased_ctls_low;
437         u32 procbased_ctls_high;
438         u32 secondary_ctls_low;
439         u32 secondary_ctls_high;
440         u32 pinbased_ctls_low;
441         u32 pinbased_ctls_high;
442         u32 exit_ctls_low;
443         u32 exit_ctls_high;
444         u32 entry_ctls_low;
445         u32 entry_ctls_high;
446         u32 misc_low;
447         u32 misc_high;
448         u32 ept_caps;
449         u32 vpid_caps;
450         u64 basic;
451         u64 cr0_fixed0;
452         u64 cr0_fixed1;
453         u64 cr4_fixed0;
454         u64 cr4_fixed1;
455         u64 vmcs_enum;
456         u64 vmfunc_controls;
457 };
458
459 /*
460  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
461  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
462  */
463 struct nested_vmx {
464         /* Has the level-1 (L1) guest done VMXON? */
465         bool vmxon;
466         gpa_t vmxon_ptr;
467         bool pml_full;
468
469         /* The guest-physical address of the current VMCS L1 keeps for L2 */
470         gpa_t current_vmptr;
471         /*
472          * Cache of the guest's VMCS, existing outside of guest memory.
473          * Loaded from guest memory during VMPTRLD. Flushed to guest
474          * memory during VMCLEAR and VMPTRLD.
475          */
476         struct vmcs12 *cached_vmcs12;
477         /*
478          * Indicates if the shadow vmcs must be updated with the
479          * data held by vmcs12.
480          */
481         bool sync_shadow_vmcs;
482         bool dirty_vmcs12;
483
484         bool change_vmcs01_virtual_x2apic_mode;
485         /* L2 must run next, and mustn't decide to exit to L1. */
486         bool nested_run_pending;
487
488         struct loaded_vmcs vmcs02;
489
490         /*
491          * Guest pages referred to in the vmcs02 with host-physical
492          * pointers, so we must keep them pinned while L2 runs.
493          */
494         struct page *apic_access_page;
495         struct page *virtual_apic_page;
496         struct page *pi_desc_page;
497         struct pi_desc *pi_desc;
498         bool pi_pending;
499         u16 posted_intr_nv;
500
501         struct hrtimer preemption_timer;
502         bool preemption_timer_expired;
503
504         /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
505         u64 vmcs01_debugctl;
506
507         u16 vpid02;
508         u16 last_vpid;
509
510         struct nested_vmx_msrs msrs;
511
512         /* SMM related state */
513         struct {
514                 /* in VMX operation on SMM entry? */
515                 bool vmxon;
516                 /* in guest mode on SMM entry? */
517                 bool guest_mode;
518         } smm;
519 };
520
521 #define POSTED_INTR_ON  0
522 #define POSTED_INTR_SN  1
523
524 /* Posted-Interrupt Descriptor */
525 struct pi_desc {
526         u32 pir[8];     /* Posted interrupt requested */
527         union {
528                 struct {
529                                 /* bit 256 - Outstanding Notification */
530                         u16     on      : 1,
531                                 /* bit 257 - Suppress Notification */
532                                 sn      : 1,
533                                 /* bit 271:258 - Reserved */
534                                 rsvd_1  : 14;
535                                 /* bit 279:272 - Notification Vector */
536                         u8      nv;
537                                 /* bit 287:280 - Reserved */
538                         u8      rsvd_2;
539                                 /* bit 319:288 - Notification Destination */
540                         u32     ndst;
541                 };
542                 u64 control;
543         };
544         u32 rsvd[6];
545 } __aligned(64);
546
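/*
 * The descriptor above is also read and written by hardware (the CPU when it
 * processes a posted-interrupt notification in the guest, and the IOMMU when
 * VT-d posted interrupts are in use), so the helpers below touch 'pir' and
 * 'control' only with atomic bitops.
 */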
547 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
548 {
549         return test_and_set_bit(POSTED_INTR_ON,
550                         (unsigned long *)&pi_desc->control);
551 }
552
553 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
554 {
555         return test_and_clear_bit(POSTED_INTR_ON,
556                         (unsigned long *)&pi_desc->control);
557 }
558
559 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
560 {
561         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
562 }
563
564 static inline void pi_clear_sn(struct pi_desc *pi_desc)
565 {
566         clear_bit(POSTED_INTR_SN,
567                   (unsigned long *)&pi_desc->control);
568 }
569
570 static inline void pi_set_sn(struct pi_desc *pi_desc)
571 {
572         set_bit(POSTED_INTR_SN,
573                 (unsigned long *)&pi_desc->control);
574 }
575
576 static inline void pi_clear_on(struct pi_desc *pi_desc)
577 {
578         clear_bit(POSTED_INTR_ON,
579                   (unsigned long *)&pi_desc->control);
580 }
581
582 static inline int pi_test_on(struct pi_desc *pi_desc)
583 {
584         return test_bit(POSTED_INTR_ON,
585                         (unsigned long *)&pi_desc->control);
586 }
587
588 static inline int pi_test_sn(struct pi_desc *pi_desc)
589 {
590         return test_bit(POSTED_INTR_SN,
591                         (unsigned long *)&pi_desc->control);
592 }
593
594 struct vcpu_vmx {
595         struct kvm_vcpu       vcpu;
596         unsigned long         host_rsp;
597         u8                    fail;
598         u8                    msr_bitmap_mode;
599         u32                   exit_intr_info;
600         u32                   idt_vectoring_info;
601         ulong                 rflags;
602         struct shared_msr_entry *guest_msrs;
603         int                   nmsrs;
604         int                   save_nmsrs;
605         unsigned long         host_idt_base;
606 #ifdef CONFIG_X86_64
607         u64                   msr_host_kernel_gs_base;
608         u64                   msr_guest_kernel_gs_base;
609 #endif
610
611         u64                   arch_capabilities;
612         u64                   spec_ctrl;
613
614         u32 vm_entry_controls_shadow;
615         u32 vm_exit_controls_shadow;
616         u32 secondary_exec_control;
617
618         /*
619          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
620          * non-nested (L1) guest, it always points to vmcs01. For a nested
621          * guest (L2), it points to a different VMCS.
622          */
623         struct loaded_vmcs    vmcs01;
624         struct loaded_vmcs   *loaded_vmcs;
625         bool                  __launched; /* temporary, used in vmx_vcpu_run */
626         struct msr_autoload {
627                 unsigned nr;
628                 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
629                 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
630         } msr_autoload;
631         struct {
632                 int           loaded;
633                 u16           fs_sel, gs_sel, ldt_sel;
634 #ifdef CONFIG_X86_64
635                 u16           ds_sel, es_sel;
636 #endif
637                 int           gs_ldt_reload_needed;
638                 int           fs_reload_needed;
639                 u64           msr_host_bndcfgs;
640         } host_state;
641         struct {
642                 int vm86_active;
643                 ulong save_rflags;
644                 struct kvm_segment segs[8];
645         } rmode;
646         struct {
647                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
648                 struct kvm_save_segment {
649                         u16 selector;
650                         unsigned long base;
651                         u32 limit;
652                         u32 ar;
653                 } seg[8];
654         } segment_cache;
655         int vpid;
656         bool emulation_required;
657
658         u32 exit_reason;
659
660         /* Posted interrupt descriptor */
661         struct pi_desc pi_desc;
662
663         /* Support for a guest hypervisor (nested VMX) */
664         struct nested_vmx nested;
665
666         /* Dynamic PLE window. */
667         int ple_window;
668         bool ple_window_dirty;
669
670         /* Support for PML */
671 #define PML_ENTITY_NUM          512
672         struct page *pml_pg;
673
674         /* apic deadline value in host tsc */
675         u64 hv_deadline_tsc;
676
677         u64 current_tsc_ratio;
678
679         u32 host_pkru;
680
681         unsigned long host_debugctlmsr;
682
683         /*
684          * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
685          * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
686          * in msr_ia32_feature_control_valid_bits.
687          */
688         u64 msr_ia32_feature_control;
689         u64 msr_ia32_feature_control_valid_bits;
690 };
691
692 enum segment_cache_field {
693         SEG_FIELD_SEL = 0,
694         SEG_FIELD_BASE = 1,
695         SEG_FIELD_LIMIT = 2,
696         SEG_FIELD_AR = 3,
697
698         SEG_FIELD_NR = 4
699 };
700
701 static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
702 {
703         return container_of(kvm, struct kvm_vmx, kvm);
704 }
705
706 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
707 {
708         return container_of(vcpu, struct vcpu_vmx, vcpu);
709 }
710
711 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
712 {
713         return &(to_vmx(vcpu)->pi_desc);
714 }
715
716 #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
717 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
718 #define FIELD(number, name)     [ROL16(number, 6)] = VMCS12_OFFSET(name)
719 #define FIELD64(number, name)                                           \
720         FIELD(number, name),                                            \
721         [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
722
723
724 static u16 shadow_read_only_fields[] = {
725 #define SHADOW_FIELD_RO(x) x,
726 #include "vmx_shadow_fields.h"
727 };
728 static int max_shadow_read_only_fields =
729         ARRAY_SIZE(shadow_read_only_fields);
730
731 static u16 shadow_read_write_fields[] = {
732 #define SHADOW_FIELD_RW(x) x,
733 #include "vmx_shadow_fields.h"
734 };
735 static int max_shadow_read_write_fields =
736         ARRAY_SIZE(shadow_read_write_fields);
737
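/*
 * The offset table below is indexed by ROL16(field_encoding, 6): rotating the
 * encoding left by 6 bits moves the width/type bits of the VMCS field encoding
 * into the low bits and the field index into the high bits, giving every
 * supported field a unique slot.  For example, GUEST_ES_SELECTOR (0x0800) maps
 * to index ROL16(0x0800, 6) == 0x0002.  FIELD64() adds a second entry for the
 * field's _HIGH alias (encoding + 1), pointing at the upper 32 bits of the u64
 * member (offset + sizeof(u32)).
 */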
738 static const unsigned short vmcs_field_to_offset_table[] = {
739         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
740         FIELD(POSTED_INTR_NV, posted_intr_nv),
741         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
742         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
743         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
744         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
745         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
746         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
747         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
748         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
749         FIELD(GUEST_INTR_STATUS, guest_intr_status),
750         FIELD(GUEST_PML_INDEX, guest_pml_index),
751         FIELD(HOST_ES_SELECTOR, host_es_selector),
752         FIELD(HOST_CS_SELECTOR, host_cs_selector),
753         FIELD(HOST_SS_SELECTOR, host_ss_selector),
754         FIELD(HOST_DS_SELECTOR, host_ds_selector),
755         FIELD(HOST_FS_SELECTOR, host_fs_selector),
756         FIELD(HOST_GS_SELECTOR, host_gs_selector),
757         FIELD(HOST_TR_SELECTOR, host_tr_selector),
758         FIELD64(IO_BITMAP_A, io_bitmap_a),
759         FIELD64(IO_BITMAP_B, io_bitmap_b),
760         FIELD64(MSR_BITMAP, msr_bitmap),
761         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
762         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
763         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
764         FIELD64(TSC_OFFSET, tsc_offset),
765         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
766         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
767         FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
768         FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
769         FIELD64(EPT_POINTER, ept_pointer),
770         FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
771         FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
772         FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
773         FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
774         FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
775         FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
776         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
777         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
778         FIELD64(PML_ADDRESS, pml_address),
779         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
780         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
781         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
782         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
783         FIELD64(GUEST_PDPTR0, guest_pdptr0),
784         FIELD64(GUEST_PDPTR1, guest_pdptr1),
785         FIELD64(GUEST_PDPTR2, guest_pdptr2),
786         FIELD64(GUEST_PDPTR3, guest_pdptr3),
787         FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
788         FIELD64(HOST_IA32_PAT, host_ia32_pat),
789         FIELD64(HOST_IA32_EFER, host_ia32_efer),
790         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
791         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
792         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
793         FIELD(EXCEPTION_BITMAP, exception_bitmap),
794         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
795         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
796         FIELD(CR3_TARGET_COUNT, cr3_target_count),
797         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
798         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
799         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
800         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
801         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
802         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
803         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
804         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
805         FIELD(TPR_THRESHOLD, tpr_threshold),
806         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
807         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
808         FIELD(VM_EXIT_REASON, vm_exit_reason),
809         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
810         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
811         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
812         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
813         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
814         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
815         FIELD(GUEST_ES_LIMIT, guest_es_limit),
816         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
817         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
818         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
819         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
820         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
821         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
822         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
823         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
824         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
825         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
826         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
827         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
828         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
829         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
830         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
831         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
832         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
833         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
834         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
835         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
836         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
837         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
838         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
839         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
840         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
841         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
842         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
843         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
844         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
845         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
846         FIELD(EXIT_QUALIFICATION, exit_qualification),
847         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
848         FIELD(GUEST_CR0, guest_cr0),
849         FIELD(GUEST_CR3, guest_cr3),
850         FIELD(GUEST_CR4, guest_cr4),
851         FIELD(GUEST_ES_BASE, guest_es_base),
852         FIELD(GUEST_CS_BASE, guest_cs_base),
853         FIELD(GUEST_SS_BASE, guest_ss_base),
854         FIELD(GUEST_DS_BASE, guest_ds_base),
855         FIELD(GUEST_FS_BASE, guest_fs_base),
856         FIELD(GUEST_GS_BASE, guest_gs_base),
857         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
858         FIELD(GUEST_TR_BASE, guest_tr_base),
859         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
860         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
861         FIELD(GUEST_DR7, guest_dr7),
862         FIELD(GUEST_RSP, guest_rsp),
863         FIELD(GUEST_RIP, guest_rip),
864         FIELD(GUEST_RFLAGS, guest_rflags),
865         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
866         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
867         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
868         FIELD(HOST_CR0, host_cr0),
869         FIELD(HOST_CR3, host_cr3),
870         FIELD(HOST_CR4, host_cr4),
871         FIELD(HOST_FS_BASE, host_fs_base),
872         FIELD(HOST_GS_BASE, host_gs_base),
873         FIELD(HOST_TR_BASE, host_tr_base),
874         FIELD(HOST_GDTR_BASE, host_gdtr_base),
875         FIELD(HOST_IDTR_BASE, host_idtr_base),
876         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
877         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
878         FIELD(HOST_RSP, host_rsp),
879         FIELD(HOST_RIP, host_rip),
880 };
881
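/*
 * Map a VMCS field encoding to its byte offset inside struct vmcs12, or
 * return -ENOENT for unknown fields.  'field' originates from the guest, so
 * the table index is sanitized with array_index_nospec() to block Spectre-v1
 * style speculative out-of-bounds reads.  An offset of 0 means "no entry":
 * no supported field lives at offset 0 (that is vmcs12->revision_id).
 */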
882 static inline short vmcs_field_to_offset(unsigned long field)
883 {
884         const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
885         unsigned short offset;
886         unsigned index;
887
888         if (field >> 15)
889                 return -ENOENT;
890
891         index = ROL16(field, 6);
892         if (index >= size)
893                 return -ENOENT;
894
895         index = array_index_nospec(index, size);
896         offset = vmcs_field_to_offset_table[index];
897         if (offset == 0)
898                 return -ENOENT;
899         return offset;
900 }
901
902 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
903 {
904         return to_vmx(vcpu)->nested.cached_vmcs12;
905 }
906
907 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
908 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
909 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
910 static bool vmx_xsaves_supported(void);
911 static void vmx_set_segment(struct kvm_vcpu *vcpu,
912                             struct kvm_segment *var, int seg);
913 static void vmx_get_segment(struct kvm_vcpu *vcpu,
914                             struct kvm_segment *var, int seg);
915 static bool guest_state_valid(struct kvm_vcpu *vcpu);
916 static u32 vmx_segment_access_rights(struct kvm_segment *var);
917 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
918 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
919 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
920 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
921                                             u16 error_code);
922 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
923 static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
924                                                           u32 msr, int type);
925
926 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
927 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
928 /*
929  * We maintain a per-CPU linked-list of VMCSs loaded on that CPU. This is needed
930  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
931  */
932 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
933
934 /*
935  * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
936  * can find which vCPU should be woken up.
937  */
938 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
939 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
940
941 enum {
942         VMX_VMREAD_BITMAP,
943         VMX_VMWRITE_BITMAP,
944         VMX_BITMAP_NR
945 };
946
947 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
948
949 #define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
950 #define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
951
952 static bool cpu_has_load_ia32_efer;
953 static bool cpu_has_load_perf_global_ctrl;
954
955 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
956 static DEFINE_SPINLOCK(vmx_vpid_lock);
957
958 static struct vmcs_config {
959         int size;
960         int order;
961         u32 basic_cap;
962         u32 revision_id;
963         u32 pin_based_exec_ctrl;
964         u32 cpu_based_exec_ctrl;
965         u32 cpu_based_2nd_exec_ctrl;
966         u32 vmexit_ctrl;
967         u32 vmentry_ctrl;
968         struct nested_vmx_msrs nested;
969 } vmcs_config;
970
971 static struct vmx_capability {
972         u32 ept;
973         u32 vpid;
974 } vmx_capability;
975
976 #define VMX_SEGMENT_FIELD(seg)                                  \
977         [VCPU_SREG_##seg] = {                                   \
978                 .selector = GUEST_##seg##_SELECTOR,             \
979                 .base = GUEST_##seg##_BASE,                     \
980                 .limit = GUEST_##seg##_LIMIT,                   \
981                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
982         }
983
984 static const struct kvm_vmx_segment_field {
985         unsigned selector;
986         unsigned base;
987         unsigned limit;
988         unsigned ar_bytes;
989 } kvm_vmx_segment_fields[] = {
990         VMX_SEGMENT_FIELD(CS),
991         VMX_SEGMENT_FIELD(DS),
992         VMX_SEGMENT_FIELD(ES),
993         VMX_SEGMENT_FIELD(FS),
994         VMX_SEGMENT_FIELD(GS),
995         VMX_SEGMENT_FIELD(SS),
996         VMX_SEGMENT_FIELD(TR),
997         VMX_SEGMENT_FIELD(LDTR),
998 };
999
1000 static u64 host_efer;
1001
1002 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1003
1004 /*
1005  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
1006  * away by decrementing the array size.
1007  */
1008 static const u32 vmx_msr_index[] = {
1009 #ifdef CONFIG_X86_64
1010         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
1011 #endif
1012         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
1013 };
1014
1015 DEFINE_STATIC_KEY_FALSE(enable_evmcs);
1016
1017 #define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
1018
1019 #define KVM_EVMCS_VERSION 1
1020
1021 #if IS_ENABLED(CONFIG_HYPERV)
1022 static bool __read_mostly enlightened_vmcs = true;
1023 module_param(enlightened_vmcs, bool, 0444);
1024
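/*
 * Every enlightened VMCS field belongs to a "clean field" group.  Clearing
 * the group's bit in hv_clean_fields after a write tells the underlying
 * Hyper-V hypervisor that the group changed and must be re-read on the next
 * enlightened VM entry (see the Hyper-V TLFS).
 */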
1025 static inline void evmcs_write64(unsigned long field, u64 value)
1026 {
1027         u16 clean_field;
1028         int offset = get_evmcs_offset(field, &clean_field);
1029
1030         if (offset < 0)
1031                 return;
1032
1033         *(u64 *)((char *)current_evmcs + offset) = value;
1034
1035         current_evmcs->hv_clean_fields &= ~clean_field;
1036 }
1037
1038 static inline void evmcs_write32(unsigned long field, u32 value)
1039 {
1040         u16 clean_field;
1041         int offset = get_evmcs_offset(field, &clean_field);
1042
1043         if (offset < 0)
1044                 return;
1045
1046         *(u32 *)((char *)current_evmcs + offset) = value;
1047         current_evmcs->hv_clean_fields &= ~clean_field;
1048 }
1049
1050 static inline void evmcs_write16(unsigned long field, u16 value)
1051 {
1052         u16 clean_field;
1053         int offset = get_evmcs_offset(field, &clean_field);
1054
1055         if (offset < 0)
1056                 return;
1057
1058         *(u16 *)((char *)current_evmcs + offset) = value;
1059         current_evmcs->hv_clean_fields &= ~clean_field;
1060 }
1061
1062 static inline u64 evmcs_read64(unsigned long field)
1063 {
1064         int offset = get_evmcs_offset(field, NULL);
1065
1066         if (offset < 0)
1067                 return 0;
1068
1069         return *(u64 *)((char *)current_evmcs + offset);
1070 }
1071
1072 static inline u32 evmcs_read32(unsigned long field)
1073 {
1074         int offset = get_evmcs_offset(field, NULL);
1075
1076         if (offset < 0)
1077                 return 0;
1078
1079         return *(u32 *)((char *)current_evmcs + offset);
1080 }
1081
1082 static inline u16 evmcs_read16(unsigned long field)
1083 {
1084         int offset = get_evmcs_offset(field, NULL);
1085
1086         if (offset < 0)
1087                 return 0;
1088
1089         return *(u16 *)((char *)current_evmcs + offset);
1090 }
1091
1092 static void evmcs_load(u64 phys_addr)
1093 {
1094         struct hv_vp_assist_page *vp_ap =
1095                 hv_get_vp_assist_page(smp_processor_id());
1096
1097         vp_ap->current_nested_vmcs = phys_addr;
1098         vp_ap->enlighten_vmentry = 1;
1099 }
1100
1101 static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
1102 {
1103         /*
1104          * Enlightened VMCSv1 doesn't support these:
1105          *
1106          *      POSTED_INTR_NV                  = 0x00000002,
1107          *      GUEST_INTR_STATUS               = 0x00000810,
1108          *      APIC_ACCESS_ADDR                = 0x00002014,
1109          *      POSTED_INTR_DESC_ADDR           = 0x00002016,
1110          *      EOI_EXIT_BITMAP0                = 0x0000201c,
1111          *      EOI_EXIT_BITMAP1                = 0x0000201e,
1112          *      EOI_EXIT_BITMAP2                = 0x00002020,
1113          *      EOI_EXIT_BITMAP3                = 0x00002022,
1114          */
1115         vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
1116         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1117                 ~SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1118         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1119                 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1120         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1121                 ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
1122
1123         /*
1124          *      GUEST_PML_INDEX                 = 0x00000812,
1125          *      PML_ADDRESS                     = 0x0000200e,
1126          */
1127         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_PML;
1128
1129         /*      VM_FUNCTION_CONTROL             = 0x00002018, */
1130         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
1131
1132         /*
1133          *      EPTP_LIST_ADDRESS               = 0x00002024,
1134          *      VMREAD_BITMAP                   = 0x00002026,
1135          *      VMWRITE_BITMAP                  = 0x00002028,
1136          */
1137         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_SHADOW_VMCS;
1138
1139         /*
1140          *      TSC_MULTIPLIER                  = 0x00002032,
1141          */
1142         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_TSC_SCALING;
1143
1144         /*
1145          *      PLE_GAP                         = 0x00004020,
1146          *      PLE_WINDOW                      = 0x00004022,
1147          */
1148         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1149
1150         /*
1151          *      VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
1152          */
1153         vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
1154
1155         /*
1156          *      GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
1157          *      HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
1158          */
1159         vmcs_conf->vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
1160         vmcs_conf->vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
1161
1162         /*
1163          * Currently unsupported in KVM:
1164          *      GUEST_IA32_RTIT_CTL             = 0x00002814,
1165          */
1166 }
1167 #else /* !IS_ENABLED(CONFIG_HYPERV) */
1168 static inline void evmcs_write64(unsigned long field, u64 value) {}
1169 static inline void evmcs_write32(unsigned long field, u32 value) {}
1170 static inline void evmcs_write16(unsigned long field, u16 value) {}
1171 static inline u64 evmcs_read64(unsigned long field) { return 0; }
1172 static inline u32 evmcs_read32(unsigned long field) { return 0; }
1173 static inline u16 evmcs_read16(unsigned long field) { return 0; }
1174 static inline void evmcs_load(u64 phys_addr) {}
1175 static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
1176 #endif /* IS_ENABLED(CONFIG_HYPERV) */
1177
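/*
 * Helpers for decoding the VM-exit interruption-information field (SDM):
 * bits 7:0 hold the vector, bits 10:8 the interruption type, bit 11 the
 * error-code-valid flag and bit 31 the valid bit.
 */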
1178 static inline bool is_exception_n(u32 intr_info, u8 vector)
1179 {
1180         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1181                              INTR_INFO_VALID_MASK)) ==
1182                 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1183 }
1184
1185 static inline bool is_debug(u32 intr_info)
1186 {
1187         return is_exception_n(intr_info, DB_VECTOR);
1188 }
1189
1190 static inline bool is_breakpoint(u32 intr_info)
1191 {
1192         return is_exception_n(intr_info, BP_VECTOR);
1193 }
1194
1195 static inline bool is_page_fault(u32 intr_info)
1196 {
1197         return is_exception_n(intr_info, PF_VECTOR);
1198 }
1199
1200 static inline bool is_no_device(u32 intr_info)
1201 {
1202         return is_exception_n(intr_info, NM_VECTOR);
1203 }
1204
1205 static inline bool is_invalid_opcode(u32 intr_info)
1206 {
1207         return is_exception_n(intr_info, UD_VECTOR);
1208 }
1209
1210 static inline bool is_gp_fault(u32 intr_info)
1211 {
1212         return is_exception_n(intr_info, GP_VECTOR);
1213 }
1214
1215 static inline bool is_external_interrupt(u32 intr_info)
1216 {
1217         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1218                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1219 }
1220
1221 static inline bool is_machine_check(u32 intr_info)
1222 {
1223         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1224                              INTR_INFO_VALID_MASK)) ==
1225                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1226 }
1227
1228 /* Undocumented: icebp/int1 */
1229 static inline bool is_icebp(u32 intr_info)
1230 {
1231         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1232                 == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
1233 }
1234
1235 static inline bool cpu_has_vmx_msr_bitmap(void)
1236 {
1237         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1238 }
1239
1240 static inline bool cpu_has_vmx_tpr_shadow(void)
1241 {
1242         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1243 }
1244
1245 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1246 {
1247         return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1248 }
1249
1250 static inline bool cpu_has_secondary_exec_ctrls(void)
1251 {
1252         return vmcs_config.cpu_based_exec_ctrl &
1253                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1254 }
1255
1256 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1257 {
1258         return vmcs_config.cpu_based_2nd_exec_ctrl &
1259                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1260 }
1261
1262 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1263 {
1264         return vmcs_config.cpu_based_2nd_exec_ctrl &
1265                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1266 }
1267
1268 static inline bool cpu_has_vmx_apic_register_virt(void)
1269 {
1270         return vmcs_config.cpu_based_2nd_exec_ctrl &
1271                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1272 }
1273
1274 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1275 {
1276         return vmcs_config.cpu_based_2nd_exec_ctrl &
1277                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1278 }
1279
1280 /*
1281  * Comment format: document - errata name - stepping - processor name.
1282  * Taken from
1283  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1284  */
1285 static u32 vmx_preemption_cpu_tfms[] = {
1286 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
1287 0x000206E6,
1288 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
1289 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1290 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1291 0x00020652,
1292 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1293 0x00020655,
1294 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
1295 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
1296 /*
1297  * 320767.pdf - AAP86  - B1 -
1298  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1299  */
1300 0x000106E5,
1301 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1302 0x000106A0,
1303 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1304 0x000106A1,
1305 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1306 0x000106A4,
1307  /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1308  /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1309  /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1310 0x000106A5,
1311 };
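/*
 * The values above are raw CPUID.01H:EAX family/model/stepping signatures;
 * e.g. 0x000206E6 decodes to family 6, model 0x2E, stepping 6.
 */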
1312
1313 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1314 {
1315         u32 eax = cpuid_eax(0x00000001), i;
1316
1317         /* Clear the reserved bits */
1318         eax &= ~(0x3U << 14 | 0xfU << 28);
1319         for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1320                 if (eax == vmx_preemption_cpu_tfms[i])
1321                         return true;
1322
1323         return false;
1324 }
1325
1326 static inline bool cpu_has_vmx_preemption_timer(void)
1327 {
1328         return vmcs_config.pin_based_exec_ctrl &
1329                 PIN_BASED_VMX_PREEMPTION_TIMER;
1330 }
1331
1332 static inline bool cpu_has_vmx_posted_intr(void)
1333 {
1334         return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1335                 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1336 }
1337
1338 static inline bool cpu_has_vmx_apicv(void)
1339 {
1340         return cpu_has_vmx_apic_register_virt() &&
1341                 cpu_has_vmx_virtual_intr_delivery() &&
1342                 cpu_has_vmx_posted_intr();
1343 }
1344
1345 static inline bool cpu_has_vmx_flexpriority(void)
1346 {
1347         return cpu_has_vmx_tpr_shadow() &&
1348                 cpu_has_vmx_virtualize_apic_accesses();
1349 }
1350
1351 static inline bool cpu_has_vmx_ept_execute_only(void)
1352 {
1353         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1354 }
1355
1356 static inline bool cpu_has_vmx_ept_2m_page(void)
1357 {
1358         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1359 }
1360
1361 static inline bool cpu_has_vmx_ept_1g_page(void)
1362 {
1363         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1364 }
1365
1366 static inline bool cpu_has_vmx_ept_4levels(void)
1367 {
1368         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1369 }
1370
1371 static inline bool cpu_has_vmx_ept_mt_wb(void)
1372 {
1373         return vmx_capability.ept & VMX_EPTP_WB_BIT;
1374 }
1375
1376 static inline bool cpu_has_vmx_ept_5levels(void)
1377 {
1378         return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1379 }
1380
1381 static inline bool cpu_has_vmx_ept_ad_bits(void)
1382 {
1383         return vmx_capability.ept & VMX_EPT_AD_BIT;
1384 }
1385
1386 static inline bool cpu_has_vmx_invept_context(void)
1387 {
1388         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1389 }
1390
1391 static inline bool cpu_has_vmx_invept_global(void)
1392 {
1393         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1394 }
1395
1396 static inline bool cpu_has_vmx_invvpid_single(void)
1397 {
1398         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1399 }
1400
1401 static inline bool cpu_has_vmx_invvpid_global(void)
1402 {
1403         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1404 }
1405
1406 static inline bool cpu_has_vmx_invvpid(void)
1407 {
1408         return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1409 }
1410
1411 static inline bool cpu_has_vmx_ept(void)
1412 {
1413         return vmcs_config.cpu_based_2nd_exec_ctrl &
1414                 SECONDARY_EXEC_ENABLE_EPT;
1415 }
1416
1417 static inline bool cpu_has_vmx_unrestricted_guest(void)
1418 {
1419         return vmcs_config.cpu_based_2nd_exec_ctrl &
1420                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1421 }
1422
1423 static inline bool cpu_has_vmx_ple(void)
1424 {
1425         return vmcs_config.cpu_based_2nd_exec_ctrl &
1426                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1427 }
1428
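/*
 * basic_cap caches the high 32 bits of MSR_IA32_VMX_BASIC; shifting it back
 * up lets us test VMX_BASIC_INOUT (bit 54), which advertises that INS/OUTS
 * VM exits report instruction information.
 */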
1429 static inline bool cpu_has_vmx_basic_inout(void)
1430 {
1431         return  (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1432 }
1433
1434 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1435 {
1436         return flexpriority_enabled && lapic_in_kernel(vcpu);
1437 }
1438
1439 static inline bool cpu_has_vmx_vpid(void)
1440 {
1441         return vmcs_config.cpu_based_2nd_exec_ctrl &
1442                 SECONDARY_EXEC_ENABLE_VPID;
1443 }
1444
1445 static inline bool cpu_has_vmx_rdtscp(void)
1446 {
1447         return vmcs_config.cpu_based_2nd_exec_ctrl &
1448                 SECONDARY_EXEC_RDTSCP;
1449 }
1450
1451 static inline bool cpu_has_vmx_invpcid(void)
1452 {
1453         return vmcs_config.cpu_based_2nd_exec_ctrl &
1454                 SECONDARY_EXEC_ENABLE_INVPCID;
1455 }
1456
1457 static inline bool cpu_has_virtual_nmis(void)
1458 {
1459         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1460 }
1461
1462 static inline bool cpu_has_vmx_wbinvd_exit(void)
1463 {
1464         return vmcs_config.cpu_based_2nd_exec_ctrl &
1465                 SECONDARY_EXEC_WBINVD_EXITING;
1466 }
1467
1468 static inline bool cpu_has_vmx_shadow_vmcs(void)
1469 {
1470         u64 vmx_msr;
1471         rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1472         /* check if the cpu supports writing r/o exit information fields */
1473         if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1474                 return false;
1475
1476         return vmcs_config.cpu_based_2nd_exec_ctrl &
1477                 SECONDARY_EXEC_SHADOW_VMCS;
1478 }
1479
1480 static inline bool cpu_has_vmx_pml(void)
1481 {
1482         return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1483 }
1484
1485 static inline bool cpu_has_vmx_tsc_scaling(void)
1486 {
1487         return vmcs_config.cpu_based_2nd_exec_ctrl &
1488                 SECONDARY_EXEC_TSC_SCALING;
1489 }
1490
1491 static inline bool cpu_has_vmx_vmfunc(void)
1492 {
1493         return vmcs_config.cpu_based_2nd_exec_ctrl &
1494                 SECONDARY_EXEC_ENABLE_VMFUNC;
1495 }
1496
1497 static inline bool report_flexpriority(void)
1498 {
1499         return flexpriority_enabled;
1500 }
1501
1502 static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1503 {
1504         return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
1505 }
1506
1507 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1508 {
1509         return vmcs12->cpu_based_vm_exec_control & bit;
1510 }
1511
1512 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1513 {
1514         return (vmcs12->cpu_based_vm_exec_control &
1515                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1516                 (vmcs12->secondary_vm_exec_control & bit);
1517 }
1518
1519 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1520 {
1521         return vmcs12->pin_based_vm_exec_control &
1522                 PIN_BASED_VMX_PREEMPTION_TIMER;
1523 }
1524
1525 static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
1526 {
1527         return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
1528 }
1529
1530 static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1531 {
1532         return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1533 }
1534
1535 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1536 {
1537         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1538 }
1539
1540 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1541 {
1542         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
1543 }
1544
1545 static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
1546 {
1547         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
1548 }
1549
1550 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
1551 {
1552         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
1553 }
1554
1555 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
1556 {
1557         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
1558 }
1559
1560 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
1561 {
1562         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
1563 }
1564
1565 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
1566 {
1567         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
1568 }
1569
1570 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
1571 {
1572         return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
1573 }
1574
1575 static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
1576 {
1577         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
1578 }
1579
1580 static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
1581 {
1582         return nested_cpu_has_vmfunc(vmcs12) &&
1583                 (vmcs12->vm_function_control &
1584                  VMX_VMFUNC_EPTP_SWITCHING);
1585 }
1586
1587 static inline bool is_nmi(u32 intr_info)
1588 {
1589         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1590                 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
1591 }
1592
1593 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1594                               u32 exit_intr_info,
1595                               unsigned long exit_qualification);
1596 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1597                         struct vmcs12 *vmcs12,
1598                         u32 reason, unsigned long qualification);
1599
1600 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1601 {
1602         int i;
1603
1604         for (i = 0; i < vmx->nmsrs; ++i)
1605                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
1606                         return i;
1607         return -1;
1608 }
1609
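/*
 * Issue INVVPID to flush mappings tagged with the given VPID.  The
 * instruction takes a 128-bit descriptor in memory ({vpid, reserved,
 * linear address}) plus the invalidation type in a register; a failed
 * invalidation (CF or ZF set) falls through to ud2.
 */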
1610 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1611 {
1612         struct {
1613                 u64 vpid : 16;
1614                 u64 rsvd : 48;
1615                 u64 gva;
1616         } operand = { vpid, 0, gva };
1617
1618         asm volatile (__ex(ASM_VMX_INVVPID)
1619                       /* CF==1 or ZF==1 --> rc = -1 */
1620                       "; ja 1f ; ud2 ; 1:"
1621                       : : "a"(&operand), "c"(ext) : "cc", "memory");
1622 }
1623
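/*
 * Issue INVEPT to flush mappings derived from the given EPT pointer.  The
 * 128-bit descriptor holds the EPTP in its first quadword; the second
 * quadword is reserved and is passed as zero by all callers here.
 */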
1624 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1625 {
1626         struct {
1627                 u64 eptp, gpa;
1628         } operand = {eptp, gpa};
1629
1630         asm volatile (__ex(ASM_VMX_INVEPT)
1631                         /* CF==1 or ZF==1 --> rc = -1 */
1632                         "; ja 1f ; ud2 ; 1:\n"
1633                         : : "a" (&operand), "c" (ext) : "cc", "memory");
1634 }
1635
1636 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1637 {
1638         int i;
1639
1640         i = __find_msr_index(vmx, msr);
1641         if (i >= 0)
1642                 return &vmx->guest_msrs[i];
1643         return NULL;
1644 }
1645
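/*
 * VMCLEAR flushes the VMCS data of the referenced region to memory and
 * clears its launch state, making it safe to load the VMCS on another
 * CPU afterwards.
 */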
1646 static void vmcs_clear(struct vmcs *vmcs)
1647 {
1648         u64 phys_addr = __pa(vmcs);
1649         u8 error;
1650
1651         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
1652                       : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1653                       : "cc", "memory");
1654         if (error)
1655                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1656                        vmcs, phys_addr);
1657 }
1658
1659 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1660 {
1661         vmcs_clear(loaded_vmcs->vmcs);
1662         if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
1663                 vmcs_clear(loaded_vmcs->shadow_vmcs);
1664         loaded_vmcs->cpu = -1;
1665         loaded_vmcs->launched = 0;
1666 }
1667
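/*
 * VMPTRLD makes the referenced VMCS current and active on this CPU, so
 * that subsequent VMREAD/VMWRITE/VMLAUNCH/VMRESUME operate on it.  When
 * enlightened VMCS is enabled, the load is handled by evmcs_load() instead.
 */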
1668 static void vmcs_load(struct vmcs *vmcs)
1669 {
1670         u64 phys_addr = __pa(vmcs);
1671         u8 error;
1672
1673         if (static_branch_unlikely(&enable_evmcs))
1674                 return evmcs_load(phys_addr);
1675
1676         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1677                         : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1678                         : "cc", "memory");
1679         if (error)
1680                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1681                        vmcs, phys_addr);
1682 }
1683
1684 #ifdef CONFIG_KEXEC_CORE
1685 /*
1686  * This bitmap tracks, per CPU, whether the crash-time vmclear
1687  * operation is enabled.  It is disabled on all CPUs by
1688  * default.
1689  */
1690 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1691
1692 static inline void crash_enable_local_vmclear(int cpu)
1693 {
1694         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1695 }
1696
1697 static inline void crash_disable_local_vmclear(int cpu)
1698 {
1699         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1700 }
1701
1702 static inline int crash_local_vmclear_enabled(int cpu)
1703 {
1704         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1705 }
1706
1707 static void crash_vmclear_local_loaded_vmcss(void)
1708 {
1709         int cpu = raw_smp_processor_id();
1710         struct loaded_vmcs *v;
1711
1712         if (!crash_local_vmclear_enabled(cpu))
1713                 return;
1714
1715         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1716                             loaded_vmcss_on_cpu_link)
1717                 vmcs_clear(v->vmcs);
1718 }
1719 #else
1720 static inline void crash_enable_local_vmclear(int cpu) { }
1721 static inline void crash_disable_local_vmclear(int cpu) { }
1722 #endif /* CONFIG_KEXEC_CORE */
1723
1724 static void __loaded_vmcs_clear(void *arg)
1725 {
1726         struct loaded_vmcs *loaded_vmcs = arg;
1727         int cpu = raw_smp_processor_id();
1728
1729         if (loaded_vmcs->cpu != cpu)
1730                 return; /* vcpu migration can race with cpu offline */
1731         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1732                 per_cpu(current_vmcs, cpu) = NULL;
1733         crash_disable_local_vmclear(cpu);
1734         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1735
1736         /*
1737          * Ensure the deletion from loaded_vmcss_on_cpu_link is visible
1738          * before loaded_vmcs->cpu is set to -1 in loaded_vmcs_init.
1739          * Otherwise, another CPU could observe cpu == -1 first and then
1740          * add the vmcs to its per-cpu list before it has been deleted here.
1741          */
1742         smp_wmb();
1743
1744         loaded_vmcs_init(loaded_vmcs);
1745         crash_enable_local_vmclear(cpu);
1746 }
1747
1748 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1749 {
1750         int cpu = loaded_vmcs->cpu;
1751
1752         if (cpu != -1)
1753                 smp_call_function_single(cpu,
1754                          __loaded_vmcs_clear, loaded_vmcs, 1);
1755 }
1756
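/*
 * VPID invalidation helpers.  VPID 0 is reserved for the host and is never
 * flushed here; single-context invalidation is preferred when the CPU
 * supports it, with a global flush as the fallback.
 */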
1757 static inline void vpid_sync_vcpu_single(int vpid)
1758 {
1759         if (vpid == 0)
1760                 return;
1761
1762         if (cpu_has_vmx_invvpid_single())
1763                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
1764 }
1765
1766 static inline void vpid_sync_vcpu_global(void)
1767 {
1768         if (cpu_has_vmx_invvpid_global())
1769                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
1770 }
1771
1772 static inline void vpid_sync_context(int vpid)
1773 {
1774         if (cpu_has_vmx_invvpid_single())
1775                 vpid_sync_vcpu_single(vpid);
1776         else
1777                 vpid_sync_vcpu_global();
1778 }
1779
1780 static inline void ept_sync_global(void)
1781 {
1782         __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
1783 }
1784
1785 static inline void ept_sync_context(u64 eptp)
1786 {
1787         if (cpu_has_vmx_invept_context())
1788                 __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
1789         else
1790                 ept_sync_global();
1791 }
1792
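/*
 * VMCS field encodings use bits 14:13 for the field width (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width) and bit 0 as the high-dword
 * access type for 64-bit fields; that is what the 0x6000/0x6001 masks in
 * the compile-time checks below decode.
 */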
1793 static __always_inline void vmcs_check16(unsigned long field)
1794 {
1795         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1796                          "16-bit accessor invalid for 64-bit field");
1797         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1798                          "16-bit accessor invalid for 64-bit high field");
1799         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1800                          "16-bit accessor invalid for 32-bit high field");
1801         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1802                          "16-bit accessor invalid for natural width field");
1803 }
1804
1805 static __always_inline void vmcs_check32(unsigned long field)
1806 {
1807         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1808                          "32-bit accessor invalid for 16-bit field");
1809         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1810                          "32-bit accessor invalid for natural width field");
1811 }
1812
1813 static __always_inline void vmcs_check64(unsigned long field)
1814 {
1815         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1816                          "64-bit accessor invalid for 16-bit field");
1817         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1818                          "64-bit accessor invalid for 64-bit high field");
1819         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1820                          "64-bit accessor invalid for 32-bit field");
1821         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1822                          "64-bit accessor invalid for natural width field");
1823 }
1824
1825 static __always_inline void vmcs_checkl(unsigned long field)
1826 {
1827         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1828                          "Natural width accessor invalid for 16-bit field");
1829         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1830                          "Natural width accessor invalid for 64-bit field");
1831         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1832                          "Natural width accessor invalid for 64-bit high field");
1833         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1834                          "Natural width accessor invalid for 32-bit field");
1835 }
1836
1837 static __always_inline unsigned long __vmcs_readl(unsigned long field)
1838 {
1839         unsigned long value;
1840
1841         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
1842                       : "=a"(value) : "d"(field) : "cc");
1843         return value;
1844 }
1845
1846 static __always_inline u16 vmcs_read16(unsigned long field)
1847 {
1848         vmcs_check16(field);
1849         if (static_branch_unlikely(&enable_evmcs))
1850                 return evmcs_read16(field);
1851         return __vmcs_readl(field);
1852 }
1853
1854 static __always_inline u32 vmcs_read32(unsigned long field)
1855 {
1856         vmcs_check32(field);
1857         if (static_branch_unlikely(&enable_evmcs))
1858                 return evmcs_read32(field);
1859         return __vmcs_readl(field);
1860 }
1861
1862 static __always_inline u64 vmcs_read64(unsigned long field)
1863 {
1864         vmcs_check64(field);
1865         if (static_branch_unlikely(&enable_evmcs))
1866                 return evmcs_read64(field);
1867 #ifdef CONFIG_X86_64
1868         return __vmcs_readl(field);
1869 #else
1870         return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
1871 #endif
1872 }
1873
1874 static __always_inline unsigned long vmcs_readl(unsigned long field)
1875 {
1876         vmcs_checkl(field);
1877         if (static_branch_unlikely(&enable_evmcs))
1878                 return evmcs_read64(field);
1879         return __vmcs_readl(field);
1880 }
1881
1882 static noinline void vmwrite_error(unsigned long field, unsigned long value)
1883 {
1884         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
1885                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
1886         dump_stack();
1887 }
1888
1889 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
1890 {
1891         u8 error;
1892
1893         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1894                        : "=q"(error) : "a"(value), "d"(field) : "cc");
1895         if (unlikely(error))
1896                 vmwrite_error(field, value);
1897 }
1898
1899 static __always_inline void vmcs_write16(unsigned long field, u16 value)
1900 {
1901         vmcs_check16(field);
1902         if (static_branch_unlikely(&enable_evmcs))
1903                 return evmcs_write16(field, value);
1904
1905         __vmcs_writel(field, value);
1906 }
1907
1908 static __always_inline void vmcs_write32(unsigned long field, u32 value)
1909 {
1910         vmcs_check32(field);
1911         if (static_branch_unlikely(&enable_evmcs))
1912                 return evmcs_write32(field, value);
1913
1914         __vmcs_writel(field, value);
1915 }
1916
1917 static __always_inline void vmcs_write64(unsigned long field, u64 value)
1918 {
1919         vmcs_check64(field);
1920         if (static_branch_unlikely(&enable_evmcs))
1921                 return evmcs_write64(field, value);
1922
1923         __vmcs_writel(field, value);
1924 #ifndef CONFIG_X86_64
1925         asm volatile ("");
1926         __vmcs_writel(field+1, value >> 32);
1927 #endif
1928 }
1929
1930 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
1931 {
1932         vmcs_checkl(field);
1933         if (static_branch_unlikely(&enable_evmcs))
1934                 return evmcs_write64(field, value);
1935
1936         __vmcs_writel(field, value);
1937 }
1938
1939 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
1940 {
1941         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1942                          "vmcs_clear_bits does not support 64-bit fields");
1943         if (static_branch_unlikely(&enable_evmcs))
1944                 return evmcs_write32(field, evmcs_read32(field) & ~mask);
1945
1946         __vmcs_writel(field, __vmcs_readl(field) & ~mask);
1947 }
1948
1949 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
1950 {
1951         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1952                          "vmcs_set_bits does not support 64-bit fields");
1953         if (static_branch_unlikely(&enable_evmcs))
1954                 return evmcs_write32(field, evmcs_read32(field) | mask);
1955
1956         __vmcs_writel(field, __vmcs_readl(field) | mask);
1957 }
1958
1959 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
1960 {
1961         vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
1962 }
1963
1964 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
1965 {
1966         vmcs_write32(VM_ENTRY_CONTROLS, val);
1967         vmx->vm_entry_controls_shadow = val;
1968 }
1969
1970 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
1971 {
1972         if (vmx->vm_entry_controls_shadow != val)
1973                 vm_entry_controls_init(vmx, val);
1974 }
1975
1976 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
1977 {
1978         return vmx->vm_entry_controls_shadow;
1979 }
1980
1981
1982 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1983 {
1984         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
1985 }
1986
1987 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1988 {
1989         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
1990 }
1991
1992 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
1993 {
1994         vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
1995 }
1996
1997 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
1998 {
1999         vmcs_write32(VM_EXIT_CONTROLS, val);
2000         vmx->vm_exit_controls_shadow = val;
2001 }
2002
2003 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
2004 {
2005         if (vmx->vm_exit_controls_shadow != val)
2006                 vm_exit_controls_init(vmx, val);
2007 }
2008
2009 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
2010 {
2011         return vmx->vm_exit_controls_shadow;
2012 }
2013
2014
2015 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2016 {
2017         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
2018 }
2019
2020 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2021 {
2022         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
2023 }
2024
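/*
 * Guest segment cache: the selector, base, limit and access rights read
 * from the VMCS are cached per segment to avoid repeated VMREADs.  The
 * bitmask tracks which fields are currently valid and is reset together
 * with the VCPU_EXREG_SEGMENTS bit in regs_avail.
 */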
2025 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
2026 {
2027         vmx->segment_cache.bitmask = 0;
2028 }
2029
2030 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
2031                                        unsigned field)
2032 {
2033         bool ret;
2034         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
2035
2036         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
2037                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
2038                 vmx->segment_cache.bitmask = 0;
2039         }
2040         ret = vmx->segment_cache.bitmask & mask;
2041         vmx->segment_cache.bitmask |= mask;
2042         return ret;
2043 }
2044
2045 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
2046 {
2047         u16 *p = &vmx->segment_cache.seg[seg].selector;
2048
2049         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
2050                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
2051         return *p;
2052 }
2053
2054 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
2055 {
2056         ulong *p = &vmx->segment_cache.seg[seg].base;
2057
2058         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
2059                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
2060         return *p;
2061 }
2062
2063 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
2064 {
2065         u32 *p = &vmx->segment_cache.seg[seg].limit;
2066
2067         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
2068                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
2069         return *p;
2070 }
2071
2072 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
2073 {
2074         u32 *p = &vmx->segment_cache.seg[seg].ar;
2075
2076         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
2077                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
2078         return *p;
2079 }
2080
2081 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
2082 {
2083         u32 eb;
2084
2085         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
2086              (1u << DB_VECTOR) | (1u << AC_VECTOR);
2087         /*
2088          * Guest access to VMware backdoor ports could legitimately
2089          * trigger #GP because of TSS I/O permission bitmap.
2090          * We intercept those #GP and allow access to them anyway
2091          * as VMware does.
2092          */
2093         if (enable_vmware_backdoor)
2094                 eb |= (1u << GP_VECTOR);
2095         if ((vcpu->guest_debug &
2096              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
2097             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
2098                 eb |= 1u << BP_VECTOR;
2099         if (to_vmx(vcpu)->rmode.vm86_active)
2100                 eb = ~0;
2101         if (enable_ept)
2102                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
2103
2104         /* When we are running a nested L2 guest and L1 specified for it a
2105          * certain exception bitmap, we must trap the same exceptions and pass
2106          * them to L1. When running L2, we will only handle the exceptions
2107          * specified above if L1 did not want them.
2108          */
2109         if (is_guest_mode(vcpu))
2110                 eb |= get_vmcs12(vcpu)->exception_bitmap;
2111
2112         vmcs_write32(EXCEPTION_BITMAP, eb);
2113 }
2114
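/*
 * The 4K MSR bitmap covers MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff; the write bitmaps for the two ranges start at
 * offsets 0x800 and 0xc00 respectively, which is where the lookups below
 * index into.
 */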
2115 /*
2116  * Check if a write to the MSR is intercepted for the currently loaded MSR bitmap.
2117  */
2118 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
2119 {
2120         unsigned long *msr_bitmap;
2121         int f = sizeof(unsigned long);
2122
2123         if (!cpu_has_vmx_msr_bitmap())
2124                 return true;
2125
2126         msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
2127
2128         if (msr <= 0x1fff) {
2129                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2130         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2131                 msr &= 0x1fff;
2132                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2133         }
2134
2135         return true;
2136 }
2137
2138 /*
2139  * Check if a write to the MSR is intercepted for the L01 MSR bitmap.
2140  */
2141 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
2142 {
2143         unsigned long *msr_bitmap;
2144         int f = sizeof(unsigned long);
2145
2146         if (!cpu_has_vmx_msr_bitmap())
2147                 return true;
2148
2149         msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
2150
2151         if (msr <= 0x1fff) {
2152                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2153         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2154                 msr &= 0x1fff;
2155                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2156         }
2157
2158         return true;
2159 }
2160
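/*
 * The *_atomic_switch_msr() helpers maintain the VM-entry/VM-exit MSR
 * load/store lists.  For EFER and PERF_GLOBAL_CTRL, the dedicated VMCS
 * controls are used instead when the CPU supports them, which avoids
 * consuming autoload list entries.
 */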
2161 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2162                 unsigned long entry, unsigned long exit)
2163 {
2164         vm_entry_controls_clearbit(vmx, entry);
2165         vm_exit_controls_clearbit(vmx, exit);
2166 }
2167
2168 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
2169 {
2170         unsigned i;
2171         struct msr_autoload *m = &vmx->msr_autoload;
2172
2173         switch (msr) {
2174         case MSR_EFER:
2175                 if (cpu_has_load_ia32_efer) {
2176                         clear_atomic_switch_msr_special(vmx,
2177                                         VM_ENTRY_LOAD_IA32_EFER,
2178                                         VM_EXIT_LOAD_IA32_EFER);
2179                         return;
2180                 }
2181                 break;
2182         case MSR_CORE_PERF_GLOBAL_CTRL:
2183                 if (cpu_has_load_perf_global_ctrl) {
2184                         clear_atomic_switch_msr_special(vmx,
2185                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2186                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2187                         return;
2188                 }
2189                 break;
2190         }
2191
2192         for (i = 0; i < m->nr; ++i)
2193                 if (m->guest[i].index == msr)
2194                         break;
2195
2196         if (i == m->nr)
2197                 return;
2198         --m->nr;
2199         m->guest[i] = m->guest[m->nr];
2200         m->host[i] = m->host[m->nr];
2201         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
2202         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
2203 }
2204
2205 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2206                 unsigned long entry, unsigned long exit,
2207                 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
2208                 u64 guest_val, u64 host_val)
2209 {
2210         vmcs_write64(guest_val_vmcs, guest_val);
2211         vmcs_write64(host_val_vmcs, host_val);
2212         vm_entry_controls_setbit(vmx, entry);
2213         vm_exit_controls_setbit(vmx, exit);
2214 }
2215
2216 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
2217                                   u64 guest_val, u64 host_val)
2218 {
2219         unsigned i;
2220         struct msr_autoload *m = &vmx->msr_autoload;
2221
2222         switch (msr) {
2223         case MSR_EFER:
2224                 if (cpu_has_load_ia32_efer) {
2225                         add_atomic_switch_msr_special(vmx,
2226                                         VM_ENTRY_LOAD_IA32_EFER,
2227                                         VM_EXIT_LOAD_IA32_EFER,
2228                                         GUEST_IA32_EFER,
2229                                         HOST_IA32_EFER,
2230                                         guest_val, host_val);
2231                         return;
2232                 }
2233                 break;
2234         case MSR_CORE_PERF_GLOBAL_CTRL:
2235                 if (cpu_has_load_perf_global_ctrl) {
2236                         add_atomic_switch_msr_special(vmx,
2237                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2238                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2239                                         GUEST_IA32_PERF_GLOBAL_CTRL,
2240                                         HOST_IA32_PERF_GLOBAL_CTRL,
2241                                         guest_val, host_val);
2242                         return;
2243                 }
2244                 break;
2245         case MSR_IA32_PEBS_ENABLE:
2246                 /* PEBS needs a quiescent period after being disabled (to write
2247                  * a record).  Disabling PEBS through VMX MSR swapping doesn't
2248                  * provide that period, so a CPU could write host's record into
2249                  * guest's memory.
2250                  */
2251                 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
2252         }
2253
2254         for (i = 0; i < m->nr; ++i)
2255                 if (m->guest[i].index == msr)
2256                         break;
2257
2258         if (i == NR_AUTOLOAD_MSRS) {
2259                 printk_once(KERN_WARNING "Not enough msr switch entries. "
2260                                 "Can't add msr %x\n", msr);
2261                 return;
2262         } else if (i == m->nr) {
2263                 ++m->nr;
2264                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
2265                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
2266         }
2267
2268         m->guest[i].index = msr;
2269         m->guest[i].value = guest_val;
2270         m->host[i].index = msr;
2271         m->host[i].value = host_val;
2272 }
2273
2274 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
2275 {
2276         u64 guest_efer = vmx->vcpu.arch.efer;
2277         u64 ignore_bits = 0;
2278
2279         if (!enable_ept) {
2280                 /*
2281                  * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
2282                  * host CPUID is more efficient than testing guest CPUID
2283                  * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
2284                  */
2285                 if (boot_cpu_has(X86_FEATURE_SMEP))
2286                         guest_efer |= EFER_NX;
2287                 else if (!(guest_efer & EFER_NX))
2288                         ignore_bits |= EFER_NX;
2289         }
2290
2291         /*
2292          * LMA and LME handled by hardware; SCE meaningless outside long mode.
2293          */
2294         ignore_bits |= EFER_SCE;
2295 #ifdef CONFIG_X86_64
2296         ignore_bits |= EFER_LMA | EFER_LME;
2297         /* SCE is meaningful only in long mode on Intel */
2298         if (guest_efer & EFER_LMA)
2299                 ignore_bits &= ~(u64)EFER_SCE;
2300 #endif
2301
2302         clear_atomic_switch_msr(vmx, MSR_EFER);
2303
2304         /*
2305          * On EPT, we can't emulate NX, so we must switch EFER atomically.
2306          * On CPUs that support "load IA32_EFER", always switch EFER
2307          * atomically, since it's faster than switching it manually.
2308          */
2309         if (cpu_has_load_ia32_efer ||
2310             (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
2311                 if (!(guest_efer & EFER_LMA))
2312                         guest_efer &= ~EFER_LME;
2313                 if (guest_efer != host_efer)
2314                         add_atomic_switch_msr(vmx, MSR_EFER,
2315                                               guest_efer, host_efer);
2316                 return false;
2317         } else {
2318                 guest_efer &= ~ignore_bits;
2319                 guest_efer |= host_efer & ignore_bits;
2320
2321                 vmx->guest_msrs[efer_offset].data = guest_efer;
2322                 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
2323
2324                 return true;
2325         }
2326 }
2327
2328 #ifdef CONFIG_X86_32
2329 /*
2330  * On 32-bit kernels, VM exits still load the FS and GS bases from the
2331  * VMCS rather than the segment table.  KVM uses this helper to figure
2332  * out the current bases to poke them into the VMCS before entry.
2333  */
2334 static unsigned long segment_base(u16 selector)
2335 {
2336         struct desc_struct *table;
2337         unsigned long v;
2338
2339         if (!(selector & ~SEGMENT_RPL_MASK))
2340                 return 0;
2341
2342         table = get_current_gdt_ro();
2343
2344         if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2345                 u16 ldt_selector = kvm_read_ldt();
2346
2347                 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
2348                         return 0;
2349
2350                 table = (struct desc_struct *)segment_base(ldt_selector);
2351         }
2352         v = get_desc_base(&table[selector >> 3]);
2353         return v;
2354 }
2355 #endif
2356
2357 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
2358 {
2359         struct vcpu_vmx *vmx = to_vmx(vcpu);
2360 #ifdef CONFIG_X86_64
2361         int cpu = raw_smp_processor_id();
2362 #endif
2363         int i;
2364
2365         if (vmx->host_state.loaded)
2366                 return;
2367
2368         vmx->host_state.loaded = 1;
2369         /*
2370          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
2371          * allow segment selectors with cpl > 0 or ti == 1.
2372          */
2373         vmx->host_state.ldt_sel = kvm_read_ldt();
2374         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
2375
2376 #ifdef CONFIG_X86_64
2377         save_fsgs_for_kvm();
2378         vmx->host_state.fs_sel = current->thread.fsindex;
2379         vmx->host_state.gs_sel = current->thread.gsindex;
2380 #else
2381         savesegment(fs, vmx->host_state.fs_sel);
2382         savesegment(gs, vmx->host_state.gs_sel);
2383 #endif
2384         if (!(vmx->host_state.fs_sel & 7)) {
2385                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
2386                 vmx->host_state.fs_reload_needed = 0;
2387         } else {
2388                 vmcs_write16(HOST_FS_SELECTOR, 0);
2389                 vmx->host_state.fs_reload_needed = 1;
2390         }
2391         if (!(vmx->host_state.gs_sel & 7))
2392                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
2393         else {
2394                 vmcs_write16(HOST_GS_SELECTOR, 0);
2395                 vmx->host_state.gs_ldt_reload_needed = 1;
2396         }
2397
2398 #ifdef CONFIG_X86_64
2399         savesegment(ds, vmx->host_state.ds_sel);
2400         savesegment(es, vmx->host_state.es_sel);
2401
2402         vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
2403         vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));
2404
2405         vmx->msr_host_kernel_gs_base = current->thread.gsbase;
2406         if (is_long_mode(&vmx->vcpu))
2407                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2408 #else
2409         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
2410         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
2411 #endif
2412         if (boot_cpu_has(X86_FEATURE_MPX))
2413                 rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2414         for (i = 0; i < vmx->save_nmsrs; ++i)
2415                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2416                                    vmx->guest_msrs[i].data,
2417                                    vmx->guest_msrs[i].mask);
2418 }
2419
2420 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
2421 {
2422         if (!vmx->host_state.loaded)
2423                 return;
2424
2425         ++vmx->vcpu.stat.host_state_reload;
2426         vmx->host_state.loaded = 0;
2427 #ifdef CONFIG_X86_64
2428         if (is_long_mode(&vmx->vcpu))
2429                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2430 #endif
2431         if (vmx->host_state.gs_ldt_reload_needed) {
2432                 kvm_load_ldt(vmx->host_state.ldt_sel);
2433 #ifdef CONFIG_X86_64
2434                 load_gs_index(vmx->host_state.gs_sel);
2435 #else
2436                 loadsegment(gs, vmx->host_state.gs_sel);
2437 #endif
2438         }
2439         if (vmx->host_state.fs_reload_needed)
2440                 loadsegment(fs, vmx->host_state.fs_sel);
2441 #ifdef CONFIG_X86_64
2442         if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
2443                 loadsegment(ds, vmx->host_state.ds_sel);
2444                 loadsegment(es, vmx->host_state.es_sel);
2445         }
2446 #endif
2447         invalidate_tss_limit();
2448 #ifdef CONFIG_X86_64
2449         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2450 #endif
2451         if (vmx->host_state.msr_host_bndcfgs)
2452                 wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2453         load_fixmap_gdt(raw_smp_processor_id());
2454 }
2455
2456 static void vmx_load_host_state(struct vcpu_vmx *vmx)
2457 {
2458         preempt_disable();
2459         __vmx_load_host_state(vmx);
2460         preempt_enable();
2461 }
2462
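/*
 * Keep the posted-interrupt descriptor's notification destination (NDST)
 * pointing at the CPU this vCPU runs on, so that assigned devices post
 * interrupts to the right place, and clear SN (suppress notification)
 * once the vCPU is loaded.
 */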
2463 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2464 {
2465         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2466         struct pi_desc old, new;
2467         unsigned int dest;
2468
2469         /*
2470          * In case of hot-plug or hot-unplug, we may have to undo
2471          * vmx_vcpu_pi_put even if there is no assigned device.  And we
2472          * always keep PI.NDST up to date for simplicity: it makes the
2473          * code easier, and CPU migration is not a fast path.
2474          */
2475         if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
2476                 return;
2477
2478         /*
2479          * First handle the simple case where no cmpxchg is necessary; just
2480          * allow posting non-urgent interrupts.
2481          *
2482          * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
2483          * PI.NDST: pi_post_block will do it for us and the wakeup_handler
2484          * expects the VCPU to be on the blocked_vcpu_list that matches
2485          * PI.NDST.
2486          */
2487         if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
2488             vcpu->cpu == cpu) {
2489                 pi_clear_sn(pi_desc);
2490                 return;
2491         }
2492
2493         /* The full case.  */
2494         do {
2495                 old.control = new.control = pi_desc->control;
2496
2497                 dest = cpu_physical_id(cpu);
2498
2499                 if (x2apic_enabled())
2500                         new.ndst = dest;
2501                 else
2502                         new.ndst = (dest << 8) & 0xFF00;
2503
2504                 new.sn = 0;
2505         } while (cmpxchg64(&pi_desc->control, old.control,
2506                            new.control) != old.control);
2507 }
2508
2509 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
2510 {
2511         vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
2512         vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2513 }
2514
2515 /*
2516  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
2517  * vcpu mutex is already taken.
2518  */
2519 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2520 {
2521         struct vcpu_vmx *vmx = to_vmx(vcpu);
2522         bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
2523
2524         if (!already_loaded) {
2525                 loaded_vmcs_clear(vmx->loaded_vmcs);
2526                 local_irq_disable();
2527                 crash_disable_local_vmclear(cpu);
2528
2529                 /*
2530                  * The read of loaded_vmcs->cpu must happen before fetching
2531                  * loaded_vmcs->loaded_vmcss_on_cpu_link.
2532                  * See the comment in __loaded_vmcs_clear().
2533                  */
2534                 smp_rmb();
2535
2536                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
2537                          &per_cpu(loaded_vmcss_on_cpu, cpu));
2538                 crash_enable_local_vmclear(cpu);
2539                 local_irq_enable();
2540         }
2541
2542         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
2543                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
2544                 vmcs_load(vmx->loaded_vmcs->vmcs);
2545                 indirect_branch_prediction_barrier();
2546         }
2547
2548         if (!already_loaded) {
2549                 void *gdt = get_current_gdt_ro();
2550                 unsigned long sysenter_esp;
2551
2552                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2553
2554                 /*
2555                  * Linux uses per-cpu TSS and GDT, so set these when switching
2556                  * processors.  See 22.2.4.
2557                  */
2558                 vmcs_writel(HOST_TR_BASE,
2559                             (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
2560                 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
2561
2562                 /*
2563                  * A VM exit reloads the host TR with a limit of 0x67.  This
2564                  * is okay, since 0x67 covers everything except the IO bitmap,
2565                  * and we have code to handle the IO bitmap being lost after a
2566                  * VM exit.
2567                  */
2568                 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
2569
2570                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
2571                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
2572
2573                 vmx->loaded_vmcs->cpu = cpu;
2574         }
2575
2576         /* Setup TSC multiplier */
2577         if (kvm_has_tsc_control &&
2578             vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
2579                 decache_tsc_multiplier(vmx);
2580
2581         vmx_vcpu_pi_load(vcpu, cpu);
2582         vmx->host_pkru = read_pkru();
2583         vmx->host_debugctlmsr = get_debugctlmsr();
2584 }
2585
2586 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
2587 {
2588         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2589
2590         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2591                 !irq_remapping_cap(IRQ_POSTING_CAP)  ||
2592                 !kvm_vcpu_apicv_active(vcpu))
2593                 return;
2594
2595         /* Set SN when the vCPU is preempted */
2596         if (vcpu->preempted)
2597                 pi_set_sn(pi_desc);
2598 }
2599
2600 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
2601 {
2602         vmx_vcpu_pi_put(vcpu);
2603
2604         __vmx_load_host_state(to_vmx(vcpu));
2605 }
2606
2607 static bool emulation_required(struct kvm_vcpu *vcpu)
2608 {
2609         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
2610 }
2611
2612 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2613
2614 /*
2615  * Return the cr0 value that a nested guest would read. This is a combination
2616  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
2617  * its hypervisor (cr0_read_shadow).
2618  */
2619 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
2620 {
2621         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
2622                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
2623 }
2624 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
2625 {
2626         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
2627                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
2628 }
2629
2630 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
2631 {
2632         unsigned long rflags, save_rflags;
2633
2634         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
2635                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2636                 rflags = vmcs_readl(GUEST_RFLAGS);
2637                 if (to_vmx(vcpu)->rmode.vm86_active) {
2638                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2639                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
2640                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2641                 }
2642                 to_vmx(vcpu)->rflags = rflags;
2643         }
2644         return to_vmx(vcpu)->rflags;
2645 }
2646
2647 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2648 {
2649         unsigned long old_rflags = vmx_get_rflags(vcpu);
2650
2651         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2652         to_vmx(vcpu)->rflags = rflags;
2653         if (to_vmx(vcpu)->rmode.vm86_active) {
2654                 to_vmx(vcpu)->rmode.save_rflags = rflags;
2655                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2656         }
2657         vmcs_writel(GUEST_RFLAGS, rflags);
2658
2659         if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
2660                 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
2661 }
2662
2663 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
2664 {
2665         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2666         int ret = 0;
2667
2668         if (interruptibility & GUEST_INTR_STATE_STI)
2669                 ret |= KVM_X86_SHADOW_INT_STI;
2670         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
2671                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
2672
2673         return ret;
2674 }
2675
2676 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
2677 {
2678         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2679         u32 interruptibility = interruptibility_old;
2680
2681         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
2682
2683         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
2684                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
2685         else if (mask & KVM_X86_SHADOW_INT_STI)
2686                 interruptibility |= GUEST_INTR_STATE_STI;
2687
2688         if (interruptibility != interruptibility_old)
2689                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
2690 }
2691
2692 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
2693 {
2694         unsigned long rip;
2695
2696         rip = kvm_rip_read(vcpu);
2697         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
2698         kvm_rip_write(vcpu, rip);
2699
2700         /* skipping an emulated instruction also counts */
2701         vmx_set_interrupt_shadow(vcpu, 0);
2702 }
2703
2704 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
2705                                                unsigned long exit_qual)
2706 {
2707         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2708         unsigned int nr = vcpu->arch.exception.nr;
2709         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2710
2711         if (vcpu->arch.exception.has_error_code) {
2712                 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
2713                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2714         }
2715
2716         if (kvm_exception_is_soft(nr))
2717                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2718         else
2719                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2720
2721         if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
2722             vmx_get_nmi_mask(vcpu))
2723                 intr_info |= INTR_INFO_UNBLOCK_NMI;
2724
2725         nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
2726 }
2727
2728 /*
2729  * KVM wants to re-inject page faults it received into the guest. This function
2730  * checks whether, in a nested guest, they need to be injected into L1 or L2.
2731  */
2732 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
2733 {
2734         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2735         unsigned int nr = vcpu->arch.exception.nr;
2736
2737         if (nr == PF_VECTOR) {
2738                 if (vcpu->arch.exception.nested_apf) {
2739                         *exit_qual = vcpu->arch.apf.nested_apf_token;
2740                         return 1;
2741                 }
2742                 /*
2743                  * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
2744                  * The fix is to add the ancillary datum (CR2 or DR6) to structs
2745                  * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
2746                  * can be written only when inject_pending_event runs.  This should be
2747                  * conditional on a new capability---if the capability is disabled,
2748                  * kvm_multiple_exception would write the ancillary information to
2749                  * CR2 or DR6, for backwards ABI-compatibility.
2750                  */
2751                 if (nested_vmx_is_page_fault_vmexit(vmcs12,
2752                                                     vcpu->arch.exception.error_code)) {
2753                         *exit_qual = vcpu->arch.cr2;
2754                         return 1;
2755                 }
2756         } else {
2757                 if (vmcs12->exception_bitmap & (1u << nr)) {
2758                         if (nr == DB_VECTOR)
2759                                 *exit_qual = vcpu->arch.dr6;
2760                         else
2761                                 *exit_qual = 0;
2762                         return 1;
2763                 }
2764         }
2765
2766         return 0;
2767 }
2768
2769 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
2770 {
2771         /*
2772          * Ensure that we clear the HLT state in the VMCS.  We don't need to
2773          * explicitly skip the instruction because if the HLT state is set,
2774          * then the instruction is already executing and RIP has already been
2775          * advanced.
2776          */
2777         if (kvm_hlt_in_guest(vcpu->kvm) &&
2778                         vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
2779                 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
2780 }
2781
2782 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
2783 {
2784         struct vcpu_vmx *vmx = to_vmx(vcpu);
2785         unsigned nr = vcpu->arch.exception.nr;
2786         bool has_error_code = vcpu->arch.exception.has_error_code;
2787         u32 error_code = vcpu->arch.exception.error_code;
2788         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2789
2790         if (has_error_code) {
2791                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
2792                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2793         }
2794
2795         if (vmx->rmode.vm86_active) {
2796                 int inc_eip = 0;
2797                 if (kvm_exception_is_soft(nr))
2798                         inc_eip = vcpu->arch.event_exit_inst_len;
2799                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
2800                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2801                 return;
2802         }
2803
2804         WARN_ON_ONCE(vmx->emulation_required);
2805
2806         if (kvm_exception_is_soft(nr)) {
2807                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2808                              vmx->vcpu.arch.event_exit_inst_len);
2809                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2810         } else
2811                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2812
2813         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
2814
2815         vmx_clear_hlt(vcpu);
2816 }
2817
2818 static bool vmx_rdtscp_supported(void)
2819 {
2820         return cpu_has_vmx_rdtscp();
2821 }
2822
2823 static bool vmx_invpcid_supported(void)
2824 {
2825         return cpu_has_vmx_invpcid() && enable_ept;
2826 }
2827
2828 /*
2829  * Swap MSR entry in host/guest MSR entry array.
2830  */
2831 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
2832 {
2833         struct shared_msr_entry tmp;
2834
2835         tmp = vmx->guest_msrs[to];
2836         vmx->guest_msrs[to] = vmx->guest_msrs[from];
2837         vmx->guest_msrs[from] = tmp;
2838 }
2839
2840 /*
2841  * Set up the vmcs to automatically save and restore system
2842  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
2843  * mode, as fiddling with msrs is very expensive.
2844  */
2845 static void setup_msrs(struct vcpu_vmx *vmx)
2846 {
2847         int save_nmsrs, index;
2848
2849         save_nmsrs = 0;
2850 #ifdef CONFIG_X86_64
2851         if (is_long_mode(&vmx->vcpu)) {
2852                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
2853                 if (index >= 0)
2854                         move_msr_up(vmx, index, save_nmsrs++);
2855                 index = __find_msr_index(vmx, MSR_LSTAR);
2856                 if (index >= 0)
2857                         move_msr_up(vmx, index, save_nmsrs++);
2858                 index = __find_msr_index(vmx, MSR_CSTAR);
2859                 if (index >= 0)
2860                         move_msr_up(vmx, index, save_nmsrs++);
2861                 index = __find_msr_index(vmx, MSR_TSC_AUX);
2862                 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
2863                         move_msr_up(vmx, index, save_nmsrs++);
2864                 /*
2865                  * MSR_STAR is only needed on long mode guests, and only
2866                  * if efer.sce is enabled.
2867                  */
2868                 index = __find_msr_index(vmx, MSR_STAR);
2869                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
2870                         move_msr_up(vmx, index, save_nmsrs++);
2871         }
2872 #endif
2873         index = __find_msr_index(vmx, MSR_EFER);
2874         if (index >= 0 && update_transition_efer(vmx, index))
2875                 move_msr_up(vmx, index, save_nmsrs++);
2876
2877         vmx->save_nmsrs = save_nmsrs;
2878
2879         if (cpu_has_vmx_msr_bitmap())
2880                 vmx_update_msr_bitmap(&vmx->vcpu);
2881 }
2882
2883 /*
2884  * reads and returns guest's timestamp counter "register"
2885  * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
2886  * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
2887  */
2888 static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
2889 {
2890         u64 host_tsc, tsc_offset;
2891
2892         host_tsc = rdtsc();
2893         tsc_offset = vmcs_read64(TSC_OFFSET);
2894         return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
2895 }
2896
2897 /*
2898  * writes 'offset' into guest's timestamp counter offset register
2899  */
2900 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2901 {
2902         if (is_guest_mode(vcpu)) {
2903                 /*
2904                  * We're here if L1 chose not to trap WRMSR to TSC. According
2905                  * to the spec, this should set L1's TSC; the offset that L1
2906                  * set for L2 remains unchanged, and still needs to be added
2907                  * to the newly set TSC to get L2's TSC.
2908                  */
2909                 struct vmcs12 *vmcs12;
2910                 /* recalculate vmcs02.TSC_OFFSET: */
2911                 vmcs12 = get_vmcs12(vcpu);
2912                 vmcs_write64(TSC_OFFSET, offset +
2913                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
2914                          vmcs12->tsc_offset : 0));
2915         } else {
2916                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2917                                            vmcs_read64(TSC_OFFSET), offset);
2918                 vmcs_write64(TSC_OFFSET, offset);
2919         }
2920 }
2921
2922 /*
2923  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
2924  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
2925  * all guests if the "nested" module option is off, and can also be disabled
2926  * for a single guest by disabling its VMX cpuid bit.
2927  */
2928 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2929 {
2930         return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
2931 }
2932
2933 /*
2934  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
2935  * returned for the various VMX controls MSRs when nested VMX is enabled.
2936  * The same values should also be used to verify that vmcs12 control fields are
2937  * valid during nested entry from L1 to L2.
2938  * Each of these control msrs has a low and high 32-bit half: A low bit is on
2939  * if the corresponding bit in the (32-bit) control field *must* be on, and a
2940  * bit in the high half is on if the corresponding bit in the control field
2941  * may be on. See also vmx_control_verify().
2942  */
2943 static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
2944 {
2945         if (!nested) {
2946                 memset(msrs, 0, sizeof(*msrs));
2947                 return;
2948         }
2949
2950         /*
2951          * Note that as a general rule, the high half of the MSRs (bits in
2952          * the control fields which may be 1) should be initialized by the
2953          * intersection of the underlying hardware's MSR (i.e., features which
2954          * can be supported) and the list of features we want to expose -
2955          * because they are known to be properly supported in our code.
2956          * Also, usually, the low half of the MSRs (bits which must be 1) can
2957          * be set to 0, meaning that L1 may turn off any of these bits. The
2958          * reason is that if one of these bits is necessary, it will appear
2959          * in vmcs01, and prepare_vmcs02, which bitwise-or's the control
2960          * fields of vmcs01 and vmcs12, will keep it set - and
2961          * nested_vmx_exit_reflected() will not pass related exits to L1.
2962          * These rules have exceptions below.
2963          */
2964
2965         /* pin-based controls */
2966         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
2967                 msrs->pinbased_ctls_low,
2968                 msrs->pinbased_ctls_high);
2969         msrs->pinbased_ctls_low |=
2970                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2971         msrs->pinbased_ctls_high &=
2972                 PIN_BASED_EXT_INTR_MASK |
2973                 PIN_BASED_NMI_EXITING |
2974                 PIN_BASED_VIRTUAL_NMIS |
2975                 (apicv ? PIN_BASED_POSTED_INTR : 0);
2976         msrs->pinbased_ctls_high |=
2977                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2978                 PIN_BASED_VMX_PREEMPTION_TIMER;
2979
2980         /* exit controls */
2981         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
2982                 msrs->exit_ctls_low,
2983                 msrs->exit_ctls_high);
2984         msrs->exit_ctls_low =
2985                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
2986
2987         msrs->exit_ctls_high &=
2988 #ifdef CONFIG_X86_64
2989                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
2990 #endif
2991                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
2992         msrs->exit_ctls_high |=
2993                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
2994                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
2995                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
2996
2997         if (kvm_mpx_supported())
2998                 msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
2999
3000         /* We support free control of debug control saving. */
3001         msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
3002
3003         /* entry controls */
3004         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
3005                 msrs->entry_ctls_low,
3006                 msrs->entry_ctls_high);
3007         msrs->entry_ctls_low =
3008                 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3009         msrs->entry_ctls_high &=
3010 #ifdef CONFIG_X86_64
3011                 VM_ENTRY_IA32E_MODE |
3012 #endif
3013                 VM_ENTRY_LOAD_IA32_PAT;
3014         msrs->entry_ctls_high |=
3015                 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
3016         if (kvm_mpx_supported())
3017                 msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
3018
3019         /* We support free control of debug control loading. */
3020         msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
3021
3022         /* cpu-based controls */
3023         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
3024                 msrs->procbased_ctls_low,
3025                 msrs->procbased_ctls_high);
3026         msrs->procbased_ctls_low =
3027                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3028         msrs->procbased_ctls_high &=
3029                 CPU_BASED_VIRTUAL_INTR_PENDING |
3030                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
3031                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
3032                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
3033                 CPU_BASED_CR3_STORE_EXITING |
3034 #ifdef CONFIG_X86_64
3035                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
3036 #endif
3037                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
3038                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
3039                 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
3040                 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
3041                 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
3042         /*
3043          * We can allow some features even when not supported by the
3044          * hardware. For example, L1 can specify an MSR bitmap - and we
3045          * can use it to avoid exits to L1 - even when L0 runs L2
3046          * without MSR bitmaps.
3047          */
3048         msrs->procbased_ctls_high |=
3049                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
3050                 CPU_BASED_USE_MSR_BITMAPS;
3051
3052         /* We support free control of CR3 access interception. */
3053         msrs->procbased_ctls_low &=
3054                 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
3055
3056         /*
3057          * secondary cpu-based controls.  Do not include those that
3058          * depend on CPUID bits; they are added later by vmx_cpuid_update.
3059          */
3060         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
3061                 msrs->secondary_ctls_low,
3062                 msrs->secondary_ctls_high);
3063         msrs->secondary_ctls_low = 0;
3064         msrs->secondary_ctls_high &=
3065                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3066                 SECONDARY_EXEC_DESC |
3067                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3068                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
3069                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3070                 SECONDARY_EXEC_WBINVD_EXITING;
3071
3072         if (enable_ept) {
3073                 /* nested EPT: emulate EPT also to L1 */
3074                 msrs->secondary_ctls_high |=
3075                         SECONDARY_EXEC_ENABLE_EPT;
3076                 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
3077                          VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
3078                 if (cpu_has_vmx_ept_execute_only())
3079                         msrs->ept_caps |=
3080                                 VMX_EPT_EXECUTE_ONLY_BIT;
3081                 msrs->ept_caps &= vmx_capability.ept;
3082                 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
3083                         VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
3084                         VMX_EPT_1GB_PAGE_BIT;
3085                 if (enable_ept_ad_bits) {
3086                         msrs->secondary_ctls_high |=
3087                                 SECONDARY_EXEC_ENABLE_PML;
3088                         msrs->ept_caps |= VMX_EPT_AD_BIT;
3089                 }
3090         }
3091
3092         if (cpu_has_vmx_vmfunc()) {
3093                 msrs->secondary_ctls_high |=
3094                         SECONDARY_EXEC_ENABLE_VMFUNC;
3095                 /*
3096                  * Advertise EPTP switching unconditionally
3097                  * since we emulate it
3098                  */
3099                 if (enable_ept)
3100                         msrs->vmfunc_controls =
3101                                 VMX_VMFUNC_EPTP_SWITCHING;
3102         }
3103
3104         /*
3105          * Old versions of KVM use the single-context version without
3106          * checking for support, so declare that it is supported even
3107          * though it is treated as global context.  The alternative, i.e.
3108          * accepting single-context invvpid without advertising it, is worse.
3109          */
3110         if (enable_vpid) {
3111                 msrs->secondary_ctls_high |=
3112                         SECONDARY_EXEC_ENABLE_VPID;
3113                 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
3114                         VMX_VPID_EXTENT_SUPPORTED_MASK;
3115         }
3116
3117         if (enable_unrestricted_guest)
3118                 msrs->secondary_ctls_high |=
3119                         SECONDARY_EXEC_UNRESTRICTED_GUEST;
3120
3121         /* miscellaneous data */
3122         rdmsr(MSR_IA32_VMX_MISC,
3123                 msrs->misc_low,
3124                 msrs->misc_high);
3125         msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
3126         msrs->misc_low |=
3127                 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
3128                 VMX_MISC_ACTIVITY_HLT;
3129         msrs->misc_high = 0;
3130
3131         /*
3132          * This MSR reports some information about VMX support. We
3133          * should return information about the VMX we emulate for the
3134          * guest, and the VMCS structure we give it - not about the
3135          * VMX support of the underlying hardware.
3136          */
3137         msrs->basic =
3138                 VMCS12_REVISION |
3139                 VMX_BASIC_TRUE_CTLS |
3140                 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
3141                 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
3142
3143         if (cpu_has_vmx_basic_inout())
3144                 msrs->basic |= VMX_BASIC_INOUT;
3145
3146         /*
3147          * These MSRs specify bits which the guest must keep fixed on
3148          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
3149          * We picked the standard core2 setting.
3150          */
3151 #define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
3152 #define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
3153         msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
3154         msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
3155
3156         /* These MSRs specify bits which the guest must keep fixed off. */
3157         rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
3158         rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
3159
3160         /* highest index: VMX_PREEMPTION_TIMER_VALUE */
3161         msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
3162 }
3163
3164 /*
3165  * if fixed0[i] == 1: val[i] must be 1
3166  * if fixed1[i] == 0: val[i] must be 0
3167  */
3168 static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
3169 {
3170         return ((val & fixed1) | fixed0) == val;
3171 }
3172
3173 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
3174 {
3175         return fixed_bits_valid(control, low, high);
3176 }
3177
3178 static inline u64 vmx_control_msr(u32 low, u32 high)
3179 {
3180         return low | ((u64)high << 32);
3181 }
3182
3183 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
3184 {
3185         superset &= mask;
3186         subset &= mask;
3187
3188         return (superset | subset) == superset;
3189 }
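/*
 * Editorial sketch, not part of the original file: a worked example of the
 * low/high convention checked by the helpers above.  The values are made up
 * for illustration.  With low = 0x16 (must-be-1 bits 1, 2 and 4) and
 * high = 0x1f7 (may-be-1 bits), 0x96 passes vmx_control_verify() because all
 * mandatory bits are set and no disallowed bit is set, while 0x08 fails on
 * both counts.
 */
#if 0	/* illustration only, never compiled */
static void example_control_verify(void)
{
	const u32 low  = 0x00000016;	/* bits that must be 1 */
	const u32 high = 0x000001f7;	/* bits that may be 1  */

	WARN_ON(!vmx_control_verify(0x96, low, high));	/* accepted */
	WARN_ON(vmx_control_verify(0x08, low, high));	/* rejected */
}
#endif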
3190
3191 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
3192 {
3193         const u64 feature_and_reserved =
3194                 /* feature (except bit 48; see below) */
3195                 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
3196                 /* reserved */
3197                 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
3198         u64 vmx_basic = vmx->nested.msrs.basic;
3199
3200         if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
3201                 return -EINVAL;
3202
3203         /*
3204          * KVM does not emulate a version of VMX that constrains physical
3205          * addresses of VMX structures (e.g. VMCS) to 32-bits.
3206          */
3207         if (data & BIT_ULL(48))
3208                 return -EINVAL;
3209
3210         if (vmx_basic_vmcs_revision_id(vmx_basic) !=
3211             vmx_basic_vmcs_revision_id(data))
3212                 return -EINVAL;
3213
3214         if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
3215                 return -EINVAL;
3216
3217         vmx->nested.msrs.basic = data;
3218         return 0;
3219 }
3220
3221 static int
3222 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3223 {
3224         u64 supported;
3225         u32 *lowp, *highp;
3226
3227         switch (msr_index) {
3228         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3229                 lowp = &vmx->nested.msrs.pinbased_ctls_low;
3230                 highp = &vmx->nested.msrs.pinbased_ctls_high;
3231                 break;
3232         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3233                 lowp = &vmx->nested.msrs.procbased_ctls_low;
3234                 highp = &vmx->nested.msrs.procbased_ctls_high;
3235                 break;
3236         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3237                 lowp = &vmx->nested.msrs.exit_ctls_low;
3238                 highp = &vmx->nested.msrs.exit_ctls_high;
3239                 break;
3240         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3241                 lowp = &vmx->nested.msrs.entry_ctls_low;
3242                 highp = &vmx->nested.msrs.entry_ctls_high;
3243                 break;
3244         case MSR_IA32_VMX_PROCBASED_CTLS2:
3245                 lowp = &vmx->nested.msrs.secondary_ctls_low;
3246                 highp = &vmx->nested.msrs.secondary_ctls_high;
3247                 break;
3248         default:
3249                 BUG();
3250         }
3251
3252         supported = vmx_control_msr(*lowp, *highp);
3253
3254         /* Check must-be-1 bits are still 1. */
3255         if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
3256                 return -EINVAL;
3257
3258         /* Check must-be-0 bits are still 0. */
3259         if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
3260                 return -EINVAL;
3261
3262         *lowp = data;
3263         *highp = data >> 32;
3264         return 0;
3265 }
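/*
 * Editorial sketch, not part of the original file: the two subset checks
 * above mean userspace may only make the advertised controls stricter when
 * restoring - it can add must-be-1 bits and drop may-be-1 bits, never the
 * reverse.  The numbers below are made up for illustration.
 */
#if 0	/* illustration only, never compiled */
static void example_restore_control_checks(void)
{
	u64 supported = vmx_control_msr(0x16, 0x1f7);

	/* OK: low half grew (0x16 -> 0x36), so must-be-1 bits are kept. */
	WARN_ON(!is_bitwise_subset(0x36, supported, GENMASK_ULL(31, 0)));

	/* OK: high half shrank (0x1f7 -> 0xf7), no new may-be-1 bits. */
	WARN_ON(!is_bitwise_subset(supported, (u64)0xf7 << 32,
				   GENMASK_ULL(63, 32)));

	/* Rejected: 0x06 drops mandatory bit 4 from the low half. */
	WARN_ON(is_bitwise_subset(0x06, supported, GENMASK_ULL(31, 0)));
}
#endif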
3266
3267 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
3268 {
3269         const u64 feature_and_reserved_bits =
3270                 /* feature */
3271                 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
3272                 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
3273                 /* reserved */
3274                 GENMASK_ULL(13, 9) | BIT_ULL(31);
3275         u64 vmx_misc;
3276
3277         vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
3278                                    vmx->nested.msrs.misc_high);
3279
3280         if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
3281                 return -EINVAL;
3282
3283         if ((vmx->nested.msrs.pinbased_ctls_high &
3284              PIN_BASED_VMX_PREEMPTION_TIMER) &&
3285             vmx_misc_preemption_timer_rate(data) !=
3286             vmx_misc_preemption_timer_rate(vmx_misc))
3287                 return -EINVAL;
3288
3289         if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
3290                 return -EINVAL;
3291
3292         if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
3293                 return -EINVAL;
3294
3295         if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
3296                 return -EINVAL;
3297
3298         vmx->nested.msrs.misc_low = data;
3299         vmx->nested.msrs.misc_high = data >> 32;
3300         return 0;
3301 }
3302
3303 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
3304 {
3305         u64 vmx_ept_vpid_cap;
3306
3307         vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
3308                                            vmx->nested.msrs.vpid_caps);
3309
3310         /* Every bit is either reserved or a feature bit. */
3311         if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
3312                 return -EINVAL;
3313
3314         vmx->nested.msrs.ept_caps = data;
3315         vmx->nested.msrs.vpid_caps = data >> 32;
3316         return 0;
3317 }
3318
3319 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3320 {
3321         u64 *msr;
3322
3323         switch (msr_index) {
3324         case MSR_IA32_VMX_CR0_FIXED0:
3325                 msr = &vmx->nested.msrs.cr0_fixed0;
3326                 break;
3327         case MSR_IA32_VMX_CR4_FIXED0:
3328                 msr = &vmx->nested.msrs.cr4_fixed0;
3329                 break;
3330         default:
3331                 BUG();
3332         }
3333
3334         /*
3335          * Bits which are 1 (i.e., bits which must be 1 during VMX operation)
3336          * must also be 1 in the restored value.
3337          */
3338         if (!is_bitwise_subset(data, *msr, -1ULL))
3339                 return -EINVAL;
3340
3341         *msr = data;
3342         return 0;
3343 }
3344
3345 /*
3346  * Called when userspace is restoring VMX MSRs.
3347  *
3348  * Returns 0 on success, non-0 otherwise.
3349  */
3350 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
3351 {
3352         struct vcpu_vmx *vmx = to_vmx(vcpu);
3353
3354         switch (msr_index) {
3355         case MSR_IA32_VMX_BASIC:
3356                 return vmx_restore_vmx_basic(vmx, data);
3357         case MSR_IA32_VMX_PINBASED_CTLS:
3358         case MSR_IA32_VMX_PROCBASED_CTLS:
3359         case MSR_IA32_VMX_EXIT_CTLS:
3360         case MSR_IA32_VMX_ENTRY_CTLS:
3361                 /*
3362                  * The "non-true" VMX capability MSRs are generated from the
3363                  * "true" MSRs, so we do not support restoring them directly.
3364                  *
3365                  * If userspace wants to emulate VMX_BASIC[55]=0, userspace
3366                  * should restore the "true" MSRs with the must-be-1 bits
3367                  * set according to the SDM, Vol. 3, Appendix A.2, "RESERVED CONTROLS AND
3368                  * DEFAULT SETTINGS".
3369                  */
3370                 return -EINVAL;
3371         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3372         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3373         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3374         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3375         case MSR_IA32_VMX_PROCBASED_CTLS2:
3376                 return vmx_restore_control_msr(vmx, msr_index, data);
3377         case MSR_IA32_VMX_MISC:
3378                 return vmx_restore_vmx_misc(vmx, data);
3379         case MSR_IA32_VMX_CR0_FIXED0:
3380         case MSR_IA32_VMX_CR4_FIXED0:
3381                 return vmx_restore_fixed0_msr(vmx, msr_index, data);
3382         case MSR_IA32_VMX_CR0_FIXED1:
3383         case MSR_IA32_VMX_CR4_FIXED1:
3384                 /*
3385                  * These MSRs are generated based on the vCPU's CPUID, so we
3386                  * do not support restoring them directly.
3387                  */
3388                 return -EINVAL;
3389         case MSR_IA32_VMX_EPT_VPID_CAP:
3390                 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
3391         case MSR_IA32_VMX_VMCS_ENUM:
3392                 vmx->nested.msrs.vmcs_enum = data;
3393                 return 0;
3394         default:
3395                 /*
3396                  * The rest of the VMX capability MSRs do not support restore.
3397                  */
3398                 return -EINVAL;
3399         }
3400 }
3401
3402 /* Returns 0 on success, non-0 otherwise. */
3403 static int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
3404 {
3405         switch (msr_index) {
3406         case MSR_IA32_VMX_BASIC:
3407                 *pdata = msrs->basic;
3408                 break;
3409         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3410         case MSR_IA32_VMX_PINBASED_CTLS:
3411                 *pdata = vmx_control_msr(
3412                         msrs->pinbased_ctls_low,
3413                         msrs->pinbased_ctls_high);
3414                 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
3415                         *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3416                 break;
3417         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3418         case MSR_IA32_VMX_PROCBASED_CTLS:
3419                 *pdata = vmx_control_msr(
3420                         msrs->procbased_ctls_low,
3421                         msrs->procbased_ctls_high);
3422                 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
3423                         *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3424                 break;
3425         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3426         case MSR_IA32_VMX_EXIT_CTLS:
3427                 *pdata = vmx_control_msr(
3428                         msrs->exit_ctls_low,
3429                         msrs->exit_ctls_high);
3430                 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
3431                         *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3432                 break;
3433         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3434         case MSR_IA32_VMX_ENTRY_CTLS:
3435                 *pdata = vmx_control_msr(
3436                         msrs->entry_ctls_low,
3437                         msrs->entry_ctls_high);
3438                 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
3439                         *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3440                 break;
3441         case MSR_IA32_VMX_MISC:
3442                 *pdata = vmx_control_msr(
3443                         msrs->misc_low,
3444                         msrs->misc_high);
3445                 break;
3446         case MSR_IA32_VMX_CR0_FIXED0:
3447                 *pdata = msrs->cr0_fixed0;
3448                 break;
3449         case MSR_IA32_VMX_CR0_FIXED1:
3450                 *pdata = msrs->cr0_fixed1;
3451                 break;
3452         case MSR_IA32_VMX_CR4_FIXED0:
3453                 *pdata = msrs->cr4_fixed0;
3454                 break;
3455         case MSR_IA32_VMX_CR4_FIXED1:
3456                 *pdata = msrs->cr4_fixed1;
3457                 break;
3458         case MSR_IA32_VMX_VMCS_ENUM:
3459                 *pdata = msrs->vmcs_enum;
3460                 break;
3461         case MSR_IA32_VMX_PROCBASED_CTLS2:
3462                 *pdata = vmx_control_msr(
3463                         msrs->secondary_ctls_low,
3464                         msrs->secondary_ctls_high);
3465                 break;
3466         case MSR_IA32_VMX_EPT_VPID_CAP:
3467                 *pdata = msrs->ept_caps |
3468                         ((u64)msrs->vpid_caps << 32);
3469                 break;
3470         case MSR_IA32_VMX_VMFUNC:
3471                 *pdata = msrs->vmfunc_controls;
3472                 break;
3473         default:
3474                 return 1;
3475         }
3476
3477         return 0;
3478 }
3479
3480 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
3481                                                  uint64_t val)
3482 {
3483         uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
3484
3485         return !(val & ~valid_bits);
3486 }
3487
3488 static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
3489 {
3490         switch (msr->index) {
3491         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3492                 if (!nested)
3493                         return 1;
3494                 return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
3495         default:
3496                 return 1;
3497         }
3498
3499         return 0;
3500 }
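/*
 * Editorial sketch, not part of the original file: a minimal userspace reader
 * for the emulated VMX capability MSRs served by vmx_get_msr_feature() above,
 * assuming the KVM_CAP_GET_MSR_FEATURES / system-scope KVM_GET_MSRS interface
 * is available (4.17+).  Error handling is omitted for brevity.
 */
#if 0	/* userspace illustration only, never compiled here */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_IA32_VMX_BASIC 0x480

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} req = {
		.hdr.nmsrs = 1,
		.entry.index = MSR_IA32_VMX_BASIC,
	};

	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_GET_MSR_FEATURES) <= 0)
		return 1;
	if (ioctl(kvm, KVM_GET_MSRS, &req) != 1)
		return 1;

	printf("IA32_VMX_BASIC as emulated by KVM: %#llx\n",
	       (unsigned long long)req.entry.data);
	return 0;
}
#endif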
3501
3502 /*
3503  * Reads an msr value (of 'msr_index') into 'pdata'.
3504  * Returns 0 on success, non-0 otherwise.
3505  * Assumes vcpu_load() was already called.
3506  */
3507 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3508 {
3509         struct vcpu_vmx *vmx = to_vmx(vcpu);
3510         struct shared_msr_entry *msr;
3511
3512         switch (msr_info->index) {
3513 #ifdef CONFIG_X86_64
3514         case MSR_FS_BASE:
3515                 msr_info->data = vmcs_readl(GUEST_FS_BASE);
3516                 break;
3517         case MSR_GS_BASE:
3518                 msr_info->data = vmcs_readl(GUEST_GS_BASE);
3519                 break;
3520         case MSR_KERNEL_GS_BASE:
3521                 vmx_load_host_state(vmx);
3522                 msr_info->data = vmx->msr_guest_kernel_gs_base;
3523                 break;
3524 #endif
3525         case MSR_EFER:
3526                 return kvm_get_msr_common(vcpu, msr_info);
3527         case MSR_IA32_TSC:
3528                 msr_info->data = guest_read_tsc(vcpu);
3529                 break;
3530         case MSR_IA32_SPEC_CTRL:
3531                 if (!msr_info->host_initiated &&
3532                     !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
3533                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
3534                         return 1;
3535
3536                 msr_info->data = to_vmx(vcpu)->spec_ctrl;
3537                 break;
3538         case MSR_IA32_ARCH_CAPABILITIES:
3539                 if (!msr_info->host_initiated &&
3540                     !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
3541                         return 1;
3542                 msr_info->data = to_vmx(vcpu)->arch_capabilities;
3543                 break;
3544         case MSR_IA32_SYSENTER_CS:
3545                 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
3546                 break;
3547         case MSR_IA32_SYSENTER_EIP:
3548                 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
3549                 break;
3550         case MSR_IA32_SYSENTER_ESP:
3551                 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
3552                 break;
3553         case MSR_IA32_BNDCFGS:
3554                 if (!kvm_mpx_supported() ||
3555                     (!msr_info->host_initiated &&
3556                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3557                         return 1;
3558                 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
3559                 break;
3560         case MSR_IA32_MCG_EXT_CTL:
3561                 if (!msr_info->host_initiated &&
3562                     !(vmx->msr_ia32_feature_control &
3563                       FEATURE_CONTROL_LMCE))
3564                         return 1;
3565                 msr_info->data = vcpu->arch.mcg_ext_ctl;
3566                 break;
3567         case MSR_IA32_FEATURE_CONTROL:
3568                 msr_info->data = vmx->msr_ia32_feature_control;
3569                 break;
3570         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3571                 if (!nested_vmx_allowed(vcpu))
3572                         return 1;
3573                 return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
3574                                        &msr_info->data);
3575         case MSR_IA32_XSS:
3576                 if (!vmx_xsaves_supported())
3577                         return 1;
3578                 msr_info->data = vcpu->arch.ia32_xss;
3579                 break;
3580         case MSR_TSC_AUX:
3581                 if (!msr_info->host_initiated &&
3582                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
3583                         return 1;
3584                 /* Otherwise falls through */
3585         default:
3586                 msr = find_msr_entry(vmx, msr_info->index);
3587                 if (msr) {
3588                         msr_info->data = msr->data;
3589                         break;
3590                 }
3591                 return kvm_get_msr_common(vcpu, msr_info);
3592         }
3593
3594         return 0;
3595 }
3596
3597 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
3598
3599 /*
3600  * Writes msr value into the appropriate "register".
3601  * Returns 0 on success, non-0 otherwise.
3602  * Assumes vcpu_load() was already called.
3603  */
3604 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3605 {
3606         struct vcpu_vmx *vmx = to_vmx(vcpu);
3607         struct shared_msr_entry *msr;
3608         int ret = 0;
3609         u32 msr_index = msr_info->index;
3610         u64 data = msr_info->data;
3611