KVM: x86: VMX: fix build without hyper-v
arch/x86/kvm/vmx.c  [muen/linux.git]
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22 #include "lapic.h"
23
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include <linux/nospec.h>
38 #include "kvm_cache_regs.h"
39 #include "x86.h"
40
41 #include <asm/cpu.h>
42 #include <asm/io.h>
43 #include <asm/desc.h>
44 #include <asm/vmx.h>
45 #include <asm/virtext.h>
46 #include <asm/mce.h>
47 #include <asm/fpu/internal.h>
48 #include <asm/perf_event.h>
49 #include <asm/debugreg.h>
50 #include <asm/kexec.h>
51 #include <asm/apic.h>
52 #include <asm/irq_remapping.h>
53 #include <asm/mmu_context.h>
54 #include <asm/spec-ctrl.h>
55 #include <asm/mshyperv.h>
56
57 #include "trace.h"
58 #include "pmu.h"
59 #include "vmx_evmcs.h"
60
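/*
 * These wrappers add a fixup for the wrapped VMX instruction: if the
 * instruction faults because VMX has already been torn down underneath us
 * (e.g. during a reboot or kexec), the fault is tolerated instead of
 * crashing the host.  __ex_clear() additionally clears the named register
 * in the fixup path so callers see a well-defined value.
 */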
61 #define __ex(x) __kvm_handle_fault_on_reboot(x)
62 #define __ex_clear(x, reg) \
63         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
64
65 MODULE_AUTHOR("Qumranet");
66 MODULE_LICENSE("GPL");
67
68 static const struct x86_cpu_id vmx_cpu_id[] = {
69         X86_FEATURE_MATCH(X86_FEATURE_VMX),
70         {}
71 };
72 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
73
74 static bool __read_mostly enable_vpid = 1;
75 module_param_named(vpid, enable_vpid, bool, 0444);
76
77 static bool __read_mostly enable_vnmi = 1;
78 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
79
80 static bool __read_mostly flexpriority_enabled = 1;
81 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
82
83 static bool __read_mostly enable_ept = 1;
84 module_param_named(ept, enable_ept, bool, S_IRUGO);
85
86 static bool __read_mostly enable_unrestricted_guest = 1;
87 module_param_named(unrestricted_guest,
88                         enable_unrestricted_guest, bool, S_IRUGO);
89
90 static bool __read_mostly enable_ept_ad_bits = 1;
91 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
92
93 static bool __read_mostly emulate_invalid_guest_state = true;
94 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
95
96 static bool __read_mostly fasteoi = 1;
97 module_param(fasteoi, bool, S_IRUGO);
98
99 static bool __read_mostly enable_apicv = 1;
100 module_param(enable_apicv, bool, S_IRUGO);
101
102 static bool __read_mostly enable_shadow_vmcs = 1;
103 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
104 /*
105  * If nested=1, nested virtualization is supported, i.e., guests may use
106  * VMX and act as hypervisors for their own guests. If nested=0, guests may
107  * not use VMX instructions.
108  */
109 static bool __read_mostly nested = 0;
110 module_param(nested, bool, S_IRUGO);
111
112 static u64 __read_mostly host_xss;
113
114 static bool __read_mostly enable_pml = 1;
115 module_param_named(pml, enable_pml, bool, S_IRUGO);
116
117 #define MSR_TYPE_R      1
118 #define MSR_TYPE_W      2
119 #define MSR_TYPE_RW     3
120
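/*
 * MSR bitmap modes.  These flags are OR'ed together into
 * vcpu_vmx.msr_bitmap_mode and interpreted by vmx_update_msr_bitmap();
 * the _LM bit is set while the guest runs in long mode, and the X2APIC
 * bits select how the x2APIC MSR range is intercepted.
 */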
121 #define MSR_BITMAP_MODE_X2APIC          1
122 #define MSR_BITMAP_MODE_X2APIC_APICV    2
123 #define MSR_BITMAP_MODE_LM              4
124
125 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
126
127 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
128 static int __read_mostly cpu_preemption_timer_multi;
129 static bool __read_mostly enable_preemption_timer = 1;
130 #ifdef CONFIG_X86_64
131 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
132 #endif
133
134 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
135 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
136 #define KVM_VM_CR0_ALWAYS_ON                            \
137         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |      \
138          X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
139 #define KVM_CR4_GUEST_OWNED_BITS                                      \
140         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
141          | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
142
143 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
144 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
145 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
146
147 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
148
149 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
150
151 /*
152  * Hyper-V requires all of these, so mark them as supported even though
153  * they are just treated the same as all-context.
154  */
155 #define VMX_VPID_EXTENT_SUPPORTED_MASK          \
156         (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
157         VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
158         VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
159         VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
160
161 /*
162  * These two parameters are used to configure the controls for Pause-Loop Exiting:
163  * ple_gap:    upper bound on the amount of time between two successive
164  *             executions of PAUSE in a loop. Also indicates whether PLE is enabled.
165  *             According to tests, this time is usually smaller than 128 cycles.
166  * ple_window: upper bound on the amount of time a guest is allowed to execute
167  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
168  *             less than 2^12 cycles.
169  * Time is measured based on a counter that runs at the same rate as the TSC;
170  * refer to SDM volume 3B, sections 21.6.13 & 22.1.3.
171  */
172 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
173
174 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
175 module_param(ple_window, uint, 0444);
176
177 /* Default doubles per-vcpu window every exit. */
178 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
179 module_param(ple_window_grow, uint, 0444);
180
181 /* Default resets per-vcpu window every exit to ple_window. */
182 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
183 module_param(ple_window_shrink, uint, 0444);
184
185 /* Default is to compute the maximum so we can never overflow. */
186 static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
187 module_param(ple_window_max, uint, 0444);
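/*
 * A rough sketch of how these knobs interact (see grow_ple_window() and
 * shrink_ple_window() further down): on a PAUSE-loop exit the per-vCPU
 * window is scaled up by ple_window_grow and clamped to ple_window_max;
 * ple_window_shrink controls how the window is brought back towards
 * ple_window (the default of 0 simply resets it).
 */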
188
189 extern const ulong vmx_return;
190
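/*
 * Per-VM VMX state.  It wraps the generic struct kvm so that to_kvm_vmx()
 * can convert between the two with container_of().
 */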
191 struct kvm_vmx {
192         struct kvm kvm;
193
194         unsigned int tss_addr;
195         bool ept_identity_pagetable_done;
196         gpa_t ept_identity_map_addr;
197 };
198
199 #define NR_AUTOLOAD_MSRS 8
200
201 struct vmcs {
202         u32 revision_id;
203         u32 abort;
204         char data[0];
205 };
206
207 /*
208  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
209  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
210  * loaded on this CPU (so we can clear them if the CPU goes down).
211  */
212 struct loaded_vmcs {
213         struct vmcs *vmcs;
214         struct vmcs *shadow_vmcs;
215         int cpu;
216         bool launched;
217         bool nmi_known_unmasked;
218         unsigned long vmcs_host_cr3;    /* May not match real cr3 */
219         unsigned long vmcs_host_cr4;    /* May not match real cr4 */
220         /* Support for vnmi-less CPUs */
221         int soft_vnmi_blocked;
222         ktime_t entry_time;
223         s64 vnmi_blocked_time;
224         unsigned long *msr_bitmap;
225         struct list_head loaded_vmcss_on_cpu_link;
226 };
227
228 struct shared_msr_entry {
229         unsigned index;
230         u64 data;
231         u64 mask;
232 };
233
234 /*
235  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
236  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
237  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
238  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
239  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
240  * More than one of these structures may exist, if L1 runs multiple L2 guests.
241  * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
242  * underlying hardware which will be used to run L2.
243  * This structure is packed to ensure that its layout is identical across
244  * machines (necessary for live migration).
245  *
246  * IMPORTANT: Changing the layout of existing fields in this structure
247  * will break save/restore compatibility with older kvm releases. When
248  * adding new fields, either use space in the reserved padding* arrays
249  * or add the new fields to the end of the structure.
250  */
251 typedef u64 natural_width;
252 struct __packed vmcs12 {
253         /* According to the Intel spec, a VMCS region must start with the
254          * following two fields. Then follow implementation-specific data.
255          */
256         u32 revision_id;
257         u32 abort;
258
259         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
260         u32 padding[7]; /* room for future expansion */
261
262         u64 io_bitmap_a;
263         u64 io_bitmap_b;
264         u64 msr_bitmap;
265         u64 vm_exit_msr_store_addr;
266         u64 vm_exit_msr_load_addr;
267         u64 vm_entry_msr_load_addr;
268         u64 tsc_offset;
269         u64 virtual_apic_page_addr;
270         u64 apic_access_addr;
271         u64 posted_intr_desc_addr;
272         u64 ept_pointer;
273         u64 eoi_exit_bitmap0;
274         u64 eoi_exit_bitmap1;
275         u64 eoi_exit_bitmap2;
276         u64 eoi_exit_bitmap3;
277         u64 xss_exit_bitmap;
278         u64 guest_physical_address;
279         u64 vmcs_link_pointer;
280         u64 guest_ia32_debugctl;
281         u64 guest_ia32_pat;
282         u64 guest_ia32_efer;
283         u64 guest_ia32_perf_global_ctrl;
284         u64 guest_pdptr0;
285         u64 guest_pdptr1;
286         u64 guest_pdptr2;
287         u64 guest_pdptr3;
288         u64 guest_bndcfgs;
289         u64 host_ia32_pat;
290         u64 host_ia32_efer;
291         u64 host_ia32_perf_global_ctrl;
292         u64 vmread_bitmap;
293         u64 vmwrite_bitmap;
294         u64 vm_function_control;
295         u64 eptp_list_address;
296         u64 pml_address;
297         u64 padding64[3]; /* room for future expansion */
298         /*
299          * To allow migration of L1 (complete with its L2 guests) between
300          * machines of different natural widths (32 or 64 bit), we cannot have
301          * unsigned long fields with no explicit size. We use u64 (aliased
302          * natural_width) instead. Luckily, x86 is little-endian.
303          */
304         natural_width cr0_guest_host_mask;
305         natural_width cr4_guest_host_mask;
306         natural_width cr0_read_shadow;
307         natural_width cr4_read_shadow;
308         natural_width cr3_target_value0;
309         natural_width cr3_target_value1;
310         natural_width cr3_target_value2;
311         natural_width cr3_target_value3;
312         natural_width exit_qualification;
313         natural_width guest_linear_address;
314         natural_width guest_cr0;
315         natural_width guest_cr3;
316         natural_width guest_cr4;
317         natural_width guest_es_base;
318         natural_width guest_cs_base;
319         natural_width guest_ss_base;
320         natural_width guest_ds_base;
321         natural_width guest_fs_base;
322         natural_width guest_gs_base;
323         natural_width guest_ldtr_base;
324         natural_width guest_tr_base;
325         natural_width guest_gdtr_base;
326         natural_width guest_idtr_base;
327         natural_width guest_dr7;
328         natural_width guest_rsp;
329         natural_width guest_rip;
330         natural_width guest_rflags;
331         natural_width guest_pending_dbg_exceptions;
332         natural_width guest_sysenter_esp;
333         natural_width guest_sysenter_eip;
334         natural_width host_cr0;
335         natural_width host_cr3;
336         natural_width host_cr4;
337         natural_width host_fs_base;
338         natural_width host_gs_base;
339         natural_width host_tr_base;
340         natural_width host_gdtr_base;
341         natural_width host_idtr_base;
342         natural_width host_ia32_sysenter_esp;
343         natural_width host_ia32_sysenter_eip;
344         natural_width host_rsp;
345         natural_width host_rip;
346         natural_width paddingl[8]; /* room for future expansion */
347         u32 pin_based_vm_exec_control;
348         u32 cpu_based_vm_exec_control;
349         u32 exception_bitmap;
350         u32 page_fault_error_code_mask;
351         u32 page_fault_error_code_match;
352         u32 cr3_target_count;
353         u32 vm_exit_controls;
354         u32 vm_exit_msr_store_count;
355         u32 vm_exit_msr_load_count;
356         u32 vm_entry_controls;
357         u32 vm_entry_msr_load_count;
358         u32 vm_entry_intr_info_field;
359         u32 vm_entry_exception_error_code;
360         u32 vm_entry_instruction_len;
361         u32 tpr_threshold;
362         u32 secondary_vm_exec_control;
363         u32 vm_instruction_error;
364         u32 vm_exit_reason;
365         u32 vm_exit_intr_info;
366         u32 vm_exit_intr_error_code;
367         u32 idt_vectoring_info_field;
368         u32 idt_vectoring_error_code;
369         u32 vm_exit_instruction_len;
370         u32 vmx_instruction_info;
371         u32 guest_es_limit;
372         u32 guest_cs_limit;
373         u32 guest_ss_limit;
374         u32 guest_ds_limit;
375         u32 guest_fs_limit;
376         u32 guest_gs_limit;
377         u32 guest_ldtr_limit;
378         u32 guest_tr_limit;
379         u32 guest_gdtr_limit;
380         u32 guest_idtr_limit;
381         u32 guest_es_ar_bytes;
382         u32 guest_cs_ar_bytes;
383         u32 guest_ss_ar_bytes;
384         u32 guest_ds_ar_bytes;
385         u32 guest_fs_ar_bytes;
386         u32 guest_gs_ar_bytes;
387         u32 guest_ldtr_ar_bytes;
388         u32 guest_tr_ar_bytes;
389         u32 guest_interruptibility_info;
390         u32 guest_activity_state;
391         u32 guest_sysenter_cs;
392         u32 host_ia32_sysenter_cs;
393         u32 vmx_preemption_timer_value;
394         u32 padding32[7]; /* room for future expansion */
395         u16 virtual_processor_id;
396         u16 posted_intr_nv;
397         u16 guest_es_selector;
398         u16 guest_cs_selector;
399         u16 guest_ss_selector;
400         u16 guest_ds_selector;
401         u16 guest_fs_selector;
402         u16 guest_gs_selector;
403         u16 guest_ldtr_selector;
404         u16 guest_tr_selector;
405         u16 guest_intr_status;
406         u16 host_es_selector;
407         u16 host_cs_selector;
408         u16 host_ss_selector;
409         u16 host_ds_selector;
410         u16 host_fs_selector;
411         u16 host_gs_selector;
412         u16 host_tr_selector;
413         u16 guest_pml_index;
414 };
415
416 /*
417  * For save/restore compatibility, the vmcs12 field offsets must not change.
418  */
419 #define CHECK_OFFSET(field, loc)                                \
420         BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc),       \
421                 "Offset of " #field " in struct vmcs12 has changed.")
422
423 static inline void vmx_check_vmcs12_offsets(void) {
424         CHECK_OFFSET(revision_id, 0);
425         CHECK_OFFSET(abort, 4);
426         CHECK_OFFSET(launch_state, 8);
427         CHECK_OFFSET(io_bitmap_a, 40);
428         CHECK_OFFSET(io_bitmap_b, 48);
429         CHECK_OFFSET(msr_bitmap, 56);
430         CHECK_OFFSET(vm_exit_msr_store_addr, 64);
431         CHECK_OFFSET(vm_exit_msr_load_addr, 72);
432         CHECK_OFFSET(vm_entry_msr_load_addr, 80);
433         CHECK_OFFSET(tsc_offset, 88);
434         CHECK_OFFSET(virtual_apic_page_addr, 96);
435         CHECK_OFFSET(apic_access_addr, 104);
436         CHECK_OFFSET(posted_intr_desc_addr, 112);
437         CHECK_OFFSET(ept_pointer, 120);
438         CHECK_OFFSET(eoi_exit_bitmap0, 128);
439         CHECK_OFFSET(eoi_exit_bitmap1, 136);
440         CHECK_OFFSET(eoi_exit_bitmap2, 144);
441         CHECK_OFFSET(eoi_exit_bitmap3, 152);
442         CHECK_OFFSET(xss_exit_bitmap, 160);
443         CHECK_OFFSET(guest_physical_address, 168);
444         CHECK_OFFSET(vmcs_link_pointer, 176);
445         CHECK_OFFSET(guest_ia32_debugctl, 184);
446         CHECK_OFFSET(guest_ia32_pat, 192);
447         CHECK_OFFSET(guest_ia32_efer, 200);
448         CHECK_OFFSET(guest_ia32_perf_global_ctrl, 208);
449         CHECK_OFFSET(guest_pdptr0, 216);
450         CHECK_OFFSET(guest_pdptr1, 224);
451         CHECK_OFFSET(guest_pdptr2, 232);
452         CHECK_OFFSET(guest_pdptr3, 240);
453         CHECK_OFFSET(guest_bndcfgs, 248);
454         CHECK_OFFSET(host_ia32_pat, 256);
455         CHECK_OFFSET(host_ia32_efer, 264);
456         CHECK_OFFSET(host_ia32_perf_global_ctrl, 272);
457         CHECK_OFFSET(vmread_bitmap, 280);
458         CHECK_OFFSET(vmwrite_bitmap, 288);
459         CHECK_OFFSET(vm_function_control, 296);
460         CHECK_OFFSET(eptp_list_address, 304);
461         CHECK_OFFSET(pml_address, 312);
462         CHECK_OFFSET(cr0_guest_host_mask, 344);
463         CHECK_OFFSET(cr4_guest_host_mask, 352);
464         CHECK_OFFSET(cr0_read_shadow, 360);
465         CHECK_OFFSET(cr4_read_shadow, 368);
466         CHECK_OFFSET(cr3_target_value0, 376);
467         CHECK_OFFSET(cr3_target_value1, 384);
468         CHECK_OFFSET(cr3_target_value2, 392);
469         CHECK_OFFSET(cr3_target_value3, 400);
470         CHECK_OFFSET(exit_qualification, 408);
471         CHECK_OFFSET(guest_linear_address, 416);
472         CHECK_OFFSET(guest_cr0, 424);
473         CHECK_OFFSET(guest_cr3, 432);
474         CHECK_OFFSET(guest_cr4, 440);
475         CHECK_OFFSET(guest_es_base, 448);
476         CHECK_OFFSET(guest_cs_base, 456);
477         CHECK_OFFSET(guest_ss_base, 464);
478         CHECK_OFFSET(guest_ds_base, 472);
479         CHECK_OFFSET(guest_fs_base, 480);
480         CHECK_OFFSET(guest_gs_base, 488);
481         CHECK_OFFSET(guest_ldtr_base, 496);
482         CHECK_OFFSET(guest_tr_base, 504);
483         CHECK_OFFSET(guest_gdtr_base, 512);
484         CHECK_OFFSET(guest_idtr_base, 520);
485         CHECK_OFFSET(guest_dr7, 528);
486         CHECK_OFFSET(guest_rsp, 536);
487         CHECK_OFFSET(guest_rip, 544);
488         CHECK_OFFSET(guest_rflags, 552);
489         CHECK_OFFSET(guest_pending_dbg_exceptions, 560);
490         CHECK_OFFSET(guest_sysenter_esp, 568);
491         CHECK_OFFSET(guest_sysenter_eip, 576);
492         CHECK_OFFSET(host_cr0, 584);
493         CHECK_OFFSET(host_cr3, 592);
494         CHECK_OFFSET(host_cr4, 600);
495         CHECK_OFFSET(host_fs_base, 608);
496         CHECK_OFFSET(host_gs_base, 616);
497         CHECK_OFFSET(host_tr_base, 624);
498         CHECK_OFFSET(host_gdtr_base, 632);
499         CHECK_OFFSET(host_idtr_base, 640);
500         CHECK_OFFSET(host_ia32_sysenter_esp, 648);
501         CHECK_OFFSET(host_ia32_sysenter_eip, 656);
502         CHECK_OFFSET(host_rsp, 664);
503         CHECK_OFFSET(host_rip, 672);
504         CHECK_OFFSET(pin_based_vm_exec_control, 744);
505         CHECK_OFFSET(cpu_based_vm_exec_control, 748);
506         CHECK_OFFSET(exception_bitmap, 752);
507         CHECK_OFFSET(page_fault_error_code_mask, 756);
508         CHECK_OFFSET(page_fault_error_code_match, 760);
509         CHECK_OFFSET(cr3_target_count, 764);
510         CHECK_OFFSET(vm_exit_controls, 768);
511         CHECK_OFFSET(vm_exit_msr_store_count, 772);
512         CHECK_OFFSET(vm_exit_msr_load_count, 776);
513         CHECK_OFFSET(vm_entry_controls, 780);
514         CHECK_OFFSET(vm_entry_msr_load_count, 784);
515         CHECK_OFFSET(vm_entry_intr_info_field, 788);
516         CHECK_OFFSET(vm_entry_exception_error_code, 792);
517         CHECK_OFFSET(vm_entry_instruction_len, 796);
518         CHECK_OFFSET(tpr_threshold, 800);
519         CHECK_OFFSET(secondary_vm_exec_control, 804);
520         CHECK_OFFSET(vm_instruction_error, 808);
521         CHECK_OFFSET(vm_exit_reason, 812);
522         CHECK_OFFSET(vm_exit_intr_info, 816);
523         CHECK_OFFSET(vm_exit_intr_error_code, 820);
524         CHECK_OFFSET(idt_vectoring_info_field, 824);
525         CHECK_OFFSET(idt_vectoring_error_code, 828);
526         CHECK_OFFSET(vm_exit_instruction_len, 832);
527         CHECK_OFFSET(vmx_instruction_info, 836);
528         CHECK_OFFSET(guest_es_limit, 840);
529         CHECK_OFFSET(guest_cs_limit, 844);
530         CHECK_OFFSET(guest_ss_limit, 848);
531         CHECK_OFFSET(guest_ds_limit, 852);
532         CHECK_OFFSET(guest_fs_limit, 856);
533         CHECK_OFFSET(guest_gs_limit, 860);
534         CHECK_OFFSET(guest_ldtr_limit, 864);
535         CHECK_OFFSET(guest_tr_limit, 868);
536         CHECK_OFFSET(guest_gdtr_limit, 872);
537         CHECK_OFFSET(guest_idtr_limit, 876);
538         CHECK_OFFSET(guest_es_ar_bytes, 880);
539         CHECK_OFFSET(guest_cs_ar_bytes, 884);
540         CHECK_OFFSET(guest_ss_ar_bytes, 888);
541         CHECK_OFFSET(guest_ds_ar_bytes, 892);
542         CHECK_OFFSET(guest_fs_ar_bytes, 896);
543         CHECK_OFFSET(guest_gs_ar_bytes, 900);
544         CHECK_OFFSET(guest_ldtr_ar_bytes, 904);
545         CHECK_OFFSET(guest_tr_ar_bytes, 908);
546         CHECK_OFFSET(guest_interruptibility_info, 912);
547         CHECK_OFFSET(guest_activity_state, 916);
548         CHECK_OFFSET(guest_sysenter_cs, 920);
549         CHECK_OFFSET(host_ia32_sysenter_cs, 924);
550         CHECK_OFFSET(vmx_preemption_timer_value, 928);
551         CHECK_OFFSET(virtual_processor_id, 960);
552         CHECK_OFFSET(posted_intr_nv, 962);
553         CHECK_OFFSET(guest_es_selector, 964);
554         CHECK_OFFSET(guest_cs_selector, 966);
555         CHECK_OFFSET(guest_ss_selector, 968);
556         CHECK_OFFSET(guest_ds_selector, 970);
557         CHECK_OFFSET(guest_fs_selector, 972);
558         CHECK_OFFSET(guest_gs_selector, 974);
559         CHECK_OFFSET(guest_ldtr_selector, 976);
560         CHECK_OFFSET(guest_tr_selector, 978);
561         CHECK_OFFSET(guest_intr_status, 980);
562         CHECK_OFFSET(host_es_selector, 982);
563         CHECK_OFFSET(host_cs_selector, 984);
564         CHECK_OFFSET(host_ss_selector, 986);
565         CHECK_OFFSET(host_ds_selector, 988);
566         CHECK_OFFSET(host_fs_selector, 990);
567         CHECK_OFFSET(host_gs_selector, 992);
568         CHECK_OFFSET(host_tr_selector, 994);
569         CHECK_OFFSET(guest_pml_index, 996);
570 }
571
572 /*
573  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
574  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
575  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
576  *
577  * IMPORTANT: Changing this value will break save/restore compatibility with
578  * older kvm releases.
579  */
580 #define VMCS12_REVISION 0x11e57ed0
581
582 /*
583  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
584  * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
585  * the current implementation, 4K is reserved to avoid future complications.
586  */
587 #define VMCS12_SIZE 0x1000
588
589 /*
590  * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
591  * supported VMCS12 field encoding.
592  */
593 #define VMCS12_MAX_FIELD_INDEX 0x17
594
595 struct nested_vmx_msrs {
596         /*
597          * We only store the "true" versions of the VMX capability MSRs. We
598          * generate the "non-true" versions by setting the must-be-1 bits
599          * according to the SDM.
600          */
601         u32 procbased_ctls_low;
602         u32 procbased_ctls_high;
603         u32 secondary_ctls_low;
604         u32 secondary_ctls_high;
605         u32 pinbased_ctls_low;
606         u32 pinbased_ctls_high;
607         u32 exit_ctls_low;
608         u32 exit_ctls_high;
609         u32 entry_ctls_low;
610         u32 entry_ctls_high;
611         u32 misc_low;
612         u32 misc_high;
613         u32 ept_caps;
614         u32 vpid_caps;
615         u64 basic;
616         u64 cr0_fixed0;
617         u64 cr0_fixed1;
618         u64 cr4_fixed0;
619         u64 cr4_fixed1;
620         u64 vmcs_enum;
621         u64 vmfunc_controls;
622 };
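/*
 * As an example of the "true"/"non-true" relationship described above: the
 * non-true MSR_IA32_VMX_PINBASED_CTLS value can be derived from the true
 * variant by forcing the SDM's default1 controls to 1 in the low
 * (allowed-0) half, roughly  non_true_low = true_low | default1_mask.
 */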
623
624 /*
625  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
626  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
627  */
628 struct nested_vmx {
629         /* Has the level-1 (L1) guest done VMXON? */
630         bool vmxon;
631         gpa_t vmxon_ptr;
632         bool pml_full;
633
634         /* The guest-physical address of the current VMCS L1 keeps for L2 */
635         gpa_t current_vmptr;
636         /*
637          * Cache of the guest's VMCS, existing outside of guest memory.
638          * Loaded from guest memory during VMPTRLD. Flushed to guest
639          * memory during VMCLEAR and VMPTRLD.
640          */
641         struct vmcs12 *cached_vmcs12;
642         /*
643          * Indicates if the shadow vmcs must be updated with the
644          * data held by vmcs12
645          */
646         bool sync_shadow_vmcs;
647         bool dirty_vmcs12;
648
649         bool change_vmcs01_virtual_apic_mode;
650
651         /* L2 must run next, and mustn't decide to exit to L1. */
652         bool nested_run_pending;
653
654         struct loaded_vmcs vmcs02;
655
656         /*
657          * Guest pages referred to in the vmcs02 with host-physical
658          * pointers, so we must keep them pinned while L2 runs.
659          */
660         struct page *apic_access_page;
661         struct page *virtual_apic_page;
662         struct page *pi_desc_page;
663         struct pi_desc *pi_desc;
664         bool pi_pending;
665         u16 posted_intr_nv;
666
667         struct hrtimer preemption_timer;
668         bool preemption_timer_expired;
669
670         /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
671         u64 vmcs01_debugctl;
672
673         u16 vpid02;
674         u16 last_vpid;
675
676         struct nested_vmx_msrs msrs;
677
678         /* SMM related state */
679         struct {
680                 /* in VMX operation on SMM entry? */
681                 bool vmxon;
682                 /* in guest mode on SMM entry? */
683                 bool guest_mode;
684         } smm;
685 };
686
687 #define POSTED_INTR_ON  0
688 #define POSTED_INTR_SN  1
689
690 /* Posted-Interrupt Descriptor */
691 struct pi_desc {
692         u32 pir[8];     /* Posted interrupt requested */
693         union {
694                 struct {
695                                 /* bit 256 - Outstanding Notification */
696                         u16     on      : 1,
697                                 /* bit 257 - Suppress Notification */
698                                 sn      : 1,
699                                 /* bit 271:258 - Reserved */
700                                 rsvd_1  : 14;
701                                 /* bit 279:272 - Notification Vector */
702                         u8      nv;
703                                 /* bit 287:280 - Reserved */
704                         u8      rsvd_2;
705                                 /* bit 319:288 - Notification Destination */
706                         u32     ndst;
707                 };
708                 u64 control;
709         };
710         u32 rsvd[6];
711 } __aligned(64);
712
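/*
 * The accessors below manipulate the descriptor's control bits atomically.
 * The posting protocol, roughly: a sender sets the vector's bit in pir[],
 * then sets ON; the notification vector 'nv' is delivered to the CPU named
 * by 'ndst' unless SN (suppress notification) is set.
 */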
713 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
714 {
715         return test_and_set_bit(POSTED_INTR_ON,
716                         (unsigned long *)&pi_desc->control);
717 }
718
719 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
720 {
721         return test_and_clear_bit(POSTED_INTR_ON,
722                         (unsigned long *)&pi_desc->control);
723 }
724
725 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
726 {
727         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
728 }
729
730 static inline void pi_clear_sn(struct pi_desc *pi_desc)
731 {
732         return clear_bit(POSTED_INTR_SN,
733                         (unsigned long *)&pi_desc->control);
734 }
735
736 static inline void pi_set_sn(struct pi_desc *pi_desc)
737 {
738         return set_bit(POSTED_INTR_SN,
739                         (unsigned long *)&pi_desc->control);
740 }
741
742 static inline void pi_clear_on(struct pi_desc *pi_desc)
743 {
744         clear_bit(POSTED_INTR_ON,
745                   (unsigned long *)&pi_desc->control);
746 }
747
748 static inline int pi_test_on(struct pi_desc *pi_desc)
749 {
750         return test_bit(POSTED_INTR_ON,
751                         (unsigned long *)&pi_desc->control);
752 }
753
754 static inline int pi_test_sn(struct pi_desc *pi_desc)
755 {
756         return test_bit(POSTED_INTR_SN,
757                         (unsigned long *)&pi_desc->control);
758 }
759
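/*
 * Per-vCPU VMX state.  The generic kvm_vcpu is embedded as the first member
 * so that to_vmx() can convert between the two with container_of().
 */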
760 struct vcpu_vmx {
761         struct kvm_vcpu       vcpu;
762         unsigned long         host_rsp;
763         u8                    fail;
764         u8                    msr_bitmap_mode;
765         u32                   exit_intr_info;
766         u32                   idt_vectoring_info;
767         ulong                 rflags;
768         struct shared_msr_entry *guest_msrs;
769         int                   nmsrs;
770         int                   save_nmsrs;
771         unsigned long         host_idt_base;
772 #ifdef CONFIG_X86_64
773         u64                   msr_host_kernel_gs_base;
774         u64                   msr_guest_kernel_gs_base;
775 #endif
776
777         u64                   arch_capabilities;
778         u64                   spec_ctrl;
779
780         u32 vm_entry_controls_shadow;
781         u32 vm_exit_controls_shadow;
782         u32 secondary_exec_control;
783
784         /*
785          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
786          * non-nested (L1) guest, it always points to vmcs01. For a nested
787          * guest (L2), it points to a different VMCS.
788          */
789         struct loaded_vmcs    vmcs01;
790         struct loaded_vmcs   *loaded_vmcs;
791         bool                  __launched; /* temporary, used in vmx_vcpu_run */
792         struct msr_autoload {
793                 unsigned nr;
794                 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
795                 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
796         } msr_autoload;
797         struct {
798                 int           loaded;
799                 u16           fs_sel, gs_sel, ldt_sel;
800 #ifdef CONFIG_X86_64
801                 u16           ds_sel, es_sel;
802 #endif
803                 int           gs_ldt_reload_needed;
804                 int           fs_reload_needed;
805                 u64           msr_host_bndcfgs;
806         } host_state;
807         struct {
808                 int vm86_active;
809                 ulong save_rflags;
810                 struct kvm_segment segs[8];
811         } rmode;
812         struct {
813                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
814                 struct kvm_save_segment {
815                         u16 selector;
816                         unsigned long base;
817                         u32 limit;
818                         u32 ar;
819                 } seg[8];
820         } segment_cache;
821         int vpid;
822         bool emulation_required;
823
824         u32 exit_reason;
825
826         /* Posted interrupt descriptor */
827         struct pi_desc pi_desc;
828
829         /* Support for a guest hypervisor (nested VMX) */
830         struct nested_vmx nested;
831
832         /* Dynamic PLE window. */
833         int ple_window;
834         bool ple_window_dirty;
835
836         /* Support for PML */
837 #define PML_ENTITY_NUM          512
838         struct page *pml_pg;
839
840         /* apic deadline value in host tsc */
841         u64 hv_deadline_tsc;
842
843         u64 current_tsc_ratio;
844
845         u32 host_pkru;
846
847         unsigned long host_debugctlmsr;
848
849         /*
850          * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
851          * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
852          * in msr_ia32_feature_control_valid_bits.
853          */
854         u64 msr_ia32_feature_control;
855         u64 msr_ia32_feature_control_valid_bits;
856 };
857
858 enum segment_cache_field {
859         SEG_FIELD_SEL = 0,
860         SEG_FIELD_BASE = 1,
861         SEG_FIELD_LIMIT = 2,
862         SEG_FIELD_AR = 3,
863
864         SEG_FIELD_NR = 4
865 };
866
867 static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
868 {
869         return container_of(kvm, struct kvm_vmx, kvm);
870 }
871
872 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
873 {
874         return container_of(vcpu, struct vcpu_vmx, vcpu);
875 }
876
877 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
878 {
879         return &(to_vmx(vcpu)->pi_desc);
880 }
881
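/*
 * Helpers for building vmcs_field_to_offset_table below.  ROL16(encoding, 6)
 * rotates the 16-bit VMCS field encoding so the sparse architectural
 * encodings collapse into a compact array index; FIELD64() additionally maps
 * a 64-bit field's *_HIGH encoding onto the upper 32 bits of the same
 * vmcs12 member.
 */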
882 #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
883 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
884 #define FIELD(number, name)     [ROL16(number, 6)] = VMCS12_OFFSET(name)
885 #define FIELD64(number, name)                                           \
886         FIELD(number, name),                                            \
887         [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
888
889
890 static u16 shadow_read_only_fields[] = {
891 #define SHADOW_FIELD_RO(x) x,
892 #include "vmx_shadow_fields.h"
893 };
894 static int max_shadow_read_only_fields =
895         ARRAY_SIZE(shadow_read_only_fields);
896
897 static u16 shadow_read_write_fields[] = {
898 #define SHADOW_FIELD_RW(x) x,
899 #include "vmx_shadow_fields.h"
900 };
901 static int max_shadow_read_write_fields =
902         ARRAY_SIZE(shadow_read_write_fields);
903
904 static const unsigned short vmcs_field_to_offset_table[] = {
905         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
906         FIELD(POSTED_INTR_NV, posted_intr_nv),
907         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
908         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
909         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
910         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
911         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
912         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
913         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
914         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
915         FIELD(GUEST_INTR_STATUS, guest_intr_status),
916         FIELD(GUEST_PML_INDEX, guest_pml_index),
917         FIELD(HOST_ES_SELECTOR, host_es_selector),
918         FIELD(HOST_CS_SELECTOR, host_cs_selector),
919         FIELD(HOST_SS_SELECTOR, host_ss_selector),
920         FIELD(HOST_DS_SELECTOR, host_ds_selector),
921         FIELD(HOST_FS_SELECTOR, host_fs_selector),
922         FIELD(HOST_GS_SELECTOR, host_gs_selector),
923         FIELD(HOST_TR_SELECTOR, host_tr_selector),
924         FIELD64(IO_BITMAP_A, io_bitmap_a),
925         FIELD64(IO_BITMAP_B, io_bitmap_b),
926         FIELD64(MSR_BITMAP, msr_bitmap),
927         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
928         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
929         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
930         FIELD64(PML_ADDRESS, pml_address),
931         FIELD64(TSC_OFFSET, tsc_offset),
932         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
933         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
934         FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
935         FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
936         FIELD64(EPT_POINTER, ept_pointer),
937         FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
938         FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
939         FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
940         FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
941         FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
942         FIELD64(VMREAD_BITMAP, vmread_bitmap),
943         FIELD64(VMWRITE_BITMAP, vmwrite_bitmap),
944         FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
945         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
946         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
947         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
948         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
949         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
950         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
951         FIELD64(GUEST_PDPTR0, guest_pdptr0),
952         FIELD64(GUEST_PDPTR1, guest_pdptr1),
953         FIELD64(GUEST_PDPTR2, guest_pdptr2),
954         FIELD64(GUEST_PDPTR3, guest_pdptr3),
955         FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
956         FIELD64(HOST_IA32_PAT, host_ia32_pat),
957         FIELD64(HOST_IA32_EFER, host_ia32_efer),
958         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
959         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
960         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
961         FIELD(EXCEPTION_BITMAP, exception_bitmap),
962         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
963         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
964         FIELD(CR3_TARGET_COUNT, cr3_target_count),
965         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
966         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
967         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
968         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
969         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
970         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
971         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
972         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
973         FIELD(TPR_THRESHOLD, tpr_threshold),
974         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
975         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
976         FIELD(VM_EXIT_REASON, vm_exit_reason),
977         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
978         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
979         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
980         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
981         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
982         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
983         FIELD(GUEST_ES_LIMIT, guest_es_limit),
984         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
985         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
986         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
987         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
988         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
989         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
990         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
991         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
992         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
993         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
994         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
995         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
996         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
997         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
998         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
999         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
1000         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
1001         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
1002         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
1003         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
1004         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
1005         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
1006         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
1007         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
1008         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
1009         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
1010         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
1011         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
1012         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
1013         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
1014         FIELD(EXIT_QUALIFICATION, exit_qualification),
1015         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
1016         FIELD(GUEST_CR0, guest_cr0),
1017         FIELD(GUEST_CR3, guest_cr3),
1018         FIELD(GUEST_CR4, guest_cr4),
1019         FIELD(GUEST_ES_BASE, guest_es_base),
1020         FIELD(GUEST_CS_BASE, guest_cs_base),
1021         FIELD(GUEST_SS_BASE, guest_ss_base),
1022         FIELD(GUEST_DS_BASE, guest_ds_base),
1023         FIELD(GUEST_FS_BASE, guest_fs_base),
1024         FIELD(GUEST_GS_BASE, guest_gs_base),
1025         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
1026         FIELD(GUEST_TR_BASE, guest_tr_base),
1027         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
1028         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
1029         FIELD(GUEST_DR7, guest_dr7),
1030         FIELD(GUEST_RSP, guest_rsp),
1031         FIELD(GUEST_RIP, guest_rip),
1032         FIELD(GUEST_RFLAGS, guest_rflags),
1033         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
1034         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
1035         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
1036         FIELD(HOST_CR0, host_cr0),
1037         FIELD(HOST_CR3, host_cr3),
1038         FIELD(HOST_CR4, host_cr4),
1039         FIELD(HOST_FS_BASE, host_fs_base),
1040         FIELD(HOST_GS_BASE, host_gs_base),
1041         FIELD(HOST_TR_BASE, host_tr_base),
1042         FIELD(HOST_GDTR_BASE, host_gdtr_base),
1043         FIELD(HOST_IDTR_BASE, host_idtr_base),
1044         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
1045         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
1046         FIELD(HOST_RSP, host_rsp),
1047         FIELD(HOST_RIP, host_rip),
1048 };
1049
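/*
 * Translate a VMCS field encoding into the offset of the corresponding
 * vmcs12 member, or -ENOENT if the encoding is not a recognized vmcs12
 * field.  The index is clamped with array_index_nospec() so the table
 * lookup cannot be abused for Spectre-v1 style speculative out-of-bounds
 * reads.
 */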
1050 static inline short vmcs_field_to_offset(unsigned long field)
1051 {
1052         const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
1053         unsigned short offset;
1054         unsigned index;
1055
1056         if (field >> 15)
1057                 return -ENOENT;
1058
1059         index = ROL16(field, 6);
1060         if (index >= size)
1061                 return -ENOENT;
1062
1063         index = array_index_nospec(index, size);
1064         offset = vmcs_field_to_offset_table[index];
1065         if (offset == 0)
1066                 return -ENOENT;
1067         return offset;
1068 }
1069
1070 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
1071 {
1072         return to_vmx(vcpu)->nested.cached_vmcs12;
1073 }
1074
1075 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
1076 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
1077 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
1078 static bool vmx_xsaves_supported(void);
1079 static void vmx_set_segment(struct kvm_vcpu *vcpu,
1080                             struct kvm_segment *var, int seg);
1081 static void vmx_get_segment(struct kvm_vcpu *vcpu,
1082                             struct kvm_segment *var, int seg);
1083 static bool guest_state_valid(struct kvm_vcpu *vcpu);
1084 static u32 vmx_segment_access_rights(struct kvm_segment *var);
1085 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
1086 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
1087 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
1088 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
1089                                             u16 error_code);
1090 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
1091 static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
1092                                                           u32 msr, int type);
1093
1094 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
1095 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
1096 /*
1097  * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed
1098  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
1099  */
1100 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
1101
1102 /*
1103  * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
1104  * can find which vCPU should be woken up.
1105  */
1106 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
1107 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
1108
1109 enum {
1110         VMX_VMREAD_BITMAP,
1111         VMX_VMWRITE_BITMAP,
1112         VMX_BITMAP_NR
1113 };
1114
1115 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
1116
1117 #define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
1118 #define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
1119
1120 static bool cpu_has_load_ia32_efer;
1121 static bool cpu_has_load_perf_global_ctrl;
1122
1123 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
1124 static DEFINE_SPINLOCK(vmx_vpid_lock);
1125
1126 static struct vmcs_config {
1127         int size;
1128         int order;
1129         u32 basic_cap;
1130         u32 revision_id;
1131         u32 pin_based_exec_ctrl;
1132         u32 cpu_based_exec_ctrl;
1133         u32 cpu_based_2nd_exec_ctrl;
1134         u32 vmexit_ctrl;
1135         u32 vmentry_ctrl;
1136         struct nested_vmx_msrs nested;
1137 } vmcs_config;
1138
1139 static struct vmx_capability {
1140         u32 ept;
1141         u32 vpid;
1142 } vmx_capability;
1143
1144 #define VMX_SEGMENT_FIELD(seg)                                  \
1145         [VCPU_SREG_##seg] = {                                   \
1146                 .selector = GUEST_##seg##_SELECTOR,             \
1147                 .base = GUEST_##seg##_BASE,                     \
1148                 .limit = GUEST_##seg##_LIMIT,                   \
1149                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
1150         }
1151
1152 static const struct kvm_vmx_segment_field {
1153         unsigned selector;
1154         unsigned base;
1155         unsigned limit;
1156         unsigned ar_bytes;
1157 } kvm_vmx_segment_fields[] = {
1158         VMX_SEGMENT_FIELD(CS),
1159         VMX_SEGMENT_FIELD(DS),
1160         VMX_SEGMENT_FIELD(ES),
1161         VMX_SEGMENT_FIELD(FS),
1162         VMX_SEGMENT_FIELD(GS),
1163         VMX_SEGMENT_FIELD(SS),
1164         VMX_SEGMENT_FIELD(TR),
1165         VMX_SEGMENT_FIELD(LDTR),
1166 };
1167
1168 static u64 host_efer;
1169
1170 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1171
1172 /*
1173  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
1174  * away by decrementing the array size.
1175  */
1176 static const u32 vmx_msr_index[] = {
1177 #ifdef CONFIG_X86_64
1178         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
1179 #endif
1180         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
1181 };
1182
1183 DEFINE_STATIC_KEY_FALSE(enable_evmcs);
1184
1185 #define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
1186
1187 #define KVM_EVMCS_VERSION 1
1188
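/*
 * Note that the enable_evmcs static key, current_evmcs and KVM_EVMCS_VERSION
 * are defined unconditionally; only the enlightened-VMCS helpers below depend
 * on CONFIG_HYPERV, and the !CONFIG_HYPERV branch provides empty stubs so
 * callers need no #ifdefs.
 */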
1189 #if IS_ENABLED(CONFIG_HYPERV)
1190 static bool __read_mostly enlightened_vmcs = true;
1191 module_param(enlightened_vmcs, bool, 0444);
1192
1193 static inline void evmcs_write64(unsigned long field, u64 value)
1194 {
1195         u16 clean_field;
1196         int offset = get_evmcs_offset(field, &clean_field);
1197
1198         if (offset < 0)
1199                 return;
1200
1201         *(u64 *)((char *)current_evmcs + offset) = value;
1202
1203         current_evmcs->hv_clean_fields &= ~clean_field;
1204 }
1205
1206 static inline void evmcs_write32(unsigned long field, u32 value)
1207 {
1208         u16 clean_field;
1209         int offset = get_evmcs_offset(field, &clean_field);
1210
1211         if (offset < 0)
1212                 return;
1213
1214         *(u32 *)((char *)current_evmcs + offset) = value;
1215         current_evmcs->hv_clean_fields &= ~clean_field;
1216 }
1217
1218 static inline void evmcs_write16(unsigned long field, u16 value)
1219 {
1220         u16 clean_field;
1221         int offset = get_evmcs_offset(field, &clean_field);
1222
1223         if (offset < 0)
1224                 return;
1225
1226         *(u16 *)((char *)current_evmcs + offset) = value;
1227         current_evmcs->hv_clean_fields &= ~clean_field;
1228 }
1229
1230 static inline u64 evmcs_read64(unsigned long field)
1231 {
1232         int offset = get_evmcs_offset(field, NULL);
1233
1234         if (offset < 0)
1235                 return 0;
1236
1237         return *(u64 *)((char *)current_evmcs + offset);
1238 }
1239
1240 static inline u32 evmcs_read32(unsigned long field)
1241 {
1242         int offset = get_evmcs_offset(field, NULL);
1243
1244         if (offset < 0)
1245                 return 0;
1246
1247         return *(u32 *)((char *)current_evmcs + offset);
1248 }
1249
1250 static inline u16 evmcs_read16(unsigned long field)
1251 {
1252         int offset = get_evmcs_offset(field, NULL);
1253
1254         if (offset < 0)
1255                 return 0;
1256
1257         return *(u16 *)((char *)current_evmcs + offset);
1258 }
1259
1260 static inline void evmcs_touch_msr_bitmap(void)
1261 {
1262         if (unlikely(!current_evmcs))
1263                 return;
1264
1265         if (current_evmcs->hv_enlightenments_control.msr_bitmap)
1266                 current_evmcs->hv_clean_fields &=
1267                         ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
1268 }
1269
1270 static void evmcs_load(u64 phys_addr)
1271 {
1272         struct hv_vp_assist_page *vp_ap =
1273                 hv_get_vp_assist_page(smp_processor_id());
1274
1275         vp_ap->current_nested_vmcs = phys_addr;
1276         vp_ap->enlighten_vmentry = 1;
1277 }
1278
1279 static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
1280 {
1281         /*
1282          * Enlightened VMCSv1 doesn't support these:
1283          *
1284          *      POSTED_INTR_NV                  = 0x00000002,
1285          *      GUEST_INTR_STATUS               = 0x00000810,
1286          *      APIC_ACCESS_ADDR                = 0x00002014,
1287          *      POSTED_INTR_DESC_ADDR           = 0x00002016,
1288          *      EOI_EXIT_BITMAP0                = 0x0000201c,
1289          *      EOI_EXIT_BITMAP1                = 0x0000201e,
1290          *      EOI_EXIT_BITMAP2                = 0x00002020,
1291          *      EOI_EXIT_BITMAP3                = 0x00002022,
1292          */
1293         vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
1294         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1295                 ~SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1296         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1297                 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1298         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1299                 ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
1300
1301         /*
1302          *      GUEST_PML_INDEX                 = 0x00000812,
1303          *      PML_ADDRESS                     = 0x0000200e,
1304          */
1305         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_PML;
1306
1307         /*      VM_FUNCTION_CONTROL             = 0x00002018, */
1308         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
1309
1310         /*
1311          *      EPTP_LIST_ADDRESS               = 0x00002024,
1312          *      VMREAD_BITMAP                   = 0x00002026,
1313          *      VMWRITE_BITMAP                  = 0x00002028,
1314          */
1315         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_SHADOW_VMCS;
1316
1317         /*
1318          *      TSC_MULTIPLIER                  = 0x00002032,
1319          */
1320         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_TSC_SCALING;
1321
1322         /*
1323          *      PLE_GAP                         = 0x00004020,
1324          *      PLE_WINDOW                      = 0x00004022,
1325          */
1326         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1327
1328         /*
1329          *      VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
1330          */
1331         vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
1332
1333         /*
1334          *      GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
1335          *      HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
1336          */
1337         vmcs_conf->vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
1338         vmcs_conf->vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
1339
1340         /*
1341          * Currently unsupported in KVM:
1342          *      GUEST_IA32_RTIT_CTL             = 0x00002814,
1343          */
1344 }
1345 #else /* !IS_ENABLED(CONFIG_HYPERV) */
1346 static inline void evmcs_write64(unsigned long field, u64 value) {}
1347 static inline void evmcs_write32(unsigned long field, u32 value) {}
1348 static inline void evmcs_write16(unsigned long field, u16 value) {}
1349 static inline u64 evmcs_read64(unsigned long field) { return 0; }
1350 static inline u32 evmcs_read32(unsigned long field) { return 0; }
1351 static inline u16 evmcs_read16(unsigned long field) { return 0; }
1352 static inline void evmcs_load(u64 phys_addr) {}
1353 static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
1354 static inline void evmcs_touch_msr_bitmap(void) {}
1355 #endif /* IS_ENABLED(CONFIG_HYPERV) */
1356
1357 static inline bool is_exception_n(u32 intr_info, u8 vector)
1358 {
1359         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1360                              INTR_INFO_VALID_MASK)) ==
1361                 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1362 }
1363
1364 static inline bool is_debug(u32 intr_info)
1365 {
1366         return is_exception_n(intr_info, DB_VECTOR);
1367 }
1368
1369 static inline bool is_breakpoint(u32 intr_info)
1370 {
1371         return is_exception_n(intr_info, BP_VECTOR);
1372 }
1373
1374 static inline bool is_page_fault(u32 intr_info)
1375 {
1376         return is_exception_n(intr_info, PF_VECTOR);
1377 }
1378
1379 static inline bool is_no_device(u32 intr_info)
1380 {
1381         return is_exception_n(intr_info, NM_VECTOR);
1382 }
1383
1384 static inline bool is_invalid_opcode(u32 intr_info)
1385 {
1386         return is_exception_n(intr_info, UD_VECTOR);
1387 }
1388
1389 static inline bool is_gp_fault(u32 intr_info)
1390 {
1391         return is_exception_n(intr_info, GP_VECTOR);
1392 }
1393
1394 static inline bool is_external_interrupt(u32 intr_info)
1395 {
1396         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1397                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1398 }
1399
1400 static inline bool is_machine_check(u32 intr_info)
1401 {
1402         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1403                              INTR_INFO_VALID_MASK)) ==
1404                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1405 }
1406
1407 /* Undocumented: icebp/int1 */
1408 static inline bool is_icebp(u32 intr_info)
1409 {
1410         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1411                 == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
1412 }
1413
1414 static inline bool cpu_has_vmx_msr_bitmap(void)
1415 {
1416         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1417 }
1418
1419 static inline bool cpu_has_vmx_tpr_shadow(void)
1420 {
1421         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1422 }
1423
1424 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1425 {
1426         return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1427 }
1428
1429 static inline bool cpu_has_secondary_exec_ctrls(void)
1430 {
1431         return vmcs_config.cpu_based_exec_ctrl &
1432                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1433 }
1434
1435 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1436 {
1437         return vmcs_config.cpu_based_2nd_exec_ctrl &
1438                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1439 }
1440
1441 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1442 {
1443         return vmcs_config.cpu_based_2nd_exec_ctrl &
1444                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1445 }
1446
1447 static inline bool cpu_has_vmx_apic_register_virt(void)
1448 {
1449         return vmcs_config.cpu_based_2nd_exec_ctrl &
1450                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1451 }
1452
1453 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1454 {
1455         return vmcs_config.cpu_based_2nd_exec_ctrl &
1456                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1457 }
1458
1459 /*
1460  * Comment format: document - errata name - stepping - processor name.
1461  * Taken from
1462  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1463  */
1464 static u32 vmx_preemption_cpu_tfms[] = {
1465 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
1466 0x000206E6,
1467 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
1468 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1469 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1470 0x00020652,
1471 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1472 0x00020655,
1473 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
1474 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
1475 /*
1476  * 320767.pdf - AAP86  - B1 -
1477  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1478  */
1479 0x000106E5,
1480 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1481 0x000106A0,
1482 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1483 0x000106A1,
1484 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1485 0x000106A4,
1486  /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1487  /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1488  /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1489 0x000106A5,
1490 };
1491
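/*
 * CPUID leaf 1 EAX holds the type/family/model/stepping signature; clearing
 * the reserved bits (15:14 and 31:28) leaves exactly the value that is
 * matched against the erratum list above.
 */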
1492 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1493 {
1494         u32 eax = cpuid_eax(0x00000001), i;
1495
1496         /* Clear the reserved bits */
1497         eax &= ~(0x3U << 14 | 0xfU << 28);
1498         for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1499                 if (eax == vmx_preemption_cpu_tfms[i])
1500                         return true;
1501
1502         return false;
1503 }
1504
1505 static inline bool cpu_has_vmx_preemption_timer(void)
1506 {
1507         return vmcs_config.pin_based_exec_ctrl &
1508                 PIN_BASED_VMX_PREEMPTION_TIMER;
1509 }
1510
1511 static inline bool cpu_has_vmx_posted_intr(void)
1512 {
1513         return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1514                 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1515 }
1516
1517 static inline bool cpu_has_vmx_apicv(void)
1518 {
1519         return cpu_has_vmx_apic_register_virt() &&
1520                 cpu_has_vmx_virtual_intr_delivery() &&
1521                 cpu_has_vmx_posted_intr();
1522 }
1523
1524 static inline bool cpu_has_vmx_flexpriority(void)
1525 {
1526         return cpu_has_vmx_tpr_shadow() &&
1527                 cpu_has_vmx_virtualize_apic_accesses();
1528 }
1529
1530 static inline bool cpu_has_vmx_ept_execute_only(void)
1531 {
1532         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1533 }
1534
1535 static inline bool cpu_has_vmx_ept_2m_page(void)
1536 {
1537         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1538 }
1539
1540 static inline bool cpu_has_vmx_ept_1g_page(void)
1541 {
1542         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1543 }
1544
1545 static inline bool cpu_has_vmx_ept_4levels(void)
1546 {
1547         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1548 }
1549
1550 static inline bool cpu_has_vmx_ept_mt_wb(void)
1551 {
1552         return vmx_capability.ept & VMX_EPTP_WB_BIT;
1553 }
1554
1555 static inline bool cpu_has_vmx_ept_5levels(void)
1556 {
1557         return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1558 }
1559
1560 static inline bool cpu_has_vmx_ept_ad_bits(void)
1561 {
1562         return vmx_capability.ept & VMX_EPT_AD_BIT;
1563 }
1564
1565 static inline bool cpu_has_vmx_invept_context(void)
1566 {
1567         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1568 }
1569
1570 static inline bool cpu_has_vmx_invept_global(void)
1571 {
1572         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1573 }
1574
1575 static inline bool cpu_has_vmx_invvpid_individual_addr(void)
1576 {
1577         return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;
1578 }
1579
1580 static inline bool cpu_has_vmx_invvpid_single(void)
1581 {
1582         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1583 }
1584
1585 static inline bool cpu_has_vmx_invvpid_global(void)
1586 {
1587         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1588 }
1589
1590 static inline bool cpu_has_vmx_invvpid(void)
1591 {
1592         return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1593 }
1594
1595 static inline bool cpu_has_vmx_ept(void)
1596 {
1597         return vmcs_config.cpu_based_2nd_exec_ctrl &
1598                 SECONDARY_EXEC_ENABLE_EPT;
1599 }
1600
1601 static inline bool cpu_has_vmx_unrestricted_guest(void)
1602 {
1603         return vmcs_config.cpu_based_2nd_exec_ctrl &
1604                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1605 }
1606
1607 static inline bool cpu_has_vmx_ple(void)
1608 {
1609         return vmcs_config.cpu_based_2nd_exec_ctrl &
1610                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1611 }
1612
1613 static inline bool cpu_has_vmx_basic_inout(void)
1614 {
1615         return  (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1616 }
1617
1618 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1619 {
1620         return flexpriority_enabled && lapic_in_kernel(vcpu);
1621 }
1622
1623 static inline bool cpu_has_vmx_vpid(void)
1624 {
1625         return vmcs_config.cpu_based_2nd_exec_ctrl &
1626                 SECONDARY_EXEC_ENABLE_VPID;
1627 }
1628
1629 static inline bool cpu_has_vmx_rdtscp(void)
1630 {
1631         return vmcs_config.cpu_based_2nd_exec_ctrl &
1632                 SECONDARY_EXEC_RDTSCP;
1633 }
1634
1635 static inline bool cpu_has_vmx_invpcid(void)
1636 {
1637         return vmcs_config.cpu_based_2nd_exec_ctrl &
1638                 SECONDARY_EXEC_ENABLE_INVPCID;
1639 }
1640
1641 static inline bool cpu_has_virtual_nmis(void)
1642 {
1643         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1644 }
1645
1646 static inline bool cpu_has_vmx_wbinvd_exit(void)
1647 {
1648         return vmcs_config.cpu_based_2nd_exec_ctrl &
1649                 SECONDARY_EXEC_WBINVD_EXITING;
1650 }
1651
1652 static inline bool cpu_has_vmx_shadow_vmcs(void)
1653 {
1654         u64 vmx_msr;
1655         rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1656         /* check if the cpu supports writing r/o exit information fields */
1657         if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1658                 return false;
1659
1660         return vmcs_config.cpu_based_2nd_exec_ctrl &
1661                 SECONDARY_EXEC_SHADOW_VMCS;
1662 }
1663
1664 static inline bool cpu_has_vmx_pml(void)
1665 {
1666         return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1667 }
1668
1669 static inline bool cpu_has_vmx_tsc_scaling(void)
1670 {
1671         return vmcs_config.cpu_based_2nd_exec_ctrl &
1672                 SECONDARY_EXEC_TSC_SCALING;
1673 }
1674
1675 static inline bool cpu_has_vmx_vmfunc(void)
1676 {
1677         return vmcs_config.cpu_based_2nd_exec_ctrl &
1678                 SECONDARY_EXEC_ENABLE_VMFUNC;
1679 }
1680
1681 static bool vmx_umip_emulated(void)
1682 {
1683         return vmcs_config.cpu_based_2nd_exec_ctrl &
1684                 SECONDARY_EXEC_DESC;
1685 }
1686
1687 static inline bool report_flexpriority(void)
1688 {
1689         return flexpriority_enabled;
1690 }
1691
1692 static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1693 {
1694         return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
1695 }
1696
1697 /*
1698  * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
1699  * to modify any valid field of the VMCS, or are the VM-exit
1700  * information fields read-only?
1701  */
1702 static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
1703 {
1704         return to_vmx(vcpu)->nested.msrs.misc_low &
1705                 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
1706 }
1707
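/*
 * The nested_cpu_has*() helpers test the execution controls that L1 set up
 * in its vmcs12, i.e. the features L1 asked for on behalf of L2, not the
 * features supported by the physical CPU.
 */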
1708 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1709 {
1710         return vmcs12->cpu_based_vm_exec_control & bit;
1711 }
1712
1713 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1714 {
1715         return (vmcs12->cpu_based_vm_exec_control &
1716                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1717                 (vmcs12->secondary_vm_exec_control & bit);
1718 }
1719
1720 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1721 {
1722         return vmcs12->pin_based_vm_exec_control &
1723                 PIN_BASED_VMX_PREEMPTION_TIMER;
1724 }
1725
1726 static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
1727 {
1728         return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
1729 }
1730
1731 static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1732 {
1733         return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1734 }
1735
1736 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1737 {
1738         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1739 }
1740
1741 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1742 {
1743         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
1744 }
1745
1746 static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
1747 {
1748         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
1749 }
1750
1751 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
1752 {
1753         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
1754 }
1755
1756 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
1757 {
1758         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
1759 }
1760
1761 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
1762 {
1763         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
1764 }
1765
1766 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
1767 {
1768         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
1769 }
1770
1771 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
1772 {
1773         return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
1774 }
1775
1776 static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
1777 {
1778         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
1779 }
1780
1781 static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
1782 {
1783         return nested_cpu_has_vmfunc(vmcs12) &&
1784                 (vmcs12->vm_function_control &
1785                  VMX_VMFUNC_EPTP_SWITCHING);
1786 }
1787
1788 static inline bool is_nmi(u32 intr_info)
1789 {
1790         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1791                 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
1792 }
1793
1794 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1795                               u32 exit_intr_info,
1796                               unsigned long exit_qualification);
1797 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1798                         struct vmcs12 *vmcs12,
1799                         u32 reason, unsigned long qualification);
1800
1801 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1802 {
1803         int i;
1804
1805         for (i = 0; i < vmx->nmsrs; ++i)
1806                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
1807                         return i;
1808         return -1;
1809 }
1810
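/*
 * INVVPID takes a 128-bit descriptor in memory: the VPID in the low 16 bits,
 * 48 reserved bits that must be zero, and the linear address in the upper
 * 64 bits.  The "ja 1f ; ud2" idiom turns a failed invalidation (CF or ZF
 * set) into a crash instead of silently continuing with stale TLB entries.
 */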
1811 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1812 {
1813         struct {
1814                 u64 vpid : 16;
1815                 u64 rsvd : 48;
1816                 u64 gva;
1817         } operand = { vpid, 0, gva };
1818
1819         asm volatile (__ex(ASM_VMX_INVVPID)
1820                       /* CF==1 or ZF==1 --> rc = -1 */
1821                       "; ja 1f ; ud2 ; 1:"
1822                       : : "a"(&operand), "c"(ext) : "cc", "memory");
1823 }
1824
1825 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1826 {
1827         struct {
1828                 u64 eptp, gpa;
1829         } operand = {eptp, gpa};
1830
1831         asm volatile (__ex(ASM_VMX_INVEPT)
1832                         /* CF==1 or ZF==1 --> rc = -1 */
1833                         "; ja 1f ; ud2 ; 1:\n"
1834                         : : "a" (&operand), "c" (ext) : "cc", "memory");
1835 }
1836
1837 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1838 {
1839         int i;
1840
1841         i = __find_msr_index(vmx, msr);
1842         if (i >= 0)
1843                 return &vmx->guest_msrs[i];
1844         return NULL;
1845 }
1846
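/*
 * VMCLEAR and VMPTRLD both operate on the physical address of the VMCS,
 * passed through a memory operand; "setna" captures CF or ZF being set so
 * a failure can at least be logged.
 */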
1847 static void vmcs_clear(struct vmcs *vmcs)
1848 {
1849         u64 phys_addr = __pa(vmcs);
1850         u8 error;
1851
1852         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
1853                       : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1854                       : "cc", "memory");
1855         if (error)
1856                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1857                        vmcs, phys_addr);
1858 }
1859
1860 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1861 {
1862         vmcs_clear(loaded_vmcs->vmcs);
1863         if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
1864                 vmcs_clear(loaded_vmcs->shadow_vmcs);
1865         loaded_vmcs->cpu = -1;
1866         loaded_vmcs->launched = 0;
1867 }
1868
1869 static void vmcs_load(struct vmcs *vmcs)
1870 {
1871         u64 phys_addr = __pa(vmcs);
1872         u8 error;
1873
1874         if (static_branch_unlikely(&enable_evmcs))
1875                 return evmcs_load(phys_addr);
1876
1877         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1878                         : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1879                         : "cc", "memory");
1880         if (error)
1881                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1882                        vmcs, phys_addr);
1883 }
1884
1885 #ifdef CONFIG_KEXEC_CORE
1886 /*
1887  * This bitmap indicates, per cpu, whether the vmclear
1888  * operation is enabled.  It is disabled on all cpus
1889  * by default.
1890  */
1891 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1892
1893 static inline void crash_enable_local_vmclear(int cpu)
1894 {
1895         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1896 }
1897
1898 static inline void crash_disable_local_vmclear(int cpu)
1899 {
1900         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1901 }
1902
1903 static inline int crash_local_vmclear_enabled(int cpu)
1904 {
1905         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1906 }
1907
1908 static void crash_vmclear_local_loaded_vmcss(void)
1909 {
1910         int cpu = raw_smp_processor_id();
1911         struct loaded_vmcs *v;
1912
1913         if (!crash_local_vmclear_enabled(cpu))
1914                 return;
1915
1916         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1917                             loaded_vmcss_on_cpu_link)
1918                 vmcs_clear(v->vmcs);
1919 }
1920 #else
1921 static inline void crash_enable_local_vmclear(int cpu) { }
1922 static inline void crash_disable_local_vmclear(int cpu) { }
1923 #endif /* CONFIG_KEXEC_CORE */
1924
1925 static void __loaded_vmcs_clear(void *arg)
1926 {
1927         struct loaded_vmcs *loaded_vmcs = arg;
1928         int cpu = raw_smp_processor_id();
1929
1930         if (loaded_vmcs->cpu != cpu)
1931                 return; /* vcpu migration can race with cpu offline */
1932         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1933                 per_cpu(current_vmcs, cpu) = NULL;
1934         crash_disable_local_vmclear(cpu);
1935         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1936
1937         /*
1938          * Ensure the update of loaded_vmcs->loaded_vmcss_on_cpu_link
1939          * happens before loaded_vmcs->cpu is set to -1 in
1940          * loaded_vmcs_init.  Otherwise, another cpu could see cpu == -1
1941          * first and add the vmcs to its percpu list before it is deleted.
1942          */
1943         smp_wmb();
1944
1945         loaded_vmcs_init(loaded_vmcs);
1946         crash_enable_local_vmclear(cpu);
1947 }
1948
1949 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1950 {
1951         int cpu = loaded_vmcs->cpu;
1952
1953         if (cpu != -1)
1954                 smp_call_function_single(cpu,
1955                          __loaded_vmcs_clear, loaded_vmcs, 1);
1956 }
1957
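/*
 * VPID 0 is used by the host (VMX root operation) and is never flushed
 * here; when single-context INVVPID is unavailable, vpid_sync_context()
 * falls back to a global flush of all VPID-tagged mappings.
 */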
1958 static inline void vpid_sync_vcpu_single(int vpid)
1959 {
1960         if (vpid == 0)
1961                 return;
1962
1963         if (cpu_has_vmx_invvpid_single())
1964                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
1965 }
1966
1967 static inline void vpid_sync_vcpu_global(void)
1968 {
1969         if (cpu_has_vmx_invvpid_global())
1970                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
1971 }
1972
1973 static inline void vpid_sync_context(int vpid)
1974 {
1975         if (cpu_has_vmx_invvpid_single())
1976                 vpid_sync_vcpu_single(vpid);
1977         else
1978                 vpid_sync_vcpu_global();
1979 }
1980
1981 static inline void ept_sync_global(void)
1982 {
1983         __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
1984 }
1985
1986 static inline void ept_sync_context(u64 eptp)
1987 {
1988         if (cpu_has_vmx_invept_context())
1989                 __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
1990         else
1991                 ept_sync_global();
1992 }
1993
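/*
 * VMCS field encodings carry their width in bits 14:13 (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width) and use bit 0 to select the
 * high half of a 64-bit field.  The compile-time checks below use those
 * bits to reject accessors of the wrong size for a constant field.
 */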
1994 static __always_inline void vmcs_check16(unsigned long field)
1995 {
1996         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1997                          "16-bit accessor invalid for 64-bit field");
1998         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1999                          "16-bit accessor invalid for 64-bit high field");
2000         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2001                          "16-bit accessor invalid for 32-bit high field");
2002         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2003                          "16-bit accessor invalid for natural width field");
2004 }
2005
2006 static __always_inline void vmcs_check32(unsigned long field)
2007 {
2008         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2009                          "32-bit accessor invalid for 16-bit field");
2010         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2011                          "32-bit accessor invalid for natural width field");
2012 }
2013
2014 static __always_inline void vmcs_check64(unsigned long field)
2015 {
2016         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2017                          "64-bit accessor invalid for 16-bit field");
2018         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2019                          "64-bit accessor invalid for 64-bit high field");
2020         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2021                          "64-bit accessor invalid for 32-bit field");
2022         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2023                          "64-bit accessor invalid for natural width field");
2024 }
2025
2026 static __always_inline void vmcs_checkl(unsigned long field)
2027 {
2028         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2029                          "Natural width accessor invalid for 16-bit field");
2030         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
2031                          "Natural width accessor invalid for 64-bit field");
2032         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2033                          "Natural width accessor invalid for 64-bit high field");
2034         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2035                          "Natural width accessor invalid for 32-bit field");
2036 }
2037
2038 static __always_inline unsigned long __vmcs_readl(unsigned long field)
2039 {
2040         unsigned long value;
2041
2042         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
2043                       : "=a"(value) : "d"(field) : "cc");
2044         return value;
2045 }
2046
2047 static __always_inline u16 vmcs_read16(unsigned long field)
2048 {
2049         vmcs_check16(field);
2050         if (static_branch_unlikely(&enable_evmcs))
2051                 return evmcs_read16(field);
2052         return __vmcs_readl(field);
2053 }
2054
2055 static __always_inline u32 vmcs_read32(unsigned long field)
2056 {
2057         vmcs_check32(field);
2058         if (static_branch_unlikely(&enable_evmcs))
2059                 return evmcs_read32(field);
2060         return __vmcs_readl(field);
2061 }
2062
2063 static __always_inline u64 vmcs_read64(unsigned long field)
2064 {
2065         vmcs_check64(field);
2066         if (static_branch_unlikely(&enable_evmcs))
2067                 return evmcs_read64(field);
2068 #ifdef CONFIG_X86_64
2069         return __vmcs_readl(field);
2070 #else
2071         return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
2072 #endif
2073 }
2074
2075 static __always_inline unsigned long vmcs_readl(unsigned long field)
2076 {
2077         vmcs_checkl(field);
2078         if (static_branch_unlikely(&enable_evmcs))
2079                 return evmcs_read64(field);
2080         return __vmcs_readl(field);
2081 }
2082
2083 static noinline void vmwrite_error(unsigned long field, unsigned long value)
2084 {
2085         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
2086                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
2087         dump_stack();
2088 }
2089
2090 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
2091 {
2092         u8 error;
2093
2094         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
2095                        : "=q"(error) : "a"(value), "d"(field) : "cc");
2096         if (unlikely(error))
2097                 vmwrite_error(field, value);
2098 }
2099
2100 static __always_inline void vmcs_write16(unsigned long field, u16 value)
2101 {
2102         vmcs_check16(field);
2103         if (static_branch_unlikely(&enable_evmcs))
2104                 return evmcs_write16(field, value);
2105
2106         __vmcs_writel(field, value);
2107 }
2108
2109 static __always_inline void vmcs_write32(unsigned long field, u32 value)
2110 {
2111         vmcs_check32(field);
2112         if (static_branch_unlikely(&enable_evmcs))
2113                 return evmcs_write32(field, value);
2114
2115         __vmcs_writel(field, value);
2116 }
2117
2118 static __always_inline void vmcs_write64(unsigned long field, u64 value)
2119 {
2120         vmcs_check64(field);
2121         if (static_branch_unlikely(&enable_evmcs))
2122                 return evmcs_write64(field, value);
2123
2124         __vmcs_writel(field, value);
2125 #ifndef CONFIG_X86_64
2126         asm volatile ("");
2127         __vmcs_writel(field+1, value >> 32);
2128 #endif
2129 }
2130
2131 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
2132 {
2133         vmcs_checkl(field);
2134         if (static_branch_unlikely(&enable_evmcs))
2135                 return evmcs_write64(field, value);
2136
2137         __vmcs_writel(field, value);
2138 }
2139
2140 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
2141 {
2142         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
2143                          "vmcs_clear_bits does not support 64-bit fields");
2144         if (static_branch_unlikely(&enable_evmcs))
2145                 return evmcs_write32(field, evmcs_read32(field) & ~mask);
2146
2147         __vmcs_writel(field, __vmcs_readl(field) & ~mask);
2148 }
2149
2150 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
2151 {
2152         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
2153                          "vmcs_set_bits does not support 64-bit fields");
2154         if (static_branch_unlikely(&enable_evmcs))
2155                 return evmcs_write32(field, evmcs_read32(field) | mask);
2156
2157         __vmcs_writel(field, __vmcs_readl(field) | mask);
2158 }
2159
2160 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
2161 {
2162         vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
2163 }
2164
2165 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
2166 {
2167         vmcs_write32(VM_ENTRY_CONTROLS, val);
2168         vmx->vm_entry_controls_shadow = val;
2169 }
2170
2171 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
2172 {
2173         if (vmx->vm_entry_controls_shadow != val)
2174                 vm_entry_controls_init(vmx, val);
2175 }
2176
2177 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
2178 {
2179         return vmx->vm_entry_controls_shadow;
2180 }
2181
2182
2183 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2184 {
2185         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
2186 }
2187
2188 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2189 {
2190         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
2191 }
2192
2193 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
2194 {
2195         vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
2196 }
2197
2198 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
2199 {
2200         vmcs_write32(VM_EXIT_CONTROLS, val);
2201         vmx->vm_exit_controls_shadow = val;
2202 }
2203
2204 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
2205 {
2206         if (vmx->vm_exit_controls_shadow != val)
2207                 vm_exit_controls_init(vmx, val);
2208 }
2209
2210 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
2211 {
2212         return vmx->vm_exit_controls_shadow;
2213 }
2214
2215
2216 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2217 {
2218         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
2219 }
2220
2221 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2222 {
2223         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
2224 }
2225
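/*
 * The segment cache avoids redundant VMREADs of guest segment state.  Each
 * (segment, field) pair has one bit in segment_cache.bitmask, and the whole
 * cache is re-validated whenever VCPU_EXREG_SEGMENTS is found clear in
 * regs_avail.
 */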
2226 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
2227 {
2228         vmx->segment_cache.bitmask = 0;
2229 }
2230
2231 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
2232                                        unsigned field)
2233 {
2234         bool ret;
2235         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
2236
2237         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
2238                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
2239                 vmx->segment_cache.bitmask = 0;
2240         }
2241         ret = vmx->segment_cache.bitmask & mask;
2242         vmx->segment_cache.bitmask |= mask;
2243         return ret;
2244 }
2245
2246 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
2247 {
2248         u16 *p = &vmx->segment_cache.seg[seg].selector;
2249
2250         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
2251                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
2252         return *p;
2253 }
2254
2255 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
2256 {
2257         ulong *p = &vmx->segment_cache.seg[seg].base;
2258
2259         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
2260                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
2261         return *p;
2262 }
2263
2264 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
2265 {
2266         u32 *p = &vmx->segment_cache.seg[seg].limit;
2267
2268         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
2269                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
2270         return *p;
2271 }
2272
2273 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
2274 {
2275         u32 *p = &vmx->segment_cache.seg[seg].ar;
2276
2277         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
2278                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
2279         return *p;
2280 }
2281
2282 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
2283 {
2284         u32 eb;
2285
2286         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
2287              (1u << DB_VECTOR) | (1u << AC_VECTOR);
2288         /*
2289          * Guest access to VMware backdoor ports could legitimately
2290          * trigger #GP because of the TSS I/O permission bitmap.
2291          * We intercept those #GPs and allow access to the ports
2292          * anyway, as VMware does.
2293          */
2294         if (enable_vmware_backdoor)
2295                 eb |= (1u << GP_VECTOR);
2296         if ((vcpu->guest_debug &
2297              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
2298             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
2299                 eb |= 1u << BP_VECTOR;
2300         if (to_vmx(vcpu)->rmode.vm86_active)
2301                 eb = ~0;
2302         if (enable_ept)
2303                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
2304
2305         /* When we are running a nested L2 guest and L1 specified for it a
2306          * certain exception bitmap, we must trap the same exceptions and pass
2307          * them to L1. When running L2, we will only handle the exceptions
2308          * specified above if L1 did not want them.
2309          */
2310         if (is_guest_mode(vcpu))
2311                 eb |= get_vmcs12(vcpu)->exception_bitmap;
2312
2313         vmcs_write32(EXCEPTION_BITMAP, eb);
2314 }
2315
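/*
 * The MSR bitmap is a single 4K page: the write bitmap for MSRs 0x0-0x1fff
 * lives at offset 0x800 and the write bitmap for MSRs 0xc0000000-0xc0001fff
 * at offset 0xc00, one bit per MSR, which is what the lookups below index
 * into.
 */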
2316 /*
2317  * Check if a write to the MSR is intercepted in the currently loaded MSR bitmap.
2318  */
2319 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
2320 {
2321         unsigned long *msr_bitmap;
2322         int f = sizeof(unsigned long);
2323
2324         if (!cpu_has_vmx_msr_bitmap())
2325                 return true;
2326
2327         msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
2328
2329         if (msr <= 0x1fff) {
2330                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2331         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2332                 msr &= 0x1fff;
2333                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2334         }
2335
2336         return true;
2337 }
2338
2339 /*
2340  * Check if a write to the MSR is intercepted in the L01 MSR bitmap.
2341  */
2342 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
2343 {
2344         unsigned long *msr_bitmap;
2345         int f = sizeof(unsigned long);
2346
2347         if (!cpu_has_vmx_msr_bitmap())
2348                 return true;
2349
2350         msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
2351
2352         if (msr <= 0x1fff) {
2353                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2354         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2355                 msr &= 0x1fff;
2356                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2357         }
2358
2359         return true;
2360 }
2361
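/*
 * MSRs switched atomically on VM entry/exit either use a dedicated pair of
 * VM-entry/VM-exit controls plus guest/host VMCS fields (the "special" case,
 * e.g. EFER and PERF_GLOBAL_CTRL) or a slot in the msr_autoload guest/host
 * arrays, whose length is written to the VM_ENTRY/VM_EXIT MSR load count
 * fields.
 */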
2362 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2363                 unsigned long entry, unsigned long exit)
2364 {
2365         vm_entry_controls_clearbit(vmx, entry);
2366         vm_exit_controls_clearbit(vmx, exit);
2367 }
2368
2369 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
2370 {
2371         unsigned i;
2372         struct msr_autoload *m = &vmx->msr_autoload;
2373
2374         switch (msr) {
2375         case MSR_EFER:
2376                 if (cpu_has_load_ia32_efer) {
2377                         clear_atomic_switch_msr_special(vmx,
2378                                         VM_ENTRY_LOAD_IA32_EFER,
2379                                         VM_EXIT_LOAD_IA32_EFER);
2380                         return;
2381                 }
2382                 break;
2383         case MSR_CORE_PERF_GLOBAL_CTRL:
2384                 if (cpu_has_load_perf_global_ctrl) {
2385                         clear_atomic_switch_msr_special(vmx,
2386                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2387                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2388                         return;
2389                 }
2390                 break;
2391         }
2392
2393         for (i = 0; i < m->nr; ++i)
2394                 if (m->guest[i].index == msr)
2395                         break;
2396
2397         if (i == m->nr)
2398                 return;
2399         --m->nr;
2400         m->guest[i] = m->guest[m->nr];
2401         m->host[i] = m->host[m->nr];
2402         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
2403         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
2404 }
2405
2406 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2407                 unsigned long entry, unsigned long exit,
2408                 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
2409                 u64 guest_val, u64 host_val)
2410 {
2411         vmcs_write64(guest_val_vmcs, guest_val);
2412         vmcs_write64(host_val_vmcs, host_val);
2413         vm_entry_controls_setbit(vmx, entry);
2414         vm_exit_controls_setbit(vmx, exit);
2415 }
2416
2417 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
2418                                   u64 guest_val, u64 host_val)
2419 {
2420         unsigned i;
2421         struct msr_autoload *m = &vmx->msr_autoload;
2422
2423         switch (msr) {
2424         case MSR_EFER:
2425                 if (cpu_has_load_ia32_efer) {
2426                         add_atomic_switch_msr_special(vmx,
2427                                         VM_ENTRY_LOAD_IA32_EFER,
2428                                         VM_EXIT_LOAD_IA32_EFER,
2429                                         GUEST_IA32_EFER,
2430                                         HOST_IA32_EFER,
2431                                         guest_val, host_val);
2432                         return;
2433                 }
2434                 break;
2435         case MSR_CORE_PERF_GLOBAL_CTRL:
2436                 if (cpu_has_load_perf_global_ctrl) {
2437                         add_atomic_switch_msr_special(vmx,
2438                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2439                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2440                                         GUEST_IA32_PERF_GLOBAL_CTRL,
2441                                         HOST_IA32_PERF_GLOBAL_CTRL,
2442                                         guest_val, host_val);
2443                         return;
2444                 }
2445                 break;
2446         case MSR_IA32_PEBS_ENABLE:
2447                 /* PEBS needs a quiescent period after being disabled (to write
2448                  * a record).  Disabling PEBS through VMX MSR swapping doesn't
2449                  * provide that period, so a CPU could write host's record into
2450                  * guest's memory.
2451                  */
2452                 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
2453         }
2454
2455         for (i = 0; i < m->nr; ++i)
2456                 if (m->guest[i].index == msr)
2457                         break;
2458
2459         if (i == NR_AUTOLOAD_MSRS) {
2460                 printk_once(KERN_WARNING "Not enough msr switch entries. "
2461                                 "Can't add msr %x\n", msr);
2462                 return;
2463         } else if (i == m->nr) {
2464                 ++m->nr;
2465                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
2466                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
2467         }
2468
2469         m->guest[i].index = msr;
2470         m->guest[i].value = guest_val;
2471         m->host[i].index = msr;
2472         m->host[i].value = host_val;
2473 }
2474
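/*
 * Returns true if EFER needs to be switched via the shared MSR mechanism
 * (vmx->guest_msrs), false if the atomic VM-entry/VM-exit MSR switching set
 * up above already takes care of it.
 */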
2475 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
2476 {
2477         u64 guest_efer = vmx->vcpu.arch.efer;
2478         u64 ignore_bits = 0;
2479
2480         if (!enable_ept) {
2481                 /*
2482                  * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
2483                  * host CPUID is more efficient than testing guest CPUID
2484                  * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
2485                  */
2486                 if (boot_cpu_has(X86_FEATURE_SMEP))
2487                         guest_efer |= EFER_NX;
2488                 else if (!(guest_efer & EFER_NX))
2489                         ignore_bits |= EFER_NX;
2490         }
2491
2492         /*
2493          * LMA and LME handled by hardware; SCE meaningless outside long mode.
2494          */
2495         ignore_bits |= EFER_SCE;
2496 #ifdef CONFIG_X86_64
2497         ignore_bits |= EFER_LMA | EFER_LME;
2498         /* SCE is meaningful only in long mode on Intel */
2499         if (guest_efer & EFER_LMA)
2500                 ignore_bits &= ~(u64)EFER_SCE;
2501 #endif
2502
2503         clear_atomic_switch_msr(vmx, MSR_EFER);
2504
2505         /*
2506          * On EPT, we can't emulate NX, so we must switch EFER atomically.
2507          * On CPUs that support "load IA32_EFER", always switch EFER
2508          * atomically, since it's faster than switching it manually.
2509          */
2510         if (cpu_has_load_ia32_efer ||
2511             (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
2512                 if (!(guest_efer & EFER_LMA))
2513                         guest_efer &= ~EFER_LME;
2514                 if (guest_efer != host_efer)
2515                         add_atomic_switch_msr(vmx, MSR_EFER,
2516                                               guest_efer, host_efer);
2517                 return false;
2518         } else {
2519                 guest_efer &= ~ignore_bits;
2520                 guest_efer |= host_efer & ignore_bits;
2521
2522                 vmx->guest_msrs[efer_offset].data = guest_efer;
2523                 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
2524
2525                 return true;
2526         }
2527 }
2528
2529 #ifdef CONFIG_X86_32
2530 /*
2531  * On 32-bit kernels, VM exits still load the FS and GS bases from the
2532  * VMCS rather than the segment table.  KVM uses this helper to figure
2533  * out the current bases to poke them into the VMCS before entry.
2534  */
2535 static unsigned long segment_base(u16 selector)
2536 {
2537         struct desc_struct *table;
2538         unsigned long v;
2539
2540         if (!(selector & ~SEGMENT_RPL_MASK))
2541                 return 0;
2542
2543         table = get_current_gdt_ro();
2544
2545         if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2546                 u16 ldt_selector = kvm_read_ldt();
2547
2548                 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
2549                         return 0;
2550
2551                 table = (struct desc_struct *)segment_base(ldt_selector);
2552         }
2553         v = get_desc_base(&table[selector >> 3]);
2554         return v;
2555 }
2556 #endif
2557
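/*
 * Save the host state that running the guest clobbers or that the guest is
 * allowed to change: segment selectors, FS/GS bases, the kernel GS base MSR,
 * BNDCFGS and the shared guest MSRs.  __vmx_load_host_state() undoes this
 * when the vCPU is put.
 */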
2558 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
2559 {
2560         struct vcpu_vmx *vmx = to_vmx(vcpu);
2561 #ifdef CONFIG_X86_64
2562         int cpu = raw_smp_processor_id();
2563 #endif
2564         int i;
2565
2566         if (vmx->host_state.loaded)
2567                 return;
2568
2569         vmx->host_state.loaded = 1;
2570         /*
2571          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
2572          * allow segment selectors with cpl > 0 or ti == 1.
2573          */
2574         vmx->host_state.ldt_sel = kvm_read_ldt();
2575         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
2576
2577 #ifdef CONFIG_X86_64
2578         save_fsgs_for_kvm();
2579         vmx->host_state.fs_sel = current->thread.fsindex;
2580         vmx->host_state.gs_sel = current->thread.gsindex;
2581 #else
2582         savesegment(fs, vmx->host_state.fs_sel);
2583         savesegment(gs, vmx->host_state.gs_sel);
2584 #endif
2585         if (!(vmx->host_state.fs_sel & 7)) {
2586                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
2587                 vmx->host_state.fs_reload_needed = 0;
2588         } else {
2589                 vmcs_write16(HOST_FS_SELECTOR, 0);
2590                 vmx->host_state.fs_reload_needed = 1;
2591         }
2592         if (!(vmx->host_state.gs_sel & 7))
2593                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
2594         else {
2595                 vmcs_write16(HOST_GS_SELECTOR, 0);
2596                 vmx->host_state.gs_ldt_reload_needed = 1;
2597         }
2598
2599 #ifdef CONFIG_X86_64
2600         savesegment(ds, vmx->host_state.ds_sel);
2601         savesegment(es, vmx->host_state.es_sel);
2602
2603         vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
2604         vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));
2605
2606         vmx->msr_host_kernel_gs_base = current->thread.gsbase;
2607         if (is_long_mode(&vmx->vcpu))
2608                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2609 #else
2610         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
2611         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
2612 #endif
2613         if (boot_cpu_has(X86_FEATURE_MPX))
2614                 rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2615         for (i = 0; i < vmx->save_nmsrs; ++i)
2616                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2617                                    vmx->guest_msrs[i].data,
2618                                    vmx->guest_msrs[i].mask);
2619 }
2620
2621 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
2622 {
2623         if (!vmx->host_state.loaded)
2624                 return;
2625
2626         ++vmx->vcpu.stat.host_state_reload;
2627         vmx->host_state.loaded = 0;
2628 #ifdef CONFIG_X86_64
2629         if (is_long_mode(&vmx->vcpu))
2630                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2631 #endif
2632         if (vmx->host_state.gs_ldt_reload_needed) {
2633                 kvm_load_ldt(vmx->host_state.ldt_sel);
2634 #ifdef CONFIG_X86_64
2635                 load_gs_index(vmx->host_state.gs_sel);
2636 #else
2637                 loadsegment(gs, vmx->host_state.gs_sel);
2638 #endif
2639         }
2640         if (vmx->host_state.fs_reload_needed)
2641                 loadsegment(fs, vmx->host_state.fs_sel);
2642 #ifdef CONFIG_X86_64
2643         if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
2644                 loadsegment(ds, vmx->host_state.ds_sel);
2645                 loadsegment(es, vmx->host_state.es_sel);
2646         }
2647 #endif
2648         invalidate_tss_limit();
2649 #ifdef CONFIG_X86_64
2650         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2651 #endif
2652         if (vmx->host_state.msr_host_bndcfgs)
2653                 wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2654         load_fixmap_gdt(raw_smp_processor_id());
2655 }
2656
2657 static void vmx_load_host_state(struct vcpu_vmx *vmx)
2658 {
2659         preempt_disable();
2660         __vmx_load_host_state(vmx);
2661         preempt_enable();
2662 }
2663
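/*
 * Posted-interrupt descriptor fields used below: 'ndst' is the notification
 * destination (the APIC ID of the target CPU), 'sn' suppresses notifications
 * while set, and 'nv' is the notification vector.
 */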
2664 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2665 {
2666         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2667         struct pi_desc old, new;
2668         unsigned int dest;
2669
2670         /*
2671          * In case of hot-plug or hot-unplug, we may have to undo
2672          * vmx_vcpu_pi_put even if there is no assigned device.  And we
2673          * always keep PI.NDST up to date for simplicity: it makes the
2674          * code easier, and CPU migration is not a fast path.
2675          */
2676         if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
2677                 return;
2678
2679         /*
2680          * First handle the simple case where no cmpxchg is necessary; just
2681          * allow posting non-urgent interrupts.
2682          *
2683          * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
2684          * PI.NDST: pi_post_block will do it for us and the wakeup_handler
2685          * expects the VCPU to be on the blocked_vcpu_list that matches
2686          * PI.NDST.
2687          */
2688         if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
2689             vcpu->cpu == cpu) {
2690                 pi_clear_sn(pi_desc);
2691                 return;
2692         }
2693
2694         /* The full case.  */
2695         do {
2696                 old.control = new.control = pi_desc->control;
2697
2698                 dest = cpu_physical_id(cpu);
2699
2700                 if (x2apic_enabled())
2701                         new.ndst = dest;
2702                 else
2703                         new.ndst = (dest << 8) & 0xFF00;
2704
2705                 new.sn = 0;
2706         } while (cmpxchg64(&pi_desc->control, old.control,
2707                            new.control) != old.control);
2708 }
2709
2710 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
2711 {
2712         vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
2713         vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2714 }
2715
2716 /*
2717  * Switches to the specified vcpu, until a matching vcpu_put(), but assumes
2718  * the vcpu mutex is already taken.
2719  */
2720 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2721 {
2722         struct vcpu_vmx *vmx = to_vmx(vcpu);
2723         bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
2724
2725         if (!already_loaded) {
2726                 loaded_vmcs_clear(vmx->loaded_vmcs);
2727                 local_irq_disable();
2728                 crash_disable_local_vmclear(cpu);
2729
2730                 /*
2731                  * Read loaded_vmcs->cpu should be before fetching
2732                  * loaded_vmcs->loaded_vmcss_on_cpu_link.
2733                  * See the comments in __loaded_vmcs_clear().
2734                  */
2735                 smp_rmb();
2736
2737                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
2738                          &per_cpu(loaded_vmcss_on_cpu, cpu));
2739                 crash_enable_local_vmclear(cpu);
2740                 local_irq_enable();
2741         }
2742
2743         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
2744                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
2745                 vmcs_load(vmx->loaded_vmcs->vmcs);
2746                 indirect_branch_prediction_barrier();
2747         }
2748
2749         if (!already_loaded) {
2750                 void *gdt = get_current_gdt_ro();
2751                 unsigned long sysenter_esp;
2752
2753                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2754
2755                 /*
2756                  * Linux uses per-cpu TSS and GDT, so set these when switching
2757                  * processors.  See 22.2.4.
2758                  */
2759                 vmcs_writel(HOST_TR_BASE,
2760                             (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
2761                 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
2762
2763                 /*
2764                  * A VM exit changes the host TR limit to 0x67.  This is
2765                  * okay, since 0x67 covers everything except the IO bitmap,
2766                  * and we have code to handle the IO bitmap being lost
2767                  * after a VM exit.
2768                  */
2769                 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
2770
2771                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
2772                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
2773
2774                 vmx->loaded_vmcs->cpu = cpu;
2775         }
2776
2777         /* Setup TSC multiplier */
2778         if (kvm_has_tsc_control &&
2779             vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
2780                 decache_tsc_multiplier(vmx);
2781
2782         vmx_vcpu_pi_load(vcpu, cpu);
2783         vmx->host_pkru = read_pkru();
2784         vmx->host_debugctlmsr = get_debugctlmsr();
2785 }
2786
2787 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
2788 {
2789         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2790
2791         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2792                 !irq_remapping_cap(IRQ_POSTING_CAP)  ||
2793                 !kvm_vcpu_apicv_active(vcpu))
2794                 return;
2795
2796         /* Set SN when the vCPU is preempted */
2797         if (vcpu->preempted)
2798                 pi_set_sn(pi_desc);
2799 }
2800
2801 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
2802 {
2803         vmx_vcpu_pi_put(vcpu);
2804
2805         __vmx_load_host_state(to_vmx(vcpu));
2806 }
2807
2808 static bool emulation_required(struct kvm_vcpu *vcpu)
2809 {
2810         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
2811 }
2812
2813 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2814
2815 /*
2816  * Return the cr0 value that a nested guest would read. This is a combination
2817  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
2818  * its hypervisor (cr0_read_shadow).
2819  */
2820 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
2821 {
2822         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
2823                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
2824 }
2825 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
2826 {
2827         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
2828                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
2829 }
2830
2831 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
2832 {
2833         unsigned long rflags, save_rflags;
2834
2835         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
2836                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2837                 rflags = vmcs_readl(GUEST_RFLAGS);
2838                 if (to_vmx(vcpu)->rmode.vm86_active) {
2839                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2840                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
2841                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2842                 }
2843                 to_vmx(vcpu)->rflags = rflags;
2844         }
2845         return to_vmx(vcpu)->rflags;
2846 }
2847
2848 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2849 {
2850         unsigned long old_rflags = vmx_get_rflags(vcpu);
2851
2852         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2853         to_vmx(vcpu)->rflags = rflags;
2854         if (to_vmx(vcpu)->rmode.vm86_active) {
2855                 to_vmx(vcpu)->rmode.save_rflags = rflags;
2856                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2857         }
2858         vmcs_writel(GUEST_RFLAGS, rflags);
2859
2860         if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
2861                 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
2862 }
2863
2864 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
2865 {
2866         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2867         int ret = 0;
2868
2869         if (interruptibility & GUEST_INTR_STATE_STI)
2870                 ret |= KVM_X86_SHADOW_INT_STI;
2871         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
2872                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
2873
2874         return ret;
2875 }
2876
2877 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
2878 {
2879         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2880         u32 interruptibility = interruptibility_old;
2881
2882         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
2883
2884         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
2885                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
2886         else if (mask & KVM_X86_SHADOW_INT_STI)
2887                 interruptibility |= GUEST_INTR_STATE_STI;
2888
2889         if (interruptibility != interruptibility_old)
2890                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
2891 }
2892
2893 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
2894 {
2895         unsigned long rip;
2896
2897         rip = kvm_rip_read(vcpu);
2898         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
2899         kvm_rip_write(vcpu, rip);
2900
2901         /* skipping an emulated instruction also counts */
2902         vmx_set_interrupt_shadow(vcpu, 0);
2903 }
2904
2905 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
2906                                                unsigned long exit_qual)
2907 {
2908         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2909         unsigned int nr = vcpu->arch.exception.nr;
2910         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2911
2912         if (vcpu->arch.exception.has_error_code) {
2913                 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
2914                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2915         }
2916
2917         if (kvm_exception_is_soft(nr))
2918                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2919         else
2920                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2921
2922         if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
2923             vmx_get_nmi_mask(vcpu))
2924                 intr_info |= INTR_INFO_UNBLOCK_NMI;
2925
2926         nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
2927 }
2928
2929 /*
2930  * KVM wants to inject into the guest page faults that it itself received.
2931  * In a nested guest, this function checks whether to inject them to L1 or L2.
2932  */
2933 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
2934 {
2935         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2936         unsigned int nr = vcpu->arch.exception.nr;
2937
2938         if (nr == PF_VECTOR) {
2939                 if (vcpu->arch.exception.nested_apf) {
2940                         *exit_qual = vcpu->arch.apf.nested_apf_token;
2941                         return 1;
2942                 }
2943                 /*
2944                  * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
2945                  * The fix is to add the ancillary datum (CR2 or DR6) to structs
2946                  * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
2947                  * can be written only when inject_pending_event runs.  This should be
2948                  * conditional on a new capability---if the capability is disabled,
2949                  * kvm_multiple_exception would write the ancillary information to
2950                  * CR2 or DR6, for backwards ABI-compatibility.
2951                  */
2952                 if (nested_vmx_is_page_fault_vmexit(vmcs12,
2953                                                     vcpu->arch.exception.error_code)) {
2954                         *exit_qual = vcpu->arch.cr2;
2955                         return 1;
2956                 }
2957         } else {
2958                 if (vmcs12->exception_bitmap & (1u << nr)) {
2959                         if (nr == DB_VECTOR)
2960                                 *exit_qual = vcpu->arch.dr6;
2961                         else
2962                                 *exit_qual = 0;
2963                         return 1;
2964                 }
2965         }
2966
2967         return 0;
2968 }
2969
2970 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
2971 {
2972         /*
2973          * Ensure that we clear the HLT state in the VMCS.  We don't need to
2974          * explicitly skip the instruction because if the HLT state is set,
2975          * then the instruction is already executing and RIP has already been
2976          * advanced.
2977          */
2978         if (kvm_hlt_in_guest(vcpu->kvm) &&
2979                         vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
2980                 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
2981 }
2982
2983 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
2984 {
2985         struct vcpu_vmx *vmx = to_vmx(vcpu);
2986         unsigned int nr = vcpu->arch.exception.nr;
2987         bool has_error_code = vcpu->arch.exception.has_error_code;
2988         u32 error_code = vcpu->arch.exception.error_code;
2989         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2990
2991         if (has_error_code) {
2992                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
2993                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2994         }
2995
2996         if (vmx->rmode.vm86_active) {
2997                 int inc_eip = 0;
2998                 if (kvm_exception_is_soft(nr))
2999                         inc_eip = vcpu->arch.event_exit_inst_len;
3000                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
3001                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3002                 return;
3003         }
3004
3005         WARN_ON_ONCE(vmx->emulation_required);
3006
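        /*
         * Software exceptions (e.g. #BP from INT3 or #OF from INTO) need the
         * instruction length programmed so that VM entry can advance RIP past
         * the instruction when the event is injected.
         */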
3007         if (kvm_exception_is_soft(nr)) {
3008                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
3009                              vmx->vcpu.arch.event_exit_inst_len);
3010                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3011         } else
3012                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3013
3014         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
3015
3016         vmx_clear_hlt(vcpu);
3017 }
3018
3019 static bool vmx_rdtscp_supported(void)
3020 {
3021         return cpu_has_vmx_rdtscp();
3022 }
3023
3024 static bool vmx_invpcid_supported(void)
3025 {
3026         return cpu_has_vmx_invpcid() && enable_ept;
3027 }
3028
3029 /*
3030  * Swap MSR entry in host/guest MSR entry array.
3031  */
3032 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
3033 {
3034         struct shared_msr_entry tmp;
3035
3036         tmp = vmx->guest_msrs[to];
3037         vmx->guest_msrs[to] = vmx->guest_msrs[from];
3038         vmx->guest_msrs[from] = tmp;
3039 }
3040
3041 /*
3042  * Set up the vmcs to automatically save and restore system
3043  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
3044  * mode, as fiddling with msrs is very expensive.
3045  */
3046 static void setup_msrs(struct vcpu_vmx *vmx)
3047 {
3048         int save_nmsrs, index;
3049
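        /*
         * Entries guest_msrs[0..save_nmsrs-1] are the MSRs that the shared-MSR
         * machinery actually switches between host and guest values;
         * move_msr_up() pulls the MSRs relevant for the current guest mode to
         * the front of the array.
         */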
3050         save_nmsrs = 0;
3051 #ifdef CONFIG_X86_64
3052         if (is_long_mode(&vmx->vcpu)) {
3053                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
3054                 if (index >= 0)
3055                         move_msr_up(vmx, index, save_nmsrs++);
3056                 index = __find_msr_index(vmx, MSR_LSTAR);
3057                 if (index >= 0)
3058                         move_msr_up(vmx, index, save_nmsrs++);
3059                 index = __find_msr_index(vmx, MSR_CSTAR);
3060                 if (index >= 0)
3061                         move_msr_up(vmx, index, save_nmsrs++);
3062                 index = __find_msr_index(vmx, MSR_TSC_AUX);
3063                 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
3064                         move_msr_up(vmx, index, save_nmsrs++);
3065                 /*
3066                  * MSR_STAR is only needed on long mode guests, and only
3067                  * if efer.sce is enabled.
3068                  */
3069                 index = __find_msr_index(vmx, MSR_STAR);
3070                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
3071                         move_msr_up(vmx, index, save_nmsrs++);
3072         }
3073 #endif
3074         index = __find_msr_index(vmx, MSR_EFER);
3075         if (index >= 0 && update_transition_efer(vmx, index))
3076                 move_msr_up(vmx, index, save_nmsrs++);
3077
3078         vmx->save_nmsrs = save_nmsrs;
3079
3080         if (cpu_has_vmx_msr_bitmap())
3081                 vmx_update_msr_bitmap(&vmx->vcpu);
3082 }
3083
3084 static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
3085 {
3086         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3087
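        /*
         * While L2 is running, vcpu->arch.tsc_offset holds the combined
         * L1+L2 offset, so subtracting vmcs12->tsc_offset recovers the offset
         * that L1 itself programmed.
         */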
3088         if (is_guest_mode(vcpu) &&
3089             (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
3090                 return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
3091
3092         return vcpu->arch.tsc_offset;
3093 }
3094
3095 /*
3096  * Writes 'offset' into the guest's TSC-offset field (TSC_OFFSET) in the VMCS.
3097  */
3098 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
3099 {
3100         if (is_guest_mode(vcpu)) {
3101                 /*
3102                  * We're here if L1 chose not to trap WRMSR to TSC. According
3103                  * to the spec, this should set L1's TSC. The offset that L1
3104                  * set for L2 remains unchanged, and still needs to be added
3105                  * to the newly set TSC to get L2's TSC.
3106                  */
3107                 struct vmcs12 *vmcs12;
3108                 /* recalculate vmcs02.TSC_OFFSET: */
3109                 vmcs12 = get_vmcs12(vcpu);
3110                 vmcs_write64(TSC_OFFSET, offset +
3111                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
3112                          vmcs12->tsc_offset : 0));
3113         } else {
3114                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
3115                                            vmcs_read64(TSC_OFFSET), offset);
3116                 vmcs_write64(TSC_OFFSET, offset);
3117         }
3118 }
3119
3120 /*
3121  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
3122  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
3123  * all guests if the "nested" module option is off, and can also be disabled
3124  * for a single guest by disabling its VMX cpuid bit.
3125  */
3126 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
3127 {
3128         return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
3129 }
3130
3131 /*
3132  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
3133  * returned for the various VMX controls MSRs when nested VMX is enabled.
3134  * The same values should also be used to verify that vmcs12 control fields are
3135  * valid during nested entry from L1 to L2.
3136  * Each of these control msrs has a low and high 32-bit half: A low bit is on
3137  * if the corresponding bit in the (32-bit) control field *must* be on, and a
3138  * bit in the high half is on if the corresponding bit in the control field
3139  * may be on. See also vmx_control_verify().
3140  */
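/*
 * Worked example (made-up numbers, not taken from real hardware): if one of
 * these MSRs were to read 0x0000003f00000016, the low half 0x16 would say
 * that bits 1, 2 and 4 of the corresponding control field must be 1, and the
 * high half 0x3f would say that only bits 0-5 may be 1; vmx_control_verify()
 * checks exactly that via fixed_bits_valid().
 */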
3141 static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
3142 {
3143         if (!nested) {
3144                 memset(msrs, 0, sizeof(*msrs));
3145                 return;
3146         }
3147
3148         /*
3149          * Note that as a general rule, the high half of the MSRs (bits in
3150          * the control fields which may be 1) should be initialized by the
3151          * intersection of the underlying hardware's MSR (i.e., features which
3152          * can be supported) and the list of features we want to expose -
3153          * because they are known to be properly supported in our code.
3154          * Also, usually, the low half of the MSRs (bits which must be 1) can
3155          * be set to 0, meaning that L1 may turn off any of these bits. The
3156          * reason is that if one of these bits is necessary, it will appear
3157          * in vmcs01; prepare_vmcs02(), which bitwise-ORs the control fields
3158          * of vmcs01 and vmcs12, will then keep the bit set in vmcs02 - and
3159          * nested_vmx_exit_reflected() will not pass the related exits to L1.
3160          * These rules have exceptions below.
3161          */
3162
3163         /* pin-based controls */
3164         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
3165                 msrs->pinbased_ctls_low,
3166                 msrs->pinbased_ctls_high);
3167         msrs->pinbased_ctls_low |=
3168                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3169         msrs->pinbased_ctls_high &=
3170                 PIN_BASED_EXT_INTR_MASK |
3171                 PIN_BASED_NMI_EXITING |
3172                 PIN_BASED_VIRTUAL_NMIS |
3173                 (apicv ? PIN_BASED_POSTED_INTR : 0);
3174         msrs->pinbased_ctls_high |=
3175                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
3176                 PIN_BASED_VMX_PREEMPTION_TIMER;
3177
3178         /* exit controls */
3179         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
3180                 msrs->exit_ctls_low,
3181                 msrs->exit_ctls_high);
3182         msrs->exit_ctls_low =
3183                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3184
3185         msrs->exit_ctls_high &=
3186 #ifdef CONFIG_X86_64
3187                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
3188 #endif
3189                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
3190         msrs->exit_ctls_high |=
3191                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
3192                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
3193                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
3194
3195         if (kvm_mpx_supported())
3196                 msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
3197
3198         /* We support free control of debug control saving. */
3199         msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
3200
3201         /* entry controls */
3202         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
3203                 msrs->entry_ctls_low,
3204                 msrs->entry_ctls_high);
3205         msrs->entry_ctls_low =
3206                 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3207         msrs->entry_ctls_high &=
3208 #ifdef CONFIG_X86_64
3209                 VM_ENTRY_IA32E_MODE |
3210 #endif
3211                 VM_ENTRY_LOAD_IA32_PAT;
3212         msrs->entry_ctls_high |=
3213                 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
3214         if (kvm_mpx_supported())
3215                 msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
3216
3217         /* We support free control of debug control loading. */
3218         msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
3219
3220         /* cpu-based controls */
3221         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
3222                 msrs->procbased_ctls_low,
3223                 msrs->procbased_ctls_high);
3224         msrs->procbased_ctls_low =
3225                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3226         msrs->procbased_ctls_high &=
3227                 CPU_BASED_VIRTUAL_INTR_PENDING |
3228                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
3229                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
3230                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
3231                 CPU_BASED_CR3_STORE_EXITING |
3232 #ifdef CONFIG_X86_64
3233                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
3234 #endif
3235                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
3236                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
3237                 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
3238                 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
3239                 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
3240         /*
3241          * We can allow some features even when not supported by the
3242          * hardware. For example, L1 can specify an MSR bitmap - and we
3243          * can use it to avoid exits to L1 - even when L0 runs L2
3244          * without MSR bitmaps.
3245          */
3246         msrs->procbased_ctls_high |=
3247                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
3248                 CPU_BASED_USE_MSR_BITMAPS;
3249
3250         /* We support free control of CR3 access interception. */
3251         msrs->procbased_ctls_low &=
3252                 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
3253
3254         /*
3255          * secondary cpu-based controls.  Do not include those that
3256          * depend on CPUID bits, they are added later by vmx_cpuid_update.
3257          */
3258         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
3259                 msrs->secondary_ctls_low,
3260                 msrs->secondary_ctls_high);
3261         msrs->secondary_ctls_low = 0;
3262         msrs->secondary_ctls_high &=
3263                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3264                 SECONDARY_EXEC_DESC |
3265                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3266                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
3267                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3268                 SECONDARY_EXEC_WBINVD_EXITING;
3269
3270         if (enable_ept) {
3271                 /* nested EPT: emulate EPT also to L1 */
3272                 msrs->secondary_ctls_high |=
3273                         SECONDARY_EXEC_ENABLE_EPT;
3274                 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
3275                          VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
3276                 if (cpu_has_vmx_ept_execute_only())
3277                         msrs->ept_caps |=
3278                                 VMX_EPT_EXECUTE_ONLY_BIT;
3279                 msrs->ept_caps &= vmx_capability.ept;
3280                 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
3281                         VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
3282                         VMX_EPT_1GB_PAGE_BIT;
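                /*
                 * PML relies on the EPT dirty flag, so it is only advertised
                 * to L1 together with the EPT A/D bits.
                 */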
3283                 if (enable_ept_ad_bits) {
3284                         msrs->secondary_ctls_high |=
3285                                 SECONDARY_EXEC_ENABLE_PML;
3286                         msrs->ept_caps |= VMX_EPT_AD_BIT;
3287                 }
3288         }
3289
3290         if (cpu_has_vmx_vmfunc()) {
3291                 msrs->secondary_ctls_high |=
3292                         SECONDARY_EXEC_ENABLE_VMFUNC;
3293                 /*
3294                  * Advertise EPTP switching unconditionally
3295                  * since we emulate it
3296                  */
3297                 if (enable_ept)
3298                         msrs->vmfunc_controls =
3299                                 VMX_VMFUNC_EPTP_SWITCHING;
3300         }
3301
3302         /*
3303          * Old versions of KVM use the single-context version without
3304          * checking for support, so declare that it is supported even
3305          * though it is treated as global context.  The alternative is
3306          * not failing the single-context invvpid, and it is worse.
3307          */
3308         if (enable_vpid) {
3309                 msrs->secondary_ctls_high |=
3310                         SECONDARY_EXEC_ENABLE_VPID;
3311                 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
3312                         VMX_VPID_EXTENT_SUPPORTED_MASK;
3313         }
3314
3315         if (enable_unrestricted_guest)
3316                 msrs->secondary_ctls_high |=
3317                         SECONDARY_EXEC_UNRESTRICTED_GUEST;
3318
3319         /* miscellaneous data */
3320         rdmsr(MSR_IA32_VMX_MISC,
3321                 msrs->misc_low,
3322                 msrs->misc_high);
3323         msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
3324         msrs->misc_low |=
3325                 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
3326                 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
3327                 VMX_MISC_ACTIVITY_HLT;
3328         msrs->misc_high = 0;
3329
3330         /*
3331          * This MSR reports some information about VMX support. We
3332          * should return information about the VMX we emulate for the
3333          * guest, and the VMCS structure we give it - not about the
3334          * VMX support of the underlying hardware.
3335          */
3336         msrs->basic =
3337                 VMCS12_REVISION |
3338                 VMX_BASIC_TRUE_CTLS |
3339                 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
3340                 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
3341
3342         if (cpu_has_vmx_basic_inout())
3343                 msrs->basic |= VMX_BASIC_INOUT;
3344
3345         /*
3346          * These MSRs specify bits which the guest must keep fixed on
3347          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
3348          * We picked the standard core2 setting.
3349          */
3350 #define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
3351 #define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
3352         msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
3353         msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
3354
3355         /* These MSRs specify bits which the guest must keep fixed off. */
3356         rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
3357         rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
3358
3359         /* highest index: VMX_PREEMPTION_TIMER_VALUE */
3360         msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
3361 }
3362
3363 /*
3364  * if fixed0[i] == 1: val[i] must be 1
3365  * if fixed1[i] == 0: val[i] must be 0
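 * E.g. with fixed0 == 0x1 and fixed1 == 0x5, the only valid values are 0x1 and 0x5.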
3366  */
3367 static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
3368 {
3369         return ((val & fixed1) | fixed0) == val;
3370 }
3371
3372 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
3373 {
3374         return fixed_bits_valid(control, low, high);
3375 }
3376
3377 static inline u64 vmx_control_msr(u32 low, u32 high)
3378 {
3379         return low | ((u64)high << 32);
3380 }
3381
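/*
 * Return true if every bit that is set in @subset (considering only the bits
 * selected by @mask) is also set in @superset.
 */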
3382 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
3383 {
3384         superset &= mask;
3385         subset &= mask;
3386
3387         return (superset | subset) == superset;
3388 }
3389
3390 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
3391 {
3392         const u64 feature_and_reserved =
3393                 /* feature (except bit 48; see below) */
3394                 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
3395                 /* reserved */
3396                 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
3397         u64 vmx_basic = vmx->nested.msrs.basic;
3398
3399         if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
3400                 return -EINVAL;
3401
3402         /*
3403          * KVM does not emulate a version of VMX that constrains physical
3404          * addresses of VMX structures (e.g. VMCS) to 32-bits.
3405          */
3406         if (data & BIT_ULL(48))
3407                 return -EINVAL;
3408
3409         if (vmx_basic_vmcs_revision_id(vmx_basic) !=
3410             vmx_basic_vmcs_revision_id(data))
3411                 return -EINVAL;
3412
3413         if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
3414                 return -EINVAL;
3415
3416         vmx->nested.msrs.basic = data;
3417         return 0;
3418 }
3419
3420 static int
3421 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3422 {
3423         u64 supported;
3424         u32 *lowp, *highp;
3425
3426         switch (msr_index) {
3427         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3428                 lowp = &vmx->nested.msrs.pinbased_ctls_low;
3429                 highp = &vmx->nested.msrs.pinbased_ctls_high;
3430                 break;
3431         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3432                 lowp = &vmx->nested.msrs.procbased_ctls_low;
3433                 highp = &vmx->nested.msrs.procbased_ctls_high;
3434                 break;
3435         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3436                 lowp = &vmx->nested.msrs.exit_ctls_low;
3437                 highp = &vmx->nested.msrs.exit_ctls_high;
3438                 break;
3439         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3440                 lowp = &vmx->nested.msrs.entry_ctls_low;
3441                 highp = &vmx->nested.msrs.entry_ctls_high;
3442                 break;
3443         case MSR_IA32_VMX_PROCBASED_CTLS2:
3444                 lowp = &vmx->nested.msrs.secondary_ctls_low;
3445                 highp = &vmx->nested.msrs.secondary_ctls_high;
3446                 break;
3447         default:
3448                 BUG();
3449         }
3450
3451         supported = vmx_control_msr(*lowp, *highp);
3452
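        /*
         * Userspace may only make the advertised control set more
         * restrictive: it can add must-be-1 bits and clear may-be-1 bits,
         * but not the other way around.
         */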
3453         /* Check must-be-1 bits are still 1. */
3454         if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
3455                 return -EINVAL;
3456
3457         /* Check must-be-0 bits are still 0. */
3458         if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
3459                 return -EINVAL;
3460
3461         *lowp = data;
3462         *highp = data >> 32;
3463         return 0;
3464 }
3465
3466 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
3467 {
3468         const u64 feature_and_reserved_bits =
3469                 /* feature */
3470                 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
3471                 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
3472                 /* reserved */
3473                 GENMASK_ULL(13, 9) | BIT_ULL(31);
3474         u64 vmx_misc;
3475
3476         vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
3477                                    vmx->nested.msrs.misc_high);
3478
3479         if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
3480                 return -EINVAL;
3481
3482         if ((vmx->nested.msrs.pinbased_ctls_high &
3483              PIN_BASED_VMX_PREEMPTION_TIMER) &&
3484             vmx_misc_preemption_timer_rate(data) !=
3485             vmx_misc_preemption_timer_rate(vmx_misc))
3486                 return -EINVAL;
3487
3488         if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
3489                 return -EINVAL;
3490
3491         if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
3492                 return -EINVAL;
3493
3494         if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
3495                 return -EINVAL;
3496
3497         vmx->nested.msrs.misc_low = data;
3498         vmx->nested.msrs.misc_high = data >> 32;
3499
3500         /*
3501          * If L1 has read-only VM-exit information fields, use the
3502          * less permissive vmx_vmwrite_bitmap to specify write
3503          * permissions for the shadow VMCS.
3504          */
3505         if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
3506                 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
3507
3508         return 0;
3509 }
3510
3511 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
3512 {
3513         u64 vmx_ept_vpid_cap;
3514
3515         vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
3516                                            vmx->nested.msrs.vpid_caps);
3517
3518         /* Every bit is either reserved or a feature bit. */
3519         if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
3520                 return -EINVAL;
3521
3522         vmx->nested.msrs.ept_caps = data;
3523         vmx->nested.msrs.vpid_caps = data >> 32;
3524         return 0;
3525 }
3526
3527 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3528 {
3529         u64 *msr;
3530
3531         switch (msr_index) {
3532         case MSR_IA32_VMX_CR0_FIXED0:
3533                 msr = &vmx->nested.msrs.cr0_fixed0;
3534                 break;
3535         case MSR_IA32_VMX_CR4_FIXED0:
3536                 msr = &vmx->nested.msrs.cr4_fixed0;
3537                 break;
3538         default:
3539                 BUG();
3540         }
3541
3542         /*
3543          * Bits that are set here (i.e. bits which must be 1 during VMX
3544          * operation) must also be set in the restored value.
3545          */
3546         if (!is_bitwise_subset(data, *msr, -1ULL))
3547                 return -EINVAL;
3548
3549         *msr = data;
3550         return 0;
3551 }
3552
3553 /*
3554  * Called when userspace is restoring VMX MSRs.
3555  *
3556  * Returns 0 on success, non-0 otherwise.
3557  */
3558 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
3559 {
3560         struct vcpu_vmx *vmx = to_vmx(vcpu);
3561
3562         /*
3563          * Don't allow changes to the VMX capability MSRs while the vCPU
3564          * is in VMX operation.
3565          */
3566         if (vmx->nested.vmxon)
3567                 return -EBUSY;
3568
3569         switch (msr_index) {
3570         case MSR_IA32_VMX_BASIC:
3571                 return vmx_restore_vmx_basic(vmx, data);
3572         case MSR_IA32_VMX_PINBASED_CTLS:
3573         case MSR_IA32_VMX_PROCBASED_CTLS:
3574         case MSR_IA32_VMX_EXIT_CTLS:
3575         case MSR_IA32_VMX_ENTRY_CTLS:
3576                 /*
3577                  * The "non-true" VMX capability MSRs are generated from the
3578                  * "true" MSRs, so we do not support restoring them directly.
3579                  *
3580                  * If userspace wants to emulate VMX_BASIC[55]=0, userspace
3581                  * should restore the "true" MSRs with the must-be-1 bits
3582                  * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
3583                  * DEFAULT SETTINGS".
3584                  */
3585                 return -EINVAL;
3586         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3587         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3588         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3589         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3590         case MSR_IA32_VMX_PROCBASED_CTLS2:
3591                 return vmx_restore_control_msr(vmx, msr_index, data);
3592         case MSR_IA32_VMX_MISC:
3593                 return vmx_restore_vmx_misc(vmx, data);
3594         case MSR_IA32_VMX_CR0_FIXED0:
3595         case MSR_IA32_VMX_CR4_FIXED0:
3596                 return vmx_restore_fixed0_msr(vmx, msr_index, data);
3597         case MSR_IA32_VMX_CR0_FIXED1:
3598         case MSR_IA32_VMX_CR4_FIXED1:
3599                 /*
3600                  * These MSRs are generated based on the vCPU's CPUID, so we
3601                  * do not support restoring them directly.
3602                  */
3603                 return -EINVAL;
3604         case MSR_IA32_VMX_EPT_VPID_CAP:
3605                 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
3606         case MSR_IA32_VMX_VMCS_ENUM:
3607                 vmx->nested.msrs.vmcs_enum = data;
3608                 return 0;
3609         default:
3610                 /*
3611                  * The rest of the VMX capability MSRs do not support restore.
3612                  */
3613                 return -EINVAL;
3614         }
3615 }
3616
3617 /* Returns 0 on success, non-0 otherwise. */
3618 static int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
3619 {
3620         switch (msr_index) {
3621         case MSR_IA32_VMX_BASIC:
3622                 *pdata = msrs->basic;
3623                 break;
3624         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3625         case MSR_IA32_VMX_PINBASED_CTLS:
3626                 *pdata = vmx_control_msr(