arch/x86/kvm/vmx.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22 #include "lapic.h"
23
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include <linux/nospec.h>
38 #include "kvm_cache_regs.h"
39 #include "x86.h"
40
41 #include <asm/asm.h>
42 #include <asm/cpu.h>
43 #include <asm/io.h>
44 #include <asm/desc.h>
45 #include <asm/vmx.h>
46 #include <asm/virtext.h>
47 #include <asm/mce.h>
48 #include <asm/fpu/internal.h>
49 #include <asm/perf_event.h>
50 #include <asm/debugreg.h>
51 #include <asm/kexec.h>
52 #include <asm/apic.h>
53 #include <asm/irq_remapping.h>
54 #include <asm/mmu_context.h>
55 #include <asm/spec-ctrl.h>
56 #include <asm/mshyperv.h>
57
58 #include "trace.h"
59 #include "pmu.h"
60 #include "vmx_evmcs.h"
61
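/*
 * The __ex()/__ex_clear() wrappers below emit a VMX instruction with an
 * exception fixup attached (via __kvm_handle_fault_on_reboot()): if the
 * instruction faults while the machine is rebooting and VMX has already
 * been torn down, execution simply continues past it, otherwise
 * kvm_spurious_fault() is called.  __ex_clear() additionally zeroes the
 * given register on the fixup path.
 */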
62 #define __ex(x) __kvm_handle_fault_on_reboot(x)
63 #define __ex_clear(x, reg) \
64         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
65
66 MODULE_AUTHOR("Qumranet");
67 MODULE_LICENSE("GPL");
68
69 static const struct x86_cpu_id vmx_cpu_id[] = {
70         X86_FEATURE_MATCH(X86_FEATURE_VMX),
71         {}
72 };
73 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
74
75 static bool __read_mostly enable_vpid = 1;
76 module_param_named(vpid, enable_vpid, bool, 0444);
77
78 static bool __read_mostly enable_vnmi = 1;
79 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
80
81 static bool __read_mostly flexpriority_enabled = 1;
82 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
83
84 static bool __read_mostly enable_ept = 1;
85 module_param_named(ept, enable_ept, bool, S_IRUGO);
86
87 static bool __read_mostly enable_unrestricted_guest = 1;
88 module_param_named(unrestricted_guest,
89                         enable_unrestricted_guest, bool, S_IRUGO);
90
91 static bool __read_mostly enable_ept_ad_bits = 1;
92 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
93
94 static bool __read_mostly emulate_invalid_guest_state = true;
95 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
96
97 static bool __read_mostly fasteoi = 1;
98 module_param(fasteoi, bool, S_IRUGO);
99
100 static bool __read_mostly enable_apicv = 1;
101 module_param(enable_apicv, bool, S_IRUGO);
102
103 static bool __read_mostly enable_shadow_vmcs = 1;
104 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
105 /*
106  * If nested=1, nested virtualization is supported, i.e., guests may use
107  * VMX and act as hypervisors for their own guests. If nested=0, guests
108  * may not use VMX instructions.
109  */
110 static bool __read_mostly nested = 0;
111 module_param(nested, bool, S_IRUGO);
112
113 static u64 __read_mostly host_xss;
114
115 static bool __read_mostly enable_pml = 1;
116 module_param_named(pml, enable_pml, bool, S_IRUGO);
117
118 #define MSR_TYPE_R      1
119 #define MSR_TYPE_W      2
120 #define MSR_TYPE_RW     3
121
122 #define MSR_BITMAP_MODE_X2APIC          1
123 #define MSR_BITMAP_MODE_X2APIC_APICV    2
124 #define MSR_BITMAP_MODE_LM              4
125
126 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
127
128 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
129 static int __read_mostly cpu_preemption_timer_multi;
130 static bool __read_mostly enable_preemption_timer = 1;
131 #ifdef CONFIG_X86_64
132 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
133 #endif
134
135 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
136 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
137 #define KVM_VM_CR0_ALWAYS_ON                            \
138         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |      \
139          X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
140 #define KVM_CR4_GUEST_OWNED_BITS                                      \
141         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
142          | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
143
144 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
145 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
146 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
147
148 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
149
150 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
151
152 /*
153  * Hyper-V requires all of these, so mark them as supported even though
154  * they are just treated the same as all-context.
155  */
156 #define VMX_VPID_EXTENT_SUPPORTED_MASK          \
157         (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
158         VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
159         VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
160         VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
161
162 /*
163  * These two parameters are used to configure the controls for Pause-Loop Exiting:
164  * ple_gap:    upper bound on the amount of time between two successive
165  *             executions of PAUSE in a loop. Also indicates whether PLE is enabled.
166  *             According to tests, this time is usually smaller than 128 cycles.
167  * ple_window: upper bound on the amount of time a guest is allowed to execute
168  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
169  *             less than 2^12 cycles.
170  * Time is measured based on a counter that runs at the same rate as the TSC;
171  * refer to SDM volume 3B, sections 21.6.13 & 22.1.3.
172  */
173 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
174
175 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
176 module_param(ple_window, uint, 0444);
177
178 /* Default doubles per-vcpu window every exit. */
179 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
180 module_param(ple_window_grow, uint, 0444);
181
182 /* Default resets per-vcpu window every exit to ple_window. */
183 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
184 module_param(ple_window_shrink, uint, 0444);
185
186 /* Default is to compute the maximum so we can never overflow. */
187 static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
188 module_param(ple_window_max, uint, 0444);
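
/*
 * Illustrative summary of the knobs above: on every PAUSE-loop exit the
 * per-vCPU window is multiplied by ple_window_grow (doubling it by
 * default), the default shrink setting resets it straight back to
 * ple_window, and growth is clamped at ple_window_max so the window
 * value can never overflow.
 */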
189
190 extern const ulong vmx_return;
191
192 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
193 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
194 static DEFINE_MUTEX(vmx_l1d_flush_mutex);
195
196 /* Storage for pre-module-init parameter parsing */
197 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
198
199 static const struct {
200         const char *option;
201         enum vmx_l1d_flush_state cmd;
202 } vmentry_l1d_param[] = {
203         {"auto",        VMENTER_L1D_FLUSH_AUTO},
204         {"never",       VMENTER_L1D_FLUSH_NEVER},
205         {"cond",        VMENTER_L1D_FLUSH_COND},
206         {"always",      VMENTER_L1D_FLUSH_ALWAYS},
207 };
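
/*
 * The strings above are the accepted values of the "vmentry_l1d_flush"
 * module parameter registered further down via module_param_cb(), e.g.
 * kvm-intel.vmentry_l1d_flush=cond on the kernel command line, or written
 * at runtime through /sys/module/kvm_intel/parameters/vmentry_l1d_flush.
 */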
208
209 #define L1D_CACHE_ORDER 4
210 static void *vmx_l1d_flush_pages;
211
212 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
213 {
214         struct page *page;
215         unsigned int i;
216
217         if (!enable_ept) {
218                 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
219                 return 0;
220         }
221
222         if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
223                 u64 msr;
224
225                 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
226                 if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
227                         l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
228                         return 0;
229                 }
230         }
231
232         /* If set to auto, use the default l1tf mitigation method */
233         if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
234                 switch (l1tf_mitigation) {
235                 case L1TF_MITIGATION_OFF:
236                         l1tf = VMENTER_L1D_FLUSH_NEVER;
237                         break;
238                 case L1TF_MITIGATION_FLUSH_NOWARN:
239                 case L1TF_MITIGATION_FLUSH:
240                 case L1TF_MITIGATION_FLUSH_NOSMT:
241                         l1tf = VMENTER_L1D_FLUSH_COND;
242                         break;
243                 case L1TF_MITIGATION_FULL:
244                 case L1TF_MITIGATION_FULL_FORCE:
245                         l1tf = VMENTER_L1D_FLUSH_ALWAYS;
246                         break;
247                 }
248         } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
249                 l1tf = VMENTER_L1D_FLUSH_ALWAYS;
250         }
251
252         if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
253             !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
254                 page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
255                 if (!page)
256                         return -ENOMEM;
257                 vmx_l1d_flush_pages = page_address(page);
258
259                 /*
260                  * Initialize each page with a different pattern in
261                  * order to protect against KSM in the nested
262                  * virtualization case.
263                  */
264                 for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
265                         memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
266                                PAGE_SIZE);
267                 }
268         }
269
270         l1tf_vmx_mitigation = l1tf;
271
272         if (l1tf != VMENTER_L1D_FLUSH_NEVER)
273                 static_branch_enable(&vmx_l1d_should_flush);
274         else
275                 static_branch_disable(&vmx_l1d_should_flush);
276
277         if (l1tf == VMENTER_L1D_FLUSH_COND)
278                 static_branch_enable(&vmx_l1d_flush_cond);
279         else
280                 static_branch_disable(&vmx_l1d_flush_cond);
281         return 0;
282 }
283
284 static int vmentry_l1d_flush_parse(const char *s)
285 {
286         unsigned int i;
287
288         if (s) {
289                 for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
290                         if (sysfs_streq(s, vmentry_l1d_param[i].option))
291                                 return vmentry_l1d_param[i].cmd;
292                 }
293         }
294         return -EINVAL;
295 }
296
297 static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
298 {
299         int l1tf, ret;
300
301         if (!boot_cpu_has(X86_BUG_L1TF))
302                 return 0;
303
304         l1tf = vmentry_l1d_flush_parse(s);
305         if (l1tf < 0)
306                 return l1tf;
307
308         /*
309          * Has vmx_init() run already? If not, then this is the pre-init
310          * parameter parsing. In that case, just store the value and let
311          * vmx_init() do the proper setup after enable_ept has been
312          * established.
313          */
314         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
315                 vmentry_l1d_flush_param = l1tf;
316                 return 0;
317         }
318
319         mutex_lock(&vmx_l1d_flush_mutex);
320         ret = vmx_setup_l1d_flush(l1tf);
321         mutex_unlock(&vmx_l1d_flush_mutex);
322         return ret;
323 }
324
325 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
326 {
327         return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
328 }
329
330 static const struct kernel_param_ops vmentry_l1d_flush_ops = {
331         .set = vmentry_l1d_flush_set,
332         .get = vmentry_l1d_flush_get,
333 };
334 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
335
336 enum ept_pointers_status {
337         EPT_POINTERS_CHECK = 0,
338         EPT_POINTERS_MATCH = 1,
339         EPT_POINTERS_MISMATCH = 2
340 };
341
342 struct kvm_vmx {
343         struct kvm kvm;
344
345         unsigned int tss_addr;
346         bool ept_identity_pagetable_done;
347         gpa_t ept_identity_map_addr;
348
349         enum ept_pointers_status ept_pointers_match;
350         spinlock_t ept_pointer_lock;
351 };
352
353 #define NR_AUTOLOAD_MSRS 8
354
355 struct vmcs_hdr {
356         u32 revision_id:31;
357         u32 shadow_vmcs:1;
358 };
359
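/*
 * Architectural layout of a VMCS region: the revision identifier (with
 * bit 31 doubling as the shadow-VMCS indicator, see vmcs_hdr above) comes
 * first, followed by the VMX-abort indicator; everything after that is
 * implementation-specific and is only accessed via VMREAD/VMWRITE.
 */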
360 struct vmcs {
361         struct vmcs_hdr hdr;
362         u32 abort;
363         char data[0];
364 };
365
366 /*
367  * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
368  * and whose values change infrequently, but are not constant.  I.e. this is
369  * used as a write-through cache of the corresponding VMCS fields.
370  */
371 struct vmcs_host_state {
372         unsigned long cr3;      /* May not match real cr3 */
373         unsigned long cr4;      /* May not match real cr4 */
374         unsigned long gs_base;
375         unsigned long fs_base;
376
377         u16           fs_sel, gs_sel, ldt_sel;
378 #ifdef CONFIG_X86_64
379         u16           ds_sel, es_sel;
380 #endif
381 };
382
383 /*
384  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
385  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
386  * loaded on this CPU (so we can clear them if the CPU goes down).
387  */
388 struct loaded_vmcs {
389         struct vmcs *vmcs;
390         struct vmcs *shadow_vmcs;
391         int cpu;
392         bool launched;
393         bool nmi_known_unmasked;
394         /* Support for vnmi-less CPUs */
395         int soft_vnmi_blocked;
396         ktime_t entry_time;
397         s64 vnmi_blocked_time;
398         unsigned long *msr_bitmap;
399         struct list_head loaded_vmcss_on_cpu_link;
400         struct vmcs_host_state host_state;
401 };
402
403 struct shared_msr_entry {
404         unsigned index;
405         u64 data;
406         u64 mask;
407 };
408
409 /*
410  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
411  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
412  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
413  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
414  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
415  * More than one of these structures may exist, if L1 runs multiple L2 guests.
416  * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
417  * underlying hardware which will be used to run L2.
418  * This structure is packed to ensure that its layout is identical across
419  * machines (necessary for live migration).
420  *
421  * IMPORTANT: Changing the layout of existing fields in this structure
422  * will break save/restore compatibility with older kvm releases. When
423  * adding new fields, either use space in the reserved padding* arrays
424  * or add the new fields to the end of the structure.
425  */
426 typedef u64 natural_width;
427 struct __packed vmcs12 {
428         /* According to the Intel spec, a VMCS region must start with the
429          * following two fields. Then follow implementation-specific data.
430          */
431         struct vmcs_hdr hdr;
432         u32 abort;
433
434         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
435         u32 padding[7]; /* room for future expansion */
436
437         u64 io_bitmap_a;
438         u64 io_bitmap_b;
439         u64 msr_bitmap;
440         u64 vm_exit_msr_store_addr;
441         u64 vm_exit_msr_load_addr;
442         u64 vm_entry_msr_load_addr;
443         u64 tsc_offset;
444         u64 virtual_apic_page_addr;
445         u64 apic_access_addr;
446         u64 posted_intr_desc_addr;
447         u64 ept_pointer;
448         u64 eoi_exit_bitmap0;
449         u64 eoi_exit_bitmap1;
450         u64 eoi_exit_bitmap2;
451         u64 eoi_exit_bitmap3;
452         u64 xss_exit_bitmap;
453         u64 guest_physical_address;
454         u64 vmcs_link_pointer;
455         u64 guest_ia32_debugctl;
456         u64 guest_ia32_pat;
457         u64 guest_ia32_efer;
458         u64 guest_ia32_perf_global_ctrl;
459         u64 guest_pdptr0;
460         u64 guest_pdptr1;
461         u64 guest_pdptr2;
462         u64 guest_pdptr3;
463         u64 guest_bndcfgs;
464         u64 host_ia32_pat;
465         u64 host_ia32_efer;
466         u64 host_ia32_perf_global_ctrl;
467         u64 vmread_bitmap;
468         u64 vmwrite_bitmap;
469         u64 vm_function_control;
470         u64 eptp_list_address;
471         u64 pml_address;
472         u64 padding64[3]; /* room for future expansion */
473         /*
474          * To allow migration of L1 (complete with its L2 guests) between
475          * machines of different natural widths (32 or 64 bit), we cannot have
476          * unsigned long fields with no explicit size. We use u64 (aliased
477          * natural_width) instead. Luckily, x86 is little-endian.
478          */
479         natural_width cr0_guest_host_mask;
480         natural_width cr4_guest_host_mask;
481         natural_width cr0_read_shadow;
482         natural_width cr4_read_shadow;
483         natural_width cr3_target_value0;
484         natural_width cr3_target_value1;
485         natural_width cr3_target_value2;
486         natural_width cr3_target_value3;
487         natural_width exit_qualification;
488         natural_width guest_linear_address;
489         natural_width guest_cr0;
490         natural_width guest_cr3;
491         natural_width guest_cr4;
492         natural_width guest_es_base;
493         natural_width guest_cs_base;
494         natural_width guest_ss_base;
495         natural_width guest_ds_base;
496         natural_width guest_fs_base;
497         natural_width guest_gs_base;
498         natural_width guest_ldtr_base;
499         natural_width guest_tr_base;
500         natural_width guest_gdtr_base;
501         natural_width guest_idtr_base;
502         natural_width guest_dr7;
503         natural_width guest_rsp;
504         natural_width guest_rip;
505         natural_width guest_rflags;
506         natural_width guest_pending_dbg_exceptions;
507         natural_width guest_sysenter_esp;
508         natural_width guest_sysenter_eip;
509         natural_width host_cr0;
510         natural_width host_cr3;
511         natural_width host_cr4;
512         natural_width host_fs_base;
513         natural_width host_gs_base;
514         natural_width host_tr_base;
515         natural_width host_gdtr_base;
516         natural_width host_idtr_base;
517         natural_width host_ia32_sysenter_esp;
518         natural_width host_ia32_sysenter_eip;
519         natural_width host_rsp;
520         natural_width host_rip;
521         natural_width paddingl[8]; /* room for future expansion */
522         u32 pin_based_vm_exec_control;
523         u32 cpu_based_vm_exec_control;
524         u32 exception_bitmap;
525         u32 page_fault_error_code_mask;
526         u32 page_fault_error_code_match;
527         u32 cr3_target_count;
528         u32 vm_exit_controls;
529         u32 vm_exit_msr_store_count;
530         u32 vm_exit_msr_load_count;
531         u32 vm_entry_controls;
532         u32 vm_entry_msr_load_count;
533         u32 vm_entry_intr_info_field;
534         u32 vm_entry_exception_error_code;
535         u32 vm_entry_instruction_len;
536         u32 tpr_threshold;
537         u32 secondary_vm_exec_control;
538         u32 vm_instruction_error;
539         u32 vm_exit_reason;
540         u32 vm_exit_intr_info;
541         u32 vm_exit_intr_error_code;
542         u32 idt_vectoring_info_field;
543         u32 idt_vectoring_error_code;
544         u32 vm_exit_instruction_len;
545         u32 vmx_instruction_info;
546         u32 guest_es_limit;
547         u32 guest_cs_limit;
548         u32 guest_ss_limit;
549         u32 guest_ds_limit;
550         u32 guest_fs_limit;
551         u32 guest_gs_limit;
552         u32 guest_ldtr_limit;
553         u32 guest_tr_limit;
554         u32 guest_gdtr_limit;
555         u32 guest_idtr_limit;
556         u32 guest_es_ar_bytes;
557         u32 guest_cs_ar_bytes;
558         u32 guest_ss_ar_bytes;
559         u32 guest_ds_ar_bytes;
560         u32 guest_fs_ar_bytes;
561         u32 guest_gs_ar_bytes;
562         u32 guest_ldtr_ar_bytes;
563         u32 guest_tr_ar_bytes;
564         u32 guest_interruptibility_info;
565         u32 guest_activity_state;
566         u32 guest_sysenter_cs;
567         u32 host_ia32_sysenter_cs;
568         u32 vmx_preemption_timer_value;
569         u32 padding32[7]; /* room for future expansion */
570         u16 virtual_processor_id;
571         u16 posted_intr_nv;
572         u16 guest_es_selector;
573         u16 guest_cs_selector;
574         u16 guest_ss_selector;
575         u16 guest_ds_selector;
576         u16 guest_fs_selector;
577         u16 guest_gs_selector;
578         u16 guest_ldtr_selector;
579         u16 guest_tr_selector;
580         u16 guest_intr_status;
581         u16 host_es_selector;
582         u16 host_cs_selector;
583         u16 host_ss_selector;
584         u16 host_ds_selector;
585         u16 host_fs_selector;
586         u16 host_gs_selector;
587         u16 host_tr_selector;
588         u16 guest_pml_index;
589 };
590
591 /*
592  * For save/restore compatibility, the vmcs12 field offsets must not change.
593  */
594 #define CHECK_OFFSET(field, loc)                                \
595         BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc),       \
596                 "Offset of " #field " in struct vmcs12 has changed.")
597
598 static inline void vmx_check_vmcs12_offsets(void) {
599         CHECK_OFFSET(hdr, 0);
600         CHECK_OFFSET(abort, 4);
601         CHECK_OFFSET(launch_state, 8);
602         CHECK_OFFSET(io_bitmap_a, 40);
603         CHECK_OFFSET(io_bitmap_b, 48);
604         CHECK_OFFSET(msr_bitmap, 56);
605         CHECK_OFFSET(vm_exit_msr_store_addr, 64);
606         CHECK_OFFSET(vm_exit_msr_load_addr, 72);
607         CHECK_OFFSET(vm_entry_msr_load_addr, 80);
608         CHECK_OFFSET(tsc_offset, 88);
609         CHECK_OFFSET(virtual_apic_page_addr, 96);
610         CHECK_OFFSET(apic_access_addr, 104);
611         CHECK_OFFSET(posted_intr_desc_addr, 112);
612         CHECK_OFFSET(ept_pointer, 120);
613         CHECK_OFFSET(eoi_exit_bitmap0, 128);
614         CHECK_OFFSET(eoi_exit_bitmap1, 136);
615         CHECK_OFFSET(eoi_exit_bitmap2, 144);
616         CHECK_OFFSET(eoi_exit_bitmap3, 152);
617         CHECK_OFFSET(xss_exit_bitmap, 160);
618         CHECK_OFFSET(guest_physical_address, 168);
619         CHECK_OFFSET(vmcs_link_pointer, 176);
620         CHECK_OFFSET(guest_ia32_debugctl, 184);
621         CHECK_OFFSET(guest_ia32_pat, 192);
622         CHECK_OFFSET(guest_ia32_efer, 200);
623         CHECK_OFFSET(guest_ia32_perf_global_ctrl, 208);
624         CHECK_OFFSET(guest_pdptr0, 216);
625         CHECK_OFFSET(guest_pdptr1, 224);
626         CHECK_OFFSET(guest_pdptr2, 232);
627         CHECK_OFFSET(guest_pdptr3, 240);
628         CHECK_OFFSET(guest_bndcfgs, 248);
629         CHECK_OFFSET(host_ia32_pat, 256);
630         CHECK_OFFSET(host_ia32_efer, 264);
631         CHECK_OFFSET(host_ia32_perf_global_ctrl, 272);
632         CHECK_OFFSET(vmread_bitmap, 280);
633         CHECK_OFFSET(vmwrite_bitmap, 288);
634         CHECK_OFFSET(vm_function_control, 296);
635         CHECK_OFFSET(eptp_list_address, 304);
636         CHECK_OFFSET(pml_address, 312);
637         CHECK_OFFSET(cr0_guest_host_mask, 344);
638         CHECK_OFFSET(cr4_guest_host_mask, 352);
639         CHECK_OFFSET(cr0_read_shadow, 360);
640         CHECK_OFFSET(cr4_read_shadow, 368);
641         CHECK_OFFSET(cr3_target_value0, 376);
642         CHECK_OFFSET(cr3_target_value1, 384);
643         CHECK_OFFSET(cr3_target_value2, 392);
644         CHECK_OFFSET(cr3_target_value3, 400);
645         CHECK_OFFSET(exit_qualification, 408);
646         CHECK_OFFSET(guest_linear_address, 416);
647         CHECK_OFFSET(guest_cr0, 424);
648         CHECK_OFFSET(guest_cr3, 432);
649         CHECK_OFFSET(guest_cr4, 440);
650         CHECK_OFFSET(guest_es_base, 448);
651         CHECK_OFFSET(guest_cs_base, 456);
652         CHECK_OFFSET(guest_ss_base, 464);
653         CHECK_OFFSET(guest_ds_base, 472);
654         CHECK_OFFSET(guest_fs_base, 480);
655         CHECK_OFFSET(guest_gs_base, 488);
656         CHECK_OFFSET(guest_ldtr_base, 496);
657         CHECK_OFFSET(guest_tr_base, 504);
658         CHECK_OFFSET(guest_gdtr_base, 512);
659         CHECK_OFFSET(guest_idtr_base, 520);
660         CHECK_OFFSET(guest_dr7, 528);
661         CHECK_OFFSET(guest_rsp, 536);
662         CHECK_OFFSET(guest_rip, 544);
663         CHECK_OFFSET(guest_rflags, 552);
664         CHECK_OFFSET(guest_pending_dbg_exceptions, 560);
665         CHECK_OFFSET(guest_sysenter_esp, 568);
666         CHECK_OFFSET(guest_sysenter_eip, 576);
667         CHECK_OFFSET(host_cr0, 584);
668         CHECK_OFFSET(host_cr3, 592);
669         CHECK_OFFSET(host_cr4, 600);
670         CHECK_OFFSET(host_fs_base, 608);
671         CHECK_OFFSET(host_gs_base, 616);
672         CHECK_OFFSET(host_tr_base, 624);
673         CHECK_OFFSET(host_gdtr_base, 632);
674         CHECK_OFFSET(host_idtr_base, 640);
675         CHECK_OFFSET(host_ia32_sysenter_esp, 648);
676         CHECK_OFFSET(host_ia32_sysenter_eip, 656);
677         CHECK_OFFSET(host_rsp, 664);
678         CHECK_OFFSET(host_rip, 672);
679         CHECK_OFFSET(pin_based_vm_exec_control, 744);
680         CHECK_OFFSET(cpu_based_vm_exec_control, 748);
681         CHECK_OFFSET(exception_bitmap, 752);
682         CHECK_OFFSET(page_fault_error_code_mask, 756);
683         CHECK_OFFSET(page_fault_error_code_match, 760);
684         CHECK_OFFSET(cr3_target_count, 764);
685         CHECK_OFFSET(vm_exit_controls, 768);
686         CHECK_OFFSET(vm_exit_msr_store_count, 772);
687         CHECK_OFFSET(vm_exit_msr_load_count, 776);
688         CHECK_OFFSET(vm_entry_controls, 780);
689         CHECK_OFFSET(vm_entry_msr_load_count, 784);
690         CHECK_OFFSET(vm_entry_intr_info_field, 788);
691         CHECK_OFFSET(vm_entry_exception_error_code, 792);
692         CHECK_OFFSET(vm_entry_instruction_len, 796);
693         CHECK_OFFSET(tpr_threshold, 800);
694         CHECK_OFFSET(secondary_vm_exec_control, 804);
695         CHECK_OFFSET(vm_instruction_error, 808);
696         CHECK_OFFSET(vm_exit_reason, 812);
697         CHECK_OFFSET(vm_exit_intr_info, 816);
698         CHECK_OFFSET(vm_exit_intr_error_code, 820);
699         CHECK_OFFSET(idt_vectoring_info_field, 824);
700         CHECK_OFFSET(idt_vectoring_error_code, 828);
701         CHECK_OFFSET(vm_exit_instruction_len, 832);
702         CHECK_OFFSET(vmx_instruction_info, 836);
703         CHECK_OFFSET(guest_es_limit, 840);
704         CHECK_OFFSET(guest_cs_limit, 844);
705         CHECK_OFFSET(guest_ss_limit, 848);
706         CHECK_OFFSET(guest_ds_limit, 852);
707         CHECK_OFFSET(guest_fs_limit, 856);
708         CHECK_OFFSET(guest_gs_limit, 860);
709         CHECK_OFFSET(guest_ldtr_limit, 864);
710         CHECK_OFFSET(guest_tr_limit, 868);
711         CHECK_OFFSET(guest_gdtr_limit, 872);
712         CHECK_OFFSET(guest_idtr_limit, 876);
713         CHECK_OFFSET(guest_es_ar_bytes, 880);
714         CHECK_OFFSET(guest_cs_ar_bytes, 884);
715         CHECK_OFFSET(guest_ss_ar_bytes, 888);
716         CHECK_OFFSET(guest_ds_ar_bytes, 892);
717         CHECK_OFFSET(guest_fs_ar_bytes, 896);
718         CHECK_OFFSET(guest_gs_ar_bytes, 900);
719         CHECK_OFFSET(guest_ldtr_ar_bytes, 904);
720         CHECK_OFFSET(guest_tr_ar_bytes, 908);
721         CHECK_OFFSET(guest_interruptibility_info, 912);
722         CHECK_OFFSET(guest_activity_state, 916);
723         CHECK_OFFSET(guest_sysenter_cs, 920);
724         CHECK_OFFSET(host_ia32_sysenter_cs, 924);
725         CHECK_OFFSET(vmx_preemption_timer_value, 928);
726         CHECK_OFFSET(virtual_processor_id, 960);
727         CHECK_OFFSET(posted_intr_nv, 962);
728         CHECK_OFFSET(guest_es_selector, 964);
729         CHECK_OFFSET(guest_cs_selector, 966);
730         CHECK_OFFSET(guest_ss_selector, 968);
731         CHECK_OFFSET(guest_ds_selector, 970);
732         CHECK_OFFSET(guest_fs_selector, 972);
733         CHECK_OFFSET(guest_gs_selector, 974);
734         CHECK_OFFSET(guest_ldtr_selector, 976);
735         CHECK_OFFSET(guest_tr_selector, 978);
736         CHECK_OFFSET(guest_intr_status, 980);
737         CHECK_OFFSET(host_es_selector, 982);
738         CHECK_OFFSET(host_cs_selector, 984);
739         CHECK_OFFSET(host_ss_selector, 986);
740         CHECK_OFFSET(host_ds_selector, 988);
741         CHECK_OFFSET(host_fs_selector, 990);
742         CHECK_OFFSET(host_gs_selector, 992);
743         CHECK_OFFSET(host_tr_selector, 994);
744         CHECK_OFFSET(guest_pml_index, 996);
745 }
746
747 /*
748  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
749  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
750  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
751  *
752  * IMPORTANT: Changing this value will break save/restore compatibility with
753  * older kvm releases.
754  */
755 #define VMCS12_REVISION 0x11e57ed0
756
757 /*
758  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
759  * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
760  * the current implementation, 4K is reserved to avoid future complications.
761  */
762 #define VMCS12_SIZE 0x1000
763
764 /*
765  * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
766  * supported VMCS12 field encoding.
767  */
768 #define VMCS12_MAX_FIELD_INDEX 0x17
769
770 struct nested_vmx_msrs {
771         /*
772          * We only store the "true" versions of the VMX capability MSRs. We
773          * generate the "non-true" versions by setting the must-be-1 bits
774          * according to the SDM.
775          */
776         u32 procbased_ctls_low;
777         u32 procbased_ctls_high;
778         u32 secondary_ctls_low;
779         u32 secondary_ctls_high;
780         u32 pinbased_ctls_low;
781         u32 pinbased_ctls_high;
782         u32 exit_ctls_low;
783         u32 exit_ctls_high;
784         u32 entry_ctls_low;
785         u32 entry_ctls_high;
786         u32 misc_low;
787         u32 misc_high;
788         u32 ept_caps;
789         u32 vpid_caps;
790         u64 basic;
791         u64 cr0_fixed0;
792         u64 cr0_fixed1;
793         u64 cr4_fixed0;
794         u64 cr4_fixed1;
795         u64 vmcs_enum;
796         u64 vmfunc_controls;
797 };
798
799 /*
800  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
801  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
802  */
803 struct nested_vmx {
804         /* Has the level1 guest done vmxon? */
805         bool vmxon;
806         gpa_t vmxon_ptr;
807         bool pml_full;
808
809         /* The guest-physical address of the current VMCS L1 keeps for L2 */
810         gpa_t current_vmptr;
811         /*
812          * Cache of the guest's VMCS, existing outside of guest memory.
813          * Loaded from guest memory during VMPTRLD. Flushed to guest
814          * memory during VMCLEAR and VMPTRLD.
815          */
816         struct vmcs12 *cached_vmcs12;
817         /*
818          * Cache of the guest's shadow VMCS, existing outside of guest
819          * memory. Loaded from guest memory during VM entry. Flushed
820          * to guest memory during VM exit.
821          */
822         struct vmcs12 *cached_shadow_vmcs12;
823         /*
824          * Indicates if the shadow vmcs must be updated with the
825          * data held by vmcs12
826          */
827         bool sync_shadow_vmcs;
828         bool dirty_vmcs12;
829
830         bool change_vmcs01_virtual_apic_mode;
831
832         /* L2 must run next, and mustn't decide to exit to L1. */
833         bool nested_run_pending;
834
835         struct loaded_vmcs vmcs02;
836
837         /*
838          * Guest pages referred to in the vmcs02 with host-physical
839          * pointers, so we must keep them pinned while L2 runs.
840          */
841         struct page *apic_access_page;
842         struct page *virtual_apic_page;
843         struct page *pi_desc_page;
844         struct pi_desc *pi_desc;
845         bool pi_pending;
846         u16 posted_intr_nv;
847
848         struct hrtimer preemption_timer;
849         bool preemption_timer_expired;
850
851         /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
852         u64 vmcs01_debugctl;
853
854         u16 vpid02;
855         u16 last_vpid;
856
857         struct nested_vmx_msrs msrs;
858
859         /* SMM related state */
860         struct {
861                 /* in VMX operation on SMM entry? */
862                 bool vmxon;
863                 /* in guest mode on SMM entry? */
864                 bool guest_mode;
865         } smm;
866 };
867
868 #define POSTED_INTR_ON  0
869 #define POSTED_INTR_SN  1
870
871 /* Posted-Interrupt Descriptor */
872 struct pi_desc {
873         u32 pir[8];     /* Posted interrupt requested */
874         union {
875                 struct {
876                                 /* bit 256 - Outstanding Notification */
877                         u16     on      : 1,
878                                 /* bit 257 - Suppress Notification */
879                                 sn      : 1,
880                                 /* bit 271:258 - Reserved */
881                                 rsvd_1  : 14;
882                                 /* bit 279:272 - Notification Vector */
883                         u8      nv;
884                                 /* bit 287:280 - Reserved */
885                         u8      rsvd_2;
886                                 /* bit 319:288 - Notification Destination */
887                         u32     ndst;
888                 };
889                 u64 control;
890         };
891         u32 rsvd[6];
892 } __aligned(64);
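
/*
 * This mirrors the hardware posted-interrupt descriptor layout: a 256-bit
 * posted-interrupt request bitmap (one bit per interrupt vector) followed
 * by a 64-bit control word holding the ON/SN bits and the notification
 * vector/destination, padded out to a full 64-byte cache line.
 */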
893
894 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
895 {
896         return test_and_set_bit(POSTED_INTR_ON,
897                         (unsigned long *)&pi_desc->control);
898 }
899
900 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
901 {
902         return test_and_clear_bit(POSTED_INTR_ON,
903                         (unsigned long *)&pi_desc->control);
904 }
905
906 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
907 {
908         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
909 }
910
911 static inline void pi_clear_sn(struct pi_desc *pi_desc)
912 {
913         return clear_bit(POSTED_INTR_SN,
914                         (unsigned long *)&pi_desc->control);
915 }
916
917 static inline void pi_set_sn(struct pi_desc *pi_desc)
918 {
919         return set_bit(POSTED_INTR_SN,
920                         (unsigned long *)&pi_desc->control);
921 }
922
923 static inline void pi_clear_on(struct pi_desc *pi_desc)
924 {
925         clear_bit(POSTED_INTR_ON,
926                   (unsigned long *)&pi_desc->control);
927 }
928
929 static inline int pi_test_on(struct pi_desc *pi_desc)
930 {
931         return test_bit(POSTED_INTR_ON,
932                         (unsigned long *)&pi_desc->control);
933 }
934
935 static inline int pi_test_sn(struct pi_desc *pi_desc)
936 {
937         return test_bit(POSTED_INTR_SN,
938                         (unsigned long *)&pi_desc->control);
939 }
940
941 struct vmx_msrs {
942         unsigned int            nr;
943         struct vmx_msr_entry    val[NR_AUTOLOAD_MSRS];
944 };
945
946 struct vcpu_vmx {
947         struct kvm_vcpu       vcpu;
948         unsigned long         host_rsp;
949         u8                    fail;
950         u8                    msr_bitmap_mode;
951         u32                   exit_intr_info;
952         u32                   idt_vectoring_info;
953         ulong                 rflags;
954         struct shared_msr_entry *guest_msrs;
955         int                   nmsrs;
956         int                   save_nmsrs;
957         unsigned long         host_idt_base;
958 #ifdef CONFIG_X86_64
959         u64                   msr_host_kernel_gs_base;
960         u64                   msr_guest_kernel_gs_base;
961 #endif
962
963         u64                   arch_capabilities;
964         u64                   spec_ctrl;
965
966         u32 vm_entry_controls_shadow;
967         u32 vm_exit_controls_shadow;
968         u32 secondary_exec_control;
969
970         /*
971          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
972          * non-nested (L1) guest, it always points to vmcs01. For a nested
973          * guest (L2), it points to a different VMCS.  loaded_cpu_state points
974          * to the VMCS whose state is loaded into the CPU registers that only
975          * need to be switched when transitioning to/from the kernel; a NULL
976          * value indicates that host state is loaded.
977          */
978         struct loaded_vmcs    vmcs01;
979         struct loaded_vmcs   *loaded_vmcs;
980         struct loaded_vmcs   *loaded_cpu_state;
981         bool                  __launched; /* temporary, used in vmx_vcpu_run */
982         struct msr_autoload {
983                 struct vmx_msrs guest;
984                 struct vmx_msrs host;
985         } msr_autoload;
986
987         struct {
988                 int vm86_active;
989                 ulong save_rflags;
990                 struct kvm_segment segs[8];
991         } rmode;
992         struct {
993                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
994                 struct kvm_save_segment {
995                         u16 selector;
996                         unsigned long base;
997                         u32 limit;
998                         u32 ar;
999                 } seg[8];
1000         } segment_cache;
1001         int vpid;
1002         bool emulation_required;
1003
1004         u32 exit_reason;
1005
1006         /* Posted interrupt descriptor */
1007         struct pi_desc pi_desc;
1008
1009         /* Support for a guest hypervisor (nested VMX) */
1010         struct nested_vmx nested;
1011
1012         /* Dynamic PLE window. */
1013         int ple_window;
1014         bool ple_window_dirty;
1015
1016         /* Support for PML */
1017 #define PML_ENTITY_NUM          512
1018         struct page *pml_pg;
1019
1020         /* apic deadline value in host tsc */
1021         u64 hv_deadline_tsc;
1022
1023         u64 current_tsc_ratio;
1024
1025         u32 host_pkru;
1026
1027         unsigned long host_debugctlmsr;
1028
1029         /*
1030          * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
1031          * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
1032          * in msr_ia32_feature_control_valid_bits.
1033          */
1034         u64 msr_ia32_feature_control;
1035         u64 msr_ia32_feature_control_valid_bits;
1036         u64 ept_pointer;
1037 };
1038
1039 enum segment_cache_field {
1040         SEG_FIELD_SEL = 0,
1041         SEG_FIELD_BASE = 1,
1042         SEG_FIELD_LIMIT = 2,
1043         SEG_FIELD_AR = 3,
1044
1045         SEG_FIELD_NR = 4
1046 };
1047
1048 static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
1049 {
1050         return container_of(kvm, struct kvm_vmx, kvm);
1051 }
1052
1053 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
1054 {
1055         return container_of(vcpu, struct vcpu_vmx, vcpu);
1056 }
1057
1058 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
1059 {
1060         return &(to_vmx(vcpu)->pi_desc);
1061 }
1062
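/*
 * The helpers below build vmcs_field_to_offset_table: ROL16(encoding, 6)
 * rotates a VMCS field encoding into a compact array index, and FIELD64()
 * additionally maps the corresponding *_HIGH encoding onto the upper 32
 * bits of the same u64 member, so the high half of a 64-bit field can be
 * read and written on its own.
 */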
1063 #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
1064 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
1065 #define FIELD(number, name)     [ROL16(number, 6)] = VMCS12_OFFSET(name)
1066 #define FIELD64(number, name)                                           \
1067         FIELD(number, name),                                            \
1068         [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
1069
1070
1071 static u16 shadow_read_only_fields[] = {
1072 #define SHADOW_FIELD_RO(x) x,
1073 #include "vmx_shadow_fields.h"
1074 };
1075 static int max_shadow_read_only_fields =
1076         ARRAY_SIZE(shadow_read_only_fields);
1077
1078 static u16 shadow_read_write_fields[] = {
1079 #define SHADOW_FIELD_RW(x) x,
1080 #include "vmx_shadow_fields.h"
1081 };
1082 static int max_shadow_read_write_fields =
1083         ARRAY_SIZE(shadow_read_write_fields);
1084
1085 static const unsigned short vmcs_field_to_offset_table[] = {
1086         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
1087         FIELD(POSTED_INTR_NV, posted_intr_nv),
1088         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
1089         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
1090         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
1091         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
1092         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
1093         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
1094         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
1095         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
1096         FIELD(GUEST_INTR_STATUS, guest_intr_status),
1097         FIELD(GUEST_PML_INDEX, guest_pml_index),
1098         FIELD(HOST_ES_SELECTOR, host_es_selector),
1099         FIELD(HOST_CS_SELECTOR, host_cs_selector),
1100         FIELD(HOST_SS_SELECTOR, host_ss_selector),
1101         FIELD(HOST_DS_SELECTOR, host_ds_selector),
1102         FIELD(HOST_FS_SELECTOR, host_fs_selector),
1103         FIELD(HOST_GS_SELECTOR, host_gs_selector),
1104         FIELD(HOST_TR_SELECTOR, host_tr_selector),
1105         FIELD64(IO_BITMAP_A, io_bitmap_a),
1106         FIELD64(IO_BITMAP_B, io_bitmap_b),
1107         FIELD64(MSR_BITMAP, msr_bitmap),
1108         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
1109         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
1110         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
1111         FIELD64(PML_ADDRESS, pml_address),
1112         FIELD64(TSC_OFFSET, tsc_offset),
1113         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
1114         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
1115         FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
1116         FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
1117         FIELD64(EPT_POINTER, ept_pointer),
1118         FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
1119         FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
1120         FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
1121         FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
1122         FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
1123         FIELD64(VMREAD_BITMAP, vmread_bitmap),
1124         FIELD64(VMWRITE_BITMAP, vmwrite_bitmap),
1125         FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
1126         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
1127         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
1128         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
1129         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
1130         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
1131         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
1132         FIELD64(GUEST_PDPTR0, guest_pdptr0),
1133         FIELD64(GUEST_PDPTR1, guest_pdptr1),
1134         FIELD64(GUEST_PDPTR2, guest_pdptr2),
1135         FIELD64(GUEST_PDPTR3, guest_pdptr3),
1136         FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
1137         FIELD64(HOST_IA32_PAT, host_ia32_pat),
1138         FIELD64(HOST_IA32_EFER, host_ia32_efer),
1139         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
1140         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
1141         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
1142         FIELD(EXCEPTION_BITMAP, exception_bitmap),
1143         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
1144         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
1145         FIELD(CR3_TARGET_COUNT, cr3_target_count),
1146         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
1147         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
1148         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
1149         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
1150         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
1151         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
1152         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
1153         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
1154         FIELD(TPR_THRESHOLD, tpr_threshold),
1155         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
1156         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
1157         FIELD(VM_EXIT_REASON, vm_exit_reason),
1158         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
1159         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
1160         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
1161         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
1162         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
1163         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
1164         FIELD(GUEST_ES_LIMIT, guest_es_limit),
1165         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
1166         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
1167         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
1168         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
1169         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
1170         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
1171         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
1172         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
1173         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
1174         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
1175         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
1176         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
1177         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
1178         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
1179         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
1180         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
1181         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
1182         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
1183         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
1184         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
1185         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
1186         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
1187         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
1188         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
1189         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
1190         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
1191         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
1192         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
1193         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
1194         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
1195         FIELD(EXIT_QUALIFICATION, exit_qualification),
1196         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
1197         FIELD(GUEST_CR0, guest_cr0),
1198         FIELD(GUEST_CR3, guest_cr3),
1199         FIELD(GUEST_CR4, guest_cr4),
1200         FIELD(GUEST_ES_BASE, guest_es_base),
1201         FIELD(GUEST_CS_BASE, guest_cs_base),
1202         FIELD(GUEST_SS_BASE, guest_ss_base),
1203         FIELD(GUEST_DS_BASE, guest_ds_base),
1204         FIELD(GUEST_FS_BASE, guest_fs_base),
1205         FIELD(GUEST_GS_BASE, guest_gs_base),
1206         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
1207         FIELD(GUEST_TR_BASE, guest_tr_base),
1208         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
1209         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
1210         FIELD(GUEST_DR7, guest_dr7),
1211         FIELD(GUEST_RSP, guest_rsp),
1212         FIELD(GUEST_RIP, guest_rip),
1213         FIELD(GUEST_RFLAGS, guest_rflags),
1214         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
1215         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
1216         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
1217         FIELD(HOST_CR0, host_cr0),
1218         FIELD(HOST_CR3, host_cr3),
1219         FIELD(HOST_CR4, host_cr4),
1220         FIELD(HOST_FS_BASE, host_fs_base),
1221         FIELD(HOST_GS_BASE, host_gs_base),
1222         FIELD(HOST_TR_BASE, host_tr_base),
1223         FIELD(HOST_GDTR_BASE, host_gdtr_base),
1224         FIELD(HOST_IDTR_BASE, host_idtr_base),
1225         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
1226         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
1227         FIELD(HOST_RSP, host_rsp),
1228         FIELD(HOST_RIP, host_rip),
1229 };
1230
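/*
 * Translate a (guest-supplied) VMCS field encoding into its byte offset
 * inside struct vmcs12.  The computed index is sanitized with
 * array_index_nospec() to prevent a speculative out-of-bounds read
 * (Spectre v1); table entries left at 0 mark encodings with no vmcs12
 * backing and are reported as -ENOENT.
 */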
1231 static inline short vmcs_field_to_offset(unsigned long field)
1232 {
1233         const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
1234         unsigned short offset;
1235         unsigned index;
1236
1237         if (field >> 15)
1238                 return -ENOENT;
1239
1240         index = ROL16(field, 6);
1241         if (index >= size)
1242                 return -ENOENT;
1243
1244         index = array_index_nospec(index, size);
1245         offset = vmcs_field_to_offset_table[index];
1246         if (offset == 0)
1247                 return -ENOENT;
1248         return offset;
1249 }
1250
1251 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
1252 {
1253         return to_vmx(vcpu)->nested.cached_vmcs12;
1254 }
1255
1256 static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
1257 {
1258         return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
1259 }
1260
1261 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
1262 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
1263 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
1264 static bool vmx_xsaves_supported(void);
1265 static void vmx_set_segment(struct kvm_vcpu *vcpu,
1266                             struct kvm_segment *var, int seg);
1267 static void vmx_get_segment(struct kvm_vcpu *vcpu,
1268                             struct kvm_segment *var, int seg);
1269 static bool guest_state_valid(struct kvm_vcpu *vcpu);
1270 static u32 vmx_segment_access_rights(struct kvm_segment *var);
1271 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
1272 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
1273 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
1274 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
1275                                             u16 error_code);
1276 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
1277 static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
1278                                                           u32 msr, int type);
1279
1280 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
1281 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
1282 /*
1283  * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed
1284  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
1285  */
1286 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
1287
1288 /*
1289  * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
1290  * can find which vCPU should be woken up.
1291  */
1292 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
1293 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
1294
1295 enum {
1296         VMX_VMREAD_BITMAP,
1297         VMX_VMWRITE_BITMAP,
1298         VMX_BITMAP_NR
1299 };
1300
1301 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
1302
1303 #define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
1304 #define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
1305
1306 static bool cpu_has_load_ia32_efer;
1307 static bool cpu_has_load_perf_global_ctrl;
1308
1309 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
1310 static DEFINE_SPINLOCK(vmx_vpid_lock);
1311
1312 static struct vmcs_config {
1313         int size;
1314         int order;
1315         u32 basic_cap;
1316         u32 revision_id;
1317         u32 pin_based_exec_ctrl;
1318         u32 cpu_based_exec_ctrl;
1319         u32 cpu_based_2nd_exec_ctrl;
1320         u32 vmexit_ctrl;
1321         u32 vmentry_ctrl;
1322         struct nested_vmx_msrs nested;
1323 } vmcs_config;
1324
1325 static struct vmx_capability {
1326         u32 ept;
1327         u32 vpid;
1328 } vmx_capability;
1329
1330 #define VMX_SEGMENT_FIELD(seg)                                  \
1331         [VCPU_SREG_##seg] = {                                   \
1332                 .selector = GUEST_##seg##_SELECTOR,             \
1333                 .base = GUEST_##seg##_BASE,                     \
1334                 .limit = GUEST_##seg##_LIMIT,                   \
1335                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
1336         }
1337
1338 static const struct kvm_vmx_segment_field {
1339         unsigned selector;
1340         unsigned base;
1341         unsigned limit;
1342         unsigned ar_bytes;
1343 } kvm_vmx_segment_fields[] = {
1344         VMX_SEGMENT_FIELD(CS),
1345         VMX_SEGMENT_FIELD(DS),
1346         VMX_SEGMENT_FIELD(ES),
1347         VMX_SEGMENT_FIELD(FS),
1348         VMX_SEGMENT_FIELD(GS),
1349         VMX_SEGMENT_FIELD(SS),
1350         VMX_SEGMENT_FIELD(TR),
1351         VMX_SEGMENT_FIELD(LDTR),
1352 };
1353
1354 static u64 host_efer;
1355
1356 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1357
1358 /*
1359  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
1360  * away by decrementing the array size.
1361  */
1362 static const u32 vmx_msr_index[] = {
1363 #ifdef CONFIG_X86_64
1364         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
1365 #endif
1366         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
1367 };
1368
1369 DEFINE_STATIC_KEY_FALSE(enable_evmcs);
1370
1371 #define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
1372
1373 #define KVM_EVMCS_VERSION 1
1374
1375 #if IS_ENABLED(CONFIG_HYPERV)
1376 static bool __read_mostly enlightened_vmcs = true;
1377 module_param(enlightened_vmcs, bool, 0444);
1378
1379 static inline void evmcs_write64(unsigned long field, u64 value)
1380 {
1381         u16 clean_field;
1382         int offset = get_evmcs_offset(field, &clean_field);
1383
1384         if (offset < 0)
1385                 return;
1386
1387         *(u64 *)((char *)current_evmcs + offset) = value;
1388
1389         current_evmcs->hv_clean_fields &= ~clean_field;
1390 }
1391
1392 static inline void evmcs_write32(unsigned long field, u32 value)
1393 {
1394         u16 clean_field;
1395         int offset = get_evmcs_offset(field, &clean_field);
1396
1397         if (offset < 0)
1398                 return;
1399
1400         *(u32 *)((char *)current_evmcs + offset) = value;
1401         current_evmcs->hv_clean_fields &= ~clean_field;
1402 }
1403
1404 static inline void evmcs_write16(unsigned long field, u16 value)
1405 {
1406         u16 clean_field;
1407         int offset = get_evmcs_offset(field, &clean_field);
1408
1409         if (offset < 0)
1410                 return;
1411
1412         *(u16 *)((char *)current_evmcs + offset) = value;
1413         current_evmcs->hv_clean_fields &= ~clean_field;
1414 }
1415
1416 static inline u64 evmcs_read64(unsigned long field)
1417 {
1418         int offset = get_evmcs_offset(field, NULL);
1419
1420         if (offset < 0)
1421                 return 0;
1422
1423         return *(u64 *)((char *)current_evmcs + offset);
1424 }
1425
1426 static inline u32 evmcs_read32(unsigned long field)
1427 {
1428         int offset = get_evmcs_offset(field, NULL);
1429
1430         if (offset < 0)
1431                 return 0;
1432
1433         return *(u32 *)((char *)current_evmcs + offset);
1434 }
1435
1436 static inline u16 evmcs_read16(unsigned long field)
1437 {
1438         int offset = get_evmcs_offset(field, NULL);
1439
1440         if (offset < 0)
1441                 return 0;
1442
1443         return *(u16 *)((char *)current_evmcs + offset);
1444 }
1445
1446 static inline void evmcs_touch_msr_bitmap(void)
1447 {
1448         if (unlikely(!current_evmcs))
1449                 return;
1450
1451         if (current_evmcs->hv_enlightenments_control.msr_bitmap)
1452                 current_evmcs->hv_clean_fields &=
1453                         ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
1454 }
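
/*
 * Illustrative note (editorial, not part of the original source): the
 * enlightened VMCS "clean fields" bitmap tells Hyper-V which groups of
 * fields it may reuse from its cache on the next enlightened VM entry.
 * Each evmcs_write*() above therefore clears the clean bit covering the
 * field it just modified, and evmcs_touch_msr_bitmap() does the same for
 * the MSR bitmap page, which is edited in place rather than written
 * through evmcs_write*().
 */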
1455
1456 static void evmcs_load(u64 phys_addr)
1457 {
1458         struct hv_vp_assist_page *vp_ap =
1459                 hv_get_vp_assist_page(smp_processor_id());
1460
1461         vp_ap->current_nested_vmcs = phys_addr;
1462         vp_ap->enlighten_vmentry = 1;
1463 }
1464
1465 static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
1466 {
1467         /*
1468          * Enlightened VMCSv1 doesn't support these:
1469          *
1470          *      POSTED_INTR_NV                  = 0x00000002,
1471          *      GUEST_INTR_STATUS               = 0x00000810,
1472          *      APIC_ACCESS_ADDR                = 0x00002014,
1473          *      POSTED_INTR_DESC_ADDR           = 0x00002016,
1474          *      EOI_EXIT_BITMAP0                = 0x0000201c,
1475          *      EOI_EXIT_BITMAP1                = 0x0000201e,
1476          *      EOI_EXIT_BITMAP2                = 0x00002020,
1477          *      EOI_EXIT_BITMAP3                = 0x00002022,
1478          */
1479         vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
1480         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1481                 ~SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1482         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1483                 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1484         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1485                 ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
1486
1487         /*
1488          *      GUEST_PML_INDEX                 = 0x00000812,
1489          *      PML_ADDRESS                     = 0x0000200e,
1490          */
1491         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_PML;
1492
1493         /*      VM_FUNCTION_CONTROL             = 0x00002018, */
1494         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
1495
1496         /*
1497          *      EPTP_LIST_ADDRESS               = 0x00002024,
1498          *      VMREAD_BITMAP                   = 0x00002026,
1499          *      VMWRITE_BITMAP                  = 0x00002028,
1500          */
1501         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_SHADOW_VMCS;
1502
1503         /*
1504          *      TSC_MULTIPLIER                  = 0x00002032,
1505          */
1506         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_TSC_SCALING;
1507
1508         /*
1509          *      PLE_GAP                         = 0x00004020,
1510          *      PLE_WINDOW                      = 0x00004022,
1511          */
1512         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1513
1514         /*
1515          *      VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
1516          */
1517         vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
1518
1519         /*
1520          *      GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
1521          *      HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
1522          */
1523         vmcs_conf->vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
1524         vmcs_conf->vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
1525
1526         /*
1527          * Currently unsupported in KVM:
1528          *      GUEST_IA32_RTIT_CTL             = 0x00002814,
1529          */
1530 }
1531
1532 /* check_ept_pointer_match() must be called with ept_pointer_lock held. */
1533 static void check_ept_pointer_match(struct kvm *kvm)
1534 {
1535         struct kvm_vcpu *vcpu;
1536         u64 tmp_eptp = INVALID_PAGE;
1537         int i;
1538
1539         kvm_for_each_vcpu(i, vcpu, kvm) {
1540                 if (!VALID_PAGE(tmp_eptp)) {
1541                         tmp_eptp = to_vmx(vcpu)->ept_pointer;
1542                 } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
1543                         to_kvm_vmx(kvm)->ept_pointers_match
1544                                 = EPT_POINTERS_MISMATCH;
1545                         return;
1546                 }
1547         }
1548
1549         to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
1550 }
1551
1552 static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
1553 {
1554         int ret;
1555
1556         spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
1557
1558         if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
1559                 check_ept_pointer_match(kvm);
1560
1561         if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
1562                 ret = -ENOTSUPP;
1563                 goto out;
1564         }
1565
1566         ret = hyperv_flush_guest_mapping(
1567                         to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
1568
1569 out:
1570         spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
1571         return ret;
1572 }
1573 #else /* !IS_ENABLED(CONFIG_HYPERV) */
1574 static inline void evmcs_write64(unsigned long field, u64 value) {}
1575 static inline void evmcs_write32(unsigned long field, u32 value) {}
1576 static inline void evmcs_write16(unsigned long field, u16 value) {}
1577 static inline u64 evmcs_read64(unsigned long field) { return 0; }
1578 static inline u32 evmcs_read32(unsigned long field) { return 0; }
1579 static inline u16 evmcs_read16(unsigned long field) { return 0; }
1580 static inline void evmcs_load(u64 phys_addr) {}
1581 static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
1582 static inline void evmcs_touch_msr_bitmap(void) {}
1583 #endif /* IS_ENABLED(CONFIG_HYPERV) */
1584
1585 static inline bool is_exception_n(u32 intr_info, u8 vector)
1586 {
1587         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1588                              INTR_INFO_VALID_MASK)) ==
1589                 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1590 }
1591
1592 static inline bool is_debug(u32 intr_info)
1593 {
1594         return is_exception_n(intr_info, DB_VECTOR);
1595 }
1596
1597 static inline bool is_breakpoint(u32 intr_info)
1598 {
1599         return is_exception_n(intr_info, BP_VECTOR);
1600 }
1601
1602 static inline bool is_page_fault(u32 intr_info)
1603 {
1604         return is_exception_n(intr_info, PF_VECTOR);
1605 }
1606
1607 static inline bool is_no_device(u32 intr_info)
1608 {
1609         return is_exception_n(intr_info, NM_VECTOR);
1610 }
1611
1612 static inline bool is_invalid_opcode(u32 intr_info)
1613 {
1614         return is_exception_n(intr_info, UD_VECTOR);
1615 }
1616
1617 static inline bool is_gp_fault(u32 intr_info)
1618 {
1619         return is_exception_n(intr_info, GP_VECTOR);
1620 }
1621
1622 static inline bool is_external_interrupt(u32 intr_info)
1623 {
1624         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1625                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1626 }
1627
1628 static inline bool is_machine_check(u32 intr_info)
1629 {
1630         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1631                              INTR_INFO_VALID_MASK)) ==
1632                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1633 }
1634
1635 /* Undocumented: icebp/int1 */
1636 static inline bool is_icebp(u32 intr_info)
1637 {
1638         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1639                 == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
1640 }
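
/*
 * Illustrative note (editorial, not part of the original source): the
 * intr_info word decoded by the helpers above packs the vector in bits
 * 7:0, the event type in bits 10:8, the error-code-valid flag in bit 11
 * and the valid flag in bit 31.  For example, a guest #PF exit delivers
 * intr_info == 0x80000b0e; masking with TYPE | VECTOR | VALID yields
 * 0x8000030e == (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK),
 * so is_page_fault() returns true.
 */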
1641
1642 static inline bool cpu_has_vmx_msr_bitmap(void)
1643 {
1644         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1645 }
1646
1647 static inline bool cpu_has_vmx_tpr_shadow(void)
1648 {
1649         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1650 }
1651
1652 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1653 {
1654         return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1655 }
1656
1657 static inline bool cpu_has_secondary_exec_ctrls(void)
1658 {
1659         return vmcs_config.cpu_based_exec_ctrl &
1660                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1661 }
1662
1663 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1664 {
1665         return vmcs_config.cpu_based_2nd_exec_ctrl &
1666                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1667 }
1668
1669 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1670 {
1671         return vmcs_config.cpu_based_2nd_exec_ctrl &
1672                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1673 }
1674
1675 static inline bool cpu_has_vmx_apic_register_virt(void)
1676 {
1677         return vmcs_config.cpu_based_2nd_exec_ctrl &
1678                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1679 }
1680
1681 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1682 {
1683         return vmcs_config.cpu_based_2nd_exec_ctrl &
1684                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1685 }
1686
1687 /*
1688  * Comment format: document - errata name - stepping - processor name.
1689  * Taken from
1690  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1691  */
1692 static u32 vmx_preemption_cpu_tfms[] = {
1693 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
1694 0x000206E6,
1695 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
1696 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1697 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1698 0x00020652,
1699 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1700 0x00020655,
1701 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
1702 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
1703 /*
1704  * 320767.pdf - AAP86  - B1 -
1705  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1706  */
1707 0x000106E5,
1708 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1709 0x000106A0,
1710 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1711 0x000106A1,
1712 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1713 0x000106A4,
1714 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1715 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1716 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1717 0x000106A5,
1718 };
1719
1720 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1721 {
1722         u32 eax = cpuid_eax(0x00000001), i;
1723
1724         /* Clear the reserved bits */
1725         eax &= ~(0x3U << 14 | 0xfU << 28);
1726         for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1727                 if (eax == vmx_preemption_cpu_tfms[i])
1728                         return true;
1729
1730         return false;
1731 }
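
/*
 * Illustrative note (editorial, not part of the original source):
 * CPUID.01H:EAX packs stepping[3:0], model[7:4], family[11:8],
 * type[13:12], extended model[19:16] and extended family[27:20]; bits
 * 15:14 and 31:28 are reserved, which is exactly what the mask above
 * clears before comparing against the table.  For example, 0x000206E6
 * decodes to family 0x6, model 0x2E (extended model 2, model 0xE),
 * stepping 6, i.e. the Xeon 7500 entry.
 */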
1732
1733 static inline bool cpu_has_vmx_preemption_timer(void)
1734 {
1735         return vmcs_config.pin_based_exec_ctrl &
1736                 PIN_BASED_VMX_PREEMPTION_TIMER;
1737 }
1738
1739 static inline bool cpu_has_vmx_posted_intr(void)
1740 {
1741         return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1742                 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1743 }
1744
1745 static inline bool cpu_has_vmx_apicv(void)
1746 {
1747         return cpu_has_vmx_apic_register_virt() &&
1748                 cpu_has_vmx_virtual_intr_delivery() &&
1749                 cpu_has_vmx_posted_intr();
1750 }
1751
1752 static inline bool cpu_has_vmx_flexpriority(void)
1753 {
1754         return cpu_has_vmx_tpr_shadow() &&
1755                 cpu_has_vmx_virtualize_apic_accesses();
1756 }
1757
1758 static inline bool cpu_has_vmx_ept_execute_only(void)
1759 {
1760         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1761 }
1762
1763 static inline bool cpu_has_vmx_ept_2m_page(void)
1764 {
1765         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1766 }
1767
1768 static inline bool cpu_has_vmx_ept_1g_page(void)
1769 {
1770         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1771 }
1772
1773 static inline bool cpu_has_vmx_ept_4levels(void)
1774 {
1775         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1776 }
1777
1778 static inline bool cpu_has_vmx_ept_mt_wb(void)
1779 {
1780         return vmx_capability.ept & VMX_EPTP_WB_BIT;
1781 }
1782
1783 static inline bool cpu_has_vmx_ept_5levels(void)
1784 {
1785         return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1786 }
1787
1788 static inline bool cpu_has_vmx_ept_ad_bits(void)
1789 {
1790         return vmx_capability.ept & VMX_EPT_AD_BIT;
1791 }
1792
1793 static inline bool cpu_has_vmx_invept_context(void)
1794 {
1795         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1796 }
1797
1798 static inline bool cpu_has_vmx_invept_global(void)
1799 {
1800         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1801 }
1802
1803 static inline bool cpu_has_vmx_invvpid_individual_addr(void)
1804 {
1805         return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;
1806 }
1807
1808 static inline bool cpu_has_vmx_invvpid_single(void)
1809 {
1810         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1811 }
1812
1813 static inline bool cpu_has_vmx_invvpid_global(void)
1814 {
1815         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1816 }
1817
1818 static inline bool cpu_has_vmx_invvpid(void)
1819 {
1820         return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1821 }
1822
1823 static inline bool cpu_has_vmx_ept(void)
1824 {
1825         return vmcs_config.cpu_based_2nd_exec_ctrl &
1826                 SECONDARY_EXEC_ENABLE_EPT;
1827 }
1828
1829 static inline bool cpu_has_vmx_unrestricted_guest(void)
1830 {
1831         return vmcs_config.cpu_based_2nd_exec_ctrl &
1832                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1833 }
1834
1835 static inline bool cpu_has_vmx_ple(void)
1836 {
1837         return vmcs_config.cpu_based_2nd_exec_ctrl &
1838                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1839 }
1840
1841 static inline bool cpu_has_vmx_basic_inout(void)
1842 {
1843         return  (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1844 }
1845
1846 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1847 {
1848         return flexpriority_enabled && lapic_in_kernel(vcpu);
1849 }
1850
1851 static inline bool cpu_has_vmx_vpid(void)
1852 {
1853         return vmcs_config.cpu_based_2nd_exec_ctrl &
1854                 SECONDARY_EXEC_ENABLE_VPID;
1855 }
1856
1857 static inline bool cpu_has_vmx_rdtscp(void)
1858 {
1859         return vmcs_config.cpu_based_2nd_exec_ctrl &
1860                 SECONDARY_EXEC_RDTSCP;
1861 }
1862
1863 static inline bool cpu_has_vmx_invpcid(void)
1864 {
1865         return vmcs_config.cpu_based_2nd_exec_ctrl &
1866                 SECONDARY_EXEC_ENABLE_INVPCID;
1867 }
1868
1869 static inline bool cpu_has_virtual_nmis(void)
1870 {
1871         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1872 }
1873
1874 static inline bool cpu_has_vmx_wbinvd_exit(void)
1875 {
1876         return vmcs_config.cpu_based_2nd_exec_ctrl &
1877                 SECONDARY_EXEC_WBINVD_EXITING;
1878 }
1879
1880 static inline bool cpu_has_vmx_shadow_vmcs(void)
1881 {
1882         u64 vmx_msr;
1883         rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1884         /* check if the cpu supports writing r/o exit information fields */
1885         if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1886                 return false;
1887
1888         return vmcs_config.cpu_based_2nd_exec_ctrl &
1889                 SECONDARY_EXEC_SHADOW_VMCS;
1890 }
1891
1892 static inline bool cpu_has_vmx_pml(void)
1893 {
1894         return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1895 }
1896
1897 static inline bool cpu_has_vmx_tsc_scaling(void)
1898 {
1899         return vmcs_config.cpu_based_2nd_exec_ctrl &
1900                 SECONDARY_EXEC_TSC_SCALING;
1901 }
1902
1903 static inline bool cpu_has_vmx_vmfunc(void)
1904 {
1905         return vmcs_config.cpu_based_2nd_exec_ctrl &
1906                 SECONDARY_EXEC_ENABLE_VMFUNC;
1907 }
1908
1909 static bool vmx_umip_emulated(void)
1910 {
1911         return vmcs_config.cpu_based_2nd_exec_ctrl &
1912                 SECONDARY_EXEC_DESC;
1913 }
1914
1915 static inline bool report_flexpriority(void)
1916 {
1917         return flexpriority_enabled;
1918 }
1919
1920 static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1921 {
1922         return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
1923 }
1924
1925 /*
1926  * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
1927  * to modify any valid field of the VMCS, or are the VM-exit
1928  * information fields read-only?
1929  */
1930 static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
1931 {
1932         return to_vmx(vcpu)->nested.msrs.misc_low &
1933                 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
1934 }
1935
1936 static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
1937 {
1938         return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
1939 }
1940
1941 static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
1942 {
1943         return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
1944                         CPU_BASED_MONITOR_TRAP_FLAG;
1945 }
1946
1947 static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
1948 {
1949         return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
1950                 SECONDARY_EXEC_SHADOW_VMCS;
1951 }
1952
1953 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1954 {
1955         return vmcs12->cpu_based_vm_exec_control & bit;
1956 }
1957
1958 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1959 {
1960         return (vmcs12->cpu_based_vm_exec_control &
1961                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1962                 (vmcs12->secondary_vm_exec_control & bit);
1963 }
1964
1965 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1966 {
1967         return vmcs12->pin_based_vm_exec_control &
1968                 PIN_BASED_VMX_PREEMPTION_TIMER;
1969 }
1970
1971 static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
1972 {
1973         return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
1974 }
1975
1976 static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1977 {
1978         return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1979 }
1980
1981 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1982 {
1983         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1984 }
1985
1986 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1987 {
1988         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
1989 }
1990
1991 static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
1992 {
1993         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
1994 }
1995
1996 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
1997 {
1998         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
1999 }
2000
2001 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
2002 {
2003         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
2004 }
2005
2006 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
2007 {
2008         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
2009 }
2010
2011 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
2012 {
2013         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2014 }
2015
2016 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
2017 {
2018         return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
2019 }
2020
2021 static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
2022 {
2023         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
2024 }
2025
2026 static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
2027 {
2028         return nested_cpu_has_vmfunc(vmcs12) &&
2029                 (vmcs12->vm_function_control &
2030                  VMX_VMFUNC_EPTP_SWITCHING);
2031 }
2032
2033 static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
2034 {
2035         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
2036 }
2037
2038 static inline bool is_nmi(u32 intr_info)
2039 {
2040         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
2041                 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
2042 }
2043
2044 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
2045                               u32 exit_intr_info,
2046                               unsigned long exit_qualification);
2047 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
2048                         struct vmcs12 *vmcs12,
2049                         u32 reason, unsigned long qualification);
2050
2051 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
2052 {
2053         int i;
2054
2055         for (i = 0; i < vmx->nmsrs; ++i)
2056                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
2057                         return i;
2058         return -1;
2059 }
2060
2061 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
2062 {
2063         struct {
2064                 u64 vpid : 16;
2065                 u64 rsvd : 48;
2066                 u64 gva;
2067         } operand = { vpid, 0, gva };
2068         bool error;
2069
2070         asm volatile (__ex(ASM_VMX_INVVPID) CC_SET(na)
2071                       : CC_OUT(na) (error) : "a"(&operand), "c"(ext)
2072                       : "memory");
2073         BUG_ON(error);
2074 }
2075
2076 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
2077 {
2078         struct {
2079                 u64 eptp, gpa;
2080         } operand = {eptp, gpa};
2081         bool error;
2082
2083         asm volatile (__ex(ASM_VMX_INVEPT) CC_SET(na)
2084                       : CC_OUT(na) (error) : "a" (&operand), "c" (ext)
2085                       : "memory");
2086         BUG_ON(error);
2087 }
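
/*
 * Illustrative note (editorial, not part of the original source): both
 * helpers build the 128-bit in-memory descriptor taken by INVVPID and
 * INVEPT.  For INVVPID the descriptor is { vpid[15:0], reserved[63:16]
 * (must be zero), linear address[127:64] }; for INVEPT it is
 * { EPT pointer[63:0], reserved[127:64] (must be zero) }, which is why
 * the callers below pass 0 for the second member.  The invalidation
 * extent (single context, all context, ...) is passed in a register
 * operand, not in the descriptor.
 */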
2088
2089 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
2090 {
2091         int i;
2092
2093         i = __find_msr_index(vmx, msr);
2094         if (i >= 0)
2095                 return &vmx->guest_msrs[i];
2096         return NULL;
2097 }
2098
2099 static void vmcs_clear(struct vmcs *vmcs)
2100 {
2101         u64 phys_addr = __pa(vmcs);
2102         bool error;
2103
2104         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) CC_SET(na)
2105                       : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
2106                       : "memory");
2107         if (unlikely(error))
2108                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
2109                        vmcs, phys_addr);
2110 }
2111
2112 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
2113 {
2114         vmcs_clear(loaded_vmcs->vmcs);
2115         if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
2116                 vmcs_clear(loaded_vmcs->shadow_vmcs);
2117         loaded_vmcs->cpu = -1;
2118         loaded_vmcs->launched = 0;
2119 }
2120
2121 static void vmcs_load(struct vmcs *vmcs)
2122 {
2123         u64 phys_addr = __pa(vmcs);
2124         bool error;
2125
2126         if (static_branch_unlikely(&enable_evmcs))
2127                 return evmcs_load(phys_addr);
2128
2129         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) CC_SET(na)
2130                       : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
2131                       : "memory");
2132         if (unlikely(error))
2133                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
2134                        vmcs, phys_addr);
2135 }
2136
2137 #ifdef CONFIG_KEXEC_CORE
2138 /*
2139  * This bitmap indicates, per CPU, whether the crash-time vmclear
2140  * operation is enabled.
2141  * All CPUs are disabled by default.
2142  */
2143 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
2144
2145 static inline void crash_enable_local_vmclear(int cpu)
2146 {
2147         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
2148 }
2149
2150 static inline void crash_disable_local_vmclear(int cpu)
2151 {
2152         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
2153 }
2154
2155 static inline int crash_local_vmclear_enabled(int cpu)
2156 {
2157         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
2158 }
2159
2160 static void crash_vmclear_local_loaded_vmcss(void)
2161 {
2162         int cpu = raw_smp_processor_id();
2163         struct loaded_vmcs *v;
2164
2165         if (!crash_local_vmclear_enabled(cpu))
2166                 return;
2167
2168         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
2169                             loaded_vmcss_on_cpu_link)
2170                 vmcs_clear(v->vmcs);
2171 }
2172 #else
2173 static inline void crash_enable_local_vmclear(int cpu) { }
2174 static inline void crash_disable_local_vmclear(int cpu) { }
2175 #endif /* CONFIG_KEXEC_CORE */
2176
2177 static void __loaded_vmcs_clear(void *arg)
2178 {
2179         struct loaded_vmcs *loaded_vmcs = arg;
2180         int cpu = raw_smp_processor_id();
2181
2182         if (loaded_vmcs->cpu != cpu)
2183                 return; /* vcpu migration can race with cpu offline */
2184         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
2185                 per_cpu(current_vmcs, cpu) = NULL;
2186         crash_disable_local_vmclear(cpu);
2187         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
2188
2189         /*
2190          * Ensure that updating loaded_vmcs->loaded_vmcss_on_cpu_link
2191          * happens before setting loaded_vmcs->cpu to -1, which is done in
2192          * loaded_vmcs_init.  Otherwise, another CPU could see cpu == -1
2193          * first and add the VMCS to the per-cpu list before it is deleted.
2194          */
2195         smp_wmb();
2196
2197         loaded_vmcs_init(loaded_vmcs);
2198         crash_enable_local_vmclear(cpu);
2199 }
2200
2201 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
2202 {
2203         int cpu = loaded_vmcs->cpu;
2204
2205         if (cpu != -1)
2206                 smp_call_function_single(cpu,
2207                          __loaded_vmcs_clear, loaded_vmcs, 1);
2208 }
2209
2210 static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
2211 {
2212         if (vpid == 0)
2213                 return true;
2214
2215         if (cpu_has_vmx_invvpid_individual_addr()) {
2216                 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
2217                 return true;
2218         }
2219
2220         return false;
2221 }
2222
2223 static inline void vpid_sync_vcpu_single(int vpid)
2224 {
2225         if (vpid == 0)
2226                 return;
2227
2228         if (cpu_has_vmx_invvpid_single())
2229                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
2230 }
2231
2232 static inline void vpid_sync_vcpu_global(void)
2233 {
2234         if (cpu_has_vmx_invvpid_global())
2235                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
2236 }
2237
2238 static inline void vpid_sync_context(int vpid)
2239 {
2240         if (cpu_has_vmx_invvpid_single())
2241                 vpid_sync_vcpu_single(vpid);
2242         else
2243                 vpid_sync_vcpu_global();
2244 }
2245
2246 static inline void ept_sync_global(void)
2247 {
2248         __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
2249 }
2250
2251 static inline void ept_sync_context(u64 eptp)
2252 {
2253         if (cpu_has_vmx_invept_context())
2254                 __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
2255         else
2256                 ept_sync_global();
2257 }
2258
2259 static __always_inline void vmcs_check16(unsigned long field)
2260 {
2261         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
2262                          "16-bit accessor invalid for 64-bit field");
2263         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2264                          "16-bit accessor invalid for 64-bit high field");
2265         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2266                          "16-bit accessor invalid for 32-bit high field");
2267         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2268                          "16-bit accessor invalid for natural width field");
2269 }
2270
2271 static __always_inline void vmcs_check32(unsigned long field)
2272 {
2273         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2274                          "32-bit accessor invalid for 16-bit field");
2275         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2276                          "32-bit accessor invalid for natural width field");
2277 }
2278
2279 static __always_inline void vmcs_check64(unsigned long field)
2280 {
2281         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2282                          "64-bit accessor invalid for 16-bit field");
2283         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2284                          "64-bit accessor invalid for 64-bit high field");
2285         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2286                          "64-bit accessor invalid for 32-bit field");
2287         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2288                          "64-bit accessor invalid for natural width field");
2289 }
2290
2291 static __always_inline void vmcs_checkl(unsigned long field)
2292 {
2293         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2294                          "Natural width accessor invalid for 16-bit field");
2295         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
2296                          "Natural width accessor invalid for 64-bit field");
2297         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2298                          "Natural width accessor invalid for 64-bit high field");
2299         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2300                          "Natural width accessor invalid for 32-bit field");
2301 }
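
/*
 * Illustrative note (editorial, not part of the original source): the
 * checks above rely on the VMCS field encoding, where field & 0x6000
 * gives the width (0x0000 = 16-bit, 0x2000 = 64-bit, 0x4000 = 32-bit,
 * 0x6000 = natural width) and bit 0 selects the "high" half of a 64-bit
 * field.  For example, GUEST_ES_LIMIT == 0x4800, so
 * (0x4800 & 0x6000) == 0x4000 and vmcs_check32() accepts it, while
 * vmcs_check16() would trip its BUILD_BUG_ON for the same field.
 */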
2302
2303 static __always_inline unsigned long __vmcs_readl(unsigned long field)
2304 {
2305         unsigned long value;
2306
2307         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
2308                       : "=a"(value) : "d"(field) : "cc");
2309         return value;
2310 }
2311
2312 static __always_inline u16 vmcs_read16(unsigned long field)
2313 {
2314         vmcs_check16(field);
2315         if (static_branch_unlikely(&enable_evmcs))
2316                 return evmcs_read16(field);
2317         return __vmcs_readl(field);
2318 }
2319
2320 static __always_inline u32 vmcs_read32(unsigned long field)
2321 {
2322         vmcs_check32(field);
2323         if (static_branch_unlikely(&enable_evmcs))
2324                 return evmcs_read32(field);
2325         return __vmcs_readl(field);
2326 }
2327
2328 static __always_inline u64 vmcs_read64(unsigned long field)
2329 {
2330         vmcs_check64(field);
2331         if (static_branch_unlikely(&enable_evmcs))
2332                 return evmcs_read64(field);
2333 #ifdef CONFIG_X86_64
2334         return __vmcs_readl(field);
2335 #else
2336         return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
2337 #endif
2338 }
2339
2340 static __always_inline unsigned long vmcs_readl(unsigned long field)
2341 {
2342         vmcs_checkl(field);
2343         if (static_branch_unlikely(&enable_evmcs))
2344                 return evmcs_read64(field);
2345         return __vmcs_readl(field);
2346 }
2347
2348 static noinline void vmwrite_error(unsigned long field, unsigned long value)
2349 {
2350         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
2351                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
2352         dump_stack();
2353 }
2354
2355 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
2356 {
2357         bool error;
2358
2359         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) CC_SET(na)
2360                       : CC_OUT(na) (error) : "a"(value), "d"(field));
2361         if (unlikely(error))
2362                 vmwrite_error(field, value);
2363 }
2364
2365 static __always_inline void vmcs_write16(unsigned long field, u16 value)
2366 {
2367         vmcs_check16(field);
2368         if (static_branch_unlikely(&enable_evmcs))
2369                 return evmcs_write16(field, value);
2370
2371         __vmcs_writel(field, value);
2372 }
2373
2374 static __always_inline void vmcs_write32(unsigned long field, u32 value)
2375 {
2376         vmcs_check32(field);
2377         if (static_branch_unlikely(&enable_evmcs))
2378                 return evmcs_write32(field, value);
2379
2380         __vmcs_writel(field, value);
2381 }
2382
2383 static __always_inline void vmcs_write64(unsigned long field, u64 value)
2384 {
2385         vmcs_check64(field);
2386         if (static_branch_unlikely(&enable_evmcs))
2387                 return evmcs_write64(field, value);
2388
2389         __vmcs_writel(field, value);
2390 #ifndef CONFIG_X86_64
2391         asm volatile ("");
2392         __vmcs_writel(field+1, value >> 32);
2393 #endif
2394 }
2395
2396 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
2397 {
2398         vmcs_checkl(field);
2399         if (static_branch_unlikely(&enable_evmcs))
2400                 return evmcs_write64(field, value);
2401
2402         __vmcs_writel(field, value);
2403 }
2404
2405 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
2406 {
2407         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
2408                          "vmcs_clear_bits does not support 64-bit fields");
2409         if (static_branch_unlikely(&enable_evmcs))
2410                 return evmcs_write32(field, evmcs_read32(field) & ~mask);
2411
2412         __vmcs_writel(field, __vmcs_readl(field) & ~mask);
2413 }
2414
2415 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
2416 {
2417         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
2418                          "vmcs_set_bits does not support 64-bit fields");
2419         if (static_branch_unlikely(&enable_evmcs))
2420                 return evmcs_write32(field, evmcs_read32(field) | mask);
2421
2422         __vmcs_writel(field, __vmcs_readl(field) | mask);
2423 }
2424
2425 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
2426 {
2427         vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
2428 }
2429
2430 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
2431 {
2432         vmcs_write32(VM_ENTRY_CONTROLS, val);
2433         vmx->vm_entry_controls_shadow = val;
2434 }
2435
2436 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
2437 {
2438         if (vmx->vm_entry_controls_shadow != val)
2439                 vm_entry_controls_init(vmx, val);
2440 }
2441
2442 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
2443 {
2444         return vmx->vm_entry_controls_shadow;
2445 }
2446
2447
2448 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2449 {
2450         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
2451 }
2452
2453 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2454 {
2455         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
2456 }
2457
2458 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
2459 {
2460         vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
2461 }
2462
2463 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
2464 {
2465         vmcs_write32(VM_EXIT_CONTROLS, val);
2466         vmx->vm_exit_controls_shadow = val;
2467 }
2468
2469 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
2470 {
2471         if (vmx->vm_exit_controls_shadow != val)
2472                 vm_exit_controls_init(vmx, val);
2473 }
2474
2475 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
2476 {
2477         return vmx->vm_exit_controls_shadow;
2478 }
2479
2480
2481 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2482 {
2483         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
2484 }
2485
2486 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2487 {
2488         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
2489 }
2490
2491 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
2492 {
2493         vmx->segment_cache.bitmask = 0;
2494 }
2495
2496 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
2497                                        unsigned field)
2498 {
2499         bool ret;
2500         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
2501
2502         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
2503                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
2504                 vmx->segment_cache.bitmask = 0;
2505         }
2506         ret = vmx->segment_cache.bitmask & mask;
2507         vmx->segment_cache.bitmask |= mask;
2508         return ret;
2509 }
2510
2511 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
2512 {
2513         u16 *p = &vmx->segment_cache.seg[seg].selector;
2514
2515         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
2516                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
2517         return *p;
2518 }
2519
2520 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
2521 {
2522         ulong *p = &vmx->segment_cache.seg[seg].base;
2523
2524         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
2525                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
2526         return *p;
2527 }
2528
2529 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
2530 {
2531         u32 *p = &vmx->segment_cache.seg[seg].limit;
2532
2533         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
2534                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
2535         return *p;
2536 }
2537
2538 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
2539 {
2540         u32 *p = &vmx->segment_cache.seg[seg].ar;
2541
2542         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
2543                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
2544         return *p;
2545 }
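
/*
 * Illustrative note (editorial, not part of the original source): the
 * segment cache keeps one validity bit per (segment, field) pair at bit
 * position seg * SEG_FIELD_NR + field.  The first call to, say,
 * vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) performs the VMREAD and
 * sets its bit; subsequent calls return the cached value until
 * vmx_segment_cache_clear() resets the bitmask.
 */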
2546
2547 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
2548 {
2549         u32 eb;
2550
2551         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
2552              (1u << DB_VECTOR) | (1u << AC_VECTOR);
2553         /*
2554          * Guest access to VMware backdoor ports could legitimately
2555          * trigger #GP because of TSS I/O permission bitmap.
2556          * We intercept those #GP and allow access to them anyway
2557          * as VMware does.
2558          */
2559         if (enable_vmware_backdoor)
2560                 eb |= (1u << GP_VECTOR);
2561         if ((vcpu->guest_debug &
2562              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
2563             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
2564                 eb |= 1u << BP_VECTOR;
2565         if (to_vmx(vcpu)->rmode.vm86_active)
2566                 eb = ~0;
2567         if (enable_ept)
2568                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
2569
2570         /* When we are running a nested L2 guest and L1 specified for it a
2571          * certain exception bitmap, we must trap the same exceptions and pass
2572          * them to L1. When running L2, we will only handle the exceptions
2573          * specified above if L1 did not want them.
2574          */
2575         if (is_guest_mode(vcpu))
2576                 eb |= get_vmcs12(vcpu)->exception_bitmap;
2577
2578         vmcs_write32(EXCEPTION_BITMAP, eb);
2579 }
2580
2581 /*
2582  * Check if a write to the MSR is intercepted for the currently loaded MSR bitmap.
2583  */
2584 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
2585 {
2586         unsigned long *msr_bitmap;
2587         int f = sizeof(unsigned long);
2588
2589         if (!cpu_has_vmx_msr_bitmap())
2590                 return true;
2591
2592         msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
2593
2594         if (msr <= 0x1fff) {
2595                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2596         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2597                 msr &= 0x1fff;
2598                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2599         }
2600
2601         return true;
2602 }
2603
2604 /*
2605  * Check if a write to the MSR is intercepted for the L01 (vmcs01) MSR bitmap.
2606  */
2607 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
2608 {
2609         unsigned long *msr_bitmap;
2610         int f = sizeof(unsigned long);
2611
2612         if (!cpu_has_vmx_msr_bitmap())
2613                 return true;
2614
2615         msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
2616
2617         if (msr <= 0x1fff) {
2618                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2619         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2620                 msr &= 0x1fff;
2621                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2622         }
2623
2624         return true;
2625 }
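
/*
 * Illustrative note (editorial, not part of the original source): the 4K
 * MSR bitmap page is laid out as read-low (offset 0x000), read-high
 * (0x400), write-low (0x800) and write-high (0xc00) bitmaps, where "low"
 * covers MSRs 0x00000000-0x00001fff and "high" covers
 * 0xc0000000-0xc0001fff.  The two helpers above test only the write
 * bitmaps; a set bit means the write is intercepted.  For example,
 * MSR_IA32_SPEC_CTRL (0x48) is bit 0x48 of the region starting at byte
 * offset 0x800.
 */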
2626
2627 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2628                 unsigned long entry, unsigned long exit)
2629 {
2630         vm_entry_controls_clearbit(vmx, entry);
2631         vm_exit_controls_clearbit(vmx, exit);
2632 }
2633
2634 static int find_msr(struct vmx_msrs *m, unsigned int msr)
2635 {
2636         unsigned int i;
2637
2638         for (i = 0; i < m->nr; ++i) {
2639                 if (m->val[i].index == msr)
2640                         return i;
2641         }
2642         return -ENOENT;
2643 }
2644
2645 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
2646 {
2647         int i;
2648         struct msr_autoload *m = &vmx->msr_autoload;
2649
2650         switch (msr) {
2651         case MSR_EFER:
2652                 if (cpu_has_load_ia32_efer) {
2653                         clear_atomic_switch_msr_special(vmx,
2654                                         VM_ENTRY_LOAD_IA32_EFER,
2655                                         VM_EXIT_LOAD_IA32_EFER);
2656                         return;
2657                 }
2658                 break;
2659         case MSR_CORE_PERF_GLOBAL_CTRL:
2660                 if (cpu_has_load_perf_global_ctrl) {
2661                         clear_atomic_switch_msr_special(vmx,
2662                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2663                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2664                         return;
2665                 }
2666                 break;
2667         }
2668         i = find_msr(&m->guest, msr);
2669         if (i < 0)
2670                 goto skip_guest;
2671         --m->guest.nr;
2672         m->guest.val[i] = m->guest.val[m->guest.nr];
2673         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
2674
2675 skip_guest:
2676         i = find_msr(&m->host, msr);
2677         if (i < 0)
2678                 return;
2679
2680         --m->host.nr;
2681         m->host.val[i] = m->host.val[m->host.nr];
2682         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
2683 }
2684
2685 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2686                 unsigned long entry, unsigned long exit,
2687                 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
2688                 u64 guest_val, u64 host_val)
2689 {
2690         vmcs_write64(guest_val_vmcs, guest_val);
2691         vmcs_write64(host_val_vmcs, host_val);
2692         vm_entry_controls_setbit(vmx, entry);
2693         vm_exit_controls_setbit(vmx, exit);
2694 }
2695
2696 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
2697                                   u64 guest_val, u64 host_val, bool entry_only)
2698 {
2699         int i, j = 0;
2700         struct msr_autoload *m = &vmx->msr_autoload;
2701
2702         switch (msr) {
2703         case MSR_EFER:
2704                 if (cpu_has_load_ia32_efer) {
2705                         add_atomic_switch_msr_special(vmx,
2706                                         VM_ENTRY_LOAD_IA32_EFER,
2707                                         VM_EXIT_LOAD_IA32_EFER,
2708                                         GUEST_IA32_EFER,
2709                                         HOST_IA32_EFER,
2710                                         guest_val, host_val);
2711                         return;
2712                 }
2713                 break;
2714         case MSR_CORE_PERF_GLOBAL_CTRL:
2715                 if (cpu_has_load_perf_global_ctrl) {
2716                         add_atomic_switch_msr_special(vmx,
2717                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2718                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2719                                         GUEST_IA32_PERF_GLOBAL_CTRL,
2720                                         HOST_IA32_PERF_GLOBAL_CTRL,
2721                                         guest_val, host_val);
2722                         return;
2723                 }
2724                 break;
2725         case MSR_IA32_PEBS_ENABLE:
2726                 /* PEBS needs a quiescent period after being disabled (to write
2727                  * a record).  Disabling PEBS through VMX MSR swapping doesn't
2728                  * provide that period, so a CPU could write the host's record
2729                  * into the guest's memory.
2730                  */
2731                 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
2732         }
2733
2734         i = find_msr(&m->guest, msr);
2735         if (!entry_only)
2736                 j = find_msr(&m->host, msr);
2737
2738         if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
2739                 printk_once(KERN_WARNING "Not enough msr switch entries. "
2740                                 "Can't add msr %x\n", msr);
2741                 return;
2742         }
2743         if (i < 0) {
2744                 i = m->guest.nr++;
2745                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
2746         }
2747         m->guest.val[i].index = msr;
2748         m->guest.val[i].value = guest_val;
2749
2750         if (entry_only)
2751                 return;
2752
2753         if (j < 0) {
2754                 j = m->host.nr++;
2755                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
2756         }
2757         m->host.val[j].index = msr;
2758         m->host.val[j].value = host_val;
2759 }
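
/*
 * Illustrative note (editorial, not part of the original source): MSRs
 * without a dedicated VM-entry/VM-exit load control are switched via the
 * autoload arrays programmed through VM_ENTRY_MSR_LOAD_COUNT and
 * VM_EXIT_MSR_LOAD_COUNT, while the special cases (EFER,
 * PERF_GLOBAL_CTRL) use their dedicated controls when available.
 * update_transition_efer() below relies on these helpers whenever EFER
 * cannot be handled through the shared-MSR path.
 */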
2760
2761 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
2762 {
2763         u64 guest_efer = vmx->vcpu.arch.efer;
2764         u64 ignore_bits = 0;
2765
2766         if (!enable_ept) {
2767                 /*
2768                  * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
2769                  * host CPUID is more efficient than testing guest CPUID
2770                  * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
2771                  */
2772                 if (boot_cpu_has(X86_FEATURE_SMEP))
2773                         guest_efer |= EFER_NX;
2774                 else if (!(guest_efer & EFER_NX))
2775                         ignore_bits |= EFER_NX;
2776         }
2777
2778         /*
2779          * LMA and LME handled by hardware; SCE meaningless outside long mode.
2780          */
2781         ignore_bits |= EFER_SCE;
2782 #ifdef CONFIG_X86_64
2783         ignore_bits |= EFER_LMA | EFER_LME;
2784         /* SCE is meaningful only in long mode on Intel */
2785         if (guest_efer & EFER_LMA)
2786                 ignore_bits &= ~(u64)EFER_SCE;
2787 #endif
2788
2789         clear_atomic_switch_msr(vmx, MSR_EFER);
2790
2791         /*
2792          * On EPT, we can't emulate NX, so we must switch EFER atomically.
2793          * On CPUs that support "load IA32_EFER", always switch EFER
2794          * atomically, since it's faster than switching it manually.
2795          */
2796         if (cpu_has_load_ia32_efer ||
2797             (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
2798                 if (!(guest_efer & EFER_LMA))
2799                         guest_efer &= ~EFER_LME;
2800                 if (guest_efer != host_efer)
2801                         add_atomic_switch_msr(vmx, MSR_EFER,
2802                                               guest_efer, host_efer, false);
2803                 return false;
2804         } else {
2805                 guest_efer &= ~ignore_bits;
2806                 guest_efer |= host_efer & ignore_bits;
2807
2808                 vmx->guest_msrs[efer_offset].data = guest_efer;
2809                 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
2810
2811                 return true;
2812         }
2813 }
2814
2815 #ifdef CONFIG_X86_32
2816 /*
2817  * On 32-bit kernels, VM exits still load the FS and GS bases from the
2818  * VMCS rather than the segment table.  KVM uses this helper to figure
2819  * out the current bases to poke them into the VMCS before entry.
2820  */
2821 static unsigned long segment_base(u16 selector)
2822 {
2823         struct desc_struct *table;
2824         unsigned long v;
2825
2826         if (!(selector & ~SEGMENT_RPL_MASK))
2827                 return 0;
2828
2829         table = get_current_gdt_ro();
2830
2831         if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2832                 u16 ldt_selector = kvm_read_ldt();
2833
2834                 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
2835                         return 0;
2836
2837                 table = (struct desc_struct *)segment_base(ldt_selector);
2838         }
2839         v = get_desc_base(&table[selector >> 3]);
2840         return v;
2841 }
2842 #endif
2843
2844 static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
2845 {
2846         struct vcpu_vmx *vmx = to_vmx(vcpu);
2847         struct vmcs_host_state *host_state;
2848 #ifdef CONFIG_X86_64
2849         int cpu = raw_smp_processor_id();
2850 #endif
2851         unsigned long fs_base, gs_base;
2852         u16 fs_sel, gs_sel;
2853         int i;
2854
2855         if (vmx->loaded_cpu_state)
2856                 return;
2857
2858         vmx->loaded_cpu_state = vmx->loaded_vmcs;
2859         host_state = &vmx->loaded_cpu_state->host_state;
2860
2861         /*
2862          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
2863          * allow segment selectors with cpl > 0 or ti == 1.
2864          */
2865         host_state->ldt_sel = kvm_read_ldt();
2866
2867 #ifdef CONFIG_X86_64
2868         savesegment(ds, host_state->ds_sel);
2869         savesegment(es, host_state->es_sel);
2870
2871         gs_base = cpu_kernelmode_gs_base(cpu);
2872         if (likely(is_64bit_mm(current->mm))) {
2873                 save_fsgs_for_kvm();
2874                 fs_sel = current->thread.fsindex;
2875                 gs_sel = current->thread.gsindex;
2876                 fs_base = current->thread.fsbase;
2877                 vmx->msr_host_kernel_gs_base = current->thread.gsbase;
2878         } else {
2879                 savesegment(fs, fs_sel);
2880                 savesegment(gs, gs_sel);
2881                 fs_base = read_msr(MSR_FS_BASE);
2882                 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
2883         }
2884
2885         if (is_long_mode(&vmx->vcpu))
2886                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2887 #else
2888         savesegment(fs, fs_sel);
2889         savesegment(gs, gs_sel);
2890         fs_base = segment_base(fs_sel);
2891         gs_base = segment_base(gs_sel);
2892 #endif
2893
2894         if (unlikely(fs_sel != host_state->fs_sel)) {
2895                 if (!(fs_sel & 7))
2896                         vmcs_write16(HOST_FS_SELECTOR, fs_sel);
2897                 else
2898                         vmcs_write16(HOST_FS_SELECTOR, 0);
2899                 host_state->fs_sel = fs_sel;
2900         }
2901         if (unlikely(gs_sel != host_state->gs_sel)) {
2902                 if (!(gs_sel & 7))
2903                         vmcs_write16(HOST_GS_SELECTOR, gs_sel);
2904                 else
2905                         vmcs_write16(HOST_GS_SELECTOR, 0);
2906                 host_state->gs_sel = gs_sel;
2907         }
2908         if (unlikely(fs_base != host_state->fs_base)) {
2909                 vmcs_writel(HOST_FS_BASE, fs_base);
2910                 host_state->fs_base = fs_base;
2911         }
2912         if (unlikely(gs_base != host_state->gs_base)) {
2913                 vmcs_writel(HOST_GS_BASE, gs_base);
2914                 host_state->gs_base = gs_base;
2915         }
2916
2917         for (i = 0; i < vmx->save_nmsrs; ++i)
2918                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2919                                    vmx->guest_msrs[i].data,
2920                                    vmx->guest_msrs[i].mask);
2921 }
2922
2923 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
2924 {
2925         struct vmcs_host_state *host_state;
2926
2927         if (!vmx->loaded_cpu_state)
2928                 return;
2929
2930         WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
2931         host_state = &vmx->loaded_cpu_state->host_state;
2932
2933         ++vmx->vcpu.stat.host_state_reload;
2934         vmx->loaded_cpu_state = NULL;
2935
2936 #ifdef CONFIG_X86_64
2937         if (is_long_mode(&vmx->vcpu))
2938                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2939 #endif
2940         if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
2941                 kvm_load_ldt(host_state->ldt_sel);
2942 #ifdef CONFIG_X86_64
2943                 load_gs_index(host_state->gs_sel);
2944 #else
2945                 loadsegment(gs, host_state->gs_sel);
2946 #endif
2947         }
2948         if (host_state->fs_sel & 7)
2949                 loadsegment(fs, host_state->fs_sel);
2950 #ifdef CONFIG_X86_64
2951         if (unlikely(host_state->ds_sel | host_state->es_sel)) {
2952                 loadsegment(ds, host_state->ds_sel);
2953                 loadsegment(es, host_state->es_sel);
2954         }
2955 #endif
2956         invalidate_tss_limit();
2957 #ifdef CONFIG_X86_64
2958         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2959 #endif
2960         load_fixmap_gdt(raw_smp_processor_id());
2961 }
2962
2963 #ifdef CONFIG_X86_64
2964 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
2965 {
2966         if (is_long_mode(&vmx->vcpu)) {
2967                 preempt_disable();
2968                 if (vmx->loaded_cpu_state)
2969                         rdmsrl(MSR_KERNEL_GS_BASE,
2970                                vmx->msr_guest_kernel_gs_base);
2971                 preempt_enable();
2972         }
2973         return vmx->msr_guest_kernel_gs_base;
2974 }
2975
2976 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
2977 {
2978         if (is_long_mode(&vmx->vcpu)) {
2979                 preempt_disable();
2980                 if (vmx->loaded_cpu_state)
2981                         wrmsrl(MSR_KERNEL_GS_BASE, data);
2982                 preempt_enable();
2983         }
2984         vmx->msr_guest_kernel_gs_base = data;
2985 }
2986 #endif
2987
2988 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2989 {
2990         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2991         struct pi_desc old, new;
2992         unsigned int dest;
2993
2994         /*
2995          * In case of hot-plug or hot-unplug, we may have to undo
2996          * vmx_vcpu_pi_put even if there is no assigned device.  And we
2997          * always keep PI.NDST up to date for simplicity: it makes the
2998          * code easier, and CPU migration is not a fast path.
2999          */
3000         if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
3001                 return;
3002
3003         /*
3004          * First handle the simple case where no cmpxchg is necessary; just
3005          * allow posting non-urgent interrupts.
3006          *
3007          * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
3008          * PI.NDST: pi_post_block will do it for us and the wakeup_handler
3009          * expects the VCPU to be on the blocked_vcpu_list that matches
3010          * PI.NDST.
3011          */
3012         if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
3013             vcpu->cpu == cpu) {
3014                 pi_clear_sn(pi_desc);
3015                 return;
3016         }
3017
3018         /* The full case.  */
3019         do {
3020                 old.control = new.control = pi_desc->control;
3021
3022                 dest = cpu_physical_id(cpu);
3023
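                /*
                 * NDST uses the same destination format as the interrupt
                 * destination: the full 32-bit APIC ID in x2APIC mode, or
                 * the 8-bit APIC ID in bits 15:8 for xAPIC mode.
                 */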
3024                 if (x2apic_enabled())
3025                         new.ndst = dest;
3026                 else
3027                         new.ndst = (dest << 8) & 0xFF00;
3028
3029                 new.sn = 0;
3030         } while (cmpxchg64(&pi_desc->control, old.control,
3031                            new.control) != old.control);
3032 }
3033
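/*
 * Program the vCPU's TSC scaling ratio into the current VMCS and cache it,
 * so vmx_vcpu_load() can skip the VMWRITE when the ratio has not changed.
 */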
3034 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
3035 {
3036         vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
3037         vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
3038 }
3039
3040 /*
3041  * Switches to the specified vcpu, until a matching vcpu_put(); assumes
3042  * the vcpu mutex is already taken.
3043  */
3044 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3045 {
3046         struct vcpu_vmx *vmx = to_vmx(vcpu);
3047         bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
3048
3049         if (!already_loaded) {
3050                 loaded_vmcs_clear(vmx->loaded_vmcs);
3051                 local_irq_disable();
3052                 crash_disable_local_vmclear(cpu);
3053
3054                 /*
3055                  * The read of loaded_vmcs->cpu must be ordered before
3056                  * the read of loaded_vmcs->loaded_vmcss_on_cpu_link.
3057                  * See the comments in __loaded_vmcs_clear().
3058                  */
3059                 smp_rmb();
3060
3061                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
3062                          &per_cpu(loaded_vmcss_on_cpu, cpu));
3063                 crash_enable_local_vmclear(cpu);
3064                 local_irq_enable();
3065         }
3066
3067         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
3068                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
3069                 vmcs_load(vmx->loaded_vmcs->vmcs);
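                /*
                 * The barrier is only needed when switching to a different
                 * VMCS: it keeps indirect branch predictions learned while
                 * running the previous vCPU from influencing this one.
                 */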
3070                 indirect_branch_prediction_barrier();
3071         }
3072
3073         if (!already_loaded) {
3074                 void *gdt = get_current_gdt_ro();
3075                 unsigned long sysenter_esp;
3076
3077                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3078
3079                 /*
3080                  * Linux uses per-cpu TSS and GDT, so set these when switching
3081                  * processors.  See 22.2.4.
3082                  */
3083                 vmcs_writel(HOST_TR_BASE,
3084                             (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
3085                 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
3086
3087                 /*
3088                  * A VM exit changes the host TR limit to 0x67.  This is
3089                  * okay, since 0x67 covers everything except the IO bitmap,
3090                  * and we have code to handle the IO bitmap being lost
3091                  * after a VM exit.
3092                  */
3093                 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
3094
3095                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
3096                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
3097
3098                 vmx->loaded_vmcs->cpu = cpu;
3099         }
3100
3101         /* Set up the TSC multiplier */
3102         if (kvm_has_tsc_control &&
3103             vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
3104                 decache_tsc_multiplier(vmx);
3105
3106         vmx_vcpu_pi_load(vcpu, cpu);
3107         vmx->host_pkru = read_pkru();
3108         vmx->host_debugctlmsr = get_debugctlmsr();
3109 }
3110
3111 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
3112 {
3113         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
3114
3115         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
3116                 !irq_remapping_cap(IRQ_POSTING_CAP)  ||
3117                 !kvm_vcpu_apicv_active(vcpu))
3118                 return;
3119
3120         /* Set SN when the vCPU is preempted */
3121         if (vcpu->preempted)
3122                 pi_set_sn(pi_desc);
3123 }
3124
3125 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
3126 {
3127         vmx_vcpu_pi_put(vcpu);
3128
3129         vmx_prepare_switch_to_host(to_vmx(vcpu));
3130 }
3131
3132 static bool emulation_required(struct kvm_vcpu *vcpu)
3133 {
3134         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
3135 }
3136
3137 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
3138
3139 /*
3140  * Return the cr0 value that a nested guest would read. This is a combination
3141  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
3142  * its hypervisor (cr0_read_shadow).
3143  */
3144 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
3145 {
3146         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
3147                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
3148 }
3149 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
3150 {
3151         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
3152                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
3153 }
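/*
 * Example: if L1 owns CR0.TS (bit 3 set in cr0_guest_host_mask), the TS
 * value L2 observes comes from cr0_read_shadow, while bits L1 does not own
 * are read straight from guest_cr0.  nested_read_cr4() follows the same
 * scheme for CR4.
 */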
3154
3155 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
3156 {
3157         unsigned long rflags, save_rflags;
3158
3159         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
3160                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
3161                 rflags = vmcs_readl(GUEST_RFLAGS);
3162                 if (to_vmx(vcpu)->rmode.vm86_active) {
3163                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3164                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
3165                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3166                 }
3167                 to_vmx(vcpu)->rflags = rflags;
3168         }
3169         return to_vmx(vcpu)->rflags;
3170 }
3171
3172 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
3173 {
3174         unsigned long old_rflags = vmx_get_rflags(vcpu);
3175
3176         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
3177         to_vmx(vcpu)->rflags = rflags;
3178         if (to_vmx(vcpu)->rmode.vm86_active) {
3179                 to_vmx(vcpu)->rmode.save_rflags = rflags;
3180                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3181         }
3182         vmcs_writel(GUEST_RFLAGS, rflags);
3183
3184         if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
3185                 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
3186 }
3187
3188 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
3189 {
3190         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3191         int ret = 0;
3192
3193         if (interruptibility & GUEST_INTR_STATE_STI)
3194                 ret |= KVM_X86_SHADOW_INT_STI;
3195         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
3196                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
3197
3198         return ret;
3199 }
3200
3201 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
3202 {
3203         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3204         u32 interruptibility = interruptibility_old;
3205
3206         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
3207
3208         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
3209                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
3210         else if (mask & KVM_X86_SHADOW_INT_STI)
3211                 interruptibility |= GUEST_INTR_STATE_STI;
3212
3213         if (interruptibility != interruptibility_old)
3214                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
3215 }
3216
3217 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
3218 {
3219         unsigned long rip;
3220
3221         rip = kvm_rip_read(vcpu);
3222         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3223         kvm_rip_write(vcpu, rip);
3224
3225         /* skipping an emulated instruction also counts */
3226         vmx_set_interrupt_shadow(vcpu, 0);
3227 }
3228
3229 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3230                                                unsigned long exit_qual)
3231 {
3232         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3233         unsigned int nr = vcpu->arch.exception.nr;
3234         u32 intr_info = nr | INTR_INFO_VALID_MASK;
3235
3236         if (vcpu->arch.exception.has_error_code) {
3237                 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3238                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3239         }
3240
3241         if (kvm_exception_is_soft(nr))
3242                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3243         else
3244                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3245
3246         if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3247             vmx_get_nmi_mask(vcpu))
3248                 intr_info |= INTR_INFO_UNBLOCK_NMI;
3249
3250         nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3251 }
3252
3253 /*
3254  * KVM wants to inject the page faults it receives into the guest. This
3255  * function checks whether, in a nested guest, they should go to L1 or L2.
3256  */
3257 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
3258 {
3259         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3260         unsigned int nr = vcpu->arch.exception.nr;
3261
3262         if (nr == PF_VECTOR) {
3263                 if (vcpu->arch.exception.nested_apf) {
3264                         *exit_qual = vcpu->arch.apf.nested_apf_token;
3265                         return 1;
3266                 }
3267                 /*
3268                  * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
3269                  * The fix is to add the ancillary datum (CR2 or DR6) to structs
3270                  * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
3271                  * can be written only when inject_pending_event runs.  This should be
3272                  * conditional on a new capability---if the capability is disabled,
3273                  * kvm_multiple_exception would write the ancillary information to
3274                  * CR2 or DR6, for backwards ABI-compatibility.
3275                  */
3276                 if (nested_vmx_is_page_fault_vmexit(vmcs12,
3277                                                     vcpu->arch.exception.error_code)) {
3278                         *exit_qual = vcpu->arch.cr2;
3279                         return 1;
3280                 }
3281         } else {
3282                 if (vmcs12->exception_bitmap & (1u << nr)) {
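                        /*
                         * For #DB the exit qualification carries the
                         * DR6-style pending debug exceptions; for the other
                         * intercepted exceptions it is simply zero.
                         */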
3283                         if (nr == DB_VECTOR)
3284                                 *exit_qual = vcpu->arch.dr6;
3285                         else
3286                                 *exit_qual = 0;
3287                         return 1;
3288                 }
3289         }
3290
3291         return 0;
3292 }
3293
3294 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
3295 {
3296         /*
3297          * Ensure that we clear the HLT state in the VMCS.  We don't need to
3298          * explicitly skip the instruction because if the HLT state is set,
3299          * then the instruction is already executing and RIP has already been
3300          * advanced.
3301          */
3302         if (kvm_hlt_in_guest(vcpu->kvm) &&
3303                         vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
3304                 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
3305 }
3306
3307 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
3308 {
3309         struct vcpu_vmx *vmx = to_vmx(vcpu);
3310         unsigned int nr = vcpu->arch.exception.nr;
3311         bool has_error_code = vcpu->arch.exception.has_error_code;
3312         u32 error_code = vcpu->arch.exception.error_code;
3313         u32 intr_info = nr | INTR_INFO_VALID_MASK;
3314
3315         if (has_error_code) {
3316                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
3317                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3318         }
3319
3320         if (vmx->rmode.vm86_active) {
3321                 int inc_eip = 0;
3322                 if (kvm_exception_is_soft(nr))
3323                         inc_eip = vcpu->arch.event_exit_inst_len;
3324                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
3325                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3326                 return;
3327         }
3328
3329         WARN_ON_ONCE(vmx->emulation_required);
3330
3331         if (kvm_exception_is_soft(nr)) {
3332                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
3333                              vmx->vcpu.arch.event_exit_inst_len);
3334                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3335         } else
3336                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3337
3338         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
3339
3340         vmx_clear_hlt(vcpu);
3341 }
3342
3343 static bool vmx_rdtscp_supported(void)
3344 {
3345         return cpu_has_vmx_rdtscp();
3346 }
3347
3348 static bool vmx_invpcid_supported(void)
3349 {
3350         return cpu_has_vmx_invpcid();
3351 }
3352
3353 /*
3354  * Swap two entries in the host/guest shared MSR array (vmx->guest_msrs).
3355  */
3356 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
3357 {
3358         struct shared_msr_entry tmp;
3359
3360         tmp = vmx->guest_msrs[to];
3361         vmx->guest_msrs[to] = vmx->guest_msrs[from];
3362         vmx->guest_msrs[from] = tmp;
3363 }
3364
3365 /*
3366  * Set up the vmcs to automatically save and restore system
3367  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
3368  * mode, as fiddling with msrs is very expensive.
3369  */
3370 static void setup_msrs(struct vcpu_vmx *vmx)
3371 {
3372         int save_nmsrs, index;
3373
3374         save_nmsrs = 0;
3375 #ifdef CONFIG_X86_64
3376         if (is_long_mode(&vmx->vcpu)) {
3377                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
3378                 if (index >= 0)
3379                         move_msr_up(vmx, index, save_nmsrs++);
3380                 index = __find_msr_index(vmx, MSR_LSTAR);
3381                 if (index >= 0)
3382                         move_msr_up(vmx, index, save_nmsrs++);
3383                 index = __find_msr_index(vmx, MSR_CSTAR);
3384                 if (index >= 0)
3385                         move_msr_up(vmx, index, save_nmsrs++);
3386                 index = __find_msr_index(vmx, MSR_TSC_AUX);
3387                 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
3388                         move_msr_up(vmx, index, save_nmsrs++);
3389                 /*
3390                  * MSR_STAR is only needed for long mode guests, and only
3391                  * if EFER.SCE is enabled.
3392                  */
3393                 index = __find_msr_index(vmx, MSR_STAR);
3394                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
3395                         move_msr_up(vmx, index, save_nmsrs++);
3396         }
3397 #endif
3398         index = __find_msr_index(vmx, MSR_EFER);
3399         if (index >= 0 && update_transition_efer(vmx, index))
3400                 move_msr_up(vmx, index, save_nmsrs++);
3401
3402         vmx->save_nmsrs = save_nmsrs;
3403
3404         if (cpu_has_vmx_msr_bitmap())
3405                 vmx_update_msr_bitmap(&vmx->vcpu);
3406 }
3407
3408 static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
3409 {
3410         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3411
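        /*
         * While L2 is active, vcpu->arch.tsc_offset holds the combined
         * L1+L2 offset programmed into vmcs02, so subtract L2's component
         * (vmcs12->tsc_offset) to recover the offset L1 itself uses.
         */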
3412         if (is_guest_mode(vcpu) &&
3413             (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
3414                 return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
3415
3416         return vcpu->arch.tsc_offset;
3417 }
3418
3419 /*
3420  * Writes 'offset' into the guest's timestamp counter offset register
3421  */
3422 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
3423 {
3424         if (is_guest_mode(vcpu)) {
3425                 /*
3426                  * We're here if L1 chose not to trap WRMSR to TSC. According
3427                  * to the spec, this should set L1's TSC; the offset that L1
3428                  * set for L2 remains unchanged, and still needs to be added
3429                  * to the newly set TSC to get L2's TSC.
3430                  */
3431                 struct vmcs12 *vmcs12;
3432                 /* recalculate vmcs02.TSC_OFFSET: */
3433                 vmcs12 = get_vmcs12(vcpu);
3434                 vmcs_write64(TSC_OFFSET, offset +
3435                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
3436                          vmcs12->tsc_offset : 0));
3437         } else {
3438                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
3439                                            vmcs_read64(TSC_OFFSET), offset);
3440                 vmcs_write64(TSC_OFFSET, offset);
3441         }
3442 }
3443
3444 /*
3445  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
3446  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
3447  * all guests if the "nested" module option is off, and can also be disabled
3448  * for a single guest by disabling its VMX cpuid bit.
3449  */
3450 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
3451 {
3452         return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
3453 }
3454
3455 /*
3456  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
3457  * returned for the various VMX controls MSRs when nested VMX is enabled.
3458  * The same values should also be used to verify that vmcs12 control fields are
3459  * valid during nested entry from L1 to L2.
3460  * Each of these control msrs has a low and high 32-bit half: A low bit is on
3461  * if the corresponding bit in the (32-bit) control field *must* be on, and a
3462  * bit in the high half is on if the corresponding bit in the control field
3463  * may be on. See also vmx_control_verify().
3464  */
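/*
 * Concretely, a vmcs12 control value 'val' is consistent with a (low, high)
 * pair when every must-be-1 bit is set and nothing outside the may-be-1 mask
 * is set, i.e.:
 *
 *      (val & low) == low && (val & ~high) == 0
 *
 * which is essentially what vmx_control_verify() checks during nested
 * VM-entry.
 */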
3465 static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
3466 {
3467         if (!nested) {
3468                 memset(msrs, 0, sizeof(*msrs));
3469                 return;
3470         }
3471
3472         /*
3473          * Note that as a general rule, the high half of the MSRs (bits in
3474          * the control fields which may be 1) should be initialized by the
3475          * intersection of the underlying hardware's MSR (i.e., features which
3476          * can be supported) and the list of features we want to expose -
3477          * because they are known to be properly supported in our code.
3478          * Also, usually, the low half of the MSRs (bits which must be 1) can
3479          * be set to 0, meaning that L1 may turn off any of these bits. The
3480          * reason is that if one of these bits is necessary, it will appear
3481          * in vmcs01, and prepare_vmcs02, which bitwise-or's the control
3482          * fields of vmcs01 and vmcs12, will keep these bits set in vmcs02 -
3483          * and nested_vmx_exit_reflected() will not pass related exits to L1.
3484          * These rules have exceptions below.
3485          */
3486
3487         /* pin-based controls */
3488         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
3489                 msrs->pinbased_ctls_low,
3490                 msrs->pinbased_ctls_high);
3491         msrs->pinbased_ctls_low |=
3492                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3493         msrs->pinbased_ctls_high &=
3494                 PIN_BASED_EXT_INTR_MASK |
3495                 PIN_BASED_NMI_EXITING |
3496                 PIN_BASED_VIRTUAL_NMIS |
3497                 (apicv ? PIN_BASED_POSTED_INTR : 0);
3498         msrs->pinbased_ctls_high |=
3499                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
3500                 PIN_BASED_VMX_PREEMPTION_TIMER;
3501
3502         /* exit controls */
3503         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
3504                 msrs->exit_ctls_low,
3505                 msrs->exit_ctls_high);
3506         msrs->exit_ctls_low =
3507                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3508
3509         msrs->exit_ctls_high &=
3510 #ifdef CONFIG_X86_64
3511                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
3512 #endif
3513                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
3514         msrs->exit_ctls_high |=
3515                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
3516                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
3517                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
3518
3519         if (kvm_mpx_supported())
3520                 msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
3521
3522         /* We support free control of debug control saving. */
3523         msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
3524
3525         /* entry controls */
3526         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
3527                 msrs->entry_ctls_low,
3528                 msrs->entry_ctls_high);
3529         msrs->entry_ctls_low =
3530                 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3531         msrs->entry_ctls_high &=
3532 #ifdef CONFIG_X86_64
3533                 VM_ENTRY_IA32E_MODE |
3534 #endif
3535                 VM_ENTRY_LOAD_IA32_PAT;
3536         msrs->entry_ctls_high |=
3537                 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
3538         if (kvm_mpx_supported())
3539                 msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
3540
3541         /* We support free control of debug control loading. */
3542         msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
3543
3544         /* cpu-based controls */
3545         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
3546                 msrs->procbased_ctls_low,
3547                 msrs->procbased_ctls_high);
3548         msrs->procbased_ctls_low =
3549                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3550         msrs->procbased_ctls_high &=
3551                 CPU_BASED_VIRTUAL_INTR_PENDING |
3552                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
3553                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
3554                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
3555                 CPU_BASED_CR3_STORE_EXITING |
3556 #ifdef CONFIG_X86_64
3557                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
3558 #endif
3559                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
3560                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
3561                 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
3562                 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
3563                 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
3564         /*
3565          * We can allow some features even when not supported by the
3566          * hardware. For example, L1 can specify an MSR bitmap - and we
3567          * can use it to avoid exits to L1 - even when L0 runs L2
3568          * without MSR bitmaps.
3569          */
3570         msrs->procbased_ctls_high |=
3571                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
3572                 CPU_BASED_USE_MSR_BITMAPS;
3573
3574         /* We support free control of CR3 access interception. */
3575         msrs->procbased_ctls_low &=
3576                 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
3577
3578         /*
3579          * secondary cpu-based controls.  Do not include those that
3580          * depend on CPUID bits; they are added later by vmx_cpuid_update.
3581          */
3582         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
3583                 msrs->secondary_ctls_low,
3584                 msrs->secondary_ctls_high);
3585         msrs->secondary_ctls_low = 0;
3586         msrs->secondary_ctls_high &=
3587                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3588                 SECONDARY_EXEC_DESC |
3589                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3590                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
3591                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3592                 SECONDARY_EXEC_WBINVD_EXITING;
3593         /*
3594          * We can emulate "VMCS shadowing," even if the hardware
3595          * doesn't support it.
3596          */
3597         msrs->secondary_ctls_high |=
3598                 SECONDARY_EXEC_SHADOW_VMCS;
3599
3600         if (enable_ept) {
3601                 /* nested EPT: emulate EPT also to L1 */
3602                 msrs->secondary_ctls_high |=
3603                         SECONDARY_EXEC_ENABLE_EPT;
3604                 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
3605                          VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
3606                 if (cpu_has_vmx_ept_execute_only())
3607                         msrs->ept_caps |=
3608                                 VMX_EPT_EXECUTE_ONLY_BIT;
3609                 msrs->ept_caps &= vmx_capability.ept;
3610                 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
3611                         VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
3612                         VMX_EPT_1GB_PAGE_BIT;
3613                 if (enable_ept_ad_bits) {
3614                         msrs->secondary_ctls_high |=
3615                                 SECONDARY_EXEC_ENABLE_PML;
3616                         msrs->ept_caps |= VMX_EPT_AD_BIT;
3617                 }
3618         }
3619
3620         if (cpu_has_vmx_vmfunc()) {
3621                 msrs->secondary_ctls_high |=
3622                         SECONDARY_EXEC_ENABLE_VMFUNC;
3623                 /*
3624                  * Advertise EPTP switching unconditionally
3625                  * since we emulate it
3626                  */
3627                 if (enable_ept)
3628                         msrs->vmfunc_controls =
3629                                 VMX_VMFUNC_EPTP_SWITCHING;
3630         }
3631
3632         /*
3633          * Old versions of KVM use the single-context version without
3634          * checking for support, so declare that it is supported even
3635          * though it is treated as global context.  The alternative is
3636          * not failing the single-context invvpid, and it is worse.
3637          */
3638         if (enable_vpid) {