x86/entry/64: Fix CR3 restore in paranoid_exit()
[muen/linux.git] / arch / x86 / kvm / vmx.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22 #include "lapic.h"
23
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include <linux/nospec.h>
38 #include "kvm_cache_regs.h"
39 #include "x86.h"
40
41 #include <asm/cpu.h>
42 #include <asm/io.h>
43 #include <asm/desc.h>
44 #include <asm/vmx.h>
45 #include <asm/virtext.h>
46 #include <asm/mce.h>
47 #include <asm/fpu/internal.h>
48 #include <asm/perf_event.h>
49 #include <asm/debugreg.h>
50 #include <asm/kexec.h>
51 #include <asm/apic.h>
52 #include <asm/irq_remapping.h>
53 #include <asm/mmu_context.h>
54 #include <asm/nospec-branch.h>
55
56 #include "trace.h"
57 #include "pmu.h"
58
59 #define __ex(x) __kvm_handle_fault_on_reboot(x)
60 #define __ex_clear(x, reg) \
61         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
62
63 MODULE_AUTHOR("Qumranet");
64 MODULE_LICENSE("GPL");
65
66 static const struct x86_cpu_id vmx_cpu_id[] = {
67         X86_FEATURE_MATCH(X86_FEATURE_VMX),
68         {}
69 };
70 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
71
72 static bool __read_mostly enable_vpid = 1;
73 module_param_named(vpid, enable_vpid, bool, 0444);
74
75 static bool __read_mostly enable_vnmi = 1;
76 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
77
78 static bool __read_mostly flexpriority_enabled = 1;
79 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
80
81 static bool __read_mostly enable_ept = 1;
82 module_param_named(ept, enable_ept, bool, S_IRUGO);
83
84 static bool __read_mostly enable_unrestricted_guest = 1;
85 module_param_named(unrestricted_guest,
86                         enable_unrestricted_guest, bool, S_IRUGO);
87
88 static bool __read_mostly enable_ept_ad_bits = 1;
89 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
90
91 static bool __read_mostly emulate_invalid_guest_state = true;
92 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
93
94 static bool __read_mostly fasteoi = 1;
95 module_param(fasteoi, bool, S_IRUGO);
96
97 static bool __read_mostly enable_apicv = 1;
98 module_param(enable_apicv, bool, S_IRUGO);
99
100 static bool __read_mostly enable_shadow_vmcs = 1;
101 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
102 /*
103  * If nested=1, nested virtualization is supported, i.e., guests may use
104  * VMX and be a hypervisor for its own guests. If nested=0, guests may not
105  * use VMX instructions.
106  */
107 static bool __read_mostly nested = 0;
108 module_param(nested, bool, S_IRUGO);
109
110 static u64 __read_mostly host_xss;
111
112 static bool __read_mostly enable_pml = 1;
113 module_param_named(pml, enable_pml, bool, S_IRUGO);
114
115 #define MSR_TYPE_R      1
116 #define MSR_TYPE_W      2
117 #define MSR_TYPE_RW     3
118
119 #define MSR_BITMAP_MODE_X2APIC          1
120 #define MSR_BITMAP_MODE_X2APIC_APICV    2
121 #define MSR_BITMAP_MODE_LM              4
122
123 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
124
125 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
126 static int __read_mostly cpu_preemption_timer_multi;
127 static bool __read_mostly enable_preemption_timer = 1;
128 #ifdef CONFIG_X86_64
129 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
130 #endif
131
132 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
133 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
134 #define KVM_VM_CR0_ALWAYS_ON                                            \
135         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
136 #define KVM_CR4_GUEST_OWNED_BITS                                      \
137         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
138          | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
139
140 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
141 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
142
143 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
144
145 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
146
147 /*
148  * Hyper-V requires all of these, so mark them as supported even though
149  * they are just treated the same as all-context.
150  */
151 #define VMX_VPID_EXTENT_SUPPORTED_MASK          \
152         (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
153         VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
154         VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
155         VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
156
157 /*
158  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
159  * ple_gap:    upper bound on the amount of time between two successive
160  *             executions of PAUSE in a loop. Also indicate if ple enabled.
161  *             According to test, this time is usually smaller than 128 cycles.
162  * ple_window: upper bound on the amount of time a guest is allowed to execute
163  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
164  *             less than 2^12 cycles
165  * Time is measured based on a counter that runs at the same rate as the TSC,
166  * refer SDM volume 3b section 21.6.13 & 22.1.3.
167  */
168 #define KVM_VMX_DEFAULT_PLE_GAP           128
169 #define KVM_VMX_DEFAULT_PLE_WINDOW        4096
170 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW   2
171 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
172 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX    \
173                 INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW
174
175 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
176 module_param(ple_gap, int, S_IRUGO);
177
178 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
179 module_param(ple_window, int, S_IRUGO);
180
181 /* Default doubles per-vcpu window every exit. */
182 static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
183 module_param(ple_window_grow, int, S_IRUGO);
184
185 /* Default resets per-vcpu window every exit to ple_window. */
186 static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
187 module_param(ple_window_shrink, int, S_IRUGO);
188
189 /* Default is to compute the maximum so we can never overflow. */
190 static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
191 static int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
192 module_param(ple_window_max, int, S_IRUGO);
193
194 extern const ulong vmx_return;
195
196 #define NR_AUTOLOAD_MSRS 8
197
198 struct vmcs {
199         u32 revision_id;
200         u32 abort;
201         char data[0];
202 };
203
204 /*
205  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
206  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
207  * loaded on this CPU (so we can clear them if the CPU goes down).
208  */
209 struct loaded_vmcs {
210         struct vmcs *vmcs;
211         struct vmcs *shadow_vmcs;
212         int cpu;
213         bool launched;
214         bool nmi_known_unmasked;
215         unsigned long vmcs_host_cr3;    /* May not match real cr3 */
216         unsigned long vmcs_host_cr4;    /* May not match real cr4 */
217         /* Support for vnmi-less CPUs */
218         int soft_vnmi_blocked;
219         ktime_t entry_time;
220         s64 vnmi_blocked_time;
221         unsigned long *msr_bitmap;
222         struct list_head loaded_vmcss_on_cpu_link;
223 };
224
225 struct shared_msr_entry {
226         unsigned index;
227         u64 data;
228         u64 mask;
229 };
230
231 /*
232  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
233  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
234  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
235  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
236  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
237  * More than one of these structures may exist, if L1 runs multiple L2 guests.
238  * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
239  * underlying hardware which will be used to run L2.
240  * This structure is packed to ensure that its layout is identical across
241  * machines (necessary for live migration).
242  * If there are changes in this struct, VMCS12_REVISION must be changed.
243  */
244 typedef u64 natural_width;
245 struct __packed vmcs12 {
246         /* According to the Intel spec, a VMCS region must start with the
247          * following two fields. Then follow implementation-specific data.
248          */
249         u32 revision_id;
250         u32 abort;
251
252         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
253         u32 padding[7]; /* room for future expansion */
254
255         u64 io_bitmap_a;
256         u64 io_bitmap_b;
257         u64 msr_bitmap;
258         u64 vm_exit_msr_store_addr;
259         u64 vm_exit_msr_load_addr;
260         u64 vm_entry_msr_load_addr;
261         u64 tsc_offset;
262         u64 virtual_apic_page_addr;
263         u64 apic_access_addr;
264         u64 posted_intr_desc_addr;
265         u64 vm_function_control;
266         u64 ept_pointer;
267         u64 eoi_exit_bitmap0;
268         u64 eoi_exit_bitmap1;
269         u64 eoi_exit_bitmap2;
270         u64 eoi_exit_bitmap3;
271         u64 eptp_list_address;
272         u64 xss_exit_bitmap;
273         u64 guest_physical_address;
274         u64 vmcs_link_pointer;
275         u64 pml_address;
276         u64 guest_ia32_debugctl;
277         u64 guest_ia32_pat;
278         u64 guest_ia32_efer;
279         u64 guest_ia32_perf_global_ctrl;
280         u64 guest_pdptr0;
281         u64 guest_pdptr1;
282         u64 guest_pdptr2;
283         u64 guest_pdptr3;
284         u64 guest_bndcfgs;
285         u64 host_ia32_pat;
286         u64 host_ia32_efer;
287         u64 host_ia32_perf_global_ctrl;
288         u64 padding64[8]; /* room for future expansion */
289         /*
290          * To allow migration of L1 (complete with its L2 guests) between
291          * machines of different natural widths (32 or 64 bit), we cannot have
292          * unsigned long fields with no explict size. We use u64 (aliased
293          * natural_width) instead. Luckily, x86 is little-endian.
294          */
295         natural_width cr0_guest_host_mask;
296         natural_width cr4_guest_host_mask;
297         natural_width cr0_read_shadow;
298         natural_width cr4_read_shadow;
299         natural_width cr3_target_value0;
300         natural_width cr3_target_value1;
301         natural_width cr3_target_value2;
302         natural_width cr3_target_value3;
303         natural_width exit_qualification;
304         natural_width guest_linear_address;
305         natural_width guest_cr0;
306         natural_width guest_cr3;
307         natural_width guest_cr4;
308         natural_width guest_es_base;
309         natural_width guest_cs_base;
310         natural_width guest_ss_base;
311         natural_width guest_ds_base;
312         natural_width guest_fs_base;
313         natural_width guest_gs_base;
314         natural_width guest_ldtr_base;
315         natural_width guest_tr_base;
316         natural_width guest_gdtr_base;
317         natural_width guest_idtr_base;
318         natural_width guest_dr7;
319         natural_width guest_rsp;
320         natural_width guest_rip;
321         natural_width guest_rflags;
322         natural_width guest_pending_dbg_exceptions;
323         natural_width guest_sysenter_esp;
324         natural_width guest_sysenter_eip;
325         natural_width host_cr0;
326         natural_width host_cr3;
327         natural_width host_cr4;
328         natural_width host_fs_base;
329         natural_width host_gs_base;
330         natural_width host_tr_base;
331         natural_width host_gdtr_base;
332         natural_width host_idtr_base;
333         natural_width host_ia32_sysenter_esp;
334         natural_width host_ia32_sysenter_eip;
335         natural_width host_rsp;
336         natural_width host_rip;
337         natural_width paddingl[8]; /* room for future expansion */
338         u32 pin_based_vm_exec_control;
339         u32 cpu_based_vm_exec_control;
340         u32 exception_bitmap;
341         u32 page_fault_error_code_mask;
342         u32 page_fault_error_code_match;
343         u32 cr3_target_count;
344         u32 vm_exit_controls;
345         u32 vm_exit_msr_store_count;
346         u32 vm_exit_msr_load_count;
347         u32 vm_entry_controls;
348         u32 vm_entry_msr_load_count;
349         u32 vm_entry_intr_info_field;
350         u32 vm_entry_exception_error_code;
351         u32 vm_entry_instruction_len;
352         u32 tpr_threshold;
353         u32 secondary_vm_exec_control;
354         u32 vm_instruction_error;
355         u32 vm_exit_reason;
356         u32 vm_exit_intr_info;
357         u32 vm_exit_intr_error_code;
358         u32 idt_vectoring_info_field;
359         u32 idt_vectoring_error_code;
360         u32 vm_exit_instruction_len;
361         u32 vmx_instruction_info;
362         u32 guest_es_limit;
363         u32 guest_cs_limit;
364         u32 guest_ss_limit;
365         u32 guest_ds_limit;
366         u32 guest_fs_limit;
367         u32 guest_gs_limit;
368         u32 guest_ldtr_limit;
369         u32 guest_tr_limit;
370         u32 guest_gdtr_limit;
371         u32 guest_idtr_limit;
372         u32 guest_es_ar_bytes;
373         u32 guest_cs_ar_bytes;
374         u32 guest_ss_ar_bytes;
375         u32 guest_ds_ar_bytes;
376         u32 guest_fs_ar_bytes;
377         u32 guest_gs_ar_bytes;
378         u32 guest_ldtr_ar_bytes;
379         u32 guest_tr_ar_bytes;
380         u32 guest_interruptibility_info;
381         u32 guest_activity_state;
382         u32 guest_sysenter_cs;
383         u32 host_ia32_sysenter_cs;
384         u32 vmx_preemption_timer_value;
385         u32 padding32[7]; /* room for future expansion */
386         u16 virtual_processor_id;
387         u16 posted_intr_nv;
388         u16 guest_es_selector;
389         u16 guest_cs_selector;
390         u16 guest_ss_selector;
391         u16 guest_ds_selector;
392         u16 guest_fs_selector;
393         u16 guest_gs_selector;
394         u16 guest_ldtr_selector;
395         u16 guest_tr_selector;
396         u16 guest_intr_status;
397         u16 guest_pml_index;
398         u16 host_es_selector;
399         u16 host_cs_selector;
400         u16 host_ss_selector;
401         u16 host_ds_selector;
402         u16 host_fs_selector;
403         u16 host_gs_selector;
404         u16 host_tr_selector;
405 };
406
407 /*
408  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
409  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
410  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
411  */
412 #define VMCS12_REVISION 0x11e57ed0
413
414 /*
415  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
416  * and any VMCS region. Although only sizeof(struct vmcs12) are used by the
417  * current implementation, 4K are reserved to avoid future complications.
418  */
419 #define VMCS12_SIZE 0x1000
420
421 /*
422  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
423  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
424  */
425 struct nested_vmx {
426         /* Has the level1 guest done vmxon? */
427         bool vmxon;
428         gpa_t vmxon_ptr;
429         bool pml_full;
430
431         /* The guest-physical address of the current VMCS L1 keeps for L2 */
432         gpa_t current_vmptr;
433         /*
434          * Cache of the guest's VMCS, existing outside of guest memory.
435          * Loaded from guest memory during VMPTRLD. Flushed to guest
436          * memory during VMCLEAR and VMPTRLD.
437          */
438         struct vmcs12 *cached_vmcs12;
439         /*
440          * Indicates if the shadow vmcs must be updated with the
441          * data hold by vmcs12
442          */
443         bool sync_shadow_vmcs;
444
445         bool change_vmcs01_virtual_x2apic_mode;
446         /* L2 must run next, and mustn't decide to exit to L1. */
447         bool nested_run_pending;
448
449         struct loaded_vmcs vmcs02;
450
451         /*
452          * Guest pages referred to in the vmcs02 with host-physical
453          * pointers, so we must keep them pinned while L2 runs.
454          */
455         struct page *apic_access_page;
456         struct page *virtual_apic_page;
457         struct page *pi_desc_page;
458         struct pi_desc *pi_desc;
459         bool pi_pending;
460         u16 posted_intr_nv;
461
462         struct hrtimer preemption_timer;
463         bool preemption_timer_expired;
464
465         /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
466         u64 vmcs01_debugctl;
467
468         u16 vpid02;
469         u16 last_vpid;
470
471         /*
472          * We only store the "true" versions of the VMX capability MSRs. We
473          * generate the "non-true" versions by setting the must-be-1 bits
474          * according to the SDM.
475          */
476         u32 nested_vmx_procbased_ctls_low;
477         u32 nested_vmx_procbased_ctls_high;
478         u32 nested_vmx_secondary_ctls_low;
479         u32 nested_vmx_secondary_ctls_high;
480         u32 nested_vmx_pinbased_ctls_low;
481         u32 nested_vmx_pinbased_ctls_high;
482         u32 nested_vmx_exit_ctls_low;
483         u32 nested_vmx_exit_ctls_high;
484         u32 nested_vmx_entry_ctls_low;
485         u32 nested_vmx_entry_ctls_high;
486         u32 nested_vmx_misc_low;
487         u32 nested_vmx_misc_high;
488         u32 nested_vmx_ept_caps;
489         u32 nested_vmx_vpid_caps;
490         u64 nested_vmx_basic;
491         u64 nested_vmx_cr0_fixed0;
492         u64 nested_vmx_cr0_fixed1;
493         u64 nested_vmx_cr4_fixed0;
494         u64 nested_vmx_cr4_fixed1;
495         u64 nested_vmx_vmcs_enum;
496         u64 nested_vmx_vmfunc_controls;
497
498         /* SMM related state */
499         struct {
500                 /* in VMX operation on SMM entry? */
501                 bool vmxon;
502                 /* in guest mode on SMM entry? */
503                 bool guest_mode;
504         } smm;
505 };
506
507 #define POSTED_INTR_ON  0
508 #define POSTED_INTR_SN  1
509
510 /* Posted-Interrupt Descriptor */
511 struct pi_desc {
512         u32 pir[8];     /* Posted interrupt requested */
513         union {
514                 struct {
515                                 /* bit 256 - Outstanding Notification */
516                         u16     on      : 1,
517                                 /* bit 257 - Suppress Notification */
518                                 sn      : 1,
519                                 /* bit 271:258 - Reserved */
520                                 rsvd_1  : 14;
521                                 /* bit 279:272 - Notification Vector */
522                         u8      nv;
523                                 /* bit 287:280 - Reserved */
524                         u8      rsvd_2;
525                                 /* bit 319:288 - Notification Destination */
526                         u32     ndst;
527                 };
528                 u64 control;
529         };
530         u32 rsvd[6];
531 } __aligned(64);
532
533 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
534 {
535         return test_and_set_bit(POSTED_INTR_ON,
536                         (unsigned long *)&pi_desc->control);
537 }
538
539 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
540 {
541         return test_and_clear_bit(POSTED_INTR_ON,
542                         (unsigned long *)&pi_desc->control);
543 }
544
545 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
546 {
547         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
548 }
549
550 static inline void pi_clear_sn(struct pi_desc *pi_desc)
551 {
552         return clear_bit(POSTED_INTR_SN,
553                         (unsigned long *)&pi_desc->control);
554 }
555
556 static inline void pi_set_sn(struct pi_desc *pi_desc)
557 {
558         return set_bit(POSTED_INTR_SN,
559                         (unsigned long *)&pi_desc->control);
560 }
561
562 static inline void pi_clear_on(struct pi_desc *pi_desc)
563 {
564         clear_bit(POSTED_INTR_ON,
565                   (unsigned long *)&pi_desc->control);
566 }
567
568 static inline int pi_test_on(struct pi_desc *pi_desc)
569 {
570         return test_bit(POSTED_INTR_ON,
571                         (unsigned long *)&pi_desc->control);
572 }
573
574 static inline int pi_test_sn(struct pi_desc *pi_desc)
575 {
576         return test_bit(POSTED_INTR_SN,
577                         (unsigned long *)&pi_desc->control);
578 }
579
580 struct vcpu_vmx {
581         struct kvm_vcpu       vcpu;
582         unsigned long         host_rsp;
583         u8                    fail;
584         u8                    msr_bitmap_mode;
585         u32                   exit_intr_info;
586         u32                   idt_vectoring_info;
587         ulong                 rflags;
588         struct shared_msr_entry *guest_msrs;
589         int                   nmsrs;
590         int                   save_nmsrs;
591         unsigned long         host_idt_base;
592 #ifdef CONFIG_X86_64
593         u64                   msr_host_kernel_gs_base;
594         u64                   msr_guest_kernel_gs_base;
595 #endif
596
597         u64                   arch_capabilities;
598         u64                   spec_ctrl;
599
600         u32 vm_entry_controls_shadow;
601         u32 vm_exit_controls_shadow;
602         u32 secondary_exec_control;
603
604         /*
605          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
606          * non-nested (L1) guest, it always points to vmcs01. For a nested
607          * guest (L2), it points to a different VMCS.
608          */
609         struct loaded_vmcs    vmcs01;
610         struct loaded_vmcs   *loaded_vmcs;
611         bool                  __launched; /* temporary, used in vmx_vcpu_run */
612         struct msr_autoload {
613                 unsigned nr;
614                 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
615                 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
616         } msr_autoload;
617         struct {
618                 int           loaded;
619                 u16           fs_sel, gs_sel, ldt_sel;
620 #ifdef CONFIG_X86_64
621                 u16           ds_sel, es_sel;
622 #endif
623                 int           gs_ldt_reload_needed;
624                 int           fs_reload_needed;
625                 u64           msr_host_bndcfgs;
626         } host_state;
627         struct {
628                 int vm86_active;
629                 ulong save_rflags;
630                 struct kvm_segment segs[8];
631         } rmode;
632         struct {
633                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
634                 struct kvm_save_segment {
635                         u16 selector;
636                         unsigned long base;
637                         u32 limit;
638                         u32 ar;
639                 } seg[8];
640         } segment_cache;
641         int vpid;
642         bool emulation_required;
643
644         u32 exit_reason;
645
646         /* Posted interrupt descriptor */
647         struct pi_desc pi_desc;
648
649         /* Support for a guest hypervisor (nested VMX) */
650         struct nested_vmx nested;
651
652         /* Dynamic PLE window. */
653         int ple_window;
654         bool ple_window_dirty;
655
656         /* Support for PML */
657 #define PML_ENTITY_NUM          512
658         struct page *pml_pg;
659
660         /* apic deadline value in host tsc */
661         u64 hv_deadline_tsc;
662
663         u64 current_tsc_ratio;
664
665         u32 host_pkru;
666
667         /*
668          * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
669          * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
670          * in msr_ia32_feature_control_valid_bits.
671          */
672         u64 msr_ia32_feature_control;
673         u64 msr_ia32_feature_control_valid_bits;
674 };
675
676 enum segment_cache_field {
677         SEG_FIELD_SEL = 0,
678         SEG_FIELD_BASE = 1,
679         SEG_FIELD_LIMIT = 2,
680         SEG_FIELD_AR = 3,
681
682         SEG_FIELD_NR = 4
683 };
684
685 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
686 {
687         return container_of(vcpu, struct vcpu_vmx, vcpu);
688 }
689
690 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
691 {
692         return &(to_vmx(vcpu)->pi_desc);
693 }
694
695 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
696 #define FIELD(number, name)     [number] = VMCS12_OFFSET(name)
697 #define FIELD64(number, name)   [number] = VMCS12_OFFSET(name), \
698                                 [number##_HIGH] = VMCS12_OFFSET(name)+4
699
700
701 static unsigned long shadow_read_only_fields[] = {
702         /*
703          * We do NOT shadow fields that are modified when L0
704          * traps and emulates any vmx instruction (e.g. VMPTRLD,
705          * VMXON...) executed by L1.
706          * For example, VM_INSTRUCTION_ERROR is read
707          * by L1 if a vmx instruction fails (part of the error path).
708          * Note the code assumes this logic. If for some reason
709          * we start shadowing these fields then we need to
710          * force a shadow sync when L0 emulates vmx instructions
711          * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
712          * by nested_vmx_failValid)
713          */
714         VM_EXIT_REASON,
715         VM_EXIT_INTR_INFO,
716         VM_EXIT_INSTRUCTION_LEN,
717         IDT_VECTORING_INFO_FIELD,
718         IDT_VECTORING_ERROR_CODE,
719         VM_EXIT_INTR_ERROR_CODE,
720         EXIT_QUALIFICATION,
721         GUEST_LINEAR_ADDRESS,
722         GUEST_PHYSICAL_ADDRESS
723 };
724 static int max_shadow_read_only_fields =
725         ARRAY_SIZE(shadow_read_only_fields);
726
727 static unsigned long shadow_read_write_fields[] = {
728         TPR_THRESHOLD,
729         GUEST_RIP,
730         GUEST_RSP,
731         GUEST_CR0,
732         GUEST_CR3,
733         GUEST_CR4,
734         GUEST_INTERRUPTIBILITY_INFO,
735         GUEST_RFLAGS,
736         GUEST_CS_SELECTOR,
737         GUEST_CS_AR_BYTES,
738         GUEST_CS_LIMIT,
739         GUEST_CS_BASE,
740         GUEST_ES_BASE,
741         GUEST_BNDCFGS,
742         CR0_GUEST_HOST_MASK,
743         CR0_READ_SHADOW,
744         CR4_READ_SHADOW,
745         TSC_OFFSET,
746         EXCEPTION_BITMAP,
747         CPU_BASED_VM_EXEC_CONTROL,
748         VM_ENTRY_EXCEPTION_ERROR_CODE,
749         VM_ENTRY_INTR_INFO_FIELD,
750         VM_ENTRY_INSTRUCTION_LEN,
751         VM_ENTRY_EXCEPTION_ERROR_CODE,
752         HOST_FS_BASE,
753         HOST_GS_BASE,
754         HOST_FS_SELECTOR,
755         HOST_GS_SELECTOR
756 };
757 static int max_shadow_read_write_fields =
758         ARRAY_SIZE(shadow_read_write_fields);
759
760 static const unsigned short vmcs_field_to_offset_table[] = {
761         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
762         FIELD(POSTED_INTR_NV, posted_intr_nv),
763         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
764         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
765         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
766         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
767         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
768         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
769         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
770         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
771         FIELD(GUEST_INTR_STATUS, guest_intr_status),
772         FIELD(GUEST_PML_INDEX, guest_pml_index),
773         FIELD(HOST_ES_SELECTOR, host_es_selector),
774         FIELD(HOST_CS_SELECTOR, host_cs_selector),
775         FIELD(HOST_SS_SELECTOR, host_ss_selector),
776         FIELD(HOST_DS_SELECTOR, host_ds_selector),
777         FIELD(HOST_FS_SELECTOR, host_fs_selector),
778         FIELD(HOST_GS_SELECTOR, host_gs_selector),
779         FIELD(HOST_TR_SELECTOR, host_tr_selector),
780         FIELD64(IO_BITMAP_A, io_bitmap_a),
781         FIELD64(IO_BITMAP_B, io_bitmap_b),
782         FIELD64(MSR_BITMAP, msr_bitmap),
783         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
784         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
785         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
786         FIELD64(TSC_OFFSET, tsc_offset),
787         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
788         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
789         FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
790         FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
791         FIELD64(EPT_POINTER, ept_pointer),
792         FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
793         FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
794         FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
795         FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
796         FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
797         FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
798         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
799         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
800         FIELD64(PML_ADDRESS, pml_address),
801         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
802         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
803         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
804         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
805         FIELD64(GUEST_PDPTR0, guest_pdptr0),
806         FIELD64(GUEST_PDPTR1, guest_pdptr1),
807         FIELD64(GUEST_PDPTR2, guest_pdptr2),
808         FIELD64(GUEST_PDPTR3, guest_pdptr3),
809         FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
810         FIELD64(HOST_IA32_PAT, host_ia32_pat),
811         FIELD64(HOST_IA32_EFER, host_ia32_efer),
812         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
813         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
814         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
815         FIELD(EXCEPTION_BITMAP, exception_bitmap),
816         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
817         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
818         FIELD(CR3_TARGET_COUNT, cr3_target_count),
819         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
820         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
821         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
822         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
823         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
824         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
825         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
826         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
827         FIELD(TPR_THRESHOLD, tpr_threshold),
828         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
829         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
830         FIELD(VM_EXIT_REASON, vm_exit_reason),
831         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
832         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
833         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
834         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
835         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
836         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
837         FIELD(GUEST_ES_LIMIT, guest_es_limit),
838         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
839         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
840         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
841         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
842         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
843         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
844         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
845         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
846         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
847         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
848         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
849         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
850         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
851         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
852         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
853         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
854         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
855         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
856         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
857         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
858         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
859         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
860         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
861         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
862         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
863         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
864         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
865         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
866         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
867         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
868         FIELD(EXIT_QUALIFICATION, exit_qualification),
869         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
870         FIELD(GUEST_CR0, guest_cr0),
871         FIELD(GUEST_CR3, guest_cr3),
872         FIELD(GUEST_CR4, guest_cr4),
873         FIELD(GUEST_ES_BASE, guest_es_base),
874         FIELD(GUEST_CS_BASE, guest_cs_base),
875         FIELD(GUEST_SS_BASE, guest_ss_base),
876         FIELD(GUEST_DS_BASE, guest_ds_base),
877         FIELD(GUEST_FS_BASE, guest_fs_base),
878         FIELD(GUEST_GS_BASE, guest_gs_base),
879         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
880         FIELD(GUEST_TR_BASE, guest_tr_base),
881         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
882         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
883         FIELD(GUEST_DR7, guest_dr7),
884         FIELD(GUEST_RSP, guest_rsp),
885         FIELD(GUEST_RIP, guest_rip),
886         FIELD(GUEST_RFLAGS, guest_rflags),
887         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
888         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
889         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
890         FIELD(HOST_CR0, host_cr0),
891         FIELD(HOST_CR3, host_cr3),
892         FIELD(HOST_CR4, host_cr4),
893         FIELD(HOST_FS_BASE, host_fs_base),
894         FIELD(HOST_GS_BASE, host_gs_base),
895         FIELD(HOST_TR_BASE, host_tr_base),
896         FIELD(HOST_GDTR_BASE, host_gdtr_base),
897         FIELD(HOST_IDTR_BASE, host_idtr_base),
898         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
899         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
900         FIELD(HOST_RSP, host_rsp),
901         FIELD(HOST_RIP, host_rip),
902 };
903
904 static inline short vmcs_field_to_offset(unsigned long field)
905 {
906         const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
907         unsigned short offset;
908
909         BUILD_BUG_ON(size > SHRT_MAX);
910         if (field >= size)
911                 return -ENOENT;
912
913         field = array_index_nospec(field, size);
914         offset = vmcs_field_to_offset_table[field];
915         if (offset == 0)
916                 return -ENOENT;
917         return offset;
918 }
919
920 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
921 {
922         return to_vmx(vcpu)->nested.cached_vmcs12;
923 }
924
925 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
926 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
927 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
928 static bool vmx_xsaves_supported(void);
929 static void vmx_set_segment(struct kvm_vcpu *vcpu,
930                             struct kvm_segment *var, int seg);
931 static void vmx_get_segment(struct kvm_vcpu *vcpu,
932                             struct kvm_segment *var, int seg);
933 static bool guest_state_valid(struct kvm_vcpu *vcpu);
934 static u32 vmx_segment_access_rights(struct kvm_segment *var);
935 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
936 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
937 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
938 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
939                                             u16 error_code);
940 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
941 static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
942                                                           u32 msr, int type);
943
944 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
945 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
946 /*
947  * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
948  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
949  */
950 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
951
952 /*
953  * We maintian a per-CPU linked-list of vCPU, so in wakeup_handler() we
954  * can find which vCPU should be waken up.
955  */
956 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
957 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
958
959 enum {
960         VMX_IO_BITMAP_A,
961         VMX_IO_BITMAP_B,
962         VMX_VMREAD_BITMAP,
963         VMX_VMWRITE_BITMAP,
964         VMX_BITMAP_NR
965 };
966
967 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
968
969 #define vmx_io_bitmap_a                      (vmx_bitmap[VMX_IO_BITMAP_A])
970 #define vmx_io_bitmap_b                      (vmx_bitmap[VMX_IO_BITMAP_B])
971 #define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
972 #define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
973
974 static bool cpu_has_load_ia32_efer;
975 static bool cpu_has_load_perf_global_ctrl;
976
977 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
978 static DEFINE_SPINLOCK(vmx_vpid_lock);
979
980 static struct vmcs_config {
981         int size;
982         int order;
983         u32 basic_cap;
984         u32 revision_id;
985         u32 pin_based_exec_ctrl;
986         u32 cpu_based_exec_ctrl;
987         u32 cpu_based_2nd_exec_ctrl;
988         u32 vmexit_ctrl;
989         u32 vmentry_ctrl;
990 } vmcs_config;
991
992 static struct vmx_capability {
993         u32 ept;
994         u32 vpid;
995 } vmx_capability;
996
997 #define VMX_SEGMENT_FIELD(seg)                                  \
998         [VCPU_SREG_##seg] = {                                   \
999                 .selector = GUEST_##seg##_SELECTOR,             \
1000                 .base = GUEST_##seg##_BASE,                     \
1001                 .limit = GUEST_##seg##_LIMIT,                   \
1002                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
1003         }
1004
1005 static const struct kvm_vmx_segment_field {
1006         unsigned selector;
1007         unsigned base;
1008         unsigned limit;
1009         unsigned ar_bytes;
1010 } kvm_vmx_segment_fields[] = {
1011         VMX_SEGMENT_FIELD(CS),
1012         VMX_SEGMENT_FIELD(DS),
1013         VMX_SEGMENT_FIELD(ES),
1014         VMX_SEGMENT_FIELD(FS),
1015         VMX_SEGMENT_FIELD(GS),
1016         VMX_SEGMENT_FIELD(SS),
1017         VMX_SEGMENT_FIELD(TR),
1018         VMX_SEGMENT_FIELD(LDTR),
1019 };
1020
1021 static u64 host_efer;
1022
1023 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1024
1025 /*
1026  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
1027  * away by decrementing the array size.
1028  */
1029 static const u32 vmx_msr_index[] = {
1030 #ifdef CONFIG_X86_64
1031         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
1032 #endif
1033         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
1034 };
1035
1036 static inline bool is_exception_n(u32 intr_info, u8 vector)
1037 {
1038         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1039                              INTR_INFO_VALID_MASK)) ==
1040                 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1041 }
1042
1043 static inline bool is_debug(u32 intr_info)
1044 {
1045         return is_exception_n(intr_info, DB_VECTOR);
1046 }
1047
1048 static inline bool is_breakpoint(u32 intr_info)
1049 {
1050         return is_exception_n(intr_info, BP_VECTOR);
1051 }
1052
1053 static inline bool is_page_fault(u32 intr_info)
1054 {
1055         return is_exception_n(intr_info, PF_VECTOR);
1056 }
1057
1058 static inline bool is_no_device(u32 intr_info)
1059 {
1060         return is_exception_n(intr_info, NM_VECTOR);
1061 }
1062
1063 static inline bool is_invalid_opcode(u32 intr_info)
1064 {
1065         return is_exception_n(intr_info, UD_VECTOR);
1066 }
1067
1068 static inline bool is_external_interrupt(u32 intr_info)
1069 {
1070         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1071                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1072 }
1073
1074 static inline bool is_machine_check(u32 intr_info)
1075 {
1076         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1077                              INTR_INFO_VALID_MASK)) ==
1078                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1079 }
1080
1081 static inline bool cpu_has_vmx_msr_bitmap(void)
1082 {
1083         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1084 }
1085
1086 static inline bool cpu_has_vmx_tpr_shadow(void)
1087 {
1088         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1089 }
1090
1091 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1092 {
1093         return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1094 }
1095
1096 static inline bool cpu_has_secondary_exec_ctrls(void)
1097 {
1098         return vmcs_config.cpu_based_exec_ctrl &
1099                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1100 }
1101
1102 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1103 {
1104         return vmcs_config.cpu_based_2nd_exec_ctrl &
1105                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1106 }
1107
1108 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1109 {
1110         return vmcs_config.cpu_based_2nd_exec_ctrl &
1111                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1112 }
1113
1114 static inline bool cpu_has_vmx_apic_register_virt(void)
1115 {
1116         return vmcs_config.cpu_based_2nd_exec_ctrl &
1117                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1118 }
1119
1120 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1121 {
1122         return vmcs_config.cpu_based_2nd_exec_ctrl &
1123                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1124 }
1125
1126 /*
1127  * Comment's format: document - errata name - stepping - processor name.
1128  * Refer from
1129  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1130  */
1131 static u32 vmx_preemption_cpu_tfms[] = {
1132 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
1133 0x000206E6,
1134 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
1135 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1136 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1137 0x00020652,
1138 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1139 0x00020655,
1140 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
1141 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
1142 /*
1143  * 320767.pdf - AAP86  - B1 -
1144  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1145  */
1146 0x000106E5,
1147 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1148 0x000106A0,
1149 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1150 0x000106A1,
1151 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1152 0x000106A4,
1153  /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1154  /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1155  /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1156 0x000106A5,
1157 };
1158
1159 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1160 {
1161         u32 eax = cpuid_eax(0x00000001), i;
1162
1163         /* Clear the reserved bits */
1164         eax &= ~(0x3U << 14 | 0xfU << 28);
1165         for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1166                 if (eax == vmx_preemption_cpu_tfms[i])
1167                         return true;
1168
1169         return false;
1170 }
1171
1172 static inline bool cpu_has_vmx_preemption_timer(void)
1173 {
1174         return vmcs_config.pin_based_exec_ctrl &
1175                 PIN_BASED_VMX_PREEMPTION_TIMER;
1176 }
1177
1178 static inline bool cpu_has_vmx_posted_intr(void)
1179 {
1180         return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1181                 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1182 }
1183
1184 static inline bool cpu_has_vmx_apicv(void)
1185 {
1186         return cpu_has_vmx_apic_register_virt() &&
1187                 cpu_has_vmx_virtual_intr_delivery() &&
1188                 cpu_has_vmx_posted_intr();
1189 }
1190
1191 static inline bool cpu_has_vmx_flexpriority(void)
1192 {
1193         return cpu_has_vmx_tpr_shadow() &&
1194                 cpu_has_vmx_virtualize_apic_accesses();
1195 }
1196
1197 static inline bool cpu_has_vmx_ept_execute_only(void)
1198 {
1199         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1200 }
1201
1202 static inline bool cpu_has_vmx_ept_2m_page(void)
1203 {
1204         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1205 }
1206
1207 static inline bool cpu_has_vmx_ept_1g_page(void)
1208 {
1209         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1210 }
1211
1212 static inline bool cpu_has_vmx_ept_4levels(void)
1213 {
1214         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1215 }
1216
1217 static inline bool cpu_has_vmx_ept_mt_wb(void)
1218 {
1219         return vmx_capability.ept & VMX_EPTP_WB_BIT;
1220 }
1221
1222 static inline bool cpu_has_vmx_ept_5levels(void)
1223 {
1224         return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1225 }
1226
1227 static inline bool cpu_has_vmx_ept_ad_bits(void)
1228 {
1229         return vmx_capability.ept & VMX_EPT_AD_BIT;
1230 }
1231
1232 static inline bool cpu_has_vmx_invept_context(void)
1233 {
1234         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1235 }
1236
1237 static inline bool cpu_has_vmx_invept_global(void)
1238 {
1239         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1240 }
1241
1242 static inline bool cpu_has_vmx_invvpid_single(void)
1243 {
1244         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1245 }
1246
1247 static inline bool cpu_has_vmx_invvpid_global(void)
1248 {
1249         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1250 }
1251
1252 static inline bool cpu_has_vmx_invvpid(void)
1253 {
1254         return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1255 }
1256
1257 static inline bool cpu_has_vmx_ept(void)
1258 {
1259         return vmcs_config.cpu_based_2nd_exec_ctrl &
1260                 SECONDARY_EXEC_ENABLE_EPT;
1261 }
1262
1263 static inline bool cpu_has_vmx_unrestricted_guest(void)
1264 {
1265         return vmcs_config.cpu_based_2nd_exec_ctrl &
1266                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1267 }
1268
1269 static inline bool cpu_has_vmx_ple(void)
1270 {
1271         return vmcs_config.cpu_based_2nd_exec_ctrl &
1272                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1273 }
1274
1275 static inline bool cpu_has_vmx_basic_inout(void)
1276 {
1277         return  (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1278 }
1279
1280 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1281 {
1282         return flexpriority_enabled && lapic_in_kernel(vcpu);
1283 }
1284
1285 static inline bool cpu_has_vmx_vpid(void)
1286 {
1287         return vmcs_config.cpu_based_2nd_exec_ctrl &
1288                 SECONDARY_EXEC_ENABLE_VPID;
1289 }
1290
1291 static inline bool cpu_has_vmx_rdtscp(void)
1292 {
1293         return vmcs_config.cpu_based_2nd_exec_ctrl &
1294                 SECONDARY_EXEC_RDTSCP;
1295 }
1296
1297 static inline bool cpu_has_vmx_invpcid(void)
1298 {
1299         return vmcs_config.cpu_based_2nd_exec_ctrl &
1300                 SECONDARY_EXEC_ENABLE_INVPCID;
1301 }
1302
1303 static inline bool cpu_has_virtual_nmis(void)
1304 {
1305         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1306 }
1307
1308 static inline bool cpu_has_vmx_wbinvd_exit(void)
1309 {
1310         return vmcs_config.cpu_based_2nd_exec_ctrl &
1311                 SECONDARY_EXEC_WBINVD_EXITING;
1312 }
1313
1314 static inline bool cpu_has_vmx_shadow_vmcs(void)
1315 {
1316         u64 vmx_msr;
1317         rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1318         /* check if the cpu supports writing r/o exit information fields */
1319         if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1320                 return false;
1321
1322         return vmcs_config.cpu_based_2nd_exec_ctrl &
1323                 SECONDARY_EXEC_SHADOW_VMCS;
1324 }
1325
1326 static inline bool cpu_has_vmx_pml(void)
1327 {
1328         return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1329 }
1330
1331 static inline bool cpu_has_vmx_tsc_scaling(void)
1332 {
1333         return vmcs_config.cpu_based_2nd_exec_ctrl &
1334                 SECONDARY_EXEC_TSC_SCALING;
1335 }
1336
1337 static inline bool cpu_has_vmx_vmfunc(void)
1338 {
1339         return vmcs_config.cpu_based_2nd_exec_ctrl &
1340                 SECONDARY_EXEC_ENABLE_VMFUNC;
1341 }
1342
1343 static inline bool report_flexpriority(void)
1344 {
1345         return flexpriority_enabled;
1346 }
1347
1348 static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1349 {
1350         return vmx_misc_cr3_count(to_vmx(vcpu)->nested.nested_vmx_misc_low);
1351 }
1352
1353 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1354 {
1355         return vmcs12->cpu_based_vm_exec_control & bit;
1356 }
1357
1358 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1359 {
1360         return (vmcs12->cpu_based_vm_exec_control &
1361                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1362                 (vmcs12->secondary_vm_exec_control & bit);
1363 }
1364
1365 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1366 {
1367         return vmcs12->pin_based_vm_exec_control &
1368                 PIN_BASED_VMX_PREEMPTION_TIMER;
1369 }
1370
1371 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1372 {
1373         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1374 }
1375
1376 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1377 {
1378         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
1379 }
1380
1381 static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
1382 {
1383         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
1384 }
1385
1386 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
1387 {
1388         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
1389 }
1390
1391 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
1392 {
1393         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
1394 }
1395
1396 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
1397 {
1398         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
1399 }
1400
1401 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
1402 {
1403         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
1404 }
1405
1406 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
1407 {
1408         return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
1409 }
1410
1411 static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
1412 {
1413         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
1414 }
1415
1416 static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
1417 {
1418         return nested_cpu_has_vmfunc(vmcs12) &&
1419                 (vmcs12->vm_function_control &
1420                  VMX_VMFUNC_EPTP_SWITCHING);
1421 }
1422
1423 static inline bool is_nmi(u32 intr_info)
1424 {
1425         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1426                 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
1427 }
1428
1429 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1430                               u32 exit_intr_info,
1431                               unsigned long exit_qualification);
1432 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1433                         struct vmcs12 *vmcs12,
1434                         u32 reason, unsigned long qualification);
1435
1436 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1437 {
1438         int i;
1439
1440         for (i = 0; i < vmx->nmsrs; ++i)
1441                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
1442                         return i;
1443         return -1;
1444 }
1445
1446 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1447 {
1448     struct {
1449         u64 vpid : 16;
1450         u64 rsvd : 48;
1451         u64 gva;
1452     } operand = { vpid, 0, gva };
1453
1454     asm volatile (__ex(ASM_VMX_INVVPID)
1455                   /* CF==1 or ZF==1 --> rc = -1 */
1456                   "; ja 1f ; ud2 ; 1:"
1457                   : : "a"(&operand), "c"(ext) : "cc", "memory");
1458 }
1459
1460 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1461 {
1462         struct {
1463                 u64 eptp, gpa;
1464         } operand = {eptp, gpa};
1465
1466         asm volatile (__ex(ASM_VMX_INVEPT)
1467                         /* CF==1 or ZF==1 --> rc = -1 */
1468                         "; ja 1f ; ud2 ; 1:\n"
1469                         : : "a" (&operand), "c" (ext) : "cc", "memory");
1470 }
1471
1472 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1473 {
1474         int i;
1475
1476         i = __find_msr_index(vmx, msr);
1477         if (i >= 0)
1478                 return &vmx->guest_msrs[i];
1479         return NULL;
1480 }
1481
1482 static void vmcs_clear(struct vmcs *vmcs)
1483 {
1484         u64 phys_addr = __pa(vmcs);
1485         u8 error;
1486
1487         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
1488                       : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1489                       : "cc", "memory");
1490         if (error)
1491                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1492                        vmcs, phys_addr);
1493 }
1494
1495 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1496 {
1497         vmcs_clear(loaded_vmcs->vmcs);
1498         if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
1499                 vmcs_clear(loaded_vmcs->shadow_vmcs);
1500         loaded_vmcs->cpu = -1;
1501         loaded_vmcs->launched = 0;
1502 }
1503
1504 static void vmcs_load(struct vmcs *vmcs)
1505 {
1506         u64 phys_addr = __pa(vmcs);
1507         u8 error;
1508
1509         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1510                         : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1511                         : "cc", "memory");
1512         if (error)
1513                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1514                        vmcs, phys_addr);
1515 }
1516
1517 #ifdef CONFIG_KEXEC_CORE
1518 /*
1519  * This bitmap is used to indicate whether the vmclear
1520  * operation is enabled on all cpus. All disabled by
1521  * default.
1522  */
1523 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1524
1525 static inline void crash_enable_local_vmclear(int cpu)
1526 {
1527         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1528 }
1529
1530 static inline void crash_disable_local_vmclear(int cpu)
1531 {
1532         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1533 }
1534
1535 static inline int crash_local_vmclear_enabled(int cpu)
1536 {
1537         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1538 }
1539
1540 static void crash_vmclear_local_loaded_vmcss(void)
1541 {
1542         int cpu = raw_smp_processor_id();
1543         struct loaded_vmcs *v;
1544
1545         if (!crash_local_vmclear_enabled(cpu))
1546                 return;
1547
1548         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1549                             loaded_vmcss_on_cpu_link)
1550                 vmcs_clear(v->vmcs);
1551 }
1552 #else
1553 static inline void crash_enable_local_vmclear(int cpu) { }
1554 static inline void crash_disable_local_vmclear(int cpu) { }
1555 #endif /* CONFIG_KEXEC_CORE */
1556
1557 static void __loaded_vmcs_clear(void *arg)
1558 {
1559         struct loaded_vmcs *loaded_vmcs = arg;
1560         int cpu = raw_smp_processor_id();
1561
1562         if (loaded_vmcs->cpu != cpu)
1563                 return; /* vcpu migration can race with cpu offline */
1564         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1565                 per_cpu(current_vmcs, cpu) = NULL;
1566         crash_disable_local_vmclear(cpu);
1567         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1568
1569         /*
1570          * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link
1571          * is before setting loaded_vmcs->vcpu to -1 which is done in
1572          * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist
1573          * then adds the vmcs into percpu list before it is deleted.
1574          */
1575         smp_wmb();
1576
1577         loaded_vmcs_init(loaded_vmcs);
1578         crash_enable_local_vmclear(cpu);
1579 }
1580
1581 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1582 {
1583         int cpu = loaded_vmcs->cpu;
1584
1585         if (cpu != -1)
1586                 smp_call_function_single(cpu,
1587                          __loaded_vmcs_clear, loaded_vmcs, 1);
1588 }
1589
1590 static inline void vpid_sync_vcpu_single(int vpid)
1591 {
1592         if (vpid == 0)
1593                 return;
1594
1595         if (cpu_has_vmx_invvpid_single())
1596                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
1597 }
1598
1599 static inline void vpid_sync_vcpu_global(void)
1600 {
1601         if (cpu_has_vmx_invvpid_global())
1602                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
1603 }
1604
1605 static inline void vpid_sync_context(int vpid)
1606 {
1607         if (cpu_has_vmx_invvpid_single())
1608                 vpid_sync_vcpu_single(vpid);
1609         else
1610                 vpid_sync_vcpu_global();
1611 }
1612
1613 static inline void ept_sync_global(void)
1614 {
1615         __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
1616 }
1617
1618 static inline void ept_sync_context(u64 eptp)
1619 {
1620         if (cpu_has_vmx_invept_context())
1621                 __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
1622         else
1623                 ept_sync_global();
1624 }
1625
1626 static __always_inline void vmcs_check16(unsigned long field)
1627 {
1628         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1629                          "16-bit accessor invalid for 64-bit field");
1630         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1631                          "16-bit accessor invalid for 64-bit high field");
1632         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1633                          "16-bit accessor invalid for 32-bit high field");
1634         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1635                          "16-bit accessor invalid for natural width field");
1636 }
1637
1638 static __always_inline void vmcs_check32(unsigned long field)
1639 {
1640         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1641                          "32-bit accessor invalid for 16-bit field");
1642         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1643                          "32-bit accessor invalid for natural width field");
1644 }
1645
1646 static __always_inline void vmcs_check64(unsigned long field)
1647 {
1648         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1649                          "64-bit accessor invalid for 16-bit field");
1650         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1651                          "64-bit accessor invalid for 64-bit high field");
1652         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1653                          "64-bit accessor invalid for 32-bit field");
1654         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1655                          "64-bit accessor invalid for natural width field");
1656 }
1657
1658 static __always_inline void vmcs_checkl(unsigned long field)
1659 {
1660         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1661                          "Natural width accessor invalid for 16-bit field");
1662         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1663                          "Natural width accessor invalid for 64-bit field");
1664         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1665                          "Natural width accessor invalid for 64-bit high field");
1666         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1667                          "Natural width accessor invalid for 32-bit field");
1668 }
1669
1670 static __always_inline unsigned long __vmcs_readl(unsigned long field)
1671 {
1672         unsigned long value;
1673
1674         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
1675                       : "=a"(value) : "d"(field) : "cc");
1676         return value;
1677 }
1678
1679 static __always_inline u16 vmcs_read16(unsigned long field)
1680 {
1681         vmcs_check16(field);
1682         return __vmcs_readl(field);
1683 }
1684
1685 static __always_inline u32 vmcs_read32(unsigned long field)
1686 {
1687         vmcs_check32(field);
1688         return __vmcs_readl(field);
1689 }
1690
1691 static __always_inline u64 vmcs_read64(unsigned long field)
1692 {
1693         vmcs_check64(field);
1694 #ifdef CONFIG_X86_64
1695         return __vmcs_readl(field);
1696 #else
1697         return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
1698 #endif
1699 }
1700
1701 static __always_inline unsigned long vmcs_readl(unsigned long field)
1702 {
1703         vmcs_checkl(field);
1704         return __vmcs_readl(field);
1705 }
1706
1707 static noinline void vmwrite_error(unsigned long field, unsigned long value)
1708 {
1709         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
1710                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
1711         dump_stack();
1712 }
1713
1714 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
1715 {
1716         u8 error;
1717
1718         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1719                        : "=q"(error) : "a"(value), "d"(field) : "cc");
1720         if (unlikely(error))
1721                 vmwrite_error(field, value);
1722 }
1723
1724 static __always_inline void vmcs_write16(unsigned long field, u16 value)
1725 {
1726         vmcs_check16(field);
1727         __vmcs_writel(field, value);
1728 }
1729
1730 static __always_inline void vmcs_write32(unsigned long field, u32 value)
1731 {
1732         vmcs_check32(field);
1733         __vmcs_writel(field, value);
1734 }
1735
1736 static __always_inline void vmcs_write64(unsigned long field, u64 value)
1737 {
1738         vmcs_check64(field);
1739         __vmcs_writel(field, value);
1740 #ifndef CONFIG_X86_64
1741         asm volatile ("");
1742         __vmcs_writel(field+1, value >> 32);
1743 #endif
1744 }
1745
1746 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
1747 {
1748         vmcs_checkl(field);
1749         __vmcs_writel(field, value);
1750 }
1751
1752 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
1753 {
1754         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1755                          "vmcs_clear_bits does not support 64-bit fields");
1756         __vmcs_writel(field, __vmcs_readl(field) & ~mask);
1757 }
1758
1759 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
1760 {
1761         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1762                          "vmcs_set_bits does not support 64-bit fields");
1763         __vmcs_writel(field, __vmcs_readl(field) | mask);
1764 }
1765
1766 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
1767 {
1768         vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
1769 }
1770
1771 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
1772 {
1773         vmcs_write32(VM_ENTRY_CONTROLS, val);
1774         vmx->vm_entry_controls_shadow = val;
1775 }
1776
1777 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
1778 {
1779         if (vmx->vm_entry_controls_shadow != val)
1780                 vm_entry_controls_init(vmx, val);
1781 }
1782
1783 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
1784 {
1785         return vmx->vm_entry_controls_shadow;
1786 }
1787
1788
1789 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1790 {
1791         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
1792 }
1793
1794 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1795 {
1796         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
1797 }
1798
1799 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
1800 {
1801         vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
1802 }
1803
1804 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
1805 {
1806         vmcs_write32(VM_EXIT_CONTROLS, val);
1807         vmx->vm_exit_controls_shadow = val;
1808 }
1809
1810 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
1811 {
1812         if (vmx->vm_exit_controls_shadow != val)
1813                 vm_exit_controls_init(vmx, val);
1814 }
1815
1816 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
1817 {
1818         return vmx->vm_exit_controls_shadow;
1819 }
1820
1821
1822 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1823 {
1824         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
1825 }
1826
1827 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1828 {
1829         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
1830 }
1831
1832 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
1833 {
1834         vmx->segment_cache.bitmask = 0;
1835 }
1836
1837 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
1838                                        unsigned field)
1839 {
1840         bool ret;
1841         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
1842
1843         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
1844                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
1845                 vmx->segment_cache.bitmask = 0;
1846         }
1847         ret = vmx->segment_cache.bitmask & mask;
1848         vmx->segment_cache.bitmask |= mask;
1849         return ret;
1850 }
1851
1852 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
1853 {
1854         u16 *p = &vmx->segment_cache.seg[seg].selector;
1855
1856         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
1857                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
1858         return *p;
1859 }
1860
1861 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
1862 {
1863         ulong *p = &vmx->segment_cache.seg[seg].base;
1864
1865         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
1866                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
1867         return *p;
1868 }
1869
1870 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
1871 {
1872         u32 *p = &vmx->segment_cache.seg[seg].limit;
1873
1874         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
1875                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
1876         return *p;
1877 }
1878
1879 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
1880 {
1881         u32 *p = &vmx->segment_cache.seg[seg].ar;
1882
1883         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
1884                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
1885         return *p;
1886 }
1887
1888 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1889 {
1890         u32 eb;
1891
1892         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1893              (1u << DB_VECTOR) | (1u << AC_VECTOR);
1894         if ((vcpu->guest_debug &
1895              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
1896             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
1897                 eb |= 1u << BP_VECTOR;
1898         if (to_vmx(vcpu)->rmode.vm86_active)
1899                 eb = ~0;
1900         if (enable_ept)
1901                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
1902
1903         /* When we are running a nested L2 guest and L1 specified for it a
1904          * certain exception bitmap, we must trap the same exceptions and pass
1905          * them to L1. When running L2, we will only handle the exceptions
1906          * specified above if L1 did not want them.
1907          */
1908         if (is_guest_mode(vcpu))
1909                 eb |= get_vmcs12(vcpu)->exception_bitmap;
1910
1911         vmcs_write32(EXCEPTION_BITMAP, eb);
1912 }
1913
1914 /*
1915  * Check if MSR is intercepted for currently loaded MSR bitmap.
1916  */
1917 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
1918 {
1919         unsigned long *msr_bitmap;
1920         int f = sizeof(unsigned long);
1921
1922         if (!cpu_has_vmx_msr_bitmap())
1923                 return true;
1924
1925         msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
1926
1927         if (msr <= 0x1fff) {
1928                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
1929         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
1930                 msr &= 0x1fff;
1931                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
1932         }
1933
1934         return true;
1935 }
1936
1937 /*
1938  * Check if MSR is intercepted for L01 MSR bitmap.
1939  */
1940 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
1941 {
1942         unsigned long *msr_bitmap;
1943         int f = sizeof(unsigned long);
1944
1945         if (!cpu_has_vmx_msr_bitmap())
1946                 return true;
1947
1948         msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
1949
1950         if (msr <= 0x1fff) {
1951                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
1952         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
1953                 msr &= 0x1fff;
1954                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
1955         }
1956
1957         return true;
1958 }
1959
1960 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1961                 unsigned long entry, unsigned long exit)
1962 {
1963         vm_entry_controls_clearbit(vmx, entry);
1964         vm_exit_controls_clearbit(vmx, exit);
1965 }
1966
1967 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1968 {
1969         unsigned i;
1970         struct msr_autoload *m = &vmx->msr_autoload;
1971
1972         switch (msr) {
1973         case MSR_EFER:
1974                 if (cpu_has_load_ia32_efer) {
1975                         clear_atomic_switch_msr_special(vmx,
1976                                         VM_ENTRY_LOAD_IA32_EFER,
1977                                         VM_EXIT_LOAD_IA32_EFER);
1978                         return;
1979                 }
1980                 break;
1981         case MSR_CORE_PERF_GLOBAL_CTRL:
1982                 if (cpu_has_load_perf_global_ctrl) {
1983                         clear_atomic_switch_msr_special(vmx,
1984                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1985                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1986                         return;
1987                 }
1988                 break;
1989         }
1990
1991         for (i = 0; i < m->nr; ++i)
1992                 if (m->guest[i].index == msr)
1993                         break;
1994
1995         if (i == m->nr)
1996                 return;
1997         --m->nr;
1998         m->guest[i] = m->guest[m->nr];
1999         m->host[i] = m->host[m->nr];
2000         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
2001         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
2002 }
2003
2004 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2005                 unsigned long entry, unsigned long exit,
2006                 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
2007                 u64 guest_val, u64 host_val)
2008 {
2009         vmcs_write64(guest_val_vmcs, guest_val);
2010         vmcs_write64(host_val_vmcs, host_val);
2011         vm_entry_controls_setbit(vmx, entry);
2012         vm_exit_controls_setbit(vmx, exit);
2013 }
2014
2015 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
2016                                   u64 guest_val, u64 host_val)
2017 {
2018         unsigned i;
2019         struct msr_autoload *m = &vmx->msr_autoload;
2020
2021         switch (msr) {
2022         case MSR_EFER:
2023                 if (cpu_has_load_ia32_efer) {
2024                         add_atomic_switch_msr_special(vmx,
2025                                         VM_ENTRY_LOAD_IA32_EFER,
2026                                         VM_EXIT_LOAD_IA32_EFER,
2027                                         GUEST_IA32_EFER,
2028                                         HOST_IA32_EFER,
2029                                         guest_val, host_val);
2030                         return;
2031                 }
2032                 break;
2033         case MSR_CORE_PERF_GLOBAL_CTRL:
2034                 if (cpu_has_load_perf_global_ctrl) {
2035                         add_atomic_switch_msr_special(vmx,
2036                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2037                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2038                                         GUEST_IA32_PERF_GLOBAL_CTRL,
2039                                         HOST_IA32_PERF_GLOBAL_CTRL,
2040                                         guest_val, host_val);
2041                         return;
2042                 }
2043                 break;
2044         case MSR_IA32_PEBS_ENABLE:
2045                 /* PEBS needs a quiescent period after being disabled (to write
2046                  * a record).  Disabling PEBS through VMX MSR swapping doesn't
2047                  * provide that period, so a CPU could write host's record into
2048                  * guest's memory.
2049                  */
2050                 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
2051         }
2052
2053         for (i = 0; i < m->nr; ++i)
2054                 if (m->guest[i].index == msr)
2055                         break;
2056
2057         if (i == NR_AUTOLOAD_MSRS) {
2058                 printk_once(KERN_WARNING "Not enough msr switch entries. "
2059                                 "Can't add msr %x\n", msr);
2060                 return;
2061         } else if (i == m->nr) {
2062                 ++m->nr;
2063                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
2064                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
2065         }
2066
2067         m->guest[i].index = msr;
2068         m->guest[i].value = guest_val;
2069         m->host[i].index = msr;
2070         m->host[i].value = host_val;
2071 }
2072
2073 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
2074 {
2075         u64 guest_efer = vmx->vcpu.arch.efer;
2076         u64 ignore_bits = 0;
2077
2078         if (!enable_ept) {
2079                 /*
2080                  * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
2081                  * host CPUID is more efficient than testing guest CPUID
2082                  * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
2083                  */
2084                 if (boot_cpu_has(X86_FEATURE_SMEP))
2085                         guest_efer |= EFER_NX;
2086                 else if (!(guest_efer & EFER_NX))
2087                         ignore_bits |= EFER_NX;
2088         }
2089
2090         /*
2091          * LMA and LME handled by hardware; SCE meaningless outside long mode.
2092          */
2093         ignore_bits |= EFER_SCE;
2094 #ifdef CONFIG_X86_64
2095         ignore_bits |= EFER_LMA | EFER_LME;
2096         /* SCE is meaningful only in long mode on Intel */
2097         if (guest_efer & EFER_LMA)
2098                 ignore_bits &= ~(u64)EFER_SCE;
2099 #endif
2100
2101         clear_atomic_switch_msr(vmx, MSR_EFER);
2102
2103         /*
2104          * On EPT, we can't emulate NX, so we must switch EFER atomically.
2105          * On CPUs that support "load IA32_EFER", always switch EFER
2106          * atomically, since it's faster than switching it manually.
2107          */
2108         if (cpu_has_load_ia32_efer ||
2109             (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
2110                 if (!(guest_efer & EFER_LMA))
2111                         guest_efer &= ~EFER_LME;
2112                 if (guest_efer != host_efer)
2113                         add_atomic_switch_msr(vmx, MSR_EFER,
2114                                               guest_efer, host_efer);
2115                 return false;
2116         } else {
2117                 guest_efer &= ~ignore_bits;
2118                 guest_efer |= host_efer & ignore_bits;
2119
2120                 vmx->guest_msrs[efer_offset].data = guest_efer;
2121                 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
2122
2123                 return true;
2124         }
2125 }
2126
2127 #ifdef CONFIG_X86_32
2128 /*
2129  * On 32-bit kernels, VM exits still load the FS and GS bases from the
2130  * VMCS rather than the segment table.  KVM uses this helper to figure
2131  * out the current bases to poke them into the VMCS before entry.
2132  */
2133 static unsigned long segment_base(u16 selector)
2134 {
2135         struct desc_struct *table;
2136         unsigned long v;
2137
2138         if (!(selector & ~SEGMENT_RPL_MASK))
2139                 return 0;
2140
2141         table = get_current_gdt_ro();
2142
2143         if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2144                 u16 ldt_selector = kvm_read_ldt();
2145
2146                 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
2147                         return 0;
2148
2149                 table = (struct desc_struct *)segment_base(ldt_selector);
2150         }
2151         v = get_desc_base(&table[selector >> 3]);
2152         return v;
2153 }
2154 #endif
2155
2156 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
2157 {
2158         struct vcpu_vmx *vmx = to_vmx(vcpu);
2159         int i;
2160
2161         if (vmx->host_state.loaded)
2162                 return;
2163
2164         vmx->host_state.loaded = 1;
2165         /*
2166          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
2167          * allow segment selectors with cpl > 0 or ti == 1.
2168          */
2169         vmx->host_state.ldt_sel = kvm_read_ldt();
2170         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
2171         savesegment(fs, vmx->host_state.fs_sel);
2172         if (!(vmx->host_state.fs_sel & 7)) {
2173                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
2174                 vmx->host_state.fs_reload_needed = 0;
2175         } else {
2176                 vmcs_write16(HOST_FS_SELECTOR, 0);
2177                 vmx->host_state.fs_reload_needed = 1;
2178         }
2179         savesegment(gs, vmx->host_state.gs_sel);
2180         if (!(vmx->host_state.gs_sel & 7))
2181                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
2182         else {
2183                 vmcs_write16(HOST_GS_SELECTOR, 0);
2184                 vmx->host_state.gs_ldt_reload_needed = 1;
2185         }
2186
2187 #ifdef CONFIG_X86_64
2188         savesegment(ds, vmx->host_state.ds_sel);
2189         savesegment(es, vmx->host_state.es_sel);
2190 #endif
2191
2192 #ifdef CONFIG_X86_64
2193         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
2194         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
2195 #else
2196         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
2197         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
2198 #endif
2199
2200 #ifdef CONFIG_X86_64
2201         rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2202         if (is_long_mode(&vmx->vcpu))
2203                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2204 #endif
2205         if (boot_cpu_has(X86_FEATURE_MPX))
2206                 rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2207         for (i = 0; i < vmx->save_nmsrs; ++i)
2208                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2209                                    vmx->guest_msrs[i].data,
2210                                    vmx->guest_msrs[i].mask);
2211 }
2212
2213 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
2214 {
2215         if (!vmx->host_state.loaded)
2216                 return;
2217
2218         ++vmx->vcpu.stat.host_state_reload;
2219         vmx->host_state.loaded = 0;
2220 #ifdef CONFIG_X86_64
2221         if (is_long_mode(&vmx->vcpu))
2222                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2223 #endif
2224         if (vmx->host_state.gs_ldt_reload_needed) {
2225                 kvm_load_ldt(vmx->host_state.ldt_sel);
2226 #ifdef CONFIG_X86_64
2227                 load_gs_index(vmx->host_state.gs_sel);
2228 #else
2229                 loadsegment(gs, vmx->host_state.gs_sel);
2230 #endif
2231         }
2232         if (vmx->host_state.fs_reload_needed)
2233                 loadsegment(fs, vmx->host_state.fs_sel);
2234 #ifdef CONFIG_X86_64
2235         if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
2236                 loadsegment(ds, vmx->host_state.ds_sel);
2237                 loadsegment(es, vmx->host_state.es_sel);
2238         }
2239 #endif
2240         invalidate_tss_limit();
2241 #ifdef CONFIG_X86_64
2242         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2243 #endif
2244         if (vmx->host_state.msr_host_bndcfgs)
2245                 wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2246         load_fixmap_gdt(raw_smp_processor_id());
2247 }
2248
2249 static void vmx_load_host_state(struct vcpu_vmx *vmx)
2250 {
2251         preempt_disable();
2252         __vmx_load_host_state(vmx);
2253         preempt_enable();
2254 }
2255
2256 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2257 {
2258         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2259         struct pi_desc old, new;
2260         unsigned int dest;
2261
2262         /*
2263          * In case of hot-plug or hot-unplug, we may have to undo
2264          * vmx_vcpu_pi_put even if there is no assigned device.  And we
2265          * always keep PI.NDST up to date for simplicity: it makes the
2266          * code easier, and CPU migration is not a fast path.
2267          */
2268         if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
2269                 return;
2270
2271         /*
2272          * First handle the simple case where no cmpxchg is necessary; just
2273          * allow posting non-urgent interrupts.
2274          *
2275          * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
2276          * PI.NDST: pi_post_block will do it for us and the wakeup_handler
2277          * expects the VCPU to be on the blocked_vcpu_list that matches
2278          * PI.NDST.
2279          */
2280         if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
2281             vcpu->cpu == cpu) {
2282                 pi_clear_sn(pi_desc);
2283                 return;
2284         }
2285
2286         /* The full case.  */
2287         do {
2288                 old.control = new.control = pi_desc->control;
2289
2290                 dest = cpu_physical_id(cpu);
2291
2292                 if (x2apic_enabled())
2293                         new.ndst = dest;
2294                 else
2295                         new.ndst = (dest << 8) & 0xFF00;
2296
2297                 new.sn = 0;
2298         } while (cmpxchg64(&pi_desc->control, old.control,
2299                            new.control) != old.control);
2300 }
2301
2302 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
2303 {
2304         vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
2305         vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2306 }
2307
2308 /*
2309  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
2310  * vcpu mutex is already taken.
2311  */
2312 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2313 {
2314         struct vcpu_vmx *vmx = to_vmx(vcpu);
2315         bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
2316
2317         if (!already_loaded) {
2318                 loaded_vmcs_clear(vmx->loaded_vmcs);
2319                 local_irq_disable();
2320                 crash_disable_local_vmclear(cpu);
2321
2322                 /*
2323                  * Read loaded_vmcs->cpu should be before fetching
2324                  * loaded_vmcs->loaded_vmcss_on_cpu_link.
2325                  * See the comments in __loaded_vmcs_clear().
2326                  */
2327                 smp_rmb();
2328
2329                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
2330                          &per_cpu(loaded_vmcss_on_cpu, cpu));
2331                 crash_enable_local_vmclear(cpu);
2332                 local_irq_enable();
2333         }
2334
2335         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
2336                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
2337                 vmcs_load(vmx->loaded_vmcs->vmcs);
2338                 indirect_branch_prediction_barrier();
2339         }
2340
2341         if (!already_loaded) {
2342                 void *gdt = get_current_gdt_ro();
2343                 unsigned long sysenter_esp;
2344
2345                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2346
2347                 /*
2348                  * Linux uses per-cpu TSS and GDT, so set these when switching
2349                  * processors.  See 22.2.4.
2350                  */
2351                 vmcs_writel(HOST_TR_BASE,
2352                             (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
2353                 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
2354
2355                 /*
2356                  * VM exits change the host TR limit to 0x67 after a VM
2357                  * exit.  This is okay, since 0x67 covers everything except
2358                  * the IO bitmap and have have code to handle the IO bitmap
2359                  * being lost after a VM exit.
2360                  */
2361                 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
2362
2363                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
2364                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
2365
2366                 vmx->loaded_vmcs->cpu = cpu;
2367         }
2368
2369         /* Setup TSC multiplier */
2370         if (kvm_has_tsc_control &&
2371             vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
2372                 decache_tsc_multiplier(vmx);
2373
2374         vmx_vcpu_pi_load(vcpu, cpu);
2375         vmx->host_pkru = read_pkru();
2376 }
2377
2378 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
2379 {
2380         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2381
2382         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2383                 !irq_remapping_cap(IRQ_POSTING_CAP)  ||
2384                 !kvm_vcpu_apicv_active(vcpu))
2385                 return;
2386
2387         /* Set SN when the vCPU is preempted */
2388         if (vcpu->preempted)
2389                 pi_set_sn(pi_desc);
2390 }
2391
2392 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
2393 {
2394         vmx_vcpu_pi_put(vcpu);
2395
2396         __vmx_load_host_state(to_vmx(vcpu));
2397 }
2398
2399 static bool emulation_required(struct kvm_vcpu *vcpu)
2400 {
2401         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
2402 }
2403
2404 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2405
2406 /*
2407  * Return the cr0 value that a nested guest would read. This is a combination
2408  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
2409  * its hypervisor (cr0_read_shadow).
2410  */
2411 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
2412 {
2413         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
2414                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
2415 }
2416 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
2417 {
2418         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
2419                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
2420 }
2421
2422 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
2423 {
2424         unsigned long rflags, save_rflags;
2425
2426         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
2427                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2428                 rflags = vmcs_readl(GUEST_RFLAGS);
2429                 if (to_vmx(vcpu)->rmode.vm86_active) {
2430                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2431                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
2432                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2433                 }
2434                 to_vmx(vcpu)->rflags = rflags;
2435         }
2436         return to_vmx(vcpu)->rflags;
2437 }
2438
2439 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2440 {
2441         unsigned long old_rflags = vmx_get_rflags(vcpu);
2442
2443         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2444         to_vmx(vcpu)->rflags = rflags;
2445         if (to_vmx(vcpu)->rmode.vm86_active) {
2446                 to_vmx(vcpu)->rmode.save_rflags = rflags;
2447                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2448         }
2449         vmcs_writel(GUEST_RFLAGS, rflags);
2450
2451         if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
2452                 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
2453 }
2454
2455 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
2456 {
2457         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2458         int ret = 0;
2459
2460         if (interruptibility & GUEST_INTR_STATE_STI)
2461                 ret |= KVM_X86_SHADOW_INT_STI;
2462         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
2463                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
2464
2465         return ret;
2466 }
2467
2468 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
2469 {
2470         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2471         u32 interruptibility = interruptibility_old;
2472
2473         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
2474
2475         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
2476                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
2477         else if (mask & KVM_X86_SHADOW_INT_STI)
2478                 interruptibility |= GUEST_INTR_STATE_STI;
2479
2480         if ((interruptibility != interruptibility_old))
2481                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
2482 }
2483
2484 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
2485 {
2486         unsigned long rip;
2487
2488         rip = kvm_rip_read(vcpu);
2489         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
2490         kvm_rip_write(vcpu, rip);
2491
2492         /* skipping an emulated instruction also counts */
2493         vmx_set_interrupt_shadow(vcpu, 0);
2494 }
2495
2496 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
2497                                                unsigned long exit_qual)
2498 {
2499         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2500         unsigned int nr = vcpu->arch.exception.nr;
2501         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2502
2503         if (vcpu->arch.exception.has_error_code) {
2504                 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
2505                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2506         }
2507
2508         if (kvm_exception_is_soft(nr))
2509                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2510         else
2511                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2512
2513         if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
2514             vmx_get_nmi_mask(vcpu))
2515                 intr_info |= INTR_INFO_UNBLOCK_NMI;
2516
2517         nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
2518 }
2519
2520 /*
2521  * KVM wants to inject page-faults which it got to the guest. This function
2522  * checks whether in a nested guest, we need to inject them to L1 or L2.
2523  */
2524 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
2525 {
2526         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2527         unsigned int nr = vcpu->arch.exception.nr;
2528
2529         if (nr == PF_VECTOR) {
2530                 if (vcpu->arch.exception.nested_apf) {
2531                         *exit_qual = vcpu->arch.apf.nested_apf_token;
2532                         return 1;
2533                 }
2534                 /*
2535                  * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
2536                  * The fix is to add the ancillary datum (CR2 or DR6) to structs
2537                  * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
2538                  * can be written only when inject_pending_event runs.  This should be
2539                  * conditional on a new capability---if the capability is disabled,
2540                  * kvm_multiple_exception would write the ancillary information to
2541                  * CR2 or DR6, for backwards ABI-compatibility.
2542                  */
2543                 if (nested_vmx_is_page_fault_vmexit(vmcs12,
2544                                                     vcpu->arch.exception.error_code)) {
2545                         *exit_qual = vcpu->arch.cr2;
2546                         return 1;
2547                 }
2548         } else {
2549                 if (vmcs12->exception_bitmap & (1u << nr)) {
2550                         if (nr == DB_VECTOR)
2551                                 *exit_qual = vcpu->arch.dr6;
2552                         else
2553                                 *exit_qual = 0;
2554                         return 1;
2555                 }
2556         }
2557
2558         return 0;
2559 }
2560
2561 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
2562 {
2563         struct vcpu_vmx *vmx = to_vmx(vcpu);
2564         unsigned nr = vcpu->arch.exception.nr;
2565         bool has_error_code = vcpu->arch.exception.has_error_code;
2566         u32 error_code = vcpu->arch.exception.error_code;
2567         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2568
2569         if (has_error_code) {
2570                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
2571                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2572         }
2573
2574         if (vmx->rmode.vm86_active) {
2575                 int inc_eip = 0;
2576                 if (kvm_exception_is_soft(nr))
2577                         inc_eip = vcpu->arch.event_exit_inst_len;
2578                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
2579                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2580                 return;
2581         }
2582
2583         if (kvm_exception_is_soft(nr)) {
2584                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2585                              vmx->vcpu.arch.event_exit_inst_len);
2586                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2587         } else
2588                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2589
2590         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
2591 }
2592
2593 static bool vmx_rdtscp_supported(void)
2594 {
2595         return cpu_has_vmx_rdtscp();
2596 }
2597
2598 static bool vmx_invpcid_supported(void)
2599 {
2600         return cpu_has_vmx_invpcid() && enable_ept;
2601 }
2602
2603 /*
2604  * Swap MSR entry in host/guest MSR entry array.
2605  */
2606 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
2607 {
2608         struct shared_msr_entry tmp;
2609
2610         tmp = vmx->guest_msrs[to];
2611         vmx->guest_msrs[to] = vmx->guest_msrs[from];
2612         vmx->guest_msrs[from] = tmp;
2613 }
2614
2615 /*
2616  * Set up the vmcs to automatically save and restore system
2617  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
2618  * mode, as fiddling with msrs is very expensive.
2619  */
2620 static void setup_msrs(struct vcpu_vmx *vmx)
2621 {
2622         int save_nmsrs, index;
2623
2624         save_nmsrs = 0;
2625 #ifdef CONFIG_X86_64
2626         if (is_long_mode(&vmx->vcpu)) {
2627                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
2628                 if (index >= 0)
2629                         move_msr_up(vmx, index, save_nmsrs++);
2630                 index = __find_msr_index(vmx, MSR_LSTAR);
2631                 if (index >= 0)
2632                         move_msr_up(vmx, index, save_nmsrs++);
2633                 index = __find_msr_index(vmx, MSR_CSTAR);
2634                 if (index >= 0)
2635                         move_msr_up(vmx, index, save_nmsrs++);
2636                 index = __find_msr_index(vmx, MSR_TSC_AUX);
2637                 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
2638                         move_msr_up(vmx, index, save_nmsrs++);
2639                 /*
2640                  * MSR_STAR is only needed on long mode guests, and only
2641                  * if efer.sce is enabled.
2642                  */
2643                 index = __find_msr_index(vmx, MSR_STAR);
2644                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
2645                         move_msr_up(vmx, index, save_nmsrs++);
2646         }
2647 #endif
2648         index = __find_msr_index(vmx, MSR_EFER);
2649         if (index >= 0 && update_transition_efer(vmx, index))
2650                 move_msr_up(vmx, index, save_nmsrs++);
2651
2652         vmx->save_nmsrs = save_nmsrs;
2653
2654         if (cpu_has_vmx_msr_bitmap())
2655                 vmx_update_msr_bitmap(&vmx->vcpu);
2656 }
2657
2658 /*
2659  * reads and returns guest's timestamp counter "register"
2660  * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
2661  * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
2662  */
2663 static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
2664 {
2665         u64 host_tsc, tsc_offset;
2666
2667         host_tsc = rdtsc();
2668         tsc_offset = vmcs_read64(TSC_OFFSET);
2669         return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
2670 }
2671
2672 /*
2673  * writes 'offset' into guest's timestamp counter offset register
2674  */
2675 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2676 {
2677         if (is_guest_mode(vcpu)) {
2678                 /*
2679                  * We're here if L1 chose not to trap WRMSR to TSC. According
2680                  * to the spec, this should set L1's TSC; The offset that L1
2681                  * set for L2 remains unchanged, and still needs to be added
2682                  * to the newly set TSC to get L2's TSC.
2683                  */
2684                 struct vmcs12 *vmcs12;
2685                 /* recalculate vmcs02.TSC_OFFSET: */
2686                 vmcs12 = get_vmcs12(vcpu);
2687                 vmcs_write64(TSC_OFFSET, offset +
2688                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
2689                          vmcs12->tsc_offset : 0));
2690         } else {
2691                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2692                                            vmcs_read64(TSC_OFFSET), offset);
2693                 vmcs_write64(TSC_OFFSET, offset);
2694         }
2695 }
2696
2697 /*
2698  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
2699  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
2700  * all guests if the "nested" module option is off, and can also be disabled
2701  * for a single guest by disabling its VMX cpuid bit.
2702  */
2703 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2704 {
2705         return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
2706 }
2707
2708 /*
2709  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
2710  * returned for the various VMX controls MSRs when nested VMX is enabled.
2711  * The same values should also be used to verify that vmcs12 control fields are
2712  * valid during nested entry from L1 to L2.
2713  * Each of these control msrs has a low and high 32-bit half: A low bit is on
2714  * if the corresponding bit in the (32-bit) control field *must* be on, and a
2715  * bit in the high half is on if the corresponding bit in the control field
2716  * may be on. See also vmx_control_verify().
2717  */
2718 static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2719 {
2720         /*
2721          * Note that as a general rule, the high half of the MSRs (bits in
2722          * the control fields which may be 1) should be initialized by the
2723          * intersection of the underlying hardware's MSR (i.e., features which
2724          * can be supported) and the list of features we want to expose -
2725          * because they are known to be properly supported in our code.
2726          * Also, usually, the low half of the MSRs (bits which must be 1) can
2727          * be set to 0, meaning that L1 may turn off any of these bits. The
2728          * reason is that if one of these bits is necessary, it will appear
2729          * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
2730          * fields of vmcs01 and vmcs02, will turn these bits off - and
2731          * nested_vmx_exit_reflected() will not pass related exits to L1.
2732          * These rules have exceptions below.
2733          */
2734
2735         /* pin-based controls */
2736         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
2737                 vmx->nested.nested_vmx_pinbased_ctls_low,
2738                 vmx->nested.nested_vmx_pinbased_ctls_high);
2739         vmx->nested.nested_vmx_pinbased_ctls_low |=
2740                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2741         vmx->nested.nested_vmx_pinbased_ctls_high &=
2742                 PIN_BASED_EXT_INTR_MASK |
2743                 PIN_BASED_NMI_EXITING |
2744                 PIN_BASED_VIRTUAL_NMIS;
2745         vmx->nested.nested_vmx_pinbased_ctls_high |=
2746                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2747                 PIN_BASED_VMX_PREEMPTION_TIMER;
2748         if (kvm_vcpu_apicv_active(&vmx->vcpu))
2749                 vmx->nested.nested_vmx_pinbased_ctls_high |=
2750                         PIN_BASED_POSTED_INTR;
2751
2752         /* exit controls */
2753         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
2754                 vmx->nested.nested_vmx_exit_ctls_low,
2755                 vmx->nested.nested_vmx_exit_ctls_high);
2756         vmx->nested.nested_vmx_exit_ctls_low =
2757                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
2758
2759         vmx->nested.nested_vmx_exit_ctls_high &=
2760 #ifdef CONFIG_X86_64
2761                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
2762 #endif
2763                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
2764         vmx->nested.nested_vmx_exit_ctls_high |=
2765                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
2766                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
2767                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
2768
2769         if (kvm_mpx_supported())
2770                 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
2771
2772         /* We support free control of debug control saving. */
2773         vmx->nested.nested_vmx_exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
2774
2775         /* entry controls */
2776         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2777                 vmx->nested.nested_vmx_entry_ctls_low,
2778                 vmx->nested.nested_vmx_entry_ctls_high);
2779         vmx->nested.nested_vmx_entry_ctls_low =
2780                 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
2781         vmx->nested.nested_vmx_entry_ctls_high &=
2782 #ifdef CONFIG_X86_64
2783                 VM_ENTRY_IA32E_MODE |
2784 #endif
2785                 VM_ENTRY_LOAD_IA32_PAT;
2786         vmx->nested.nested_vmx_entry_ctls_high |=
2787                 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
2788         if (kvm_mpx_supported())
2789                 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
2790
2791         /* We support free control of debug control loading. */
2792         vmx->nested.nested_vmx_entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
2793
2794         /* cpu-based controls */
2795         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2796                 vmx->nested.nested_vmx_procbased_ctls_low,
2797                 vmx->nested.nested_vmx_procbased_ctls_high);
2798         vmx->nested.nested_vmx_procbased_ctls_low =
2799                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2800         vmx->nested.nested_vmx_procbased_ctls_high &=
2801                 CPU_BASED_VIRTUAL_INTR_PENDING |
2802                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
2803                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
2804                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
2805                 CPU_BASED_CR3_STORE_EXITING |
2806 #ifdef CONFIG_X86_64
2807                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
2808 #endif
2809                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
2810                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
2811                 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
2812                 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
2813                 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2814         /*
2815          * We can allow some features even when not supported by the
2816          * hardware. For example, L1 can specify an MSR bitmap - and we
2817          * can use it to avoid exits to L1 - even when L0 runs L2
2818          * without MSR bitmaps.
2819          */
2820         vmx->nested.nested_vmx_procbased_ctls_high |=
2821                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2822                 CPU_BASED_USE_MSR_BITMAPS;
2823
2824         /* We support free control of CR3 access interception. */
2825         vmx->nested.nested_vmx_procbased_ctls_low &=
2826                 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
2827
2828         /*
2829          * secondary cpu-based controls.  Do not include those that
2830          * depend on CPUID bits, they are added later by vmx_cpuid_update.
2831          */
2832         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
2833                 vmx->nested.nested_vmx_secondary_ctls_low,
2834                 vmx->nested.nested_vmx_secondary_ctls_high);
2835         vmx->nested.nested_vmx_secondary_ctls_low = 0;
2836         vmx->nested.nested_vmx_secondary_ctls_high &=
2837                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2838                 SECONDARY_EXEC_DESC |
2839                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2840                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2841                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2842                 SECONDARY_EXEC_WBINVD_EXITING;
2843
2844         if (enable_ept) {
2845                 /* nested EPT: emulate EPT also to L1 */
2846                 vmx->nested.nested_vmx_secondary_ctls_high |=
2847                         SECONDARY_EXEC_ENABLE_EPT;
2848                 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2849                          VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
2850                 if (cpu_has_vmx_ept_execute_only())
2851                         vmx->nested.nested_vmx_ept_caps |=
2852                                 VMX_EPT_EXECUTE_ONLY_BIT;
2853                 vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
2854                 vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
2855                         VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
2856                         VMX_EPT_1GB_PAGE_BIT;
2857                 if (enable_ept_ad_bits) {
2858                         vmx->nested.nested_vmx_secondary_ctls_high |=
2859                                 SECONDARY_EXEC_ENABLE_PML;
2860                         vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
2861                 }
2862         }
2863
2864         if (cpu_has_vmx_vmfunc()) {
2865                 vmx->nested.nested_vmx_secondary_ctls_high |=
2866                         SECONDARY_EXEC_ENABLE_VMFUNC;
2867                 /*
2868                  * Advertise EPTP switching unconditionally
2869                  * since we emulate it
2870                  */
2871                 if (enable_ept)
2872                         vmx->nested.nested_vmx_vmfunc_controls =
2873                                 VMX_VMFUNC_EPTP_SWITCHING;
2874         }
2875
2876         /*
2877          * Old versions of KVM use the single-context version without
2878          * checking for support, so declare that it is supported even
2879          * though it is treated as global context.  The alternative is
2880          * not failing the single-context invvpid, and it is worse.
2881          */
2882         if (enable_vpid) {
2883                 vmx->nested.nested_vmx_secondary_ctls_high |=
2884                         SECONDARY_EXEC_ENABLE_VPID;
2885                 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
2886                         VMX_VPID_EXTENT_SUPPORTED_MASK;
2887         }
2888
2889         if (enable_unrestricted_guest)
2890                 vmx->nested.nested_vmx_secondary_ctls_high |=
2891                         SECONDARY_EXEC_UNRESTRICTED_GUEST;
2892
2893         /* miscellaneous data */
2894         rdmsr(MSR_IA32_VMX_MISC,
2895                 vmx->nested.nested_vmx_misc_low,
2896                 vmx->nested.nested_vmx_misc_high);
2897         vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
2898         vmx->nested.nested_vmx_misc_low |=
2899                 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
2900                 VMX_MISC_ACTIVITY_HLT;
2901         vmx->nested.nested_vmx_misc_high = 0;
2902
2903         /*
2904          * This MSR reports some information about VMX support. We
2905          * should return information about the VMX we emulate for the
2906          * guest, and the VMCS structure we give it - not about the
2907          * VMX support of the underlying hardware.
2908          */
2909         vmx->nested.nested_vmx_basic =
2910                 VMCS12_REVISION |
2911                 VMX_BASIC_TRUE_CTLS |
2912                 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
2913                 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
2914
2915         if (cpu_has_vmx_basic_inout())
2916                 vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT;
2917
2918         /*
2919          * These MSRs specify bits which the guest must keep fixed on
2920          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
2921          * We picked the standard core2 setting.
2922          */
2923 #define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
2924 #define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
2925         vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON;
2926         vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON;
2927
2928         /* These MSRs specify bits which the guest must keep fixed off. */
2929         rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1);
2930         rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1);
2931
2932         /* highest index: VMX_PREEMPTION_TIMER_VALUE */
2933         vmx->nested.nested_vmx_vmcs_enum = 0x2e;
2934 }
2935
2936 /*
2937  * if fixed0[i] == 1: val[i] must be 1
2938  * if fixed1[i] == 0: val[i] must be 0
2939  */
2940 static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
2941 {
2942         return ((val & fixed1) | fixed0) == val;
2943 }
2944
2945 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
2946 {
2947         return fixed_bits_valid(control, low, high);
2948 }
2949
2950 static inline u64 vmx_control_msr(u32 low, u32 high)
2951 {
2952         return low | ((u64)high << 32);
2953 }
2954
2955 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
2956 {
2957         superset &= mask;
2958         subset &= mask;
2959
2960         return (superset | subset) == superset;
2961 }
2962
2963 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
2964 {
2965         const u64 feature_and_reserved =
2966                 /* feature (except bit 48; see below) */
2967                 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
2968                 /* reserved */
2969                 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
2970         u64 vmx_basic = vmx->nested.nested_vmx_basic;
2971
2972         if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
2973                 return -EINVAL;
2974
2975         /*
2976          * KVM does not emulate a version of VMX that constrains physical
2977          * addresses of VMX structures (e.g. VMCS) to 32-bits.
2978          */
2979         if (data & BIT_ULL(48))
2980                 return -EINVAL;
2981
2982         if (vmx_basic_vmcs_revision_id(vmx_basic) !=
2983             vmx_basic_vmcs_revision_id(data))
2984                 return -EINVAL;
2985
2986         if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
2987                 return -EINVAL;
2988
2989         vmx->nested.nested_vmx_basic = data;
2990         return 0;
2991 }
2992
2993 static int
2994 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
2995 {
2996         u64 supported;
2997         u32 *lowp, *highp;
2998
2999         switch (msr_index) {
3000         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3001                 lowp = &vmx->nested.nested_vmx_pinbased_ctls_low;
3002                 highp = &vmx->nested.nested_vmx_pinbased_ctls_high;
3003                 break;
3004         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3005                 lowp = &vmx->nested.nested_vmx_procbased_ctls_low;
3006                 highp = &vmx->nested.nested_vmx_procbased_ctls_high;
3007                 break;
3008         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3009                 lowp = &vmx->nested.nested_vmx_exit_ctls_low;
3010                 highp = &vmx->nested.nested_vmx_exit_ctls_high;
3011                 break;
3012         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3013                 lowp = &vmx->nested.nested_vmx_entry_ctls_low;
3014                 highp = &vmx->nested.nested_vmx_entry_ctls_high;
3015                 break;
3016         case MSR_IA32_VMX_PROCBASED_CTLS2:
3017                 lowp = &vmx->nested.nested_vmx_secondary_ctls_low;
3018                 highp = &vmx->nested.nested_vmx_secondary_ctls_high;
3019                 break;
3020         default:
3021                 BUG();
3022         }
3023
3024         supported = vmx_control_msr(*lowp, *highp);
3025
3026         /* Check must-be-1 bits are still 1. */
3027         if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
3028                 return -EINVAL;
3029
3030         /* Check must-be-0 bits are still 0. */
3031         if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
3032                 return -EINVAL;
3033
3034         *lowp = data;
3035         *highp = data >> 32;
3036         return 0;
3037 }
3038
3039 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
3040 {
3041         const u64 feature_and_reserved_bits =
3042                 /* feature */
3043                 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
3044                 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
3045                 /* reserved */
3046                 GENMASK_ULL(13, 9) | BIT_ULL(31);
3047         u64 vmx_misc;
3048
3049         vmx_misc = vmx_control_msr(vmx->nested.nested_vmx_misc_low,
3050                                    vmx->nested.nested_vmx_misc_high);
3051
3052         if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
3053                 return -EINVAL;
3054
3055         if ((vmx->nested.nested_vmx_pinbased_ctls_high &
3056              PIN_BASED_VMX_PREEMPTION_TIMER) &&
3057             vmx_misc_preemption_timer_rate(data) !=
3058             vmx_misc_preemption_timer_rate(vmx_misc))
3059                 return -EINVAL;
3060
3061         if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
3062                 return -EINVAL;
3063
3064         if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
3065                 return -EINVAL;
3066
3067         if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
3068                 return -EINVAL;
3069
3070         vmx->nested.nested_vmx_misc_low = data;
3071         vmx->nested.nested_vmx_misc_high = data >> 32;
3072         return 0;
3073 }
3074
3075 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
3076 {
3077         u64 vmx_ept_vpid_cap;
3078
3079         vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps,
3080                                            vmx->nested.nested_vmx_vpid_caps);
3081
3082         /* Every bit is either reserved or a feature bit. */
3083         if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
3084                 return -EINVAL;
3085
3086         vmx->nested.nested_vmx_ept_caps = data;
3087         vmx->nested.nested_vmx_vpid_caps = data >> 32;
3088         return 0;
3089 }
3090
3091 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3092 {
3093         u64 *msr;
3094
3095         switch (msr_index) {
3096         case MSR_IA32_VMX_CR0_FIXED0:
3097                 msr = &vmx->nested.nested_vmx_cr0_fixed0;
3098                 break;
3099         case MSR_IA32_VMX_CR4_FIXED0:
3100                 msr = &vmx->nested.nested_vmx_cr4_fixed0;
3101                 break;
3102         default:
3103                 BUG();
3104         }
3105
3106         /*
3107          * 1 bits (which indicates bits which "must-be-1" during VMX operation)
3108          * must be 1 in the restored value.
3109          */
3110         if (!is_bitwise_subset(data, *msr, -1ULL))
3111                 return -EINVAL;
3112
3113         *msr = data;
3114         return 0;
3115 }
3116
3117 /*
3118  * Called when userspace is restoring VMX MSRs.
3119  *
3120  * Returns 0 on success, non-0 otherwise.
3121  */
3122 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
3123 {
3124         struct vcpu_vmx *vmx = to_vmx(vcpu);
3125
3126         switch (msr_index) {
3127         case MSR_IA32_VMX_BASIC:
3128                 return vmx_restore_vmx_basic(vmx, data);
3129         case MSR_IA32_VMX_PINBASED_CTLS:
3130         case MSR_IA32_VMX_PROCBASED_CTLS:
3131         case MSR_IA32_VMX_EXIT_CTLS:
3132         case MSR_IA32_VMX_ENTRY_CTLS:
3133                 /*
3134                  * The "non-true" VMX capability MSRs are generated from the
3135                  * "true" MSRs, so we do not support restoring them directly.
3136                  *
3137                  * If userspace wants to emulate VMX_BASIC[55]=0, userspace
3138                  * should restore the "true" MSRs with the must-be-1 bits
3139                  * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
3140                  * DEFAULT SETTINGS".
3141                  */
3142                 return -EINVAL;
3143         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3144         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3145         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3146         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3147         case MSR_IA32_VMX_PROCBASED_CTLS2:
3148                 return vmx_restore_control_msr(vmx, msr_index, data);
3149         case MSR_IA32_VMX_MISC:
3150                 return vmx_restore_vmx_misc(vmx, data);
3151         case MSR_IA32_VMX_CR0_FIXED0:
3152         case MSR_IA32_VMX_CR4_FIXED0:
3153                 return vmx_restore_fixed0_msr(vmx, msr_index, data);
3154         case MSR_IA32_VMX_CR0_FIXED1:
3155         case MSR_IA32_VMX_CR4_FIXED1:
3156                 /*
3157                  * These MSRs are generated based on the vCPU's CPUID, so we
3158                  * do not support restoring them directly.
3159                  */
3160                 return -EINVAL;
3161         case MSR_IA32_VMX_EPT_VPID_CAP:
3162                 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
3163         case MSR_IA32_VMX_VMCS_ENUM:
3164                 vmx->nested.nested_vmx_vmcs_enum = data;
3165                 return 0;
3166         default:
3167                 /*
3168                  * The rest of the VMX capability MSRs do not support restore.
3169                  */
3170                 return -EINVAL;
3171         }
3172 }
3173
3174 /* Returns 0 on success, non-0 otherwise. */
3175 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
3176 {
3177         struct vcpu_vmx *vmx = to_vmx(vcpu);
3178
3179         switch (msr_index) {
3180         case MSR_IA32_VMX_BASIC:
3181                 *pdata = vmx->nested.nested_vmx_basic;
3182                 break;
3183         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3184         case MSR_IA32_VMX_PINBASED_CTLS:
3185                 *pdata = vmx_control_msr(
3186                         vmx->nested.nested_vmx_pinbased_ctls_low,
3187                         vmx->nested.nested_vmx_pinbased_ctls_high);
3188                 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
3189                         *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3190                 break;
3191         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3192         case MSR_IA32_VMX_PROCBASED_CTLS:
3193                 *pdata = vmx_control_msr(
3194                         vmx->nested.nested_vmx_procbased_ctls_low,
3195                         vmx->nested.nested_vmx_procbased_ctls_high);
3196                 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
3197                         *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3198                 break;
3199         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3200         case MSR_IA32_VMX_EXIT_CTLS:
3201                 *pdata = vmx_control_msr(
3202                         vmx->nested.nested_vmx_exit_ctls_low,
3203                         vmx->nested.nested_vmx_exit_ctls_high);
3204                 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
3205                         *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3206                 break;
3207         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3208         case MSR_IA32_VMX_ENTRY_CTLS:
3209                 *pdata = vmx_control_msr(
3210                         vmx->nested.nested_vmx_entry_ctls_low,
3211                         vmx->nested.nested_vmx_entry_ctls_high);
3212                 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
3213                         *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3214                 break;
3215         case MSR_IA32_VMX_MISC:
3216                 *pdata = vmx_control_msr(
3217                         vmx->nested.nested_vmx_misc_low,
3218                         vmx->nested.nested_vmx_misc_high);
3219                 break;
3220         case MSR_IA32_VMX_CR0_FIXED0:
3221                 *pdata = vmx->nested.nested_vmx_cr0_fixed0;
3222                 break;
3223         case MSR_IA32_VMX_CR0_FIXED1:
3224                 *pdata = vmx->nested.nested_vmx_cr0_fixed1;
3225                 break;
3226         case MSR_IA32_VMX_CR4_FIXED0:
3227                 *pdata = vmx->nested.nested_vmx_cr4_fixed0;
3228                 break;
3229         case MSR_IA32_VMX_CR4_FIXED1:
3230                 *pdata = vmx->nested.nested_vmx_cr4_fixed1;
3231                 break;
3232         case MSR_IA32_VMX_VMCS_ENUM:
3233                 *pdata = vmx->nested.nested_vmx_vmcs_enum;
3234                 break;
3235         case MSR_IA32_VMX_PROCBASED_CTLS2:
3236                 *pdata = vmx_control_msr(
3237                         vmx->nested.nested_vmx_secondary_ctls_low,
3238                         vmx->nested.nested_vmx_secondary_ctls_high);
3239                 break;
3240         case MSR_IA32_VMX_EPT_VPID_CAP:
3241                 *pdata = vmx->nested.nested_vmx_ept_caps |
3242                         ((u64)vmx->nested.nested_vmx_vpid_caps << 32);
3243                 break;
3244         case MSR_IA32_VMX_VMFUNC:
3245                 *pdata = vmx->nested.nested_vmx_vmfunc_controls;
3246                 break;
3247         default:
3248                 return 1;
3249         }
3250
3251         return 0;
3252 }
3253
3254 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
3255                                                  uint64_t val)
3256 {
3257         uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
3258
3259         return !(val & ~valid_bits);
3260 }
3261
3262 /*
3263  * Reads an msr value (of 'msr_index') into 'pdata'.
3264  * Returns 0 on success, non-0 otherwise.
3265  * Assumes vcpu_load() was already called.
3266  */
3267 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3268 {
3269         struct shared_msr_entry *msr;
3270
3271         switch (msr_info->index) {
3272 #ifdef CONFIG_X86_64
3273         case MSR_FS_BASE:
3274                 msr_info->data = vmcs_readl(GUEST_FS_BASE);
3275                 break;
3276         case MSR_GS_BASE:
3277                 msr_info->data = vmcs_readl(GUEST_GS_BASE);
3278                 break;
3279         case MSR_KERNEL_GS_BASE:
3280                 vmx_load_host_state(to_vmx(vcpu));
3281                 msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
3282                 break;
3283 #endif
3284         case MSR_EFER:
3285                 return kvm_get_msr_common(vcpu, msr_info);
3286         case MSR_IA32_TSC:
3287                 msr_info->data = guest_read_tsc(vcpu);
3288                 break;
3289         case MSR_IA32_SPEC_CTRL:
3290                 if (!msr_info->host_initiated &&
3291                     !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
3292                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
3293                         return 1;
3294
3295                 msr_info->data = to_vmx(vcpu)->spec_ctrl;
3296                 break;
3297         case MSR_IA32_ARCH_CAPABILITIES:
3298                 if (!msr_info->host_initiated &&
3299                     !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
3300                         return 1;
3301                 msr_info->data = to_vmx(vcpu)->arch_capabilities;
3302                 break;
3303         case MSR_IA32_SYSENTER_CS:
3304                 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
3305                 break;
3306         case MSR_IA32_SYSENTER_EIP:
3307                 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
3308                 break;
3309         case MSR_IA32_SYSENTER_ESP:
3310                 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
3311                 break;
3312         case MSR_IA32_BNDCFGS:
3313                 if (!kvm_mpx_supported() ||
3314                     (!msr_info->host_initiated &&
3315                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3316                         return 1;
3317                 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
3318                 break;
3319         case MSR_IA32_MCG_EXT_CTL:
3320                 if (!msr_info->host_initiated &&
3321                     !(to_vmx(vcpu)->msr_ia32_feature_control &
3322                       FEATURE_CONTROL_LMCE))
3323                         return 1;
3324                 msr_info->data = vcpu->arch.mcg_ext_ctl;
3325                 break;
3326         case MSR_IA32_FEATURE_CONTROL:
3327                 msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
3328                 break;
3329         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3330                 if (!nested_vmx_allowed(vcpu))
3331                         return 1;
3332                 return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
3333         case MSR_IA32_XSS:
3334                 if (!vmx_xsaves_supported())
3335                         return 1;
3336                 msr_info->data = vcpu->arch.ia32_xss;
3337                 break;
3338         case MSR_TSC_AUX:
3339                 if (!msr_info->host_initiated &&
3340                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
3341                         return 1;
3342                 /* Otherwise falls through */
3343         default:
3344                 msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
3345                 if (msr) {
3346                         msr_info->data = msr->data;
3347                         break;
3348                 }
3349                 return kvm_get_msr_common(vcpu, msr_info);
3350         }
3351
3352         return 0;
3353 }
3354
3355 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
3356
3357 /*
3358  * Writes msr value into into the appropriate "register".
3359  * Returns 0 on success, non-0 otherwise.
3360  * Assumes vcpu_load() was already called.
3361  */
3362 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3363 {
3364         struct vcpu_vmx *vmx = to_vmx(vcpu);
3365         struct shared_msr_entry *msr;
3366         int ret = 0;
3367         u32 msr_index = msr_info->index;
3368         u64 data = msr_info->data;
3369
3370         switch (msr_index) {
3371         case MSR_EFER:
3372                 ret = kvm_set_msr_common(vcpu, msr_info);
3373                 break;
3374 #ifdef CONFIG_X86_64
3375         case MSR_FS_BASE:
3376                 vmx_segment_cache_clear(vmx);
3377                 vmcs_writel(GUEST_FS_BASE, data);
3378                 break;
3379         case MSR_GS_BASE:
3380                 vmx_segment_cache_clear(vmx);
3381                 vmcs_writel(GUEST_GS_BASE, data);
3382                 break;
3383         case MSR_KERNEL_GS_BASE:
3384                 vmx_load_host_state(vmx);
3385                 vmx->msr_guest_kernel_gs_base = data;
3386                 break;
3387 #endif
3388         case MSR_IA32_SYSENTER_CS:
3389                 vmcs_write32(GUEST_SYSENTER_CS, data);
3390                 break;
3391         case MSR_IA32_SYSENTER_EIP:
3392                 vmcs_writel(GUEST_SYSENTER_EIP, data);
3393                 break;
3394         case MSR_IA32_SYSENTER_ESP:
3395                 vmcs_writel(GUEST_SYSENTER_ESP, data);
3396                 break;
3397         case MSR_IA32_BNDCFGS:
3398                 if (!kvm_mpx_supported() ||
3399                     (!msr_info->host_initiated &&
3400                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3401                         return 1;
3402                 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
3403                     (data & MSR_IA32_BNDCFGS_RSVD))
3404                         return 1;
3405                 vmcs_write64(GUEST_BNDCFGS, data);
3406                 break;
3407         case MSR_IA32_TSC:
3408                 kvm_write_tsc(vcpu, msr_info);
3409                 break;
3410         case MSR_IA32_SPEC_CTRL:
3411                 if (!msr_info->host_initiated &&
3412                     !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
3413                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
3414                         return 1;
3415
3416                 /* The STIBP bit doesn't fault even if it's not advertised */
3417                 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
3418                         return 1;
3419
3420                 vmx->spec_ctrl = data;
3421
3422                 if (!data)
3423                         break;
3424
3425                 /*
3426                  * For non-nested:
3427                  * When it's written (to non-zero) for the first time, pass
3428                  * it through.
3429                  *
3430                  * For nested:
3431                  * The handling of the MSR bitmap for L2 guests is done in
3432                  * nested_vmx_merge_msr_bitmap. We should not touch the
3433                  * vmcs02.msr_bitmap here since it gets completely overwritten
3434                  * in the merging. We update the vmcs01 here for L1 as well
3435                  * since it will end up touching the MSR anyway now.
3436                  */
3437                 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
3438                                               MSR_IA32_SPEC_CTRL,
3439                                               MSR_TYPE_RW);
3440                 break;
3441         case MSR_IA32_PRED_CMD:
3442                 if (!msr_info->host_initiated &&
3443                     !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) &&
3444                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
3445                         return 1;
3446
3447                 if (data & ~PRED_CMD_IBPB)
3448                         return 1;
3449
3450                 if (!data)
3451                         break;
3452
3453                 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
3454
3455                 /*
3456                  * For non-nested:
3457                  * When it's written (to non-zero) for the first time, pass
3458                  * it through.
3459                  *
3460                  * For nested:
3461                  * The handling of the MSR bitmap for L2 guests is done in
3462                  * nested_vmx_merge_msr_bitmap. We should not touch the
3463                  * vmcs02.msr_bitmap here since it gets completely overwritten
3464                  * in the merging.
3465                  */
3466                 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
3467                                               MSR_TYPE_W);
3468                 break;
3469         case MSR_IA32_ARCH_CAPABILITIES:
3470                 if (!msr_info->host_initiated)
3471                         return 1;
3472                 vmx->arch_capabilities = data;
3473                 break;
3474         case MSR_IA32_CR_PAT:
3475                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
3476                         if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3477                                 return 1;
3478                         vmcs_write64(GUEST_IA32_PAT, data);
3479                         vcpu->arch.pat = data;
3480                         break;
3481                 }
3482                 ret = kvm_set_msr_common(vcpu, msr_info);
3483                 break;
3484         case MSR_IA32_TSC_ADJUST:
3485                 ret = kvm_set_msr_common(vcpu, msr_info);
3486                 break;
3487         case MSR_IA32_MCG_EXT_CTL:
3488                 if ((!msr_info->host_initiated &&
3489                      !(to_vmx(vcpu)->msr_ia32_feature_control &
3490                        FEATURE_CONTROL_LMCE)) ||
3491                     (data & ~MCG_EXT_CTL_LMCE_EN))
3492                         return 1;
3493                 vcpu->arch.mcg_ext_ctl = data;
3494                 break;
3495         case MSR_IA32_FEATURE_CONTROL:
3496                 if (!vmx_feature_control_msr_valid(vcpu, data) ||
3497                     (to_vmx(vcpu)->msr_ia32_feature_control &
3498                      FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
3499                         return 1;
3500                 vmx->msr_ia32_feature_control = data;
3501                 if (msr_info->host_initiated && data == 0)
3502                         vmx_leave_nested(vcpu);
3503                 break;
3504         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3505                 if (!msr_info->host_initiated)
3506                         return 1; /* they are read-only */
3507                 if (!nested_vmx_allowed(vcpu))
3508                         return 1;
3509                 return vmx_set_vmx_msr(vcpu, msr_index, data);
3510         case MSR_IA32_XSS:
3511                 if (!vmx_xsaves_supported())
3512                         return 1;
3513                 /*
3514                  * The only supported bit as of Skylake is bit 8, but
3515                  * it is not supported on KVM.
3516                  */
3517                 if (data != 0)
3518                         return 1;
3519                 vcpu->arch.ia32_xss = data;
3520                 if (vcpu->arch.ia32_xss != host_xss)
3521                         add_atomic_switch_msr(vmx, MSR_IA32_XSS,
3522                                 vcpu->arch.ia32_xss, host_xss);
3523                 else
3524                         clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
3525                 break;
3526         case MSR_TSC_AUX:
3527                 if (!msr_info->host_initiated &&
3528                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
3529                         return 1;
3530                 /* Check reserved bit, higher 32 bits should be zero */
3531                 if ((data >> 32) != 0)
3532                         return 1;
3533                 /* Otherwise falls through */
3534         default:
3535                 msr = find_msr_entry(vmx, msr_index);
3536                 if (msr) {
3537                         u64 old_msr_data = msr->data;
3538                         msr->data = data;
3539                         if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
3540                                 preempt_disable();
3541                                 ret = kvm_set_shared_msr(msr->index, msr->data,
3542                                                          msr->mask);
3543                                 preempt_enable();
3544                                 if (ret)
3545                                         msr->data = old_msr_data;
3546                         }
3547                         break;
3548                 }
3549                 ret = kvm_set_msr_common(vcpu, msr_info);
3550         }
3551
3552         return ret;
3553 }
3554
3555 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
3556 {
3557         __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
3558         switch (reg) {
3559         case VCPU_REGS_RSP:
3560                 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
3561                 break;
3562         case VCPU_REGS_RIP:
3563                 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
3564                 break;
3565         case VCPU_EXREG_PDPTR: