2 * Core of Xen paravirt_ops implementation.
4 * This file contains the xen_paravirt_ops structure itself, and the implementations for:
6 * - privileged instructions
11 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
14 #include <linux/cpu.h>
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/smp.h>
18 #include <linux/preempt.h>
19 #include <linux/hardirq.h>
20 #include <linux/percpu.h>
21 #include <linux/delay.h>
22 #include <linux/start_kernel.h>
23 #include <linux/sched.h>
24 #include <linux/kprobes.h>
25 #include <linux/bootmem.h>
26 #include <linux/export.h>
28 #include <linux/page-flags.h>
29 #include <linux/highmem.h>
30 #include <linux/console.h>
31 #include <linux/pci.h>
32 #include <linux/gfp.h>
33 #include <linux/memblock.h>
34 #include <linux/edd.h>
35 #include <linux/frame.h>
37 #include <linux/kexec.h>
40 #include <xen/events.h>
41 #include <xen/interface/xen.h>
42 #include <xen/interface/version.h>
43 #include <xen/interface/physdev.h>
44 #include <xen/interface/vcpu.h>
45 #include <xen/interface/memory.h>
46 #include <xen/interface/nmi.h>
47 #include <xen/interface/xen-mca.h>
48 #include <xen/interface/hvm/start_info.h>
49 #include <xen/features.h>
52 #include <xen/hvc-console.h>
55 #include <asm/paravirt.h>
58 #include <asm/xen/pci.h>
59 #include <asm/xen/hypercall.h>
60 #include <asm/xen/hypervisor.h>
61 #include <asm/xen/cpuid.h>
62 #include <asm/fixmap.h>
63 #include <asm/processor.h>
64 #include <asm/proto.h>
65 #include <asm/msr-index.h>
66 #include <asm/traps.h>
67 #include <asm/setup.h>
69 #include <asm/pgalloc.h>
70 #include <asm/pgtable.h>
71 #include <asm/tlbflush.h>
72 #include <asm/reboot.h>
73 #include <asm/stackprotector.h>
74 #include <asm/hypervisor.h>
75 #include <asm/mach_traps.h>
76 #include <asm/mwait.h>
77 #include <asm/pci_x86.h>
79 #include <asm/e820/api.h>
82 #include <linux/acpi.h>
84 #include <acpi/pdc_intel.h>
85 #include <acpi/processor.h>
86 #include <xen/interface/platform.h>
92 #include "multicalls.h"
95 EXPORT_SYMBOL_GPL(hypercall_page);
98 * Pointer to the xen_vcpu_info structure or
99 * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
100 * and xen_vcpu_setup for details. By default it points to shared_info->vcpu_info
101 * but if the hypervisor supports VCPUOP_register_vcpu_info then it can point
102 * to xen_vcpu_info. The pointer is used in __xen_evtchn_do_upcall to
103 * acknowledge pending events.
104 * Also, more subtly, it is used by the patched version of irq enable/disable
105 * e.g. xen_irq_enable_direct and xen_iret in PV mode.
107 * The desire to be able to do those mask/unmask operations as a single
108 * instruction by using the per-cpu offset held in %gs is the real reason
109 * vcpu info is in a per-cpu pointer and the original reason for this hypercall.
113 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
116 * Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info
117 * hypercall. This can be used both in PV and PVHVM mode. The structure
118 * overrides the default per_cpu(xen_vcpu, cpu) value.
120 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
122 /* Linux <-> Xen vCPU id mapping */
123 DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
124 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
126 enum xen_domain_type xen_domain_type = XEN_NATIVE;
127 EXPORT_SYMBOL_GPL(xen_domain_type);
129 unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
130 EXPORT_SYMBOL(machine_to_phys_mapping);
131 unsigned long machine_to_phys_nr;
132 EXPORT_SYMBOL(machine_to_phys_nr);
134 struct start_info *xen_start_info;
135 EXPORT_SYMBOL_GPL(xen_start_info);
137 struct shared_info xen_dummy_shared_info;
139 void *xen_initial_gdt;
141 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
143 static int xen_cpu_up_prepare_pv(unsigned int cpu);
144 static int xen_cpu_up_prepare_hvm(unsigned int cpu);
145 static int xen_cpu_up_online(unsigned int cpu);
146 static int xen_cpu_dead_pv(unsigned int cpu);
147 static int xen_cpu_dead_hvm(unsigned int cpu);
150 * Point at some empty memory to start with. We map the real shared_info
151 * page as soon as fixmap is up and running.
153 struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
156 * Flag to determine whether vcpu info placement is available on all
157 * VCPUs. We assume it is to start with, and then set it to zero on
158 * the first failure. This is because it can succeed on some VCPUs
159 * and not others, since it can involve hypervisor memory allocation,
160 * or because the guest failed to guarantee all the appropriate
161 * constraints on all VCPUs (i.e. the buffer can't cross a page boundary).
163 * Note that any particular CPU may be using a placed vcpu structure,
164 * but we can only optimise if they all are.
166 * 0: not available, 1: available
168 static int have_vcpu_info_placement = 1;
171 struct desc_struct desc[3];
175 * Updating the 3 TLS descriptors in the GDT on every task switch is
176 * surprisingly expensive so we avoid updating them if they haven't
177 * changed. Since Xen writes different descriptors than the ones passed in
178 * the update_descriptor hypercall, we keep shadow copies to compare against.
181 static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
183 #ifdef CONFIG_XEN_PVH
187 * xen_pvh and pvh_bootparams need to live in the data segment since they
188 * are used after startup_{32|64} (which clears .bss) has run.
190 bool xen_pvh __attribute__((section(".data"))) = 0;
191 struct boot_params pvh_bootparams __attribute__((section(".data")));
193 struct hvm_start_info pvh_start_info;
194 unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
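/* Cap setup_max_cpus at MAX_VIRT_CPUS, the most VCPUs the legacy shared_info layout can describe. */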
197 static void clamp_max_cpus(void)
200 if (setup_max_cpus > MAX_VIRT_CPUS)
201 setup_max_cpus = MAX_VIRT_CPUS;
205 void xen_vcpu_setup(int cpu)
207 struct vcpu_register_vcpu_info info;
209 struct vcpu_info *vcpup;
211 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
214 * This path is called twice on PVHVM - first during bootup via
215 * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
216 * hotplugged: cpu_up -> xen_hvm_cpu_notify.
217 * As we can only do the VCPUOP_register_vcpu_info hypercall once, let's
218 * not overwrite its result.
220 * For PV it is called during restore (xen_vcpu_restore) and bootup
221 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not use this function.
224 if (xen_hvm_domain()) {
225 if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
228 if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
229 per_cpu(xen_vcpu, cpu) =
230 &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
232 if (!have_vcpu_info_placement) {
233 if (cpu >= MAX_VIRT_CPUS)
238 vcpup = &per_cpu(xen_vcpu_info, cpu);
239 info.mfn = arbitrary_virt_to_mfn(vcpup);
240 info.offset = offset_in_page(vcpup);
242 /* Check to see if the hypervisor will put the vcpu_info
243 structure where we want it, which allows direct access via a per-cpu variable.
245 N.B. This hypercall can _only_ be called once per CPU. Subsequent
246 calls will error out with -EINVAL. This is because the
247 hypervisor has no unregister variant and this hypercall does not
248 allow info.mfn and info.offset to be overwritten.
250 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
254 printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
255 have_vcpu_info_placement = 0;
258 /* This cpu is using the registered vcpu info, even if
259 later ones fail to. */
260 per_cpu(xen_vcpu, cpu) = vcpup;
265 * On restore, set the vcpu placement up again.
266 * If it fails, then we're in a bad state, since
267 * we can't back out from using it...
269 void xen_vcpu_restore(void)
273 for_each_possible_cpu(cpu) {
274 bool other_cpu = (cpu != smp_processor_id());
275 bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu),
278 if (other_cpu && is_up &&
279 HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
282 xen_setup_runstate_info(cpu);
284 if (have_vcpu_info_placement)
287 if (other_cpu && is_up &&
288 HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
293 static void __init xen_banner(void)
295 unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
296 struct xen_extraversion extra;
297 HYPERVISOR_xen_version(XENVER_extraversion, &extra);
299 pr_info("Booting paravirtualized kernel %son %s\n",
300 xen_feature(XENFEAT_auto_translated_physmap) ?
301 "with PVH extensions " : "", pv_info.name);
302 printk(KERN_INFO "Xen version: %d.%d%s%s\n",
303 version >> 16, version & 0xffff, extra.extraversion,
304 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
306 /* Check if running on Xen version (major, minor) or later */
308 xen_running_on_version_or_later(unsigned int major, unsigned int minor)
310 unsigned int version;
315 version = HYPERVISOR_xen_version(XENVER_version, NULL);
316 if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
317 ((version >> 16) > major))
322 #define CPUID_THERM_POWER_LEAF 6
323 #define APERFMPERF_PRESENT 0
325 static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
326 static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
328 static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask;
329 static __read_mostly unsigned int cpuid_leaf5_ecx_val;
330 static __read_mostly unsigned int cpuid_leaf5_edx_val;
332 static void xen_cpuid(unsigned int *ax, unsigned int *bx,
333 unsigned int *cx, unsigned int *dx)
335 unsigned maskebx = ~0;
336 unsigned maskecx = ~0;
337 unsigned maskedx = ~0;
340 * Mask out inconvenient features, to try and disable as many
341 * unsupported kernel subsystems as possible.
345 maskecx = cpuid_leaf1_ecx_mask;
346 setecx = cpuid_leaf1_ecx_set_mask;
347 maskedx = cpuid_leaf1_edx_mask;
350 case CPUID_MWAIT_LEAF:
351 /* Synthesize the values. */
354 *cx = cpuid_leaf5_ecx_val;
355 *dx = cpuid_leaf5_edx_val;
358 case CPUID_THERM_POWER_LEAF:
359 /* Disabling APERFMPERF for kernel usage */
360 maskecx = ~(1 << APERFMPERF_PRESENT);
364 /* Suppress extended topology stuff */
369 asm(XEN_EMULATE_PREFIX "cpuid"
374 : "0" (*ax), "2" (*cx));
381 STACK_FRAME_NON_STANDARD(xen_cpuid); /* XEN_EMULATE_PREFIX */
383 static bool __init xen_check_mwait(void)
386 struct xen_platform_op op = {
387 .cmd = XENPF_set_processor_pminfo,
388 .u.set_pminfo.id = -1,
389 .u.set_pminfo.type = XEN_PM_PDC,
392 unsigned int ax, bx, cx, dx;
393 unsigned int mwait_mask;
395 /* We need to determine whether it is OK to expose the MWAIT
396 * capability to the kernel to harvest deeper than C3 states from ACPI
397 * _CST using the processor_harvest_xen.c module. For this to work, we
398 * need to gather the MWAIT_LEAF values (which the cstate.c code
399 * checks against). The hypervisor won't expose the MWAIT flag because
400 * it would break backwards compatibility, so we will find out directly
401 * from the hardware and via a hypercall.
403 if (!xen_initial_domain())
407 * When running on a platform earlier than Xen 4.2, do not expose
408 * MWAIT, to avoid the risk of loading the native ACPI PAD driver.
410 if (!xen_running_on_version_or_later(4, 2))
416 native_cpuid(&ax, &bx, &cx, &dx);
418 mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
419 (1 << (X86_FEATURE_MWAIT % 32));
421 if ((cx & mwait_mask) != mwait_mask)
424 /* We need to emulate the MWAIT_LEAF and for that we need both
425 * ecx and edx. The hypercall provides only partial information.
428 ax = CPUID_MWAIT_LEAF;
433 native_cpuid(&ax, &bx, &cx, &dx);
435 /* Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
436 * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
438 buf[0] = ACPI_PDC_REVISION_ID;
440 buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);
442 set_xen_guest_handle(op.u.set_pminfo.pdc, buf);
444 if ((HYPERVISOR_platform_op(&op) == 0) &&
445 (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
446 cpuid_leaf5_ecx_val = cx;
447 cpuid_leaf5_edx_val = dx;
454 static void __init xen_init_cpuid_mask(void)
456 unsigned int ax, bx, cx, dx;
457 unsigned int xsave_mask;
459 cpuid_leaf1_edx_mask =
460 ~((1 << X86_FEATURE_MTRR) | /* disable MTRR */
461 (1 << X86_FEATURE_ACC)); /* thermal monitoring */
463 if (!xen_initial_domain())
464 cpuid_leaf1_edx_mask &=
465 ~((1 << X86_FEATURE_ACPI)); /* disable ACPI */
467 cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));
471 cpuid(1, &ax, &bx, &cx, &dx);
474 (1 << (X86_FEATURE_XSAVE % 32)) |
475 (1 << (X86_FEATURE_OSXSAVE % 32));
477 /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
478 if ((cx & xsave_mask) != xsave_mask)
479 cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
480 if (xen_check_mwait())
481 cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
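/* Debug register accesses are privileged, so a PV guest forwards them to the hypervisor. */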
484 static void xen_set_debugreg(int reg, unsigned long val)
486 HYPERVISOR_set_debugreg(reg, val);
489 static unsigned long xen_get_debugreg(int reg)
491 return HYPERVISOR_get_debugreg(reg);
494 static void xen_end_context_switch(struct task_struct *next)
497 paravirt_end_context_switch(next);
500 static unsigned long xen_store_tr(void)
506 * Set the page permissions for a particular virtual address. If the
507 * address is a vmalloc mapping (or other non-linear mapping), then
508 * find the linear mapping of the page and also set its protections to match.
511 static void set_aliased_prot(void *v, pgprot_t prot)
520 ptep = lookup_address((unsigned long)v, &level);
521 BUG_ON(ptep == NULL);
523 pfn = pte_pfn(*ptep);
524 page = pfn_to_page(pfn);
526 pte = pfn_pte(pfn, prot);
529 * Careful: update_va_mapping() will fail if the virtual address
530 * we're poking isn't populated in the page tables. We don't
531 * need to worry about the direct map (that's always in the page
532 * tables), but we need to be careful about vmap space. In
533 * particular, the top level page table can lazily propagate
534 * entries between processes, so if we've switched mms since we
535 * vmapped the target in the first place, we might not have the
536 * top-level page table entry populated.
538 * We disable preemption because we want the same mm active when
539 * we probe the target and when we issue the hypercall. We'll
540 * have the same nominal mm, but if we're a kernel thread, lazy
541 * mm dropping could change our pgd.
543 * Out of an abundance of caution, this uses probe_kernel_read() to fault
544 * in the target address just in case there's some obscure case
545 * in which the target address isn't readable.
550 probe_kernel_read(&dummy, v, 1);
552 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
555 if (!PageHighMem(page)) {
556 void *av = __va(PFN_PHYS(pfn));
559 if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
567 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
569 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
573 * We need to mark all aliases of the LDT pages RO. We
574 * don't need to call vm_flush_aliases(), though, since that's
575 * only responsible for flushing aliases out of the TLBs, not the
576 * page tables, and Xen will flush the TLB for us if needed.
578 * To avoid confusing future readers: none of this is necessary
579 * to load the LDT. The hypervisor only checks this when the
580 * LDT is faulted in due to subsequent descriptor access.
583 for (i = 0; i < entries; i += entries_per_page)
584 set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
587 static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
589 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
592 for (i = 0; i < entries; i += entries_per_page)
593 set_aliased_prot(ldt + i, PAGE_KERNEL);
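/*
 * Install a new LDT by queueing an MMUEXT_SET_LDT operation; the multicall
 * batch is flushed lazily in CPU context.
 */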
596 static void xen_set_ldt(const void *addr, unsigned entries)
598 struct mmuext_op *op;
599 struct multicall_space mcs = xen_mc_entry(sizeof(*op));
601 trace_xen_cpu_set_ldt(addr, entries);
604 op->cmd = MMUEXT_SET_LDT;
605 op->arg1.linear_addr = (unsigned long)addr;
606 op->arg2.nr_ents = entries;
608 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
610 xen_mc_issue(PARAVIRT_LAZY_CPU);
613 static void xen_load_gdt(const struct desc_ptr *dtr)
615 unsigned long va = dtr->address;
616 unsigned int size = dtr->size + 1;
617 unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
618 unsigned long frames[pages];
622 * A GDT can be up to 64k in size, which corresponds to 8192
623 * 8-byte entries, or 16 4k pages.
626 BUG_ON(size > 65536);
627 BUG_ON(va & ~PAGE_MASK);
629 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
632 unsigned long pfn, mfn;
636 * The GDT is per-cpu and is in the percpu data area.
637 * That can be virtually mapped, so we need to do a
638 * page-walk to get the underlying MFN for the
639 * hypercall. The page can also be in the kernel's
640 * linear range, so we need to make that mapping RO too.
642 ptep = lookup_address(va, &level);
643 BUG_ON(ptep == NULL);
645 pfn = pte_pfn(*ptep);
646 mfn = pfn_to_mfn(pfn);
647 virt = __va(PFN_PHYS(pfn));
651 make_lowmem_page_readonly((void *)va);
652 make_lowmem_page_readonly(virt);
655 if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
660 * load_gdt for early boot, when the gdt is only mapped once
662 static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
664 unsigned long va = dtr->address;
665 unsigned int size = dtr->size + 1;
666 unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
667 unsigned long frames[pages];
671 * A GDT can be up to 64k in size, which corresponds to 8192
672 * 8-byte entries, or 16 4k pages.
675 BUG_ON(size > 65536);
676 BUG_ON(va & ~PAGE_MASK);
678 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
680 unsigned long pfn, mfn;
682 pfn = virt_to_pfn(va);
683 mfn = pfn_to_mfn(pfn);
685 pte = pfn_pte(pfn, PAGE_KERNEL_RO);
687 if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
693 if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
697 static inline bool desc_equal(const struct desc_struct *d1,
698 const struct desc_struct *d2)
700 return d1->a == d2->a && d1->b == d2->b;
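/*
 * Queue an update_descriptor multicall for one TLS slot, skipping the
 * hypercall if the shadow copy shows the descriptor is unchanged.
 */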
703 static void load_TLS_descriptor(struct thread_struct *t,
704 unsigned int cpu, unsigned int i)
706 struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
707 struct desc_struct *gdt;
709 struct multicall_space mc;
711 if (desc_equal(shadow, &t->tls_array[i]))
714 *shadow = t->tls_array[i];
716 gdt = get_cpu_gdt_rw(cpu);
717 maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
718 mc = __xen_mc_entry(0);
720 MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
723 static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
726 * XXX sleazy hack: If we're being called in a lazy-cpu zone
727 * and lazy gs handling is enabled, it means we're in a
728 * context switch, and %gs has just been saved. This means we
729 * can zero it out to prevent faults on exit from the
730 * hypervisor if the next process has no %gs. Either way, it
731 * has been saved, and the new value will get loaded properly.
732 * This will go away as soon as Xen has been modified to not
733 * save/restore %gs for normal hypercalls.
735 * On x86_64, this hack is not used for %gs, because gs points
736 * to KERNEL_GS_BASE (and uses it for PDA references), so we
737 * must not zero %gs on x86_64.
739 * For x86_64, we need to zero %fs, otherwise we may get an
740 * exception between the new %fs descriptor being loaded and
741 * %fs being effectively cleared at __switch_to().
743 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
753 load_TLS_descriptor(t, cpu, 0);
754 load_TLS_descriptor(t, cpu, 1);
755 load_TLS_descriptor(t, cpu, 2);
757 xen_mc_issue(PARAVIRT_LAZY_CPU);
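/* On x86_64 PV, loading the user %gs selector has to go through the hypervisor. */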
761 static void xen_load_gs_index(unsigned int idx)
763 if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
768 static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
771 xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
772 u64 entry = *(u64 *)ptr;
774 trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);
779 if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
785 static int cvt_gate_to_trap(int vector, const gate_desc *val,
786 struct trap_info *info)
790 if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
793 info->vector = vector;
795 addr = gate_offset(*val);
798 * Look for known traps using IST, and substitute them
799 * appropriately. The debugger ones are the only ones we care
800 * about. Xen will handle faults like double_fault,
801 * so we should never see them. Warn if
802 * there's an unexpected IST-using fault handler.
804 if (addr == (unsigned long)debug)
805 addr = (unsigned long)xen_debug;
806 else if (addr == (unsigned long)int3)
807 addr = (unsigned long)xen_int3;
808 else if (addr == (unsigned long)stack_segment)
809 addr = (unsigned long)xen_stack_segment;
810 else if (addr == (unsigned long)double_fault) {
811 /* Don't need to handle these */
813 #ifdef CONFIG_X86_MCE
814 } else if (addr == (unsigned long)machine_check) {
816 * when the Xen hypervisor injects a vMCE into the guest,
817 * use the native MCE handler to handle it
821 } else if (addr == (unsigned long)nmi)
823 * Use the native version as well.
827 /* Some other trap using IST? */
828 if (WARN_ON(val->ist != 0))
831 #endif /* CONFIG_X86_64 */
832 info->address = addr;
834 info->cs = gate_segment(*val);
835 info->flags = val->dpl;
836 /* interrupt gates clear IF */
837 if (val->type == GATE_INTERRUPT)
838 info->flags |= 1 << 2;
843 /* Locations of each CPU's IDT */
844 static DEFINE_PER_CPU(struct desc_ptr, idt_desc);
846 /* Set an IDT entry. If the entry is part of the current IDT, then also update Xen. */
848 static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
850 unsigned long p = (unsigned long)&dt[entrynum];
851 unsigned long start, end;
853 trace_xen_cpu_write_idt_entry(dt, entrynum, g);
857 start = __this_cpu_read(idt_desc.address);
858 end = start + __this_cpu_read(idt_desc.size) + 1;
862 native_write_idt_entry(dt, entrynum, g);
864 if (p >= start && (p + 8) <= end) {
865 struct trap_info info[2];
869 if (cvt_gate_to_trap(entrynum, g, &info[0]))
870 if (HYPERVISOR_set_trap_table(info))
877 static void xen_convert_trap_info(const struct desc_ptr *desc,
878 struct trap_info *traps)
880 unsigned in, out, count;
882 count = (desc->size+1) / sizeof(gate_desc);
885 for (in = out = 0; in < count; in++) {
886 gate_desc *entry = (gate_desc*)(desc->address) + in;
888 if (cvt_gate_to_trap(in, entry, &traps[out]))
891 traps[out].address = 0;
894 void xen_copy_trap_info(struct trap_info *traps)
896 const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
898 xen_convert_trap_info(desc, traps);
901 /* Load a new IDT into Xen. In principle this can be per-CPU, so we
902 hold a spinlock to protect the static traps[] array (static because
903 it avoids allocation, and saves stack space). */
904 static void xen_load_idt(const struct desc_ptr *desc)
906 static DEFINE_SPINLOCK(lock);
907 static struct trap_info traps[257];
909 trace_xen_cpu_load_idt(desc);
913 memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
915 xen_convert_trap_info(desc, traps);
918 if (HYPERVISOR_set_trap_table(traps))
924 /* Write a GDT descriptor entry. Ignore LDT descriptors, since
925 they're handled differently. */
926 static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
927 const void *desc, int type)
929 trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
940 xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);
943 if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
953 * Version of write_gdt_entry for use at early boot-time needed to
954 * update an entry as simply as possible.
956 static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
957 const void *desc, int type)
959 trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
968 xmaddr_t maddr = virt_to_machine(&dt[entry]);
970 if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
971 dt[entry] = *(struct desc_struct *)desc;
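/* Tell Xen which kernel stack to switch to on ring transitions, via a batched stack_switch call. */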
977 static void xen_load_sp0(struct tss_struct *tss,
978 struct thread_struct *thread)
980 struct multicall_space mcs;
982 mcs = xen_mc_entry(0);
983 MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
984 xen_mc_issue(PARAVIRT_LAZY_CPU);
985 tss->x86_tss.sp0 = thread->sp0;
988 void xen_set_iopl_mask(unsigned mask)
990 struct physdev_set_iopl set_iopl;
992 /* Force the change at ring 0. */
993 set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
994 HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
997 static void xen_io_delay(void)
1001 static DEFINE_PER_CPU(unsigned long, xen_cr0_value);
1003 static unsigned long xen_read_cr0(void)
1005 unsigned long cr0 = this_cpu_read(xen_cr0_value);
1007 if (unlikely(cr0 == 0)) {
1008 cr0 = native_read_cr0();
1009 this_cpu_write(xen_cr0_value, cr0);
1015 static void xen_write_cr0(unsigned long cr0)
1017 struct multicall_space mcs;
1019 this_cpu_write(xen_cr0_value, cr0);
1021 /* Only pay attention to cr0.TS; everything else is ignored. */
1023 mcs = xen_mc_entry(0);
1025 MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);
1027 xen_mc_issue(PARAVIRT_LAZY_CPU);
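/* Filter out the CR4 bits a PV guest is not allowed to control (PGE, PSE, PCE) before writing %cr4. */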
1030 static void xen_write_cr4(unsigned long cr4)
1032 cr4 &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PCE);
1034 native_write_cr4(cr4);
1036 #ifdef CONFIG_X86_64
1037 static inline unsigned long xen_read_cr8(void)
1041 static inline void xen_write_cr8(unsigned long val)
1047 static u64 xen_read_msr_safe(unsigned int msr, int *err)
1051 if (pmu_msr_read(msr, &val, err))
1054 val = native_read_msr_safe(msr, err);
1056 case MSR_IA32_APICBASE:
1057 #ifdef CONFIG_X86_X2APIC
1058 if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
1060 val &= ~X2APIC_ENABLE;
1066 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
1073 #ifdef CONFIG_X86_64
1077 case MSR_FS_BASE: which = SEGBASE_FS; goto set;
1078 case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set;
1079 case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set;
1082 base = ((u64)high << 32) | low;
1083 if (HYPERVISOR_set_segment_base(which, base) != 0)
1091 case MSR_SYSCALL_MASK:
1092 case MSR_IA32_SYSENTER_CS:
1093 case MSR_IA32_SYSENTER_ESP:
1094 case MSR_IA32_SYSENTER_EIP:
1095 /* Fast syscall setup is all done in hypercalls, so
1096 these are all ignored. Stub them out here to stop
1097 Xen console noise. */
1101 if (!pmu_msr_write(msr, low, high, &ret))
1102 ret = native_write_msr_safe(msr, low, high);
1108 static u64 xen_read_msr(unsigned int msr)
1111 * This will silently swallow a #GP from RDMSR. It may be worth changing that.
1116 return xen_read_msr_safe(msr, &err);
1119 static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
1122 * This will silently swallow a #GP from WRMSR. It may be worth changing that.
1125 xen_write_msr_safe(msr, low, high);
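/*
 * Map the hypervisor's shared_info page: through the fixmap for PV guests,
 * or directly via __va() when running auto-translated.
 */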
1128 void xen_setup_shared_info(void)
1130 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1131 set_fixmap(FIX_PARAVIRT_BOOTMAP,
1132 xen_start_info->shared_info);
1134 HYPERVISOR_shared_info =
1135 (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
1137 HYPERVISOR_shared_info =
1138 (struct shared_info *)__va(xen_start_info->shared_info);
1141 /* In UP this is as good a place as any to set up shared info */
1142 xen_setup_vcpu_info_placement();
1145 xen_setup_mfn_list_list();
1148 /* This is called once we have the cpu_possible_mask */
1149 void xen_setup_vcpu_info_placement(void)
1153 for_each_possible_cpu(cpu) {
1154 /* Set up direct vCPU id mapping for PV guests. */
1155 per_cpu(xen_vcpu_id, cpu) = cpu;
1156 xen_vcpu_setup(cpu);
1160 * xen_vcpu_setup managed to place the vcpu_info within the
1161 * percpu area for all cpus, so make use of it.
1163 if (have_vcpu_info_placement) {
1164 pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
1165 pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
1166 pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
1167 pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
1168 pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
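/*
 * Paravirt patching: inline the direct-access irq-flag stubs when vcpu info
 * placement is available, otherwise fall back to the generic patcher.
 */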
1172 static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
1173 unsigned long addr, unsigned len)
1175 char *start, *end, *reloc;
1178 start = end = reloc = NULL;
1180 #define SITE(op, x) \
1181 case PARAVIRT_PATCH(op.x): \
1182 if (have_vcpu_info_placement) { \
1183 start = (char *)xen_##x##_direct; \
1184 end = xen_##x##_direct_end; \
1185 reloc = xen_##x##_direct_reloc; \
1190 SITE(pv_irq_ops, irq_enable);
1191 SITE(pv_irq_ops, irq_disable);
1192 SITE(pv_irq_ops, save_fl);
1193 SITE(pv_irq_ops, restore_fl);
1197 if (start == NULL || (end-start) > len)
1200 ret = paravirt_patch_insns(insnbuf, len, start, end);
1202 /* Note: because reloc is assigned from something that
1203 appears to be an array, gcc assumes it's non-null,
1204 but doesn't know its relationship with start and end. */
1206 if (reloc > start && reloc < end) {
1207 int reloc_off = reloc - start;
1208 long *relocp = (long *)(insnbuf + reloc_off);
1209 long delta = start - (char *)addr;
1217 ret = paravirt_patch_default(type, clobbers, insnbuf,
1225 static const struct pv_info xen_info __initconst = {
1226 .shared_kernel_pmd = 0,
1228 #ifdef CONFIG_X86_64
1229 .extra_user_64bit_cs = FLAT_USER_CS64,
1234 static const struct pv_init_ops xen_init_ops __initconst = {
1238 static const struct pv_cpu_ops xen_cpu_ops __initconst = {
1241 .set_debugreg = xen_set_debugreg,
1242 .get_debugreg = xen_get_debugreg,
1244 .read_cr0 = xen_read_cr0,
1245 .write_cr0 = xen_write_cr0,
1247 .read_cr4 = native_read_cr4,
1248 .write_cr4 = xen_write_cr4,
1250 #ifdef CONFIG_X86_64
1251 .read_cr8 = xen_read_cr8,
1252 .write_cr8 = xen_write_cr8,
1255 .wbinvd = native_wbinvd,
1257 .read_msr = xen_read_msr,
1258 .write_msr = xen_write_msr,
1260 .read_msr_safe = xen_read_msr_safe,
1261 .write_msr_safe = xen_write_msr_safe,
1263 .read_pmc = xen_read_pmc,
1266 #ifdef CONFIG_X86_64
1267 .usergs_sysret64 = xen_sysret64,
1270 .load_tr_desc = paravirt_nop,
1271 .set_ldt = xen_set_ldt,
1272 .load_gdt = xen_load_gdt,
1273 .load_idt = xen_load_idt,
1274 .load_tls = xen_load_tls,
1275 #ifdef CONFIG_X86_64
1276 .load_gs_index = xen_load_gs_index,
1279 .alloc_ldt = xen_alloc_ldt,
1280 .free_ldt = xen_free_ldt,
1282 .store_idt = native_store_idt,
1283 .store_tr = xen_store_tr,
1285 .write_ldt_entry = xen_write_ldt_entry,
1286 .write_gdt_entry = xen_write_gdt_entry,
1287 .write_idt_entry = xen_write_idt_entry,
1288 .load_sp0 = xen_load_sp0,
1290 .set_iopl_mask = xen_set_iopl_mask,
1291 .io_delay = xen_io_delay,
1293 /* Xen takes care of %gs when switching to usermode for us */
1294 .swapgs = paravirt_nop,
1296 .start_context_switch = paravirt_start_context_switch,
1297 .end_context_switch = xen_end_context_switch,
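/* Shut the domain down through SCHEDOP_shutdown, letting the PMU code finish on every online CPU first. */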
1300 static void xen_reboot(int reason)
1302 struct sched_shutdown r = { .reason = reason };
1305 for_each_online_cpu(cpu)
1306 xen_pmu_finish(cpu);
1308 if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
1312 static void xen_restart(char *msg)
1314 xen_reboot(SHUTDOWN_reboot);
1317 static void xen_emergency_restart(void)
1319 xen_reboot(SHUTDOWN_reboot);
1322 static void xen_machine_halt(void)
1324 xen_reboot(SHUTDOWN_poweroff);
1327 static void xen_machine_power_off(void)
1331 xen_reboot(SHUTDOWN_poweroff);
1334 static void xen_crash_shutdown(struct pt_regs *regs)
1336 xen_reboot(SHUTDOWN_crash);
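/* On a panic, ask Xen to crash the domain unless a kexec crash kernel is loaded to handle it. */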
1340 xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
1342 if (!kexec_crash_loaded())
1343 xen_reboot(SHUTDOWN_crash);
1347 static struct notifier_block xen_panic_block = {
1348 .notifier_call = xen_panic_event,
1352 int xen_panic_handler_init(void)
1354 atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
1358 static const struct machine_ops xen_machine_ops __initconst = {
1359 .restart = xen_restart,
1360 .halt = xen_machine_halt,
1361 .power_off = xen_machine_power_off,
1362 .shutdown = xen_machine_halt,
1363 .crash_shutdown = xen_crash_shutdown,
1364 .emergency_restart = xen_emergency_restart,
1367 static unsigned char xen_get_nmi_reason(void)
1369 unsigned char reason = 0;
1371 /* Construct a value which looks like it came from port 0x61. */
1372 if (test_bit(_XEN_NMIREASON_io_error,
1373 &HYPERVISOR_shared_info->arch.nmi_reason))
1374 reason |= NMI_REASON_IOCHK;
1375 if (test_bit(_XEN_NMIREASON_pci_serr,
1376 &HYPERVISOR_shared_info->arch.nmi_reason))
1377 reason |= NMI_REASON_SERR;
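/*
 * Populate boot_params with EDD disk info and MBR signatures queried from
 * Xen via XENPF_firmware_info, since the usual real-mode boot path that
 * collects them never ran.
 */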
1382 static void __init xen_boot_params_init_edd(void)
1384 #if IS_ENABLED(CONFIG_EDD)
1385 struct xen_platform_op op;
1386 struct edd_info *edd_info;
1391 edd_info = boot_params.eddbuf;
1392 mbr_signature = boot_params.edd_mbr_sig_buffer;
1394 op.cmd = XENPF_firmware_info;
1396 op.u.firmware_info.type = XEN_FW_DISK_INFO;
1397 for (nr = 0; nr < EDDMAXNR; nr++) {
1398 struct edd_info *info = edd_info + nr;
1400 op.u.firmware_info.index = nr;
1401 info->params.length = sizeof(info->params);
1402 set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
1404 ret = HYPERVISOR_platform_op(&op);
1408 #define C(x) info->x = op.u.firmware_info.u.disk_info.x
1411 C(interface_support);
1412 C(legacy_max_cylinder);
1414 C(legacy_sectors_per_track);
1417 boot_params.eddbuf_entries = nr;
1419 op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
1420 for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
1421 op.u.firmware_info.index = nr;
1422 ret = HYPERVISOR_platform_op(&op);
1425 mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
1427 boot_params.edd_mbr_sig_buf_entries = nr;
1432 * Set up the GDT and segment registers for -fstack-protector. Until
1433 * we do this, we have to be careful not to call any stack-protected
1434 * function, which is most of the kernel.
1436 static void xen_setup_gdt(int cpu)
1438 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
1439 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
1441 setup_stack_canary_segment(0);
1442 switch_to_new_gdt(0);
1444 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
1445 pv_cpu_ops.load_gdt = xen_load_gdt;
1448 static void __init xen_dom0_set_legacy_features(void)
1450 x86_platform.legacy.rtc = 1;
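/*
 * Register the Xen CPU hotplug prepare/dead and online callbacks; on failure
 * the prepare state is removed again.
 */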
1453 static int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
1454 int (*cpu_dead_cb)(unsigned int))
1458 rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
1459 "x86/xen/hvm_guest:prepare",
1460 cpu_up_prepare_cb, cpu_dead_cb);
1462 rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
1463 "x86/xen/hvm_guest:online",
1464 xen_cpu_up_online, NULL);
1466 cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
1469 return rc >= 0 ? 0 : rc;
1472 /* First C function to be called on Xen boot */
1473 asmlinkage __visible void __init xen_start_kernel(void)
1475 struct physdev_set_iopl set_iopl;
1476 unsigned long initrd_start = 0;
1479 if (!xen_start_info)
1482 xen_domain_type = XEN_PV_DOMAIN;
1484 xen_setup_features();
1486 xen_setup_machphys_mapping();
1488 /* Install Xen paravirt ops */
1490 pv_init_ops = xen_init_ops;
1491 pv_cpu_ops = xen_cpu_ops;
1493 x86_platform.get_nmi_reason = xen_get_nmi_reason;
1495 x86_init.resources.memory_setup = xen_memory_setup;
1496 x86_init.oem.arch_setup = xen_arch_setup;
1497 x86_init.oem.banner = xen_banner;
1499 xen_init_time_ops();
1502 * Set up some pagetable state before starting to set any ptes.
1507 /* Prevent unwanted bits from being set in PTEs. */
1508 __supported_pte_mask &= ~_PAGE_GLOBAL;
1511 * Prevent page tables from being allocated in highmem, even
1512 * if CONFIG_HIGHPTE is enabled.
1514 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
1516 /* Work out if we support NX */
1520 xen_build_dynamic_phys_to_machine();
1523 * Set up kernel GDT and segment registers, mainly so that
1524 * -fstack-protector code can be executed.
1529 xen_init_cpuid_mask();
1531 #ifdef CONFIG_X86_LOCAL_APIC
1533 * set up the basic apic ops.
1538 if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
1539 pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
1540 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
1543 machine_ops = xen_machine_ops;
1546 * The only reliable way to retain the initial address of the
1547 * percpu gdt_page is to remember it here, so we can go and
1548 * mark it RW later, when the initial percpu area is freed.
1550 xen_initial_gdt = &per_cpu(gdt_page, 0);
1554 #ifdef CONFIG_ACPI_NUMA
1556 * The pages we get from Xen are not related to machine pages, so
1557 * any NUMA information the kernel tries to get from ACPI will
1558 * be meaningless. Prevent it from trying.
1562 /* Don't do the full vcpu_info placement stuff until we have a
1563 possible map and a non-dummy shared_info. */
1564 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
1566 WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
1568 local_irq_disable();
1569 early_boot_irqs_disabled = true;
1571 xen_raw_console_write("mapping kernel into physical memory\n");
1572 xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
1573 xen_start_info->nr_pages);
1574 xen_reserve_special_pages();
1576 /* keep using Xen gdt for now; no urgent need to change it */
1578 #ifdef CONFIG_X86_32
1579 pv_info.kernel_rpl = 1;
1580 if (xen_feature(XENFEAT_supervisor_mode_kernel))
1581 pv_info.kernel_rpl = 0;
1583 pv_info.kernel_rpl = 0;
1585 /* set the limit of our address space */
1589 * We used to do this in xen_arch_setup, but that is too late
1590 * on AMD where early_cpu_init (run before ->arch_setup()) calls
1591 * early_amd_init which pokes the 0xcf8 port.
1594 rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
1596 xen_raw_printk("physdev_op failed %d\n", rc);
1598 #ifdef CONFIG_X86_32
1599 /* set up basic CPUID stuff */
1600 cpu_detect(&new_cpu_data);
1601 set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
1602 new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
1605 if (xen_start_info->mod_start) {
1606 if (xen_start_info->flags & SIF_MOD_START_PFN)
1607 initrd_start = PFN_PHYS(xen_start_info->mod_start);
1609 initrd_start = __pa(xen_start_info->mod_start);
1612 /* Poke various useful things into boot_params */
1613 boot_params.hdr.type_of_loader = (9 << 4) | 0;
1614 boot_params.hdr.ramdisk_image = initrd_start;
1615 boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
1616 boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
1617 boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;
1619 if (!xen_initial_domain()) {
1620 add_preferred_console("xenboot", 0, NULL);
1621 add_preferred_console("tty", 0, NULL);
1622 add_preferred_console("hvc", 0, NULL);
1624 x86_init.pci.arch_init = pci_xen_init;
1626 const struct dom0_vga_console_info *info =
1627 (void *)((char *)xen_start_info +
1628 xen_start_info->console.dom0.info_off);
1629 struct xen_platform_op op = {
1630 .cmd = XENPF_firmware_info,
1631 .interface_version = XENPF_INTERFACE_VERSION,
1632 .u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
1635 x86_platform.set_legacy_features =
1636 xen_dom0_set_legacy_features;
1637 xen_init_vga(info, xen_start_info->console.dom0.info_size);
1638 xen_start_info->console.domU.mfn = 0;
1639 xen_start_info->console.domU.evtchn = 0;
1641 if (HYPERVISOR_platform_op(&op) == 0)
1642 boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;
1644 /* Make sure ACS will be enabled */
1647 xen_acpi_sleep_register();
1649 /* Avoid searching for BIOS MP tables */
1650 x86_init.mpparse.find_smp_config = x86_init_noop;
1651 x86_init.mpparse.get_smp_config = x86_init_uint_noop;
1653 xen_boot_params_init_edd();
1656 /* PCI BIOS service won't work from a PV guest. */
1657 pci_probe &= ~PCI_PROBE_BIOS;
1659 xen_raw_console_write("about to get started...\n");
1661 /* Let's presume PV guests always boot on vCPU with id 0. */
1662 per_cpu(xen_vcpu_id, 0) = 0;
1664 xen_setup_runstate_info(0);
1668 /* Start the world */
1669 #ifdef CONFIG_X86_32
1670 i386_start_kernel();
1672 cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
1673 x86_64_start_reservations((char *)__pa_symbol(&boot_params));
1677 #ifdef CONFIG_XEN_PVH
1679 static void xen_pvh_arch_setup(void)
1682 /* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */
1683 if (nr_ioapics == 0)
1684 acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
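/*
 * Build boot_params for a PVH guest from pvh_start_info and the E820 map
 * obtained from Xen via XENMEM_memory_map.
 */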
1688 static void __init init_pvh_bootparams(void)
1690 struct xen_memory_map memmap;
1694 memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
1696 memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_table);
1697 set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_table);
1698 rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
1700 xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
1704 if (memmap.nr_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
1705 pvh_bootparams.e820_table[memmap.nr_entries].addr =
1707 pvh_bootparams.e820_table[memmap.nr_entries].size =
1708 ISA_END_ADDRESS - ISA_START_ADDRESS;
1709 pvh_bootparams.e820_table[memmap.nr_entries].type =
1711 memmap.nr_entries++;
1713 xen_raw_printk("Warning: Can fit ISA range into e820\n");
1715 pvh_bootparams.e820_entries = memmap.nr_entries;
1716 for (i = 0; i < pvh_bootparams.e820_entries; i++)
1717 e820__range_add(pvh_bootparams.e820_table[i].addr,
1718 pvh_bootparams.e820_table[i].size,
1719 pvh_bootparams.e820_table[i].type);
1721 e820__update_table(e820_table);
1723 pvh_bootparams.hdr.cmd_line_ptr =
1724 pvh_start_info.cmdline_paddr;
1726 /* The first module is always ramdisk. */
1727 if (pvh_start_info.nr_modules) {
1728 struct hvm_modlist_entry *modaddr =
1729 __va(pvh_start_info.modlist_paddr);
1730 pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
1731 pvh_bootparams.hdr.ramdisk_size = modaddr->size;
1735 * See Documentation/x86/boot.txt.
1737 * Version 2.12 supports the Xen entry point, but we will use the default
1738 * x86/PC environment (i.e. hardware_subarch 0).
1740 pvh_bootparams.hdr.version = 0x212;
1741 pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */
1745 * This routine (and those that it might call) should not use
1746 * anything that lives in .bss since that segment will be cleared later.
1748 void __init xen_prepare_pvh(void)
1753 if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
1754 xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
1755 pvh_start_info.magic);
1761 msr = cpuid_ebx(xen_cpuid_base() + 2);
1762 pfn = __pa(hypercall_page);
1763 wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
1765 init_pvh_bootparams();
1767 x86_init.oem.arch_setup = xen_pvh_arch_setup;
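/*
 * Map the shared_info page into the guest physical address space with
 * XENMEM_add_to_physmap and point the per-cpu xen_vcpu pointers at its
 * vcpu_info array.
 */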
1771 void __ref xen_hvm_init_shared_info(void)
1774 struct xen_add_to_physmap xatp;
1775 static struct shared_info *shared_info_page = NULL;
1777 if (!shared_info_page)
1778 shared_info_page = (struct shared_info *)
1779 extend_brk(PAGE_SIZE, PAGE_SIZE);
1780 xatp.domid = DOMID_SELF;
1782 xatp.space = XENMAPSPACE_shared_info;
1783 xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
1784 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
1787 HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
1789 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
1790 * page; we use it in the event channel upcall and in some pvclock
1791 * related functions. We don't need the vcpu_info placement
1792 * optimizations because we don't use any pv_mmu or pv_irq op on HVM.
1794 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
1795 * online but xen_hvm_init_shared_info is run at resume time too and
1796 * in that case multiple vcpus might be online. */
1797 for_each_online_cpu(cpu) {
1798 /* Leave it to be NULL. */
1799 if (xen_vcpu_nr(cpu) >= MAX_VIRT_CPUS)
1801 per_cpu(xen_vcpu, cpu) =
1802 &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
1806 #ifdef CONFIG_XEN_PVHVM
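/*
 * Query the Xen version from the CPUID leaves, set up the hypercall page
 * (PVH guests already did this in xen_prepare_pvh()) and record this CPU's
 * Xen VCPU id.
 */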
1807 static void __init init_hvm_pv_info(void)
1810 uint32_t eax, ebx, ecx, edx, base;
1812 base = xen_cpuid_base();
1813 eax = cpuid_eax(base + 1);
1816 minor = eax & 0xffff;
1817 printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
1819 xen_domain_type = XEN_HVM_DOMAIN;
1821 /* PVH sets up the hypercall page in xen_prepare_pvh(). */
1822 if (xen_pvh_domain())
1823 pv_info.name = "Xen PVH";
1828 pv_info.name = "Xen HVM";
1829 msr = cpuid_ebx(base + 2);
1830 pfn = __pa(hypercall_page);
1831 wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
1834 xen_setup_features();
1836 cpuid(base + 4, &eax, &ebx, &ecx, &edx);
1837 if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
1838 this_cpu_write(xen_vcpu_id, ebx);
1840 this_cpu_write(xen_vcpu_id, smp_processor_id());
1844 static int xen_cpu_up_prepare_pv(unsigned int cpu)
1848 xen_setup_timer(cpu);
1850 rc = xen_smp_intr_init(cpu);
1852 WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
1859 static int xen_cpu_up_prepare_hvm(unsigned int cpu)
1864 * This can happen if the CPU was offlined earlier and
1865 * offlining timed out in common_cpu_die().
1867 if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
1868 xen_smp_intr_free(cpu);
1869 xen_uninit_lock_cpu(cpu);
1872 if (cpu_acpi_id(cpu) != U32_MAX)
1873 per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
1875 per_cpu(xen_vcpu_id, cpu) = cpu;
1876 xen_vcpu_setup(cpu);
1878 if (xen_feature(XENFEAT_hvm_safe_pvclock))
1879 xen_setup_timer(cpu);
1881 rc = xen_smp_intr_init(cpu);
1883 WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
1890 static int xen_cpu_dead_pv(unsigned int cpu)
1892 xen_smp_intr_free(cpu);
1894 xen_teardown_timer(cpu);
1899 static int xen_cpu_dead_hvm(unsigned int cpu)
1901 xen_smp_intr_free(cpu);
1903 if (xen_feature(XENFEAT_hvm_safe_pvclock))
1904 xen_teardown_timer(cpu);
1909 static int xen_cpu_up_online(unsigned int cpu)
1911 xen_init_lock_cpu(cpu);
1915 #ifdef CONFIG_XEN_PVHVM
1916 #ifdef CONFIG_KEXEC_CORE
1917 static void xen_hvm_shutdown(void)
1919 native_machine_shutdown();
1920 if (kexec_in_progress)
1921 xen_reboot(SHUTDOWN_soft_reset);
1924 static void xen_hvm_crash_shutdown(struct pt_regs *regs)
1926 native_machine_crash_shutdown(regs);
1927 xen_reboot(SHUTDOWN_soft_reset);
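/*
 * PVHVM guest setup: shared_info mapping, panic handler, HVM callback
 * vector check, CPU hotplug callbacks, emulated device unplug and the HVM
 * time/MMU ops.
 */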
1931 static void __init xen_hvm_guest_init(void)
1933 if (xen_pv_domain())
1938 xen_hvm_init_shared_info();
1940 xen_panic_handler_init();
1942 BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
1945 WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
1946 xen_unplug_emulated_devices();
1947 x86_init.irqs.intr_init = xen_init_IRQ;
1948 xen_hvm_init_time_ops();
1949 xen_hvm_init_mmu_ops();
1951 if (xen_pvh_domain())
1952 machine_ops.emergency_restart = xen_emergency_restart;
1953 #ifdef CONFIG_KEXEC_CORE
1954 machine_ops.shutdown = xen_hvm_shutdown;
1955 machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
1960 static bool xen_nopv = false;
1961 static __init int xen_parse_nopv(char *arg)
1966 early_param("xen_nopv", xen_parse_nopv);
1968 static uint32_t __init xen_platform_pv(void)
1970 if (xen_pv_domain())
1971 return xen_cpuid_base();
1976 static uint32_t __init xen_platform_hvm(void)
1978 if (xen_pv_domain() || xen_nopv)
1981 return xen_cpuid_base();
1984 bool xen_hvm_need_lapic(void)
1988 if (xen_pv_domain())
1990 if (!xen_hvm_domain())
1992 if (xen_feature(XENFEAT_hvm_pirqs))
1996 EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
1998 static void xen_set_cpu_features(struct cpuinfo_x86 *c)
2000 clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
2001 set_cpu_cap(c, X86_FEATURE_XENPV);
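/*
 * Best-effort vcpu pinning via SCHEDOP_pin_override; pinning is disabled
 * for good after the first hard failure or missing privilege.
 */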
2004 static void xen_pin_vcpu(int cpu)
2006 static bool disable_pinning;
2007 struct sched_pin_override pin_override;
2010 if (disable_pinning)
2013 pin_override.pcpu = cpu;
2014 ret = HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin_override);
2016 /* Ignore errors when removing override. */
2022 pr_warn("Unable to pin on physical cpu %d. In case of problems consider vcpu pinning.\n",
2024 disable_pinning = true;
2027 WARN(1, "Trying to pin vcpu without having privilege to do so\n");
2028 disable_pinning = true;
2032 pr_warn("Physical cpu %d not available for pinning. Check Xen cpu configuration.\n",
2038 WARN(1, "rc %d while trying to pin vcpu\n", ret);
2039 disable_pinning = true;
2043 const struct hypervisor_x86 x86_hyper_xen_pv = {
2045 .detect = xen_platform_pv,
2046 .set_cpu_features = xen_set_cpu_features,
2047 .pin_vcpu = xen_pin_vcpu,
2049 EXPORT_SYMBOL(x86_hyper_xen_pv);
2051 const struct hypervisor_x86 x86_hyper_xen_hvm = {
2053 .detect = xen_platform_hvm,
2054 .init_platform = xen_hvm_guest_init,
2055 .pin_vcpu = xen_pin_vcpu,
2056 .x2apic_available = xen_x2apic_para_available,
2058 EXPORT_SYMBOL(x86_hyper_xen_hvm);
2060 #ifdef CONFIG_HOTPLUG_CPU
2061 void xen_arch_register_cpu(int num)
2063 arch_register_cpu(num);
2065 EXPORT_SYMBOL(xen_arch_register_cpu);
2067 void xen_arch_unregister_cpu(int num)
2069 arch_unregister_cpu(num);
2071 EXPORT_SYMBOL(xen_arch_unregister_cpu);