device tree bindings for your controller.
GPIO mappings are defined in the consumer device's node, in a property named
-<function>-gpios, where <function> is the function the driver will request
-through gpiod_get(). For example:
+either <function>-gpios or <function>-gpio, where <function> is the function
+the driver will request through gpiod_get(). For example:
foo_device {
compatible = "acme,foo";
led-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>, /* red */
            <&gpio 16 GPIO_ACTIVE_HIGH>, /* green */
<&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */
- power-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
+ power-gpio = <&gpio 1 GPIO_ACTIVE_LOW>;
};
This property will make GPIOs 15, 16 and 17 available to the driver under the
"led" function, and GPIO 1 as the "power" GPIO:
struct gpio_desc *red, *green, *blue, *power;
- red = gpiod_get_index(dev, "led", 0);
- green = gpiod_get_index(dev, "led", 1);
- blue = gpiod_get_index(dev, "led", 2);
+ red = gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH);
+ green = gpiod_get_index(dev, "led", 1, GPIOD_OUT_HIGH);
+ blue = gpiod_get_index(dev, "led", 2, GPIOD_OUT_HIGH);
- power = gpiod_get(dev, "power");
+ power = gpiod_get(dev, "power", GPIOD_OUT_HIGH);
The led GPIOs will be active-high, while the power GPIO will be active-low (i.e.
gpiod_is_active_low(power) will be true).
+The second parameter of the gpiod_get() functions, the con_id string, has to be
+the <function> prefix of the GPIO suffixes ("gpios" or "gpio", automatically
+looked up by the gpiod functions internally) used in the device tree. With the
+above "led-gpios" example, use the prefix without the "-" as con_id parameter:
+"led".
+
+Internally, the GPIO subsystem prefixes the GPIO suffix ("gpios" or "gpio")
+with the string passed in con_id to get the resulting string
+(snprintf(... "%s-%s", con_id, gpio_suffixes[])).
+
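As a rough sketch of that lookup (the buffer size and loop are illustrative, not
the exact gpiolib implementation), the property name tried for each suffix is
built like this:

	static const char * const gpio_suffixes[] = { "gpios", "gpio" };
	char propname[32];	/* illustrative size */
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
		/* e.g. con_id "led" + "gpios" -> "led-gpios" */
		snprintf(propname, sizeof(propname), "%s-%s",
			 con_id, gpio_suffixes[i]);
		/* propname is then looked up in the consumer's DT node */
	}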
ACPI
----
ACPI also supports function names for GPIOs in a similar fashion to DT.
struct gpio_desc *red, *green, *blue, *power;
- red = gpiod_get_index(dev, "led", 0);
- green = gpiod_get_index(dev, "led", 1);
- blue = gpiod_get_index(dev, "led", 2);
+ red = gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH);
+ green = gpiod_get_index(dev, "led", 1, GPIOD_OUT_HIGH);
+ blue = gpiod_get_index(dev, "led", 2, GPIOD_OUT_HIGH);
- power = gpiod_get(dev, "power");
- gpiod_direction_output(power, 1);
+ power = gpiod_get(dev, "power", GPIOD_OUT_HIGH);
-Since the "power" GPIO is mapped as active-low, its actual signal will be 0
-after this code. Contrary to the legacy integer GPIO interface, the active-low
-property is handled during mapping and is thus transparent to GPIO consumers.
+Since the "led" GPIOs are mapped as active-high, this example will switch their
+signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped
+as active-low, its actual signal will be 0 after this code. Contrary to the legacy
+integer GPIO interface, the active-low property is handled during mapping and is
+thus transparent to GPIO consumers.
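As a short illustration using the descriptors requested above, the consumer only
ever writes logical values and gpiolib applies the polarity:

	gpiod_set_value(red, 1);	/* LED on: line driven high (active-high) */
	gpiod_set_value(power, 1);	/* power on: line driven low (active-low) */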
const char *con_id, unsigned int idx,
enum gpiod_flags flags)
+For a more detailed description of the con_id parameter in the DeviceTree case
+see Documentation/gpio/board.txt
+
The flags parameter is used to optionally specify a direction and initial value
for the GPIO. Values can be:
Prefix: 'nct6792'
Addresses scanned: ISA address retrieved from Super I/O registers
Datasheet: Available from Nuvoton upon request
+ * Nuvoton NCT6793D
+ Prefix: 'nct6793'
+ Addresses scanned: ISA address retrieved from Super I/O registers
+ Datasheet: Available from Nuvoton upon request
Authors:
Guenter Roeck <linux@roeck-us.net>
DEFINE_STATIC_KEY_TRUE(key);
DEFINE_STATIC_KEY_FALSE(key);
-static_key_likely()
-statick_key_unlikely()
+static_branch_likely()
+static_branch_unlikely()
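A minimal usage sketch with the corrected names (the key and the function in the
branch are made up for illustration):

	DEFINE_STATIC_KEY_FALSE(my_feature_key);

	if (static_branch_unlikely(&my_feature_key))
		do_my_feature();	/* taken only once the key is enabled */

	/* elsewhere, e.g. from setup code: */
	static_branch_enable(&my_feature_key);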
0) Abstract
LTP (Linux Test Project)
M: Mike Frysinger <vapier@gentoo.org>
M: Cyril Hrubis <chrubis@suse.cz>
-M: Wanlong Gao <gaowanlong@cn.fujitsu.com>
+M: Wanlong Gao <wanlong.gao@gmail.com>
M: Jan Stancek <jstancek@redhat.com>
M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
M: Alexey Kodanev <alexey.kodanev@oracle.com>
-L: ltp-list@lists.sourceforge.net (subscribers-only)
+L: ltp@lists.linux.it (subscribers-only)
W: http://linux-test-project.github.io/
T: git git://github.com/linux-test-project/ltp.git
S: Maintained
M: Liam Girdwood <lgirdwood@gmail.com>
M: Mark Brown <broonie@kernel.org>
L: linux-kernel@vger.kernel.org
-W: http://opensource.wolfsonmicro.com/node/15
W: http://www.slimlogic.co.uk/?p=48
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git
S: Supported
M: Mark Brown <broonie@kernel.org>
M: Liam Girdwood <lrg@slimlogic.co.uk>
L: linux-input@vger.kernel.org
-T: git git://opensource.wolfsonmicro.com/linux-2.6-touch
-W: http://opensource.wolfsonmicro.com/node/7
+W: https://github.com/CirrusLogic/linux-drivers/wiki
S: Supported
F: drivers/input/touchscreen/*wm97*
F: include/linux/wm97xx.h
WOLFSON MICROELECTRONICS DRIVERS
L: patches@opensource.wolfsonmicro.com
-T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc
-T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
-W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
+T: git https://github.com/CirrusLogic/linux-drivers.git
+W: https://github.com/CirrusLogic/linux-drivers/wiki
S: Supported
F: Documentation/hwmon/wm83??
F: arch/arm/mach-s3c64xx/mach-crag6410*
VERSION = 4
PATCHLEVEL = 3
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*
unsigned long size)
{
return ioremap(offset, size);
-}
+}
+
+#define ioremap_uc ioremap_nocache
static inline void iounmap(volatile void __iomem *addr)
{
}
irq_enter();
- generic_handle_irq_desc(irq, desc);
+ generic_handle_irq_desc(desc);
irq_exit();
}
" bgt %0,1b"
: "=&r" (tmp), "=r" (loops) : "1"(loops));
}
+EXPORT_SYMBOL(__delay);
#ifdef CONFIG_SMP
#define LPJ cpu_data[smp_processor_id()].loops_per_jiffy
static int idu_first_irq;
-static void idu_cascade_isr(unsigned int __core_irq, struct irq_desc *desc)
+static void idu_cascade_isr(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
unsigned int core_irq = irq_desc_get_irq(desc);
LD += -EL
endif
+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
+
# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
}
}
-void it8152_irq_demux(unsigned int irq, struct irq_desc *desc)
+void it8152_irq_demux(struct irq_desc *desc)
{
int bits_pd, bits_lp, bits_ld;
int i;
},
};
-static void locomo_handler(unsigned int __irq, struct irq_desc *desc)
+static void locomo_handler(struct irq_desc *desc)
{
struct locomo *lchip = irq_desc_get_chip_data(desc);
int req, i;
* active IRQs causes the interrupt output to pulse, the upper levels
* will call us again if there are more interrupts to process.
*/
-static void
-sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void sa1111_irq_handler(struct irq_desc *desc)
{
- unsigned int irq = irq_desc_get_irq(desc);
unsigned int stat0, stat1, i;
struct sa1111 *sachip = irq_desc_get_handler_data(desc);
void __iomem *mapbase = sachip->base + SA1111_INTC;
sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1);
if (stat0 == 0 && stat1 == 0) {
- do_bad_IRQ(irq, desc);
+ do_bad_IRQ(desc);
return;
}
#endif
.endm
- .macro uaccess_save_and_disable, tmp
- uaccess_save \tmp
- uaccess_disable \tmp
- .endm
-
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
"2:\t.asciz " #__file "\n" \
".popsection\n" \
".pushsection __bug_table,\"a\"\n" \
+ ".align 2\n" \
"3:\t.word 1b, 2b\n" \
"\t.hword " #__line ", 0\n" \
".popsection"); \
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
+#include <asm/thread_info.h>
#endif
/*
asm(
"mrc p15, 0, %0, c3, c0 @ get domain"
- : "=r" (domain));
+ : "=r" (domain)
+ : "m" (current_thread_info()->cpu_domain));
return domain;
}
{
asm volatile(
"mcr p15, 0, %0, c3, c0 @ set domain"
- : : "r" (val));
+ : : "r" (val) : "memory");
isb();
}
struct pci_dev;
struct pci_sys_data;
-extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc);
+extern void it8152_irq_demux(struct irq_desc *desc);
extern void it8152_init_irq(void);
extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
pr_crit("unexpected IRQ trap at vector %02x\n", irq);
}
-void set_irq_flags(unsigned int irq, unsigned int flags);
-
-#define IRQF_VALID (1 << 0)
-#define IRQF_PROBE (1 << 1)
-#define IRQF_NOAUTOEN (1 << 2)
-
#define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE)
#endif
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
-#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
-#else
-#define KVM_MAX_VCPUS 0
-#endif
-
#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#include <kvm/arm_vgic.h>
+#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
struct kvm_vcpu_stat {
u32 halt_successful_poll;
+ u32 halt_attempted_poll;
u32 halt_wakeup;
};
/*
* This is for easy migration, but should be changed in the source
*/
-#define do_bad_IRQ(irq,desc) \
+#define do_bad_IRQ(desc) \
do { \
raw_spin_lock(&desc->lock); \
- handle_bad_irq(irq, desc); \
+ handle_bad_irq(desc); \
raw_spin_unlock(&desc->lock); \
} while(0)
struct task_struct;
#include <asm/types.h>
-#include <asm/domain.h>
typedef unsigned long mm_segment_t;
handle_IRQ(irq, regs);
}
-void set_irq_flags(unsigned int irq, unsigned int iflags)
-{
- unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
-
- if (irq >= nr_irqs) {
- pr_err("Trying to set irq flags for IRQ%d\n", irq);
- return;
- }
-
- if (iflags & IRQF_VALID)
- clr |= IRQ_NOREQUEST;
- if (iflags & IRQF_PROBE)
- clr |= IRQ_NOPROBE;
- if (!(iflags & IRQF_NOAUTOEN))
- clr |= IRQ_NOAUTOEN;
- /* Order is clear bits in "clr" then set bits in "set" */
- irq_modify_status(irq, clr, set & ~clr);
-}
-EXPORT_SYMBOL_GPL(set_irq_flags);
-
void __init init_IRQ(void)
{
int ret;
if (err)
return err;
- patch_text((void *)bpt->bpt_addr,
- *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+ /* Machine is already stopped, so we can use __patch_text() directly */
+ __patch_text((void *)bpt->bpt_addr,
+ *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
return err;
}
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
- patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+ /* Machine is already stopped, so we can use __patch_text() directly */
+ __patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
return 0;
}
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+#ifdef CONFIG_CPU_USE_DOMAINS
/*
* Copy the initial value of the domain access control register
* from the current thread: thread->addr_limit will have been
* kernel/fork.c
*/
thread->cpu_domain = get_domain();
+#endif
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
*/
thumb = handler & 1;
-#if __LINUX_ARM_ARCH__ >= 7
/*
- * Clear the If-Then Thumb-2 execution state
- * ARM spec requires this to be all 000s in ARM mode
- * Snapdragon S4/Krait misbehaves on a Thumb=>ARM
- * signal transition without this.
+ * Clear the If-Then Thumb-2 execution state. ARM spec
+ * requires this to be all 000s in ARM mode. Snapdragon
+ * S4/Krait misbehaves on a Thumb=>ARM signal transition
+ * without this.
+ *
+ * We must do this whenever we are running on a Thumb-2
+ * capable CPU, which includes ARMv6T2. However, we elect
+ * to always do this to simplify the code; this field is
+ * marked UNK/SBZP for older architectures.
*/
cpsr &= ~PSR_IT_MASK;
-#endif
if (thumb) {
cpsr |= PSR_T_BIT;
---help---
Provides host support for ARM processors.
-config KVM_ARM_MAX_VCPUS
- int "Number maximum supported virtual CPUs per VM"
- depends on KVM_ARM_HOST
- default 4
- help
- Static number of max supported virtual CPUs per VM.
-
- If you choose a high number, the vcpu structures will be quite
- large, so only choose a reasonable number that you expect to
- actually use.
-
endif # VIRTUALIZATION
* Map the VGIC hardware resources before running a vcpu the first
* time on this VM.
*/
- if (unlikely(!vgic_ready(kvm))) {
+ if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
ret = kvm_vgic_map_resources(kvm);
if (ret)
return ret;
mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
- bic r2, #1 @ Clear ENABLE
- mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
isb
mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
mcrr p15, 4, r2, r2, c14 @ CNTVOFF
1:
+ mov r2, #0 @ Clear ENABLE
+ mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
@ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
if (vma->vm_flags & VM_PFNMAP) {
gpa_t gpa = mem->guest_phys_addr +
(vm_start - mem->userspace_addr);
- phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
- vm_start - vma->vm_start;
+ phys_addr_t pa;
+
+ pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
- int i;
+ int i, matching_cpus = 0;
unsigned long mpidr;
unsigned long target_affinity;
unsigned long target_affinity_mask;
*/
kvm_for_each_vcpu(i, tmp, kvm) {
mpidr = kvm_vcpu_get_mpidr_aff(tmp);
- if (((mpidr & target_affinity_mask) == target_affinity) &&
- !tmp->arch.pause) {
- return PSCI_0_2_AFFINITY_LEVEL_ON;
+ if ((mpidr & target_affinity_mask) == target_affinity) {
+ matching_cpus++;
+ if (!tmp->arch.pause)
+ return PSCI_0_2_AFFINITY_LEVEL_ON;
}
}
+ if (!matching_cpus)
+ return PSCI_RET_INVALID_PARAMS;
+
return PSCI_0_2_AFFINITY_LEVEL_OFF;
}
.irq_ack = pmu_irq_ack,
};
-static void pmu_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void pmu_irq_handler(struct irq_desc *desc)
{
- unsigned int irq = irq_desc_get_irq(desc);
unsigned long cause = readl(PMU_INTERRUPT_CAUSE);
+ unsigned int irq;
cause &= readl(PMU_INTERRUPT_MASK);
if (cause == 0) {
- do_bad_IRQ(irq, desc);
+ do_bad_IRQ(desc);
return;
}
.irq_unmask = isa_unmask_pic_hi_irq,
};
-static void
-isa_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void isa_irq_handler(struct irq_desc *desc)
{
unsigned int isa_irq = *(unsigned char *)PCIIACK_BASE;
if (isa_irq < _ISA_IRQ(0) || isa_irq >= _ISA_IRQ(16)) {
- do_bad_IRQ(isa_irq, desc);
+ do_bad_IRQ(desc);
return;
}
return 0;
}
-static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void gpio_irq_handler(struct irq_desc *desc)
{
unsigned int port = (unsigned int)irq_desc_get_handler_data(desc);
unsigned int gpio_irq_no, irq_stat;
.resource = smsc911x_resources,
};
-static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc)
+static void mxc_expio_irq_handler(struct irq_desc *desc)
{
u32 imr_val;
u32 int_valid;
imx31_add_imx_uart0(&uart_pdata);
}
-static void mx31ads_expio_irq_handler(u32 irq, struct irq_desc *desc)
+static void mx31ads_expio_irq_handler(struct irq_desc *desc)
{
u32 imr_val;
u32 int_valid;
write_imipr_3,
};
-static void iop13xx_msi_handler(unsigned int irq, struct irq_desc *desc)
+static void iop13xx_msi_handler(struct irq_desc *desc)
{
int i, j;
unsigned long status;
.irq_set_wake = lpc32xx_irq_wake
};
-static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc)
+static void lpc32xx_sic1_handler(struct irq_desc *desc)
{
unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC1_BASE));
}
}
-static void lpc32xx_sic2_handler(unsigned int irq, struct irq_desc *desc)
+static void lpc32xx_sic2_handler(struct irq_desc *desc)
{
unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC2_BASE));
#define DEBUG_IRQ(fmt...) while (0) {}
#endif
-static void
-netx_hif_demux_handler(unsigned int irq_unused, struct irq_desc *desc)
+static void netx_hif_demux_handler(struct irq_desc *desc)
{
unsigned int irq = NETX_IRQ_HIF_CHAINED(0);
unsigned int stat;
fpga_ack_irq(d);
}
-static void innovator_fpga_IRQ_demux(unsigned int irq, struct irq_desc *desc)
+static void innovator_fpga_IRQ_demux(struct irq_desc *desc)
{
u32 stat;
int fpga_irq;
* dispatched accordingly. Clearing of the wakeup events should be
* done by the SoC specific individual handlers.
*/
-static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void omap_prcm_irq_handler(struct irq_desc *desc)
{
unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
.irq_unmask = balloon3_unmask_irq,
};
-static void balloon3_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void balloon3_irq_handler(struct irq_desc *desc)
{
unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
balloon3_irq_enabled;
void __iomem *it8152_base_address;
static int cmx2xx_it8152_irq_gpio;
-static void cmx2xx_it8152_irq_demux(unsigned int __irq, struct irq_desc *desc)
+static void cmx2xx_it8152_irq_demux(struct irq_desc *desc)
{
- unsigned int irq = irq_desc_get_irq(desc);
/* clear our parent irq */
desc->irq_data.chip->irq_ack(&desc->irq_data);
- it8152_irq_demux(irq, desc);
+ it8152_irq_demux(desc);
}
void __cmx2xx_pci_init_irq(int irq_gpio)
.irq_unmask = lpd270_unmask_irq,
};
-static void lpd270_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void lpd270_irq_handler(struct irq_desc *desc)
{
unsigned int irq;
unsigned long pending;
.irq_unmask = pcm990_unmask_irq,
};
-static void pcm990_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void pcm990_irq_handler(struct irq_desc *desc)
{
unsigned int irq;
unsigned long pending;
viper_irq_enabled_mask;
}
-static void viper_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void viper_irq_handler(struct irq_desc *desc)
{
unsigned int irq;
unsigned long pending;
return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask;
}
-static void zeus_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void zeus_irq_handler(struct irq_desc *desc)
{
unsigned int irq;
unsigned long pending;
}
}
-static void
-ecard_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ecard_irq_handler(struct irq_desc *desc)
{
ecard_t *ec;
int called = 0;
.irq_ack = bast_pc104_maskack
};
-static void
-bast_irq_pc104_demux(unsigned int irq,
- struct irq_desc *desc)
+static void bast_irq_pc104_demux(struct irq_desc *desc)
{
unsigned int stat;
unsigned int irqno;
}
}
-static void s3c_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux_eint0_3(struct irq_desc *desc)
{
s3c_irq_demux_eint(0, 3);
}
-static void s3c_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux_eint4_11(struct irq_desc *desc)
{
s3c_irq_demux_eint(4, 11);
}
-static void s3c_irq_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux_eint12_19(struct irq_desc *desc)
{
s3c_irq_demux_eint(12, 19);
}
-static void s3c_irq_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux_eint20_27(struct irq_desc *desc)
{
s3c_irq_demux_eint(20, 27);
}
* ensure that the IRQ signal is deasserted before returning. This
* is rather unfortunate.
*/
-static void neponset_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void neponset_irq_handler(struct irq_desc *desc)
{
struct neponset_drvdata *d = irq_desc_get_handler_data(desc);
unsigned int irr;
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
dma_addr_t dma_addr, iova;
- int i, ret = DMA_ERROR_CODE;
+ int i;
dma_addr = __alloc_iova(mapping, size);
if (dma_addr == DMA_ERROR_CODE)
iova = dma_addr;
for (i = 0; i < count; ) {
+ int ret;
+
unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
phys_addr_t phys = page_to_phys(pages[i]);
unsigned int len, j;
reteq r4 @ no, return failure
next:
+ uaccess_enable r3
.Lx1: ldrt r6, [r5], #4 @ get the next instruction and
@ increment PC
-
+ uaccess_disable r3
and r2, r6, #0x0F000000 @ test for FP insns
teq r2, #0x0C000000
teqne r2, #0x0D000000
return 0;
}
-static void gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
+static void gpio_irq_handler(struct irq_desc *desc)
{
struct orion_gpio_chip *ochip = irq_desc_get_handler_data(desc);
u32 cause, type;
mov r1, r2
mov r2, r3
ldr r3, [sp, #8]
+ /*
+ * Privcmd calls are issued by the userspace. We need to allow the
+ * kernel to access the userspace memory before issuing the hypercall.
+ */
+ uaccess_enable r4
+
+ /* r4 is loaded now as we use it as scratch register before */
ldr r4, [sp, #4]
__HVC(XEN_IMM)
+
+ /*
+ * Disable userspace access from kernel. This is fine to do it
+ * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
+ * called before.
+ */
+ uaccess_disable r4
+
ldm sp!, {r4}
ret lr
ENDPROC(privcmd_call);
select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_CPU_AUTOPROBE
select GENERIC_EARLY_IOREMAP
+ select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
If unsure, say Y.
+config ARM64_ERRATUM_843419
+ bool "Cortex-A53: 843419: A load or store might access an incorrect address"
+ depends on MODULES
+ default y
+ help
+ This option builds kernel modules using the large memory model in
+ order to avoid the use of the ADRP instruction, which can cause
+ a subsequent memory access to use an incorrect address on Cortex-A53
+ parts up to r0p4.
+
+ Note that the kernel itself must be linked with a version of ld
+ which fixes potentially affected ADRP instructions through the
+ use of veneers.
+
+ If unsure, say Y.
+
endmenu
CHECKFLAGS += -D__aarch64__
+ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+CFLAGS_MODULE += -mcmodel=large
+endif
+
# Default value
head-y := arch/arm64/kernel/head.o
irq_err_count++;
}
-/*
- * No arch-specific IRQ flags.
- */
-#define set_irq_flags(irq, flags)
-
#endif /* __ASM_HARDIRQ_H */
SCTLR_EL2_SA | SCTLR_EL2_I)
/* TCR_EL2 Registers bits */
+#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
#define TCR_EL2_TBI (1 << 20)
#define TCR_EL2_PS (7 << 16)
#define TCR_EL2_PS_40B (2 << 16)
#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
-#define TCR_EL2_FLAGS (TCR_EL2_PS_40B)
+#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
/* VTCR_EL2 Registers bits */
+#define VTCR_EL2_RES1 (1 << 31)
#define VTCR_EL2_PS_MASK (7 << 16)
#define VTCR_EL2_TG0_MASK (1 << 14)
#define VTCR_EL2_TG0_4K (0 << 14)
*/
#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
- VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
+ VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
+ VTCR_EL2_RES1)
#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
#else
/*
*/
#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
- VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
+ VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
+ VTCR_EL2_RES1)
#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
#endif
#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
/* Hyp System Trap Register */
-#define HSTR_EL2_TTEE (1 << 16)
#define HSTR_EL2_T(x) (1 << x)
/* Hyp Coproccessor Trap Register Shifts */
#define IFSR32_EL2 25 /* Instruction Fault Status Register */
#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */
#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */
-#define TEECR32_EL1 28 /* ThumbEE Configuration Register */
-#define TEEHBR32_EL1 29 /* ThumbEE Handler Base Register */
-#define NR_SYS_REGS 30
+#define NR_SYS_REGS 28
/* 32bit mapping */
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
-#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
-#else
-#define KVM_MAX_VCPUS 0
-#endif
-
#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
+#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
+
#define KVM_VCPU_MAX_FEATURES 3
int __attribute_const__ kvm_target_cpu(void);
struct kvm_vcpu_stat {
u32 halt_successful_poll;
+ u32 halt_attempted_poll;
u32 halt_wakeup;
};
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
+#define PTE_WRITE (PTE_DBM) /* same as DBM (51) */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
-#ifdef CONFIG_ARM64_HW_AFDBM
-#define PTE_WRITE (PTE_DBM) /* same as DBM */
-#else
-#define PTE_WRITE (_AT(pteval_t, 1) << 57)
-#endif
#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#ifdef CONFIG_ARM64_HW_AFDBM
-#define pte_hw_dirty(pte) (!(pte_val(pte) & PTE_RDONLY))
+#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte) (0)
#endif
* When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via
* the page fault mechanism. Checking the dirty status of a pte becomes:
*
- * PTE_DIRTY || !PTE_RDONLY
+ * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
*/
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
/* preserve the hardware dirty information */
if (pte_hw_dirty(pte))
- newprot |= PTE_DIRTY;
+ pte = pte_mkdirty(pte);
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
unsigned long action, void *data)
{
int cpu = (unsigned long)data;
- if (action == CPU_ONLINE)
+ if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
smp_call_function_single(cpu, clear_os_lock, NULL, 1);
return NOTIFY_OK;
}
msr hstr_el2, xzr // Disable CP15 traps to EL2
#endif
+ /* EL2 debug */
+ mrs x0, pmcr_el0 // Disable debug access traps
+ ubfx x0, x0, #11, #5 // to EL2 and allow access to
+ msr mdcr_el2, x0 // all PMU counters from EL1
+
/* Stage-2 translation */
msr vttbr_el2, xzr
void *hcpu)
{
int cpu = (long)hcpu;
- if (action == CPU_ONLINE)
+ if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
return NOTIFY_OK;
}
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
AARCH64_INSN_IMM_ADR);
break;
+#ifndef CONFIG_ARM64_ERRATUM_843419
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
AARCH64_INSN_IMM_ADR);
break;
+#endif
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false;
/*
* VFP save/restore code.
+ *
+ * We have to be careful with endianness, since the fpsimd context-switch
+ * code operates on 128-bit (Q) register values whereas the compat ABI
+ * uses an array of 64-bit (D) registers. Consequently, we need to swap
+ * the two halves of each Q register when running on a big-endian CPU.
*/
+union __fpsimd_vreg {
+ __uint128_t raw;
+ struct {
+#ifdef __AARCH64EB__
+ u64 hi;
+ u64 lo;
+#else
+ u64 lo;
+ u64 hi;
+#endif
+ };
+};
+
static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{
struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr, fpexc;
- int err = 0;
+ int i, err = 0;
/*
* Save the hardware registers to the fpsimd_state structure.
/*
* Now copy the FP registers. Since the registers are packed,
* we can copy the prefix we want (V0-V15) as it is.
- * FIXME: Won't work if big endian.
*/
- err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
- sizeof(frame->ufp.fpregs));
+ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+ union __fpsimd_vreg vreg = {
+ .raw = fpsimd->vregs[i >> 1],
+ };
+
+ __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+ __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+ }
/* Create an AArch32 fpscr from the fpsr and the fpcr. */
fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr;
- int err = 0;
+ int i, err = 0;
__get_user_error(magic, &frame->magic, err);
__get_user_error(size, &frame->size, err);
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL;
- /*
- * Copy the FP registers into the start of the fpsimd_state.
- * FIXME: Won't work if big endian.
- */
- err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
- sizeof(frame->ufp.fpregs));
+ /* Copy the FP registers into the start of the fpsimd_state. */
+ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+ union __fpsimd_vreg vreg;
+
+ __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+ __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+ fpsimd.vregs[i >> 1] = vreg.raw;
+ }
/* Extract the fpsr and the fpcr from the fpscr */
__get_user_error(fpscr, &frame->ufp.fpscr, err);
---help---
Provides host support for ARM processors.
-config KVM_ARM_MAX_VCPUS
- int "Number maximum supported virtual CPUs per VM"
- depends on KVM_ARM_HOST
- default 4
- help
- Static number of max supported virtual CPUs per VM.
-
- If you choose a high number, the vcpu structures will be quite
- large, so only choose a reasonable number that you expect to
- actually use.
-
endif # VIRTUALIZATION
mrs x5, ifsr32_el2
stp x4, x5, [x3]
- skip_fpsimd_state x8, 3f
+ skip_fpsimd_state x8, 2f
mrs x6, fpexc32_el2
str x6, [x3, #16]
-3:
- skip_debug_state x8, 2f
+2:
+ skip_debug_state x8, 1f
mrs x7, dbgvcr32_el2
str x7, [x3, #24]
-2:
- skip_tee_state x8, 1f
-
- add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
- mrs x4, teecr32_el1
- mrs x5, teehbr32_el1
- stp x4, x5, [x3]
1:
.endm
msr dacr32_el2, x4
msr ifsr32_el2, x5
- skip_debug_state x8, 2f
+ skip_debug_state x8, 1f
ldr x7, [x3, #24]
msr dbgvcr32_el2, x7
-2:
- skip_tee_state x8, 1f
-
- add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
- ldp x4, x5, [x3]
- msr teecr32_el1, x4
- msr teehbr32_el1, x5
1:
.endm
mrs x3, cntv_ctl_el0
and x3, x3, #3
str w3, [x0, #VCPU_TIMER_CNTV_CTL]
- bic x3, x3, #1 // Clear Enable
- msr cntv_ctl_el0, x3
isb
str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
1:
+ // Disable the virtual timer
+ msr cntv_ctl_el0, xzr
+
// Allow physical timer/counter access for the host
mrs x2, cnthctl_el2
orr x2, x2, #3
// Guest context
add x2, x0, #VCPU_CONTEXT
+ // We must restore the 32-bit state before the sysregs, thanks
+ // to Cortex-A57 erratum #852523.
+ restore_guest_32bit_state
bl __restore_sysregs
skip_debug_state x3, 1f
kern_hyp_va x3
bl __restore_debug
1:
- restore_guest_32bit_state
restore_guest_regs
// That's it, no more messing around.
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
- if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
return 0;
}
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
- if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
return 0;
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
- if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
return 0;
}
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
- if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
return 0;
}
{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
trap_dbgauthstatus_el1 },
- /* TEECR32_EL1 */
- { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
- NULL, reset_val, TEECR32_EL1, 0 },
- /* TEEHBR32_EL1 */
- { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
- NULL, reset_val, TEEHBR32_EL1, 0 },
-
/* MDCCSR_EL1 */
{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
trap_raz_wi },
if (IS_ENABLED(CONFIG_ZONE_DMA) &&
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
flags |= GFP_DMA;
- if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
+ if (dev_get_cma_area(dev) && (flags & __GFP_WAIT)) {
struct page *page;
void *addr;
.irq_set_type = eic_set_irq_type,
};
-static void demux_eic_irq(unsigned int irq, struct irq_desc *desc)
+static void demux_eic_irq(struct irq_desc *desc)
{
struct eic *eic = irq_desc_get_handler_data(desc);
unsigned long status, pending;
.irq_set_type = gpio_irq_type,
};
-static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void gpio_irq_handler(struct irq_desc *desc)
{
struct pio_device *pio = irq_desc_get_chip_data(desc);
unsigned gpio_irq;
extern void bfin_internal_unmask_irq(unsigned int irq);
struct irq_desc;
-extern void bfin_demux_mac_status_irq(unsigned int, struct irq_desc *);
-extern void bfin_demux_gpio_irq(unsigned int, struct irq_desc *);
+extern void bfin_demux_mac_status_irq(struct irq_desc *);
+extern void bfin_demux_gpio_irq(struct irq_desc *);
#endif
* than crashing, do something sensible.
*/
if (irq >= NR_IRQS)
- handle_bad_irq(irq, &bad_irq_desc);
+ handle_bad_irq(&bad_irq_desc);
else
generic_handle_irq(irq);
.irq_unmask = bf537_generic_error_unmask_irq,
};
-static void bf537_demux_error_irq(unsigned int int_err_irq,
- struct irq_desc *inta_desc)
+static void bf537_demux_error_irq(struct irq_desc *inta_desc)
{
int irq = 0;
.irq_unmask = bf537_mac_rx_unmask_irq,
};
-static void bf537_demux_mac_rx_irq(unsigned int __int_irq,
- struct irq_desc *desc)
+static void bf537_demux_mac_rx_irq(struct irq_desc *desc)
{
- unsigned int int_irq = irq_desc_get_irq(desc);
-
if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR))
bfin_handle_irq(IRQ_MAC_RX);
else
- bfin_demux_gpio_irq(int_irq, desc);
+ bfin_demux_gpio_irq(desc);
}
#endif
.irq_set_wake = bfin_mac_status_set_wake,
};
-void bfin_demux_mac_status_irq(unsigned int int_err_irq,
- struct irq_desc *inta_desc)
+void bfin_demux_mac_status_irq(struct irq_desc *inta_desc)
{
int i, irq = 0;
u32 status = bfin_read_EMAC_SYSTAT();
}
}
-void bfin_demux_gpio_irq(unsigned int __inta_irq, struct irq_desc *desc)
+void bfin_demux_gpio_irq(struct irq_desc *desc)
{
unsigned int inta_irq = irq_desc_get_irq(desc);
unsigned int irq;
.irq_unmask = unmask_megamod,
};
-static void megamod_irq_cascade(unsigned int __irq, struct irq_desc *desc)
+static void megamod_irq_cascade(struct irq_desc *desc)
{
struct megamod_cascade_data *cascade;
struct megamod_pic *pic;
-#define NR_syscalls 319 /* length of syscall table */
+#define NR_syscalls 321 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
#define __NR_memfd_create 1340
#define __NR_bpf 1341
#define __NR_execveat 1342
+#define __NR_userfaultfd 1343
+#define __NR_membarrier 1344
#endif /* _UAPI_ASM_IA64_UNISTD_H */
data8 sys_memfd_create // 1340
data8 sys_bpf
data8 sys_execveat
+ data8 sys_userfaultfd
+ data8 sys_membarrier
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
* The builtin Amiga hardware interrupt handlers.
*/
-static void ami_int1(unsigned int irq, struct irq_desc *desc)
+static void ami_int1(struct irq_desc *desc)
{
unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
}
}
-static void ami_int3(unsigned int irq, struct irq_desc *desc)
+static void ami_int3(struct irq_desc *desc)
{
unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
}
}
-static void ami_int4(unsigned int irq, struct irq_desc *desc)
+static void ami_int4(struct irq_desc *desc)
{
unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
}
}
-static void ami_int5(unsigned int irq, struct irq_desc *desc)
+static void ami_int5(struct irq_desc *desc)
{
unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
* We need to be careful with the masking/acking due to the side effects
* of masking an interrupt.
*/
-static void intc_external_irq(unsigned int __irq, struct irq_desc *desc)
+static void intc_external_irq(struct irq_desc *desc)
{
- unsigned int irq = irq_desc_get_irq(desc);
-
irq_desc_get_chip(desc)->irq_ack(&desc->irq_data);
- handle_simple_irq(irq, desc);
+ handle_simple_irq(desc);
}
static struct irq_chip intc_irq_chip = {
struct pt_regs *));
extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt);
extern void m68k_setup_irq_controller(struct irq_chip *,
- void (*handle)(unsigned int irq,
- struct irq_desc *desc),
+ void (*handle)(struct irq_desc *desc),
unsigned int irq, unsigned int cnt);
extern unsigned int irq_canonicalize(unsigned int irq);
extern void via_irq_disable(int);
extern void via_nubus_irq_startup(int irq);
extern void via_nubus_irq_shutdown(int irq);
-extern void via1_irq(unsigned int irq, struct irq_desc *desc);
+extern void via1_irq(struct irq_desc *desc);
extern void via1_set_head(int);
extern int via2_scsi_drq_pending(void);
* Baboon interrupt handler. This works a lot like a VIA.
*/
-static void baboon_irq(unsigned int irq, struct irq_desc *desc)
+static void baboon_irq(struct irq_desc *desc)
{
int irq_bit, irq_num;
unsigned char events;
* Handle miscellaneous OSS interrupts.
*/
-static void oss_irq(unsigned int __irq, struct irq_desc *desc)
+static void oss_irq(struct irq_desc *desc)
{
int events = oss->irq_pending &
(OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
* Unlike the VIA/RBV this is on its own autovector interrupt level.
*/
-static void oss_nubus_irq(unsigned int irq, struct irq_desc *desc)
+static void oss_nubus_irq(struct irq_desc *desc)
{
int events, irq_bit, i;
* PSC interrupt handler. It's a lot like the VIA interrupt handler.
*/
-static void psc_irq(unsigned int __irq, struct irq_desc *desc)
+static void psc_irq(struct irq_desc *desc)
{
unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc);
unsigned int irq = irq_desc_get_irq(desc);
* via6522.c :-), disable/pending masks added.
*/
-void via1_irq(unsigned int irq, struct irq_desc *desc)
+void via1_irq(struct irq_desc *desc)
{
int irq_num;
unsigned char irq_bit, events;
} while (events >= irq_bit);
}
-static void via2_irq(unsigned int irq, struct irq_desc *desc)
+static void via2_irq(struct irq_desc *desc)
{
int irq_num;
unsigned char irq_bit, events;
* VIA2 dispatcher as a fast interrupt handler.
*/
-void via_nubus_irq(unsigned int irq, struct irq_desc *desc)
+static void via_nubus_irq(struct irq_desc *desc)
{
int slot_irq;
unsigned char slot_bit, events;
"MOV D0.5,%0\n"
"MOV D1Ar1,%1\n"
"MOV D1RtP,%2\n"
- "MOV D0Ar2,%3\n"
"SWAP A0StP,D0.5\n"
"SWAP PC,D1RtP\n"
"MOV A0StP,D0.5\n"
:
- : "r" (isp), "r" (irq), "r" (desc->handle_irq),
- "r" (desc)
+ : "r" (isp), "r" (desc), "r" (desc->handle_irq)
: "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
"D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
"D0.5"
/* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */
#define DISP(name, base, addr) \
-static void au1000_##name##_dispatch(unsigned int irq, struct irq_desc *d) \
+static void au1000_##name##_dispatch(struct irq_desc *d) \
{ \
unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr)); \
if (likely(r)) \
DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT)
DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT)
-static void alchemy_gpic_dispatch(unsigned int irq, struct irq_desc *d)
+static void alchemy_gpic_dispatch(struct irq_desc *d)
{
int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC);
generic_handle_irq(ALCHEMY_GPIC_INT_BASE + i);
/*
* DB1200/PB1200 CPLD IRQ muxer
*/
-static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
+static void bcsr_csc_handler(struct irq_desc *d)
{
unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
struct irq_chip *chip = irq_desc_get_chip(d);
.name = "ar2315-ahb-error",
};
-static void ar2315_misc_irq_handler(unsigned irq, struct irq_desc *desc)
+static void ar2315_misc_irq_handler(struct irq_desc *desc)
{
u32 pending = ar2315_rst_reg_read(AR2315_ISR) &
ar2315_rst_reg_read(AR2315_IMR);
.name = "ar5312-ahb-error",
};
-static void ar5312_misc_irq_handler(unsigned irq, struct irq_desc *desc)
+static void ar5312_misc_irq_handler(struct irq_desc *desc)
{
u32 pending = ar5312_rst_reg_read(AR5312_ISR) &
ar5312_rst_reg_read(AR5312_IMR);
#include "common.h"
#include "machtypes.h"
-static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ath79_misc_irq_handler(struct irq_desc *desc)
{
void __iomem *base = ath79_reset_base;
u32 pending;
irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler);
}
-static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+static void ar934x_ip2_irq_dispatch(struct irq_desc *desc)
{
u32 status;
irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch);
}
-static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+static void qca955x_ip2_irq_dispatch(struct irq_desc *desc)
{
u32 status;
}
}
-static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+static void qca955x_ip3_irq_dispatch(struct irq_desc *desc)
{
u32 status;
if (irqd_get_trigger_type(irq_data) &
IRQ_TYPE_EDGE_BOTH)
cvmx_write_csr(host_data->raw_reg, 1ull << i);
- generic_handle_irq_desc(irq, desc);
+ generic_handle_irq_desc(desc);
}
}
u32 msa_disabled_exits;
u32 flush_dcache_exits;
u32 halt_successful_poll;
+ u32 halt_attempted_poll;
u32 halt_wakeup;
};
#include <asm/mach-netlogic/multi-node.h>
struct irq_desc;
-void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc);
-void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc);
+void nlm_smp_function_ipi_handler(struct irq_desc *desc);
+void nlm_smp_resched_ipi_handler(struct irq_desc *desc);
void nlm_smp_irq_init(int hwcpuid);
void nlm_boot_secondary_cpus(void);
int nlm_wakeup_secondary_cpus(void);
writel(mask, reg);
}
-static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
+static void jz_gpio_irq_demux_handler(struct irq_desc *desc)
{
uint32_t flag;
unsigned int gpio_irq;
{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
+ { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
{NULL}
};
}
/* IRQ_IPI_SMP_FUNCTION Handler */
-void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc)
+void nlm_smp_function_ipi_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
clear_c0_eimr(irq);
}
/* IRQ_IPI_SMP_RESCHEDULE handler */
-void nlm_smp_resched_ipi_handler(unsigned int __irq, struct irq_desc *desc)
+void nlm_smp_resched_ipi_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
clear_c0_eimr(irq);
return 0;
}
-static void ar2315_pci_irq_handler(unsigned irq, struct irq_desc *desc)
+static void ar2315_pci_irq_handler(struct irq_desc *desc)
{
struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc);
u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) &
.write = ar71xx_pci_write_config,
};
-static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ar71xx_pci_irq_handler(struct irq_desc *desc)
{
struct ar71xx_pci_controller *apc;
void __iomem *base = ath79_reset_base;
.write = ar724x_pci_write,
};
-static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ar724x_pci_irq_handler(struct irq_desc *desc)
{
struct ar724x_pci_controller *apc;
void __iomem *base;
rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA);
}
-static void rt3883_pci_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void rt3883_pci_irq_handler(struct irq_desc *desc)
{
struct rt3883_pci_controller *rpc;
u32 pending;
return CP0_LEGACY_COMPARE_IRQ;
}
-static void ralink_intc_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ralink_intc_irq_handler(struct irq_desc *desc)
{
u32 pending = rt_intc_r32(INTC_REG_STATUS0);
endif
ifdef CONFIG_CPU_BIG_ENDIAN
BOOTCFLAGS += -mbig-endian
+else
+BOOTCFLAGS += -mlittle-endian
+BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
endif
BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
u32 dec_exits;
u32 ext_intr_exits;
u32 halt_successful_poll;
+ u32 halt_attempted_poll;
u32 halt_wakeup;
u32 dbell_exits;
u32 gdbell_exits;
#ifdef CONFIG_QUICC_ENGINE
void qe_ic_init(struct device_node *node, unsigned int flags,
- void (*low_handler)(unsigned int irq, struct irq_desc *desc),
- void (*high_handler)(unsigned int irq, struct irq_desc *desc));
+ void (*low_handler)(struct irq_desc *desc),
+ void (*high_handler)(struct irq_desc *desc));
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
#else
static inline void qe_ic_init(struct device_node *node, unsigned int flags,
- void (*low_handler)(unsigned int irq, struct irq_desc *desc),
- void (*high_handler)(unsigned int irq, struct irq_desc *desc))
+ void (*low_handler)(struct irq_desc *desc),
+ void (*high_handler)(struct irq_desc *desc))
{}
static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{ return 0; }
int qe_ic_set_priority(unsigned int virq, unsigned int priority);
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
-static inline void qe_ic_cascade_low_ipic(unsigned int irq,
- struct irq_desc *desc)
+static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
generic_handle_irq(cascade_irq);
}
-static inline void qe_ic_cascade_high_ipic(unsigned int irq,
- struct irq_desc *desc)
+static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
generic_handle_irq(cascade_irq);
}
-static inline void qe_ic_cascade_low_mpic(unsigned int irq,
- struct irq_desc *desc)
+static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
chip->irq_eoi(&desc->irq_data);
}
-static inline void qe_ic_cascade_high_mpic(unsigned int irq,
- struct irq_desc *desc)
+static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
chip->irq_eoi(&desc->irq_data);
}
-static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
- struct irq_desc *desc)
+static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq;
SYSCALL_SPU(bpf)
COMPAT_SYS(execveat)
PPC64ONLY(switch_endian)
+SYSCALL_SPU(userfaultfd)
extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary);
extern void tsi108_pci_int_init(struct device_node *node);
-extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
+extern void tsi108_irq_cascade(struct irq_desc *desc);
extern void tsi108_clear_pci_cfg_error(void);
#endif /* _ASM_POWERPC_TSI108_PCI_H */
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 364
+#define __NR_syscalls 365
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
#define __NR_bpf 361
#define __NR_execveat 362
#define __NR_switch_endian 363
+#define __NR_userfaultfd 364
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
chip = irq_data_get_irq_chip(data);
- cpumask_and(mask, data->affinity, map);
+ cpumask_and(mask, irq_data_get_affinity_mask(data), map);
if (cpumask_any(mask) >= nr_cpu_ids) {
pr_warn("Breaking affinity for irq %i\n", irq);
cpumask_copy(mask, map);
#include <asm/udbg.h>
#include <asm/mmu_context.h>
#include <asm/epapr_hcalls.h>
+#include <asm/code-patching.h>
#define DBG(fmt...)
* This is called very early on the boot process, after a minimal
* MMU environment has been set up but before MMU_init is called.
*/
+extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
+
notrace void __init machine_init(u64 dt_ptr)
{
lockdep_init();
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
+ patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
+ patch_instruction(&memset_nocache_branch, PPC_INST_NOP);
+
/* Do some early initialization based on the flat device tree */
early_init_devtree(__va(dt_ptr));
{ "ext_intr", VCPU_STAT(ext_intr_exits) },
{ "queue_intr", VCPU_STAT(queue_intr) },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
+ { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "pf_storage", VCPU_STAT(pf_storage) },
{ "sp_storage", VCPU_STAT(sp_storage) },
{ "dec", VCPU_STAT(dec_exits) },
{ "ext_intr", VCPU_STAT(ext_intr_exits) },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
+ { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "doorbell", VCPU_STAT(dbell_exits) },
{ "guest doorbell", VCPU_STAT(gdbell_exits) },
* Use dcbz on the complete cache lines in the destination
* to set them to zero. This requires that the destination
* area is cacheable. -- paulus
+ *
+ * During early init, cache might not be active yet, so dcbz cannot be used.
+ * We therefore skip the optimised block that uses dcbz. This jump is
+ * replaced by a nop once cache is active. This is done in machine_init()
*/
_GLOBAL(memset)
rlwimi r4,r4,8,16,23
subf r6,r0,r6
cmplwi 0,r4,0
bne 2f /* Use normal procedure if r4 is not zero */
+_GLOBAL(memset_nocache_branch)
+	b	2f		/* Skip optimised block until cache is enabled */
clrlwi r7,r6,32-LG_CACHELINE_BYTES
add r8,r7,r5
* the destination area is cacheable.
* We only use this version if the source and dest don't overlap.
* -- paulus.
+ *
+ * During early init, cache might not be active yet, so dcbz cannot be used.
+ * We therefore jump to generic_memcpy which doesn't use dcbz. This jump is
+ * replaced by a nop once cache is active. This is done in machine_init()
*/
_GLOBAL(memmove)
cmplw 0,r3,r4
/* fall through */
_GLOBAL(memcpy)
+ b generic_memcpy
add r7,r3,r5 /* test if the src & dst overlap */
add r8,r4,r5
cmplw 0,r4,r7
BUG_ON(index >= 4096);
vpn = hpt_vpn(ea, vsid, ssize);
- hash = hpt_hash(vpn, shift, ssize);
hpte_slot_array = get_hpte_slot_array(pmdp);
if (psize == MMU_PAGE_4K) {
/*
valid = hpte_valid(hpte_slot_array, index);
if (valid) {
/* update the hpte bits */
+ hash = hpt_hash(vpn, shift, ssize);
hidx = hpte_hash_index(hpte_slot_array, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
if (!valid) {
unsigned long hpte_group;
+ hash = hpt_hash(vpn, shift, ssize);
/* insert new entry */
pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
new_pmd |= _PAGE_HASHPTE;
return irq_linear_revmap(cpld_pic_host, cpld_irq);
}
-static void
-cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
+static void cpld_pic_cascade(struct irq_desc *desc)
{
+ unsigned int irq;
+
irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
&cpld_regs->pci_mask);
if (irq != NO_IRQ) {
.irq_mask_ack = media5200_irq_mask,
};
-void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
+static void media5200_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
int sub_virq, val;
.irq_set_type = mpc52xx_gpt_irq_set_type,
};
-void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
+static void mpc52xx_gpt_irq_cascade(struct irq_desc *desc)
{
struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc);
int sub_virq;
ctrl_reg |= (type << (22 - (l2irq * 2)));
out_be32(&intr->ctrl, ctrl_reg);
- __irq_set_handler_locked(d->irq, handler);
+ irq_set_handler_locked(d, handler);
return 0;
}
.irq_disable = pq2ads_pci_mask_irq
};
-static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void pq2ads_pci_irq_demux(struct irq_desc *desc)
{
struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc);
u32 stat, mask, pend;
return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL);
}
#ifdef CONFIG_CPM2
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
+static void cpm2_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
}
#ifdef CONFIG_PPC_I8259
-static void mpc85xx_8259_cascade_handler(unsigned int irq,
- struct irq_desc *desc)
+static void mpc85xx_8259_cascade_handler(struct irq_desc *desc)
{
unsigned int cascade_irq = i8259_irq();
generic_handle_irq(cascade_irq);
/* check for any interrupts from the shared IRQ line */
- handle_fasteoi_irq(irq, desc);
+ handle_fasteoi_irq(desc);
}
static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id)
#endif
#ifdef CONFIG_PPC_I8259
-static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void mpc85xx_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
(irq_hw_number_t)i);
}
-void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc)
+static void socrates_fpga_pic_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
unsigned int cascade_irq;
/*
#include <asm/i8259.h>
#ifdef CONFIG_PPC_I8259
-static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void mpc86xx_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
panic("Restart failed\n");
}
-static void cpm_cascade(unsigned int irq, struct irq_desc *desc)
+static void cpm_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq = cpm_get_irq();
dcr_write(msic->dcr_host, dcr_n, val);
}
-static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
+static void axon_msi_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct axon_msic *msic = irq_desc_get_handler_data(desc);
{
}
-static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
+static void iic_ioexc_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct cbe_iic_regs __iomem *node_iic =
(void __iomem *)irq_desc_get_handler_data(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
unsigned long bits, ack;
int cascade;
.xlate = spider_host_xlate,
};
-static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc)
+static void spider_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct spider_pic *pic = irq_desc_get_handler_data(desc);
if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
}
-static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void chrp_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
return irq_linear_revmap(h, irq);
}
-static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
- struct irq_desc *desc)
+static void hlwd_pic_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_domain *irq_domain = irq_desc_get_handler_data(desc);
static phys_addr_t pci_membase;
static u_char *restart;
-static void mvme5100_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void mvme5100_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;
pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), ALLOC_CHUNK);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
}
return;
struct iommu_table *tbl = NULL;
long rc;
+ /*
+ * crashkernel= specifies the kdump kernel's maximum memory at
+ * some offset and there is no guarantee the result is a power
+ * of 2, which will cause errors later.
+ */
+ const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());
+
+ /*
+ * In memory constrained environments, e.g. kdump kernel, the
+ * DMA window can be larger than available memory, which will
+ * cause errors later.
+ */
+ const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory);
+
rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
IOMMU_PAGE_SHIFT_4K,
- pe->table_group.tce32_size,
+ window_size,
POWERNV_IOMMU_DEFAULT_LEVELS, &tbl);
if (rc) {
pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
struct msi_desc *entry;
+ irq_hw_number_t hwirq;
if (WARN_ON(!phb))
return;
for_each_pci_msi_entry(entry, pdev) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&phb->msi_bmp,
- virq_to_hw(entry->irq) - phb->msi_base, 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
}
}
#endif /* CONFIG_PCI_MSI */
dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
of_node_put(parent);
- if (!dn)
+ if (!dn) {
+ dlpar_release_drc(drc_index);
return -EINVAL;
+ }
rc = dlpar_attach_node(dn);
if (rc) {
fwnmi_active = 1;
}
-static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void pseries_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
irqd_set_trigger_type(d, flow_type);
if (flow_type & IRQ_TYPE_LEVEL_LOW)
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
else
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
/* internal IRQ senses are LEVEL_LOW
* EXT IRQ and Port C IRQ senses are programmable
{
struct msi_desc *entry;
struct fsl_msi *msi_data;
+ irq_hw_number_t hwirq;
for_each_pci_msi_entry(entry, pdev) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
msi_data = irq_get_chip_data(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
return;
* should be masked out.
*/
-void gef_pic_cascade(unsigned int irq, struct irq_desc *desc)
+static void gef_pic_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq;
#ifndef __GEF_PIC_H__
#define __GEF_PIC_H__
-
-void gef_pic_cascade(unsigned int, struct irq_desc *);
unsigned int gef_pic_get_irq(void);
void gef_pic_init(struct device_node *);
irqd_set_trigger_type(d, flow_type);
if (flow_type & IRQ_TYPE_LEVEL_LOW) {
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
d->chip = &ipic_level_irq_chip;
} else {
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
d->chip = &ipic_edge_irq_chip;
}
unsigned int siel = in_be32(&siu_reg->sc_siel);
siel |= mpc8xx_irqd_to_bit(d);
out_be32(&siu_reg->sc_siel, siel);
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
}
return 0;
}
}
/* IRQ handler for a secondary MPIC cascaded from another IRQ controller */
-static void mpic_cascade(unsigned int irq, struct irq_desc *desc)
+static void mpic_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct mpic *mpic = irq_desc_get_handler_data(desc);
static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;
for_each_pci_msi_entry(entry, pdev) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
}
return;
{
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+ irq_hw_number_t hwirq;
dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
for_each_pci_msi_entry(entry, dev) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
}
}
void __init qe_ic_init(struct device_node *node, unsigned int flags,
- void (*low_handler)(unsigned int irq, struct irq_desc *desc),
- void (*high_handler)(unsigned int irq, struct irq_desc *desc))
+ void (*low_handler)(struct irq_desc *desc),
+ void (*high_handler)(struct irq_desc *desc))
{
struct qe_ic *qe_ic;
struct resource res;
init_pci_source();
}
-void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc)
+void tsi108_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = get_pci_source();
.xlate = irq_domain_xlate_twocell,
};
-void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
+static void uic_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_data *idata = irq_desc_get_irq_data(desc);
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return;
- server = xics_get_irq_server(d->irq, d->affinity, 0);
+ server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
server = ics_opal_mangle_server(server);
rc = opal_set_xive(hw_irq, server, DEFAULT_PRIORITY);
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return;
- server = xics_get_irq_server(d->irq, d->affinity, 0);
+ server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server,
DEFAULT_PRIORITY);
/*
* Support code for cascading to 8259 interrupt controllers
*/
-static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void xilinx_i8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
u32 exit_validity;
u32 exit_instruction;
u32 halt_successful_poll;
+ u32 halt_attempted_poll;
u32 halt_wakeup;
u32 instruction_lctl;
u32 instruction_lctlg;
{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
+ { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
- atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+ atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
/*
static void __iomem *se7343_irq_regs;
struct irq_domain *se7343_irq_domain;
-static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void se7343_irq_demux(struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(data);
static void __iomem *se7722_irq_regs;
struct irq_domain *se7722_irq_domain;
-static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void se7722_irq_demux(struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(data);
.irq_unmask = enable_se7724_irq,
};
-static void se7724_irq_demux(unsigned int __irq, struct irq_desc *desc)
+static void se7724_irq_demux(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct fpga_irq set = get_fpga_irq(irq);
return virq;
}
-static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void x3proto_gpio_irq_handler(struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(data);
.irq_unmask = hd64461_unmask_irq,
};
-static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void hd64461_irq_demux(struct irq_desc *desc)
{
unsigned short intv = __raw_readw(HD64461_NIRR);
unsigned int ext_irq = HD64461_IRQBASE;
}
/* Handle one or multiple IRQs from the extended interrupt controller */
-static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
+static void leon_handle_ext_irq(struct irq_desc *desc)
{
unsigned int eirq;
struct irq_bucket *p;
};
/* Handle one or multiple IRQs from the PCI core */
-static void grpci1_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
+static void grpci1_pci_flow_irq(struct irq_desc *desc)
{
struct grpci1_priv *priv = grpci1priv;
int i, ack = 0;
};
/* Handle one or multiple IRQs from the PCI core */
-static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
+static void grpci2_pci_flow_irq(struct irq_desc *desc)
{
struct grpci2_priv *priv = grpci2priv;
int i, ack = 0;
* to Linux which just calls handle_level_irq() after clearing the
* MAC INTx Assert status bit associated with this interrupt.
*/
-static void trio_handle_level_irq(unsigned int __irq, struct irq_desc *desc)
+static void trio_handle_level_irq(struct irq_desc *desc)
{
struct pci_controller *controller = irq_desc_get_handler_data(desc);
gxio_trio_context_t *trio_context = controller->trio;
uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
- unsigned int irq = irq_desc_get_irq(desc);
int mac = controller->mac;
unsigned int reg_offset;
uint64_t level_mask;
- handle_level_irq(irq, desc);
+ handle_level_irq(desc);
/*
* Clear the INTx Level status, otherwise future interrupts are
* irq_controller_lock held, and IRQs disabled. Decode the IRQ
* and call the handler.
*/
-static void puv3_gpio_handler(unsigned int __irq, struct irq_desc *desc)
+static void puv3_gpio_handler(struct irq_desc *desc)
{
unsigned int mask, irq;
depends on X86_MCE_INTEL
config X86_LEGACY_VM86
- bool "Legacy VM86 support (obsolete)"
+ bool "Legacy VM86 support"
default n
depends on X86_32
---help---
available to accelerate real mode DOS programs. However, any
recent version of DOSEMU, X, or vbetool should be fully
functional even without kernel VM86 support, as they will all
- fall back to (pretty well performing) software emulation.
+ fall back to software emulation. Nevertheless, if you are using
+ a 16-bit DOS program where 16-bit performance matters, vm86
+ mode might be faster than emulation and you might want to
+ enable this option.
- Anything that works on a 64-bit kernel is unlikely to need
- this option, as 64-bit kernels don't, and can't, support V8086
- mode. This option is also unrelated to 16-bit protected mode
- and is not needed to run most 16-bit programs under Wine.
+ Note that any app that works on a 64-bit kernel is unlikely to
+ need this option, as 64-bit kernels don't, and can't, support
+ V8086 mode. This option is also unrelated to 16-bit protected
+ mode and is not needed to run most 16-bit programs under Wine.
- Enabling this option adds considerable attack surface to the
- kernel and slows down system calls and exception handling.
+ Enabling this option increases the complexity of the kernel
+ and slows down exception handling a tiny bit.
- Unless you use very old userspace or need the last drop of
- performance in your real mode DOS games and can't use KVM,
- say N here.
+ If unsure, say N here.
config VM86
bool
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
+#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
u32 nmi_window_exits;
u32 halt_exits;
u32 halt_successful_poll;
+ u32 halt_attempted_poll;
u32 halt_wakeup;
u32 request_irq_exits;
u32 irq_exits;
struct pv_time_ops {
unsigned long long (*sched_clock)(void);
unsigned long long (*steal_clock)(int cpu);
- unsigned long (*get_tsc_khz)(void);
};
struct pv_cpu_ops {
}
#endif
-#define virt_queued_spin_lock virt_queued_spin_lock
-
-static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifdef CONFIG_PARAVIRT
+#define virt_spin_lock virt_spin_lock
+static inline bool virt_spin_lock(struct qspinlock *lock)
{
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
return false;
- while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
- cpu_relax();
+ /*
+ * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+ * back to a Test-and-Set spinlock, because fair locks have
+ * horrible lock 'holder' preemption issues.
+ */
+
+ do {
+ while (atomic_read(&lock->val) != 0)
+ cpu_relax();
+ } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
return true;
}
+#endif /* CONFIG_PARAVIRT */
#include <asm-generic/qspinlock.h>
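The fallback above is the classic test-and-test-and-set shape: spin on plain reads and only attempt the atomic compare-and-swap once the lock word looks free, so waiters do not keep bouncing the cache line with failed atomics. A standalone C11 sketch of the same idea (illustration only, not the kernel's qspinlock types):

#include <stdatomic.h>

static atomic_int demo_lock;	/* 0 = unlocked, 1 = locked */

static void demo_lock_acquire(void)
{
	int expected;

	do {
		/* Wait with cheap loads until the lock looks free. */
		while (atomic_load_explicit(&demo_lock, memory_order_relaxed))
			;
		expected = 0;
		/* Only now pay for the atomic read-modify-write. */
	} while (!atomic_compare_exchange_weak_explicit(&demo_lock, &expected, 1,
							memory_order_acquire,
							memory_order_relaxed));
}

static void demo_lock_release(void)
{
	atomic_store_explicit(&demo_lock, 0, memory_order_release);
}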
static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
+ unsigned long flags;
+
if (instr[0] != 0x90)
return;
+ local_irq_save(flags);
add_nops(instr + (a->instrlen - a->padlen), a->padlen);
+ sync_core();
+ local_irq_restore(flags);
DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
instr, a->instrlen - a->padlen, a->padlen);
apic_write(APIC_LVTT, lvtt_value);
if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
+ /*
+ * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
+ * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
+ * According to Intel, MFENCE can do the serialization here.
+ */
+ asm volatile("mfence" : : : "memory");
+
printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
return;
}
int pin, ioapic, irq, irq_entry;
const struct cpumask *mask;
struct irq_data *idata;
+ struct irq_chip *chip;
if (skip_ioapic_setup == 1)
return;
else
mask = apic->target_cpus();
- irq_set_affinity(irq, mask);
+ chip = irq_data_get_irq_chip(idata);
+ chip->irq_set_affinity(idata, mask, false);
}
-
}
#endif
err = assign_irq_vector(irq, data, dest);
if (err) {
- struct irq_data *top = irq_get_irq_data(irq);
-
if (assign_irq_vector(irq, data,
- irq_data_get_affinity_mask(top)))
+ irq_data_get_affinity_mask(irq_data)))
pr_err("Failed to recover vector for irq %d\n", irq);
return err;
}
else
printk(KERN_CONT "%d86", c->x86);
- printk(KERN_CONT " (fam: %02x, model: %02x", c->x86, c->x86_model);
+ printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
if (c->x86_mask || c->cpuid_level >= 0)
- printk(KERN_CONT ", stepping: %02x)\n", c->x86_mask);
+ printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask);
else
printk(KERN_CONT ")\n");
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
- struct event_constraint *c1 = cpuc->event_constraint[idx];
+ struct event_constraint *c1 = NULL;
struct event_constraint *c2;
+ if (idx >= 0) /* fake does < 0 */
+ c1 = cpuc->event_constraint[idx];
+
/*
* first time only
* - static constraint: no change across incremental scheduling calls
if (!buf || bts_buffer_is_full(buf, bts))
return;
+ event->hw.itrace_started = 1;
event->hw.state = 0;
if (!buf->snapshot)
return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
}
-static inline int
-execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
{
struct irq_stack *curstk, *irqstk;
- u32 *isp, *prev_esp, arg1, arg2;
+ u32 *isp, *prev_esp, arg1;
curstk = (struct irq_stack *) current_stack();
irqstk = __this_cpu_read(hardirq_stack);
asm volatile("xchgl %%ebx,%%esp \n"
"call *%%edi \n"
"movl %%ebx,%%esp \n"
- : "=a" (arg1), "=d" (arg2), "=b" (isp)
- : "0" (irq), "1" (desc), "2" (isp),
+ : "=a" (arg1), "=b" (isp)
+ : "0" (desc), "1" (isp),
"D" (desc->handle_irq)
: "memory", "cc", "ecx");
return 1;
bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
- unsigned int irq;
- int overflow;
-
- overflow = check_stack_overflow();
+ int overflow = check_stack_overflow();
if (IS_ERR_OR_NULL(desc))
return false;
- irq = irq_desc_get_irq(desc);
- if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
if (unlikely(overflow))
print_stack_overflow();
- generic_handle_irq_desc(irq, desc);
+ generic_handle_irq_desc(desc);
}
return true;
if (unlikely(IS_ERR_OR_NULL(desc)))
return false;
- generic_handle_irq_desc(irq_desc_get_irq(desc), desc);
+ generic_handle_irq_desc(desc);
return true;
}
if (alloc_size > PAGE_SIZE)
new_ldt->entries = vzalloc(alloc_size);
else
- new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);
if (!new_ldt->entries) {
kfree(new_ldt);
if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(ldt->entries);
else
- kfree(ldt->entries);
+ free_page((unsigned long)ldt->entries);
kfree(ldt);
}
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
- *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+ *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
if (!*dev)
*dev = &x86_dma_fallback_dev;
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
+#include <asm/geode.h>
unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
static void __init check_system_tsc_reliable(void)
{
-#ifdef CONFIG_MGEODE_LX
- /* RTSC counts during suspend */
+#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
+ if (is_geode_lx()) {
+ /* RTSC counts during suspend */
#define RTSC_SUSP 0x100
- unsigned long res_low, res_high;
+ unsigned long res_low, res_high;
- rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
- /* Geode_LX - the OLPC CPU has a very reliable TSC */
- if (res_low & RTSC_SUSP)
- tsc_clocksource_reliable = 1;
+ rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+ /* Geode_LX - the OLPC CPU has a very reliable TSC */
+ if (res_low & RTSC_SUSP)
+ tsc_clocksource_reliable = 1;
+ }
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>
+#include <linux/security.h>
#include <asm/uaccess.h>
#include <asm/io.h>
struct pt_regs *regs = current_pt_regs();
unsigned long err = 0;
+ err = security_mmap_addr(0);
+ if (err) {
+ /*
+ * vm86 cannot virtualize the address space, so vm86 users
+ * need to manage the low 1MB themselves using mmap. Given
+ * that BIOS places important data in the first page, vm86
+ * is essentially useless if mmap_min_addr != 0. DOSEMU,
+ * for example, won't even bother trying to use vm86 if it
+ * can't map a page at virtual address 0.
+ *
+ * To reduce the available kernel attack surface, simply
+ * disallow vm86(old) for users who cannot mmap at va 0.
+ *
+ * The implementation of security_mmap_addr will allow
+ * suitably privileged users to map va 0 even if
+ * vm.mmap_min_addr is set above 0, and we want this
+ * behavior for vm86 as well, as it ensures that legacy
+ * tools like vbetool will not fail just because of
+ * vm.mmap_min_addr.
+ */
+ pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
+ current->comm, task_pid_nr(current),
+ from_kuid_munged(&init_user_ns, current_uid()));
+ return -EPERM;
+ }
+
if (!vm86) {
if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
return -ENOMEM;
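The comment above boils down to a simple precondition: vm86(old) is only useful to a process that may map virtual address 0. A hypothetical userspace probe for that precondition (plain C, not part of the patch) could be:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Try to map the zero page, which vm86-based emulators need anyway. */
	void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap at va 0");	/* typically vm.mmap_min_addr > 0 */
		return 1;
	}
	puts("va 0 is mappable; the new vm86(old) check would not reject this process");
	munmap(p, 4096);
	return 0;
}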
memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);
+ set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
if (enable_apicv) {
for (msr = 0x800; msr <= 0x8ff; msr++)
vmx_disable_intercept_msr_read_x2apic(msr);
{ "nmi_window", VCPU_STAT(nmi_window_exits) },
{ "halt_exits", VCPU_STAT(halt_exits) },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
+ { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "hypercalls", VCPU_STAT(hypercalls) },
{ "request_irq", VCPU_STAT(request_irq_exits) },
* This is the Guest timer interrupt handler (hardware interrupt 0). We just
* call the clockevent infrastructure and it does whatever needs doing.
*/
-static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
+static void lguest_time_irq(struct irq_desc *desc)
{
unsigned long flags;
node_set(node, numa_nodes_parsed);
- pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s\n",
+ pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n",
node, pxm,
(unsigned long long) start, (unsigned long long) end - 1,
- hotpluggable ? " hotplug" : "");
+ hotpluggable ? " hotplug" : "",
+ ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : "");
/* Mark hotplug range in memblock. */
if (hotpluggable && memblock_mark_hotplug(start, ma->length))
iv = bip->bip_vec + bip->bip_vcnt;
+ if (bip->bip_vcnt &&
+ bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
+ &bip->bip_vec[bip->bip_vcnt - 1], offset))
+ return 0;
+
iv->bv_page = page;
iv->bv_len = len;
iv->bv_offset = offset;
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);
}
+
+ q->root_blkg = NULL;
+ q->root_rl.blkg = NULL;
}
/*
q->limits.max_integrity_segments)
return false;
+ if (integrity_req_gap_back_merge(req, next->bio))
+ return false;
+
return true;
}
EXPORT_SYMBOL(blk_integrity_merge_rq);
#include "blk.h"
+static bool iovec_gap_to_prv(struct request_queue *q,
+ struct iovec *prv, struct iovec *cur)
+{
+ unsigned long prev_end;
+
+ if (!queue_virt_boundary(q))
+ return false;
+
+ if (prv->iov_base == NULL && prv->iov_len == 0)
+ /* prv is not set - don't check */
+ return false;
+
+ prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
+
+ return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
+ prev_end & queue_virt_boundary(q));