index.txt - File index, Mailing list and Links (this document)
-intel-pstate.txt - Intel pstate cpufreq driver specific file.
-
pcc-cpufreq.txt - PCC cpufreq driver specific file.
compatible = "st,stm32h743-rcc", "st,stm32-rcc";
reg = <0x58024400 0x400>;
#reset-cells = <1>;
- #clock-cells = <2>;
+ #clock-cells = <1>;
clocks = <&clk_hse>, <&clk_lse>, <&clk_i2s_ckin>;
st,syscfg = <&pwrcfg>;
the firmware event log
- linux,sml-size : size of the memory allocated for the firmware event log
+Optional properties:
+
+- powered-while-suspended: present when the TPM is left powered on between
+ suspend and resume (makes the suspend/resume
+ callbacks do nothing).
+
Example (for OpenPower Systems with Nuvoton TPM 2.0 on I2C)
----------------------------------------------------------
This isn't an exhaustive list, but you should add new prefixes to it before
using them to avoid name-space collisions.
-abcn Abracon Corporation
abilis Abilis Systems
+abracon Abracon Corporation
actions Actions Semiconductor Co., Ltd.
active-semi Active-Semi International Inc
ad Avionic Design GmbH
Support for power domains is provided through the :c:member:`pm_domain` field of
|struct device|. This field is a pointer to an object of type
-|struct dev_pm_domain|, defined in :file:`include/linux/pm.h``, providing a set
+|struct dev_pm_domain|, defined in :file:`include/linux/pm.h`, providing a set
of power management callbacks analogous to the subsystem-level and device driver
callbacks that are executed for the given device during all power transitions,
instead of the respective subsystem-level callbacks. Specifically, if a
jeq #14, good /* __NR_rt_sigprocmask */
jeq #13, good /* __NR_rt_sigaction */
jeq #35, good /* __NR_nanosleep */
- bad: ret #0 /* SECCOMP_RET_KILL */
+ bad: ret #0 /* SECCOMP_RET_KILL_THREAD */
good: ret #0x7fff0000 /* SECCOMP_RET_ALLOW */
The above example code can be placed into a file (here called "foo"), and
2: Enable DAD, and disable IPv6 operation if MAC-based duplicate
link-local address has been found.
+ DAD operation and mode on a given interface will be selected according
+ to the maximum value of conf/{all,interface}/accept_dad.
+
force_tllao - BOOLEAN
Enable sending the target link-layer address option even when
responding to a unicast neighbor solicitation.
optimistic_dad - BOOLEAN
Whether to perform Optimistic Duplicate Address Detection (RFC 4429).
- 0: disabled (default)
- 1: enabled
+ 0: disabled (default)
+ 1: enabled
+
+	Optimistic Duplicate Address Detection for the interface will be enabled
+	if at least one of conf/{all,interface}/optimistic_dad is set to 1;
+	otherwise it will be disabled.
use_optimistic - BOOLEAN
If enabled, do not classify optimistic addresses as deprecated during
source address selection. Preferred addresses will still be chosen
before optimistic addresses, subject to other ranking in the source
address selection algorithm.
- 0: disabled (default)
- 1: enabled
+ 0: disabled (default)
+ 1: enabled
+
+	This will be enabled if at least one of
+	conf/{all,interface}/use_optimistic is set to 1, and disabled otherwise.
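	A hypothetical sketch (not kernel code) contrasting the two
	combination rules documented above: accept_dad takes the maximum
	of the "all" and per-interface values, while optimistic_dad and
	use_optimistic are enabled if either knob is set:

	    static inline int effective_accept_dad(int all, int iface)
	    {
	            return all > iface ? all : iface;  /* maximum wins */
	    }

	    static inline int effective_optimistic(int all, int iface)
	    {
	            return all || iface;               /* either knob enables */
	    }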
stable_secret - IPv6 address
This IPv6 address will be used as a secret to generate IPv6
with SR-IOV or soft switches, such as OVS, are possible.
- User-space tools
-
- user space |
- +-------------------------------------------------------------------+
- kernel | Netlink
- |
- +--------------+-------------------------------+
- | Network stack |
- | (Linux) |
- | |
- +----------------------------------------------+
+ User-space tools
+
+ user space |
+ +-------------------------------------------------------------------+
+ kernel | Netlink
+ |
+ +--------------+-------------------------------+
+ | Network stack |
+ | (Linux) |
+ | |
+ +----------------------------------------------+
sw1p2 sw1p4 sw1p6
- sw1p1 + sw1p3 + sw1p5 + eth1
- + | + | + | +
- | | | | | | |
- +--+----+----+----+-+--+----+---+ +-----+-----+
- | Switch driver | | mgmt |
- | (this document) | | driver |
- | | | |
- +--------------+----------------+ +-----------+
- |
- kernel | HW bus (eg PCI)
- +-------------------------------------------------------------------+
- hardware |
- +--------------+---+------------+
- | Switch device (sw1) |
- | +----+ +--------+
- | | v offloaded data path | mgmt port
- | | | |
- +--|----|----+----+----+----+---+
- | | | | | |
- + + + + + +
- p1 p2 p3 p4 p5 p6
-
- front-panel ports
+ sw1p1 + sw1p3 + sw1p5 + eth1
+ + | + | + | +
+ | | | | | | |
+ +--+----+----+----+----+----+---+ +-----+-----+
+ | Switch driver | | mgmt |
+ | (this document) | | driver |
+ | | | |
+ +--------------+----------------+ +-----------+
+ |
+ kernel | HW bus (eg PCI)
+ +-------------------------------------------------------------------+
+ hardware |
+ +--------------+----------------+
+ | Switch device (sw1) |
+ | +----+ +--------+
+ | | v offloaded data path | mgmt port
+ | | | |
+ +--|----|----+----+----+----+---+
+ | | | | | |
+ + + + + + +
+ p1 p2 p3 p4 p5 p6
+
+ front-panel ports
Fig 1.
- reboot-cmd [ SPARC only ]
- rtsig-max
- rtsig-nr
+- seccomp/ ==> Documentation/userspace-api/seccomp_filter.rst
- sem
- sem_next_id [ sysv ipc ]
- sg-big-buff [ generic SCSI device (sg) ]
A seccomp filter may return any of the following values. If multiple
filters exist, the return value for the evaluation of a given system
call will always use the highest precedent value. (For example,
-``SECCOMP_RET_KILL`` will always take precedence.)
+``SECCOMP_RET_KILL_PROCESS`` will always take precedence.)
In precedence order, they are:
-``SECCOMP_RET_KILL``:
+``SECCOMP_RET_KILL_PROCESS``:
+ Results in the entire process exiting immediately without executing
+ the system call. The exit status of the task (``status & 0x7f``)
+ will be ``SIGSYS``, not ``SIGKILL``.
+
+``SECCOMP_RET_KILL_THREAD``:
Results in the task exiting immediately without executing the
system call. The exit status of the task (``status & 0x7f``) will
be ``SIGSYS``, not ``SIGKILL``.
allow use of ptrace, even of other sandboxed processes, without
extreme care; ptracers can use this mechanism to escape.)
+``SECCOMP_RET_LOG``:
+ Results in the system call being executed after it is logged. This
+ should be used by application developers to learn which syscalls their
+ application needs without having to iterate through multiple test and
+ development cycles to build the list.
+
+ This action will only be logged if "log" is present in the
+ actions_logged sysctl string.
+
``SECCOMP_RET_ALLOW``:
Results in the system call being executed.
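As a minimal, self-contained sketch (not part of this patch), the
following program installs a filter that logs getpid(2) via
``SECCOMP_RET_LOG`` and allows everything else. It assumes a v4.14+
kernel and matching uapi headers::

    #include <stddef.h>
    #include <unistd.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>

    int main(void)
    {
            struct sock_filter filter[] = {
                    /* Load the syscall number; a production filter must
                     * also validate seccomp_data->arch before using it. */
                    BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                             offsetof(struct seccomp_data, nr)),
                    /* getpid() falls through to RET_LOG; everything else
                     * skips one instruction to RET_ALLOW. */
                    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_LOG),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
            };
            struct sock_fprog prog = {
                    .len = sizeof(filter) / sizeof(filter[0]),
                    .filter = filter,
            };

            /* Mandatory unless the caller has CAP_SYS_ADMIN. */
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                    return 1;
            if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
                    return 1;

            getpid();  /* runs, and is logged if "log" is in actions_logged */
            return 0;
    }

Whether the logged call actually reaches the log then depends on the
``actions_logged`` sysctl described below.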
and a more generic example of a higher level macro interface for BPF
program generation.
+Sysctls
+=======
+Seccomp's sysctl files can be found in the ``/proc/sys/kernel/seccomp/``
+directory. Here's a description of each file in that directory:
+
+``actions_avail``:
+ A read-only ordered list of seccomp return values (refer to the
+ ``SECCOMP_RET_*`` macros above) in string form. The ordering, from
+ left-to-right, is the least permissive return value to the most
+ permissive return value.
+
+ The list represents the set of seccomp return values supported
+ by the kernel. A userspace program may use this list to
+	determine if the actions found in ``seccomp.h``, when the
+	program was built, differ from the set of actions actually
+ supported in the current running kernel.
+
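	For example, a kernel that supports every action above reports,
	from least to most permissive::

	    kill_process kill_thread trap errno trace log allow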
+``actions_logged``:
+ A read-write ordered list of seccomp return values (refer to the
+ ``SECCOMP_RET_*`` macros above) that are allowed to be logged. Writes
+ to the file do not need to be in ordered form but reads from the file
+ will be ordered in the same way as the actions_avail sysctl.
+
+ It is important to note that the value of ``actions_logged`` does not
+ prevent certain actions from being logged when the audit subsystem is
+ configured to audit a task. If the action is not found in
+	the ``actions_logged`` list, the final decision on whether to audit
+	the action for that task is left up to the audit subsystem for all
+	seccomp return values other than ``SECCOMP_RET_ALLOW``.
+
+ The ``allow`` string is not accepted in the ``actions_logged`` sysctl
+ as it is not possible to log ``SECCOMP_RET_ALLOW`` actions. Attempting
+ to write ``allow`` to the sysctl will result in an EINVAL being
+ returned.
Adding architecture support
===========================
S: Maintained
F: drivers/acpi/arm64
+ACPI PMIC DRIVERS
+M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: Len Brown <lenb@kernel.org>
+R: Andy Shevchenko <andy@infradead.org>
+R: Mika Westerberg <mika.westerberg@linux.intel.com>
+L: linux-acpi@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-acpi/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+B: https://bugzilla.kernel.org
+S: Supported
+F: drivers/acpi/pmic/
+
ACPI THERMAL DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
F: drivers/scsi/bnx2i/
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M: Yuval Mintz <Yuval.Mintz@cavium.com>
M: Ariel Elior <ariel.elior@cavium.com>
M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
M: Stefan Schmidt <stefan@osg.samsung.com>
L: linux-wpan@vger.kernel.org
W: http://wpan.cakelab.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git
S: Maintained
F: net/ieee802154/
F: net/mac802154/
F: drivers/scsi/qedi/
QLOGIC QL4xxx ETHERNET DRIVER
-M: Yuval Mintz <Yuval.Mintz@cavium.com>
M: Ariel Elior <Ariel.Elior@cavium.com>
M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Fearless Coyote
# *DOCUMENTATION*
PHONY += kselftest
kselftest:
- $(Q)$(MAKE) -C tools/testing/selftests run_tests
+ $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests run_tests
PHONY += kselftest-clean
kselftest-clean:
- $(Q)$(MAKE) -C tools/testing/selftests clean
+ $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests clean
PHONY += kselftest-merge
kselftest-merge:
#endif
-#define copy_segments(tsk, mm) do { } while (0)
-#define release_segments(mm) do { } while (0)
-
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_UPROBE 3 /* breakpointed or singlestepping */
-#define TIF_FSCHECK 4 /* Check FS is USER_DS on return */
-#define TIF_SYSCALL_TRACE 5 /* syscall trace active */
-#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
-#define TIF_SYSCALL_TRACEPOINT 7 /* syscall tracepoint instrumentation */
-#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
+#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_UPROBE (1 << TIF_UPROBE)
-#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
/*
* Change these and you break ASM code in entry-common.S
*/
-#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_FSCHECK)
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE)
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
{
current_thread_info()->addr_limit = fs;
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
- /* On user-mode return, check fs is correct */
- set_thread_flag(TIF_FSCHECK);
}
#define segment_eq(a, b) ((a) == (b))
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
+#include <asm/memory.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif
UNWIND(.fnstart )
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
+ ldr r2, [tsk, #TI_ADDR_LIMIT]
+ cmp r2, #TASK_SIZE
+ blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK
- bne fast_work_pending
- tst r1, #_TIF_WORK_MASK
+ tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
bne fast_work_pending
+
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
UNWIND(.cantunwind )
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
disable_irq_notrace @ disable interrupts
+ ldr r2, [tsk, #TI_ADDR_LIMIT]
+ cmp r2, #TASK_SIZE
+ blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK
- bne fast_work_pending
- tst r1, #_TIF_WORK_MASK
+ tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
beq no_work_pending
UNWIND(.fnend )
ENDPROC(ret_fast_syscall)
/* Slower path - fall through to work_pending */
-fast_work_pending:
#endif
tst r1, #_TIF_SYSCALL_WORK
ret_slow_syscall:
disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
+ ldr r2, [tsk, #TI_ADDR_LIMIT]
+ cmp r2, #TASK_SIZE
+ blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne slow_work_pending
* Update the trace code with the current status.
*/
trace_hardirqs_off();
-
- /* Check valid user FS if needed */
- addr_limit_user_check();
-
do {
if (likely(thread_flags & _TIF_NEED_RESCHED)) {
schedule();
return page;
}
+
+/* Defer to generic check */
+asmlinkage void addr_limit_check_failed(void)
+{
+ addr_limit_user_check();
+}
KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
+KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
+KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
+
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
AS += -EB
LD += -EB
+LDFLAGS += -maarch64linuxb
UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
AS += -EL
LD += -EL
+LDFLAGS += -maarch64linux
UTS_MACHINE := aarch64
endif
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
-#define __ALIGN .align 4
-#define __ALIGN_STR ".align 4"
+#define __ALIGN .align 2
+#define __ALIGN_STR ".align 2"
#endif
/* Find an entry in the third-level page table. */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_phys(dir,addr) (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
+#define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
}
EXPORT_SYMBOL(kernel_neon_end);
+#ifdef CONFIG_EFI
+
static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
kernel_neon_end();
}
+#endif /* CONFIG_EFI */
+
#endif /* CONFIG_KERNEL_MODE_NEON */
#ifdef CONFIG_CPU_PM
* booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
+ msr SPsel, #1 // We want to use SP_EL{1,2}
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.eq 1f
*/
trace_hardirqs_off();
- /* Check valid user FS if needed */
- addr_limit_user_check();
-
do {
+ /* Check valid user FS if needed */
+ addr_limit_user_check();
+
if (thread_flags & _TIF_NEED_RESCHED) {
schedule();
} else {
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
- { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "unknown 8" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
{
}
-#define copy_segments(tsk, mm) do { } while (0)
-#define release_segments(mm) do { } while (0)
-
/*
* saved kernel SP and DP of a blocked thread.
*/
extern asmlinkage void save_user_regs(struct user_context *target);
extern asmlinkage void *restore_user_regs(const struct user_context *target, ...);
-#define copy_segments(tsk, mm) do { } while (0)
-#define release_segments(mm) do { } while (0)
-#define forget_segments() do { } while (0)
-
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc)
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
-/* Copy and release all segment info associated with a VM */
-extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
-extern void release_segments(struct mm_struct * mm);
-
-/* Copy and release all segment info associated with a VM */
-#define copy_segments(p, mm) do { } while (0)
-#define release_segments(mm) do { } while (0)
-
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.lr)
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
{
}
-#define copy_segments(tsk, mm) do { } while (0)
-#define release_segments(mm) do { } while (0)
-
/*
* Return saved PC of a blocked thread.
*/
# Endianness selection
choice
prompt "Endianness selection"
- default CPU_BIG_ENDIAN
+ default CPU_LITTLE_ENDIAN
help
microblaze architectures can be configured for either little or
big endian formats. Be sure to select the appropriate mode.
generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
+generic-y += kvm_para.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += param.h
unsigned long attrs)
{
#ifdef CONFIG_MMU
- unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
unsigned long pfn;
#include "pci.h"
static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev);
-static const struct ath79_pci_irq *ath79_pci_irq_map __initdata;
-static unsigned ath79_pci_nr_irqs __initdata;
+static const struct ath79_pci_irq *ath79_pci_irq_map;
+static unsigned ath79_pci_nr_irqs;
-static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = {
+static const struct ath79_pci_irq ar71xx_pci_irq_map[] = {
{
.slot = 17,
.pin = 1,
}
};
-static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = {
+static const struct ath79_pci_irq ar724x_pci_irq_map[] = {
{
.slot = 0,
.pin = 1,
}
};
-static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = {
+static const struct ath79_pci_irq qca955x_pci_irq_map[] = {
{
.bus = 0,
.slot = 0,
},
};
-int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
+int pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
{
int irq = -1;
int i;
#define __write_64bit_c0_split(source, sel, val) \
do { \
+ unsigned long long __tmp; \
unsigned long __flags; \
\
local_irq_save(__flags); \
if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
- "dsll\t%L0, %L0, 32\n\t" \
+ "dsll\t%L0, %L1, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
- "dsll\t%M0, %M0, 32\n\t" \
+ "dsll\t%M0, %M1, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source "\n\t" \
".set\tmips0" \
- : : "r" (val)); \
+ : "=&r,r" (__tmp) \
+ : "r,0" (val)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
- "dsll\t%L0, %L0, 32\n\t" \
+ "dsll\t%L0, %L1, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
- "dsll\t%M0, %M0, 32\n\t" \
+ "dsll\t%M0, %M1, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source ", " #sel "\n\t" \
".set\tmips0" \
- : : "r" (val)); \
+ : "=&r,r" (__tmp) \
+ : "r,0" (val)); \
local_irq_restore(__flags); \
} while (0)
return -ENOENT;
}
- if ((unsigned int)event->cpu >= nr_cpumask_bits ||
- (event->cpu >= 0 && !cpu_online(event->cpu)))
+ if (event->cpu >= 0 && !cpu_online(event->cpu))
return -ENODEV;
if (!atomic_inc_not_zero(&active_events)) {
#define INTC PC104PLUS_INTC_IRQ
#define INTD PC104PLUS_INTD_IRQ
-static char irq_tab_capcella[][5] __initdata = {
+static char irq_tab_capcella[][5] = {
[11] = { -1, INT1, INT1, INT1, INT1 },
[12] = { -1, INT2, INT2, INT2, INT2 },
[14] = { -1, INTA, INTB, INTC, INTD }
};
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
return irq_tab_capcella[slot][pin];
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0,
qube_raq_via_board_id_fixup);
-static char irq_tab_qube1[] __initdata = {
+static char irq_tab_qube1[] = {
[COBALT_PCICONF_CPU] = 0,
[COBALT_PCICONF_ETH0] = QUBE1_ETH0_IRQ,
[COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
[COBALT_PCICONF_ETH1] = 0
};
-static char irq_tab_cobalt[] __initdata = {
+static char irq_tab_cobalt[] = {
[COBALT_PCICONF_CPU] = 0,
[COBALT_PCICONF_ETH0] = ETH0_IRQ,
[COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
[COBALT_PCICONF_ETH1] = ETH1_IRQ
};
-static char irq_tab_raq2[] __initdata = {
+static char irq_tab_raq2[] = {
[COBALT_PCICONF_CPU] = 0,
[COBALT_PCICONF_ETH0] = ETH0_IRQ,
[COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ,
[COBALT_PCICONF_ETH1] = ETH1_IRQ
};
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (cobalt_board_id <= COBALT_BRD_ID_QUBE1)
return irq_tab_qube1[slot];
*/
#define MAX_SLOT_NUM 10
-static unsigned char irq_map[][5] __initdata = {
+static unsigned char irq_map[][5] = {
[3] = {0, MARKEINS_PCI_IRQ_INTB, MARKEINS_PCI_IRQ_INTC,
MARKEINS_PCI_IRQ_INTD, 0,},
[4] = {0, MARKEINS_PCI_IRQ_INTA, 0, 0, 0,},
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_EMMA2RH,
emma2rh_pci_host_fixup);
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
return irq_map[slot][pin];
}
/* South bridge slot number is set by the pci probe process */
static u8 sb_slot = 5;
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq = 0;
#define INTB MACEPCI_SHARED0_IRQ
#define INTC MACEPCI_SHARED1_IRQ
#define INTD MACEPCI_SHARED2_IRQ
-static char irq_tab_mace[][5] __initdata = {
+static char irq_tab_mace[][5] = {
/* Dummy INT#A INT#B INT#C INT#D */
{0, 0, 0, 0, 0}, /* This is placeholder row - never used */
{0, SCSI0, SCSI0, SCSI0, SCSI0},
* irqs. I suppose a device without a pin A will thank us for doing it
* right if there exists such a broken piece of crap.
*/
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
return irq_tab_mace[slot][pin];
}
#include <asm/txx9/pci.h>
#include <asm/txx9/jmr3927.h>
-int __init jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
unsigned char irq = pin;
return 0;
}
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
return of_irq_parse_and_map_pci(dev, slot, pin);
}
#define PCID 7
/* all the pci device has the PCIA pin, check the datasheet. */
-static char irq_tab[][5] __initdata = {
+static char irq_tab[][5] = {
/* INTA INTB INTC INTD */
{0, 0, 0, 0, 0}, /* 11: Unused */
{0, 0, 0, 0, 0}, /* 12: Unused */
{0, 0, 0, 0, 0}, /* 27: Unused */
};
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int virq;
pdev->vendor, pdev->device, pdev->irq);
}
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
print_fixup_info(dev);
return dev->irq;
static char pci_irq[5] = {
};
-static char irq_tab[][5] __initdata = {
+static char irq_tab[][5] = {
/* INTA INTB INTC INTD */
{0, 0, 0, 0, 0 }, /* 0: GT64120 PCI bridge */
{0, 0, 0, 0, 0 }, /* 1: Unused */
{0, PCID, PCIA, PCIB, PCIC } /* 21: PCI Slot 4 */
};
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int virq;
virq = irq_tab[slot][pin];
#include <asm/vr41xx/mpc30x.h>
-static const int internal_func_irqs[] __initconst = {
+static const int internal_func_irqs[] = {
VRC4173_CASCADE_IRQ,
VRC4173_AC97_IRQ,
VRC4173_USB_IRQ,
};
-static const int irq_tab_mpc30x[] __initconst = {
+static const int irq_tab_mpc30x[] = {
[12] = VRC4173_PCMCIA1_IRQ,
[13] = VRC4173_PCMCIA2_IRQ,
[29] = MQ200_IRQ,
};
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (slot == 30)
return internal_func_irqs[PCI_FUNC(dev->devfn)];
#if defined(CONFIG_PMC_MSP7120_GW)
/* Garibaldi Board IRQ wiring to PCI slots */
-static char irq_tab[][5] __initdata = {
+static char irq_tab[][5] = {
/* INTA INTB INTC INTD */
{0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
{0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
#elif defined(CONFIG_PMC_MSP7120_EVAL)
/* MSP7120 Eval Board IRQ wiring to PCI slots */
-static char irq_tab[][5] __initdata = {
+static char irq_tab[][5] = {
/* INTA INTB INTC INTD */
{0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
{0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
#else
/* Unknown board -- don't assign any IRQs */
-static char irq_tab[][5] __initdata = {
+static char irq_tab[][5] = {
/* INTA INTB INTC INTD */
{0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
{0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
* RETURNS: IRQ number
*
****************************************************************************/
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
#if !defined(CONFIG_PMC_MSP7120_GW) && !defined(CONFIG_PMC_MSP7120_EVAL)
printk(KERN_WARNING "PCI: unknown board, no PCI IRQs assigned.\n");
#include <asm/txx9/pci.h>
#include <asm/txx9/rbtx4927.h>
-int __init rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
unsigned char irq = pin;
#include <asm/txx9/pci.h>
#include <asm/txx9/rbtx4938.h>
-int __init rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq = tx4938_pcic1_map_irq(dev, slot);
* seem to be a documentation error. At least on my RM200C the Cirrus
* Logic CL-GD5434 VGA is device 3.
*/
-static char irq_tab_rm200[8][5] __initdata = {
+static char irq_tab_rm200[8][5] = {
/* INTA INTB INTC INTD */
{ 0, 0, 0, 0, 0 }, /* EISA bridge */
{ SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
*
* The VGA card is optional for RM300 systems.
*/
-static char irq_tab_rm300d[8][5] __initdata = {
+static char irq_tab_rm300d[8][5] = {
/* INTA INTB INTC INTD */
{ 0, 0, 0, 0, 0 }, /* EISA bridge */
{ SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
{ 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
};
-static char irq_tab_rm300e[5][5] __initdata = {
+static char irq_tab_rm300e[5][5] = {
/* INTA INTB INTC INTD */
{ 0, 0, 0, 0, 0 }, /* HOST bridge */
{ SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
#define INTC PCIT_IRQ_INTC
#define INTD PCIT_IRQ_INTD
-static char irq_tab_pcit[13][5] __initdata = {
+static char irq_tab_pcit[13][5] = {
/* INTA INTB INTC INTD */
{ 0, 0, 0, 0, 0 }, /* HOST bridge */
{ SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 }, /* SCSI */
{ 0, INTA, INTB, INTC, INTD }, /* Slot 5 */
};
-static char irq_tab_pcit_cplus[13][5] __initdata = {
+static char irq_tab_pcit_cplus[13][5] = {
/* INTA INTB INTC INTD */
{ 0, 0, 0, 0, 0 }, /* HOST bridge */
{ 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */
return (csmsr & 0xa0) == 0x20;
}
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
switch (sni_brd_type) {
case SNI_BRD_PCI_TOWER_CPLUS:
#include <asm/vr41xx/tb0219.h>
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq = -1;
#include <asm/vr41xx/giu.h>
#include <asm/vr41xx/tb0226.h>
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq = -1;
#include <asm/vr41xx/tb0287.h>
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
unsigned char bus;
int irq = -1;
arch_initcall(alchemy_pci_init);
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct alchemy_pci_context *ctx = dev->sysdata;
if (ctx && ctx->board_map_irq)
#include <linux/bcma/bcma.h>
#include <bcm47xx.h>
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
return 0;
}
#define LASAT_IRQ_PCIC (LASAT_IRQ_BASE + 7)
#define LASAT_IRQ_PCID (LASAT_IRQ_BASE + 8)
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
switch (slot) {
case 1:
return 0;
}
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
u16 cmd;
u32 val;
} s;
};
-int __initconst (*octeon_pcibios_map_irq)(const struct pci_dev *dev,
- u8 slot, u8 pin);
+int (*octeon_pcibios_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
/**
* as it goes through each bridge.
* Returns Interrupt number for the device
*/
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (octeon_pcibios_map_irq)
return octeon_pcibios_map_irq(dev, slot, pin);
spin_unlock_irqrestore(&rt2880_pci_lock, flags);
}
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
u16 cmd;
int irq = -1;
return err;
}
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
return of_irq_parse_and_map_pci(dev, slot, pin);
}
return pciclk;
}
-int __init tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
+int tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
{
if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4938_pcic1ptr) {
switch (slot) {
((pciclk + 50000) / 100000) % 10);
}
-int __init tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
+int tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
{
if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4939_pcic1ptr) {
switch (slot) {
return -1;
}
-int __init tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq = tx4939_pcic1_map_irq(dev, slot);
return PCI_SLOT(lnkdev->devfn) / 8;
}
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct pci_dev *lnkdev;
int lnkfunc, node;
}
}
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
return get_irq_vector(dev);
}
* as it goes through each bridge.
* Returns Interrupt number for the device
*/
-int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev,
- u8 slot, u8 pin)
+int octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
/*
* The EBH5600 board with the PCI to PCIe bridge mistakenly
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <asm/setup.h>
+
#ifdef CONFIG_MIPS_MT_SMP
#define MIPS_CPU_IPI_RESCHED_IRQ 0 /* SW int 0 for resched */
#define MIPS_CPU_IPI_CALL_IRQ 1 /* SW int 1 for call */
return 0;
}
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int (*txx9_pci_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- return txx9_board_vec->pci_map_irq(dev, slot, pin);
+ return txx9_pci_map_irq(dev, slot, pin);
}
char * (*txx9_board_pcibios_setup)(char *str) __initdata;
txx9_pci_err_action = TXX9_PCI_ERR_IGNORE;
return NULL;
}
+
+ txx9_pci_map_irq = txx9_board_vec->pci_map_irq;
+
return str;
}
}
#endif
-void release_segments(struct mm_struct *mm)
-{
-}
-
void machine_restart(char *cmd)
{
#ifdef CONFIG_KERNEL_DEBUGGER
{
}
-/*
- * we do not have to muck with descriptors here, that is
- * done in switch_mm() as needed.
- */
-void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
-{
-}
-
/*
* this gets called so that we can store lazy state into memory and copy the
* current task into the new thread.
endchoice
+config PARISC_SELF_EXTRACT
+ bool "Build kernel as self-extracting executable"
+ default y
+ help
+ Say Y if you want to build the parisc kernel as a kind of
+ self-extracting executable.
+
+	  If you say N here, the kernel will be compressed with gzip,
+	  which can also be loaded directly by the palo bootloader.
+
+ If you don't know what to do here, say Y.
+
config SMP
bool "Symmetric multi-processing support"
---help---
bzImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+ifdef CONFIG_PARISC_SELF_EXTRACT
vmlinuz: bzImage
$(OBJCOPY) $(boot)/bzImage $@
+else
+vmlinuz: vmlinux
+ @gzip -cf -9 $< > $@
+endif
install:
$(CONFIG_SHELL) $(src)/arch/parisc/install.sh \
KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
-KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs
+KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os
ifndef CONFIG_64BIT
KBUILD_CFLAGS += -mfast-indirect-calls
endif
/* Symbols defined by linker scripts */
extern char input_data[];
extern int input_len;
-extern __le32 output_len; /* at unaligned address, little-endian */
+/* output_len is inserted by the linker possibly at an unaligned address */
+extern __le32 output_len __aligned(1);
extern char _text, _end;
extern char _bss, _ebss;
extern char _startcode_end;
/* wrapper-functions from pdc.c */
int pdc_add_valid(unsigned long address);
+int pdc_instr(unsigned int *instr);
int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len);
int pdc_chassis_disp(unsigned long disp);
int pdc_chassis_warn(unsigned long *warn);
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
+extern int init_per_cpu(int cpuid);
#if defined(CONFIG_SMP)
}
EXPORT_SYMBOL(pdc_add_valid);
+/**
+ * pdc_instr - Get instruction that invokes PDCE_CHECK in HPMC handler.
+ * @instr: Pointer to variable which will get instruction opcode.
+ *
+ * The return value is PDC_OK (0) if the call succeeded.
+ */
+int __init pdc_instr(unsigned int *instr)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_INSTR, 0UL, __pa(pdc_result));
+ convert_to_wide(pdc_result);
+ *instr = pdc_result[0];
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
/**
* pdc_chassis_info - Return chassis information.
* @result: The return buffer.
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/kthread.h>
+#include <linux/initrd.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
}
for (i = 0; i < pdt_status.pdt_entries; i++) {
+ unsigned long addr;
+
report_mem_err(pdt_entry[i]);
+ addr = pdt_entry[i] & PDT_ADDR_PHYS_MASK;
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
+ addr >= initrd_start && addr < initrd_end)
+ pr_crit("CRITICAL: initrd possibly broken "
+ "due to bad memory!\n");
+
/* mark memory page bad */
memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
}
*
* o Enable CPU profiling hooks.
*/
-int init_per_cpu(int cpunum)
+int __init init_per_cpu(int cpunum)
{
int ret;
struct pdc_coproc_cfg coproc_cfg;
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <linux/start_kernel.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/unwind.h>
+#include <asm/smp.h>
static char __initdata command_line[COMMAND_LINE_SIZE];
}
#endif
-extern int init_per_cpu(int cpuid);
extern void collect_boot_cpu_data(void);
void __init setup_arch(char **cmdline_p)
}
arch_initcall(parisc_init);
-void start_parisc(void)
+void __init start_parisc(void)
{
- extern void start_kernel(void);
extern void early_trap_init(void);
int ret, cpunum;
static void __init
smp_cpu_init(int cpunum)
{
- extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */
extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
/* Set modes and Enable floating point coprocessor */
- (void) init_per_cpu(cpunum);
+ init_per_cpu(cpunum);
disable_sr_hashing();
u32 check = 0;
u32 *ivap;
u32 *hpmcp;
- u32 length;
+ u32 length, instr;
if (strcmp((const char *)iva, "cows can fly"))
panic("IVT invalid");
for (i = 0; i < 8; i++)
*ivap++ = 0;
+ /*
+	 * Use the PDC_INSTR firmware function to get the instruction that
+	 * invokes PDCE_CHECK in the HPMC handler. See the programming note on
+	 * page 1-31 of
+ * the PA 1.1 Firmware Architecture document.
+ */
+ if (pdc_instr(&instr) == PDC_OK)
+ ivap[0] = instr;
+
/* Compute Checksum for HPMC handler */
length = os_hpmc_size;
ivap[7] = length;
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>
+#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/assembly.h>
info->prev_sp = sp - 64;
info->prev_ip = 0;
+
+ /* The stack is at the end inside the thread_union
+ * struct. If we reach data, we have reached the
+ * beginning of the stack and should stop unwinding. */
+ if (info->prev_sp >= (unsigned long) task_thread_info(info->t) &&
+ info->prev_sp < ((unsigned long) task_thread_info(info->t)
+ + THREAD_SZ_ALGN)) {
+ info->prev_sp = 0;
+ break;
+ }
+
if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
break;
info->prev_ip = tmp;
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
+#include <linux/hugetlb.h>
#include <asm/traps.h>
struct task_struct *tsk;
struct mm_struct *mm;
unsigned long acc_type;
- int fault;
+ int fault = 0;
unsigned int flags;
if (faulthandler_disabled())
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
+ else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+ VM_FAULT_HWPOISON_LARGE))
goto bad_area;
BUG();
}
if (user_mode(regs)) {
struct siginfo si;
-
- show_signal_msg(regs, code, address, tsk, vma);
+ unsigned int lsb = 0;
switch (code) {
case 15: /* Data TLB miss fault/Data page fault */
si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR;
break;
}
+
+#ifdef CONFIG_MEMORY_FAILURE
+ if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+ printk(KERN_ERR
+ "MCE: Killing %s:%d due to hardware memory corruption fault at %08lx\n",
+ tsk->comm, tsk->pid, address);
+ si.si_signo = SIGBUS;
+ si.si_code = BUS_MCEERR_AR;
+ }
+#endif
+
+ /*
+ * Either small page or large page may be poisoned.
+ * In other words, VM_FAULT_HWPOISON_LARGE and
+ * VM_FAULT_HWPOISON are mutually exclusive.
+ */
+ if (fault & VM_FAULT_HWPOISON_LARGE)
+ lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
+ else if (fault & VM_FAULT_HWPOISON)
+ lsb = PAGE_SHIFT;
+ else
+ show_signal_msg(regs, code, address, tsk, vma);
+ si.si_addr_lsb = lsb;
+
si.si_errno = 0;
si.si_addr = (void __user *) address;
force_sig_info(si.si_signo, &si, current);
CONFIG_LOGO=y
CONFIG_SOUND=m
CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
+CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQUENCER_OSS=m
CONFIG_SND_POWERMAC=m
CONFIG_SND_AOA=m
CONFIG_SND_AOA_FABRIC_LAYOUT=m
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_SOUND=y
CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
+CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
# CONFIG_SND_VERBOSE_PROCFS is not set
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_SEQUENCER_OSS=y
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
+CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
+CONFIG_SND_SEQUENCER=y
CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_USB_USX2Y=y
CONFIG_LOGO=y
CONFIG_SOUND=m
CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
CONFIG_SND_DUMMY=m
CONFIG_SND_POWERMAC=m
CONFIG_SND_AOA=m
CONFIG_LOGO=y
CONFIG_SOUND=m
CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
CONFIG_SND_POWERMAC=m
CONFIG_SND_AOA=m
CONFIG_SND_AOA_FABRIC_LAYOUT=m
CONFIG_LOGO=y
CONFIG_SOUND=m
CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
CONFIG_HID_DRAGONRISE=y
CONFIG_HID_GYRATION=y
CONFIG_HID_TWINHAN=y
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=m
CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_DYNAMIC_MINORS=y
# CONFIG_SND_SUPPORT_OLD_API is not set
CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DEBUG=y
CONFIG_SND_DEBUG_VERBOSE=y
CONFIG_SND_PCM_XRUN_DEBUG=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_SOUND=y
CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
+CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
# CONFIG_SND_VERBOSE_PROCFS is not set
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_SEQUENCER_OSS=y
CONFIG_HID_APPLE=m
CONFIG_HID_WACOM=m
CONFIG_MMC=y
} else if ((ret = eeh_ops->init()))
return ret;
+ /* Initialize PHB PEs */
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
+ eeh_dev_phb_init_dynamic(hose);
+
/* Initialize EEH event */
ret = eeh_event_init();
if (ret)
/* EEH PE for PHB */
eeh_phb_pe_create(phb);
}
-
-/**
- * eeh_dev_phb_init - Create EEH devices for devices included in existing PHBs
- *
- * Scan all the existing PHBs and create EEH devices for their OF
- * nodes and their children OF nodes
- */
-static int __init eeh_dev_phb_init(void)
-{
- struct pci_controller *phb, *tmp;
-
- list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
- eeh_dev_phb_init_dynamic(phb);
-
- return 0;
-}
-
-core_initcall(eeh_dev_phb_init);
* and that can be emulated.
*/
if (!is_conditional_branch(*p->ainsn.insn) &&
- analyse_instr(&op, ®s, *p->ainsn.insn))
+ analyse_instr(&op, ®s, *p->ainsn.insn) == 1) {
+ emulate_update_regs(®s, &op);
nip = regs.nip;
+ }
return nip;
}
* in the appropriate thread structures from live.
*/
- if (tsk != current)
+ if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
return;
if (MSR_TM_SUSPENDED(mfmsr())) {
int machine_check_e500mc(struct pt_regs *regs)
{
unsigned long mcsr = mfspr(SPRN_MCSR);
+ unsigned long pvr = mfspr(SPRN_PVR);
unsigned long reason = mcsr;
int recoverable = 1;
* may still get logged and cause a machine check. We should
* only treat the non-write shadow case as non-recoverable.
*/
- if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
- recoverable = 0;
+ /* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
+ * is not implemented but L1 data cache always runs in write
+ * shadow mode. Hence on data cache parity errors HW will
+ * automatically invalidate the L1 Data Cache.
+ */
+ if (PVR_VER(pvr) != PVR_VER_E6500) {
+ if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
+ recoverable = 0;
+ }
}
if (reason & MCSR_L2MMU_MHIT) {
BEGIN_FTR_SECTION
mtspr SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+/* Move canary into DSISR to check for later */
+BEGIN_FTR_SECTION
+ li r0, 0x7fff
+ mtspr SPRN_HDSISR, r0
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
ld r0, VCPU_GPR(R0)(r4)
ld r4, VCPU_GPR(R4)(r4)
kvmppc_hdsi:
ld r3, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r3)
- cmpwi r0, 0
mfspr r4, SPRN_HDAR
mfspr r6, SPRN_HDSISR
+BEGIN_FTR_SECTION
+ /* Look for DSISR canary. If we find it, retry instruction */
+ cmpdi r6, 0x7fff
+ beq 6f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ cmpwi r0, 0
bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
/* HPTE not found fault or protection fault? */
andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
: "r" (addr), "i" (-EFAULT), "0" (err))
static nokprobe_inline void set_cr0(const struct pt_regs *regs,
- struct instruction_op *op, int rd)
+ struct instruction_op *op)
{
- long val = regs->gpr[rd];
+ long val = op->val;
op->type |= SETCC;
op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
case 13: /* addic. */
imm = (short) instr;
add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
- set_cr0(regs, op, rd);
+ set_cr0(regs, op);
return 1;
case 14: /* addi */
case 28: /* andi. */
op->val = regs->gpr[rd] & (unsigned short) instr;
- set_cr0(regs, op, ra);
+ set_cr0(regs, op);
goto logical_done_nocc;
case 29: /* andis. */
imm = (unsigned short) instr;
op->val = regs->gpr[rd] & (imm << 16);
- set_cr0(regs, op, ra);
+ set_cr0(regs, op);
goto logical_done_nocc;
#ifdef __powerpc64__
op->type = COMPUTE + SETCC;
imm = 0xf0000000UL;
val = regs->gpr[rd];
- op->val = regs->ccr;
+ op->ccval = regs->ccr;
for (sh = 0; sh < 8; ++sh) {
if (instr & (0x80000 >> sh))
- op->val = (op->val & ~imm) |
+ op->ccval = (op->ccval & ~imm) |
(val & imm);
imm >>= 4;
}
goto arith_done;
case 235: /* mullw */
- op->val = (unsigned int) regs->gpr[ra] *
- (unsigned int) regs->gpr[rb];
+ op->val = (long)(int) regs->gpr[ra] *
+ (int) regs->gpr[rb];
+
goto arith_done;
case 266: /* add */
logical_done:
if (instr & 1)
- set_cr0(regs, op, ra);
+ set_cr0(regs, op);
logical_done_nocc:
op->reg = ra;
op->type |= SETREG;
arith_done:
if (instr & 1)
- set_cr0(regs, op, rd);
+ set_cr0(regs, op);
compute_done:
op->reg = rd;
op->type |= SETREG;
u32 pmcs[MAX_HWEVENTS];
int i;
+ if (!ppmu) {
+ pr_info("Performance monitor hardware not registered.\n");
+ return;
+ }
+
if (!ppmu->n_counter)
return;
u64 pir = get_hard_smp_processor_id(cpu);
mtspr(SPRN_LPCR, lpcr_val);
- opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
+
+ /*
+ * Program the LPCR via stop-api only if the deepest stop state
+ * can lose hypervisor context.
+ */
+ if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
+ opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
}
/*
return rc;
}
- of_node_put(dn->parent);
return 0;
}
}
dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
- of_node_put(parent);
if (!dn) {
pr_warn("Failed call to configure-connector, drc index: %x\n",
drc_index);
dlpar_release_drc(drc_index);
+ of_node_put(parent);
return -EINVAL;
}
rc = dlpar_attach_node(dn, parent);
+
+ /* Regardless we are done with parent now */
+ of_node_put(parent);
+
if (rc) {
saved_rc = rc;
pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
return -ENOENT;
dn = dlpar_configure_connector(drc_index, parent_dn);
- if (!dn)
+ if (!dn) {
+ of_node_put(parent_dn);
return -ENOENT;
+ }
rc = dlpar_attach_node(dn, parent_dn);
if (rc)
static inline void pmdp_invalidate(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+ pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
+
+ pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
#endif
}
-static int __init topology_setup(char *str)
-{
- bool enabled;
- int rc;
-
- rc = kstrtobool(str, &enabled);
- if (!rc && !enabled)
- S390_lowcore.machine_flags &= ~MACHINE_FLAG_TOPOLOGY;
- return rc;
-}
-early_param("topology", topology_setup);
-
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
}
/* Check online status of the CPU to which the event is pinned */
- if ((unsigned int)event->cpu >= nr_cpumask_bits ||
- (event->cpu >= 0 && !cpu_online(event->cpu)))
- return -ENODEV;
+ if (event->cpu >= 0) {
+ if ((unsigned int)event->cpu >= nr_cpumask_bits)
+ return -ENODEV;
+ if (!cpu_online(event->cpu))
+ return -ENODEV;
+ }
/* Force reset of idle/hv excludes regardless of what the
* user requested.
#include <linux/workqueue.h>
#include <linux/bootmem.h>
+#include <linux/uaccess.h>
+#include <linux/sysctl.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#define PTF_VERTICAL (1UL)
#define PTF_CHECK (2UL)
+enum {
+ TOPOLOGY_MODE_HW,
+ TOPOLOGY_MODE_SINGLE,
+ TOPOLOGY_MODE_PACKAGE,
+ TOPOLOGY_MODE_UNINITIALIZED
+};
+
struct mask_info {
struct mask_info *next;
unsigned char id;
cpumask_t mask;
};
+static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
cpumask_t mask;
cpumask_copy(&mask, cpumask_of(cpu));
- if (!MACHINE_HAS_TOPOLOGY)
- return mask;
- for (; info; info = info->next) {
- if (cpumask_test_cpu(cpu, &info->mask))
- return info->mask;
+ switch (topology_mode) {
+ case TOPOLOGY_MODE_HW:
+ while (info) {
+ if (cpumask_test_cpu(cpu, &info->mask)) {
+ mask = info->mask;
+ break;
+ }
+ info = info->next;
+ }
+ if (cpumask_empty(&mask))
+ cpumask_copy(&mask, cpumask_of(cpu));
+ break;
+ case TOPOLOGY_MODE_PACKAGE:
+ cpumask_copy(&mask, cpu_present_mask);
+ break;
+ default:
+ /* fallthrough */
+ case TOPOLOGY_MODE_SINGLE:
+ cpumask_copy(&mask, cpumask_of(cpu));
+ break;
}
return mask;
}
int i;
cpumask_copy(&mask, cpumask_of(cpu));
- if (!MACHINE_HAS_TOPOLOGY)
+ if (topology_mode != TOPOLOGY_MODE_HW)
return mask;
cpu -= cpu % (smp_cpu_mtid + 1);
for (i = 0; i <= smp_cpu_mtid; i++)
{
int cpu;
- mutex_lock(&smp_cpu_state_mutex);
for_each_possible_cpu(cpu)
smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
- mutex_unlock(&smp_cpu_state_mutex);
}
static int ptf(unsigned long fc)
static void update_cpu_masks(void)
{
struct cpu_topology_s390 *topo;
- int cpu;
+ int cpu, id;
for_each_possible_cpu(cpu) {
topo = &cpu_topology[cpu];
topo->core_mask = cpu_group_map(&socket_info, cpu);
topo->book_mask = cpu_group_map(&book_info, cpu);
topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
- if (!MACHINE_HAS_TOPOLOGY) {
+ if (topology_mode != TOPOLOGY_MODE_HW) {
+ id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
topo->thread_id = cpu;
topo->core_id = cpu;
- topo->socket_id = cpu;
- topo->book_id = cpu;
- topo->drawer_id = cpu;
+ topo->socket_id = id;
+ topo->book_id = id;
+ topo->drawer_id = id;
if (cpu_present(cpu))
cpumask_set_cpu(cpu, &cpus_with_topology);
}
struct sysinfo_15_1_x *info = tl_info;
int rc = 0;
+ mutex_lock(&smp_cpu_state_mutex);
cpumask_clear(&cpus_with_topology);
if (MACHINE_HAS_TOPOLOGY) {
rc = 1;
update_cpu_masks();
if (!MACHINE_HAS_TOPOLOGY)
topology_update_polarization_simple();
+ mutex_unlock(&smp_cpu_state_mutex);
return rc;
}
schedule_work(&topology_work);
}
+static void topology_flush_work(void)
+{
+ flush_work(&topology_work);
+}
+
static void topology_timer_fn(unsigned long ignored)
{
if (ptf(PTF_CHECK))
struct sysinfo_15_1_x *info;
set_sched_topology(s390_topology);
+ if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
+ if (MACHINE_HAS_TOPOLOGY)
+ topology_mode = TOPOLOGY_MODE_HW;
+ else
+ topology_mode = TOPOLOGY_MODE_SINGLE;
+ }
if (!MACHINE_HAS_TOPOLOGY)
goto out;
tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
__arch_update_cpu_topology();
}
+static inline int topology_get_mode(int enabled)
+{
+ if (!enabled)
+ return TOPOLOGY_MODE_SINGLE;
+ return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
+}
+
+static inline int topology_is_enabled(void)
+{
+ return topology_mode != TOPOLOGY_MODE_SINGLE;
+}
+
+static int __init topology_setup(char *str)
+{
+ bool enabled;
+ int rc;
+
+ rc = kstrtobool(str, &enabled);
+ if (rc)
+ return rc;
+ topology_mode = topology_get_mode(enabled);
+ return 0;
+}
+early_param("topology", topology_setup);
+
+static int topology_ctl_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ unsigned int len;
+ int new_mode;
+ char buf[2];
+
+ if (!*lenp || *ppos) {
+ *lenp = 0;
+ return 0;
+ }
+ if (!write) {
+ strncpy(buf, topology_is_enabled() ? "1\n" : "0\n",
+ ARRAY_SIZE(buf));
+ len = strnlen(buf, ARRAY_SIZE(buf));
+ if (len > *lenp)
+ len = *lenp;
+ if (copy_to_user(buffer, buf, len))
+ return -EFAULT;
+ goto out;
+ }
+ len = *lenp;
+ if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
+ return -EFAULT;
+ if (buf[0] != '0' && buf[0] != '1')
+ return -EINVAL;
+ mutex_lock(&smp_cpu_state_mutex);
+ new_mode = topology_get_mode(buf[0] == '1');
+ if (topology_mode != new_mode) {
+ topology_mode = new_mode;
+ topology_schedule_update();
+ }
+ mutex_unlock(&smp_cpu_state_mutex);
+ topology_flush_work();
+out:
+ *lenp = len;
+ *ppos += len;
+ return 0;
+}
+
+static struct ctl_table topology_ctl_table[] = {
+ {
+ .procname = "topology",
+ .mode = 0644,
+ .proc_handler = topology_ctl_handler,
+ },
+ { },
+};
+
+static struct ctl_table topology_dir_table[] = {
+ {
+ .procname = "s390",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = topology_ctl_table,
+ },
+ { },
+};
+
static int __init topology_init(void)
{
if (MACHINE_HAS_TOPOLOGY)
set_topology_timer();
else
topology_update_polarization_simple();
+ register_sysctl_table(topology_dir_table);
return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);
static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
- unsigned long mask, result;
struct page *head, *page;
+ unsigned long mask;
int refs;
- result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
- mask = result | _SEGMENT_ENTRY_INVALID;
- if ((pmd_val(pmd) & mask) != result)
+ mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
+ if ((pmd_val(pmd) & mask) != 0)
return 0;
VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
-/* Copy and release all segment info associated with a VM */
-#define copy_segments(p, mm) do { } while(0)
-#define release_segments(mm) do { } while(0)
-
/*
* FPU lazy state save handling.
*/
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
-/* Copy and release all segment info associated with a VM */
-#define copy_segments(p, mm) do { } while (0)
-#define release_segments(mm) do { } while (0)
-#define forget_segments() do { } while (0)
/*
* FPU lazy state save handling.
*/
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRC_CCITT=m
CONFIG_CRC7=m
{
char *endp;
unsigned long long maxnodemem;
- long node;
+ unsigned long node;
node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
if (node >= MAX_NUMNODES || *endp != ':')
{
}
-static inline void mm_copy_segments(struct mm_struct *from_mm,
- struct mm_struct *new_mm)
-{
-}
-
#define init_stack (init_thread_union.stack)
/*
#define s3 ((16 + 2 + (3 * 256)) * 4)
/* register macros */
-#define CTX %rdi
+#define CTX %r12
#define RIO %rsi
#define RX0 %rax
#define RX2bh %ch
#define RX3bh %dh
-#define RT0 %rbp
+#define RT0 %rdi
#define RT1 %rsi
#define RT2 %r8
#define RT3 %r9
-#define RT0d %ebp
+#define RT0d %edi
#define RT1d %esi
#define RT2d %r8d
#define RT3d %r9d
ENTRY(__blowfish_enc_blk)
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* %rsi: dst
* %rdx: src
* %rcx: bool, if true: xor output
*/
- movq %rbp, %r11;
+ movq %r12, %r11;
+ movq %rdi, CTX;
movq %rsi, %r10;
movq %rdx, RIO;
round_enc(14);
add_roundkey_enc(16);
- movq %r11, %rbp;
+ movq %r11, %r12;
movq %r10, RIO;
test %cl, %cl;
ENTRY(blowfish_dec_blk)
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* %rsi: dst
* %rdx: src
*/
- movq %rbp, %r11;
+ movq %r12, %r11;
+ movq %rdi, CTX;
movq %rsi, %r10;
movq %rdx, RIO;
movq %r10, RIO;
write_block();
- movq %r11, %rbp;
+ movq %r11, %r12;
ret;
ENDPROC(blowfish_dec_blk)
ENTRY(__blowfish_enc_blk_4way)
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* %rsi: dst
* %rdx: src
* %rcx: bool, if true: xor output
*/
- pushq %rbp;
+ pushq %r12;
pushq %rbx;
pushq %rcx;
- preload_roundkey_enc(0);
-
+	movq %rdi, CTX;

movq %rsi, %r11;
movq %rdx, RIO;
+ preload_roundkey_enc(0);
+
read_block4();
round_enc4(0);
round_enc4(14);
add_preloaded_roundkey4();
- popq %rbp;
+ popq %r12;
movq %r11, RIO;
- test %bpl, %bpl;
+ test %r12b, %r12b;
jnz .L__enc_xor4;
write_block4();
popq %rbx;
- popq %rbp;
+ popq %r12;
ret;
.L__enc_xor4:
xor_block4();
popq %rbx;
- popq %rbp;
+ popq %r12;
ret;
ENDPROC(__blowfish_enc_blk_4way)
ENTRY(blowfish_dec_blk_4way)
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* %rsi: dst
* %rdx: src
*/
- pushq %rbp;
+ pushq %r12;
pushq %rbx;
- preload_roundkey_dec(17);
- movq %rsi, %r11;
+ movq %rdi, CTX;
+	movq %rsi, %r11;
movq %rdx, RIO;
+ preload_roundkey_dec(17);
read_block4();
round_dec4(17);
write_block4();
popq %rbx;
- popq %rbp;
+ popq %r12;
ret;
ENDPROC(blowfish_dec_blk_4way)
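The register renames in this and the following assembly fragments all have the same motivation: %rbp is the frame pointer in kernels built with frame pointers, so using it as scratch breaks stack traces and triggers objtool warnings; each routine is reworked to keep the value in another register or on the stack. The input comments map directly onto the SysV x86-64 calling convention, so the C glue declares these routines roughly as below (a sketch inferred from the register comments; struct bf_ctx is the Blowfish key schedule the code indexes through CTX):

    asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst,
                                       const u8 *src, bool xor);
    asmlinkage void blowfish_dec_blk(struct bf_ctx *ctx, u8 *dst,
                                     const u8 *src);
    asmlinkage void __blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst,
                                            const u8 *src, bool xor);
    asmlinkage void blowfish_dec_blk_4way(struct bf_ctx *ctx, u8 *dst,
                                          const u8 *src);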
#define RCD1bh %dh
#define RT0 %rsi
-#define RT1 %rbp
+#define RT1 %r12
#define RT2 %r8
#define RT0d %esi
-#define RT1d %ebp
+#define RT1d %r12d
#define RT2d %r8d
#define RT2bl %r8b
#define RXOR %r9
-#define RRBP %r10
+#define RR12 %r10
#define RDST %r11
#define RXORd %r9d
* %rdx: src
* %rcx: bool xor
*/
- movq %rbp, RRBP;
+ movq %r12, RR12;
movq %rcx, RXOR;
movq %rsi, RDST;
enc_outunpack(mov, RT1);
- movq RRBP, %rbp;
+ movq RR12, %r12;
ret;
.L__enc_xor:
enc_outunpack(xor, RT1);
- movq RRBP, %rbp;
+ movq RR12, %r12;
ret;
ENDPROC(__camellia_enc_blk)
movl $24, RXORd;
cmovel RXORd, RT2d; /* max */
- movq %rbp, RRBP;
+ movq %r12, RR12;
movq %rsi, RDST;
movq %rdx, RIO;
dec_outunpack();
- movq RRBP, %rbp;
+ movq RR12, %r12;
ret;
ENDPROC(camellia_dec_blk)
*/
pushq %rbx;
- movq %rbp, RRBP;
+ movq %r12, RR12;
movq %rcx, RXOR;
movq %rsi, RDST;
movq %rdx, RIO;
enc_outunpack2(mov, RT2);
- movq RRBP, %rbp;
+ movq RR12, %r12;
popq %rbx;
ret;
.L__enc2_xor:
enc_outunpack2(xor, RT2);
- movq RRBP, %rbp;
+ movq RR12, %r12;
popq %rbx;
ret;
ENDPROC(__camellia_enc_blk_2way)
cmovel RXORd, RT2d; /* max */
movq %rbx, RXOR;
- movq %rbp, RRBP;
+ movq %r12, RR12;
movq %rsi, RDST;
movq %rdx, RIO;
dec_outunpack2();
- movq RRBP, %rbp;
+ movq RR12, %r12;
movq RXOR, %rbx;
ret;
ENDPROC(camellia_dec_blk_2way)
/**********************************************************************
16-way AVX cast5
**********************************************************************/
-#define CTX %rdi
+#define CTX %r15
#define RL1 %xmm0
#define RR1 %xmm1
#define RTMP %xmm15
-#define RID1 %rbp
-#define RID1d %ebp
+#define RID1 %rdi
+#define RID1d %edi
#define RID2 %rsi
#define RID2d %esi
.align 16
__cast5_enc_blk16:
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* RL1: blocks 1 and 2
* RR1: blocks 3 and 4
* RL2: blocks 5 and 6
* RR4: encrypted blocks 15 and 16
*/
- pushq %rbp;
+ pushq %r15;
pushq %rbx;
+ movq %rdi, CTX;
+
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
.L__skip_enc:
popq %rbx;
- popq %rbp;
+ popq %r15;
vmovdqa .Lbswap_mask, RKM;
.align 16
__cast5_dec_blk16:
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* RL1: encrypted blocks 1 and 2
* RR1: encrypted blocks 3 and 4
* RL2: encrypted blocks 5 and 6
* RR4: decrypted blocks 15 and 16
*/
- pushq %rbp;
+ pushq %r15;
pushq %rbx;
+ movq %rdi, CTX;
+
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
vmovdqa .Lbswap_mask, RKM;
popq %rbx;
- popq %rbp;
+ popq %r15;
outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
ENTRY(cast5_ecb_enc_16way)
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* %rsi: dst
* %rdx: src
*/
FRAME_BEGIN
+ pushq %r15;
+ movq %rdi, CTX;
movq %rsi, %r11;
vmovdqu (0*4*4)(%rdx), RL1;
vmovdqu RR4, (6*4*4)(%r11);
vmovdqu RL4, (7*4*4)(%r11);
+ popq %r15;
FRAME_END
ret;
ENDPROC(cast5_ecb_enc_16way)
ENTRY(cast5_ecb_dec_16way)
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* %rsi: dst
* %rdx: src
*/
FRAME_BEGIN
+ pushq %r15;
+
+ movq %rdi, CTX;
movq %rsi, %r11;
vmovdqu (0*4*4)(%rdx), RL1;
vmovdqu RR4, (6*4*4)(%r11);
vmovdqu RL4, (7*4*4)(%r11);
+ popq %r15;
FRAME_END
ret;
ENDPROC(cast5_ecb_dec_16way)
ENTRY(cast5_cbc_dec_16way)
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* %rsi: dst
* %rdx: src
*/
FRAME_BEGIN
-
pushq %r12;
+ pushq %r15;
+ movq %rdi, CTX;
movq %rsi, %r11;
movq %rdx, %r12;
vmovdqu RR4, (6*16)(%r11);
vmovdqu RL4, (7*16)(%r11);
+ popq %r15;
popq %r12;
-
FRAME_END
ret;
ENDPROC(cast5_cbc_dec_16way)
ENTRY(cast5_ctr_16way)
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* %rsi: dst
* %rdx: src
* %rcx: iv (big endian, 64bit)
*/
FRAME_BEGIN
-
pushq %r12;
+ pushq %r15;
+ movq %rdi, CTX;
movq %rsi, %r11;
movq %rdx, %r12;
vmovdqu RR4, (6*16)(%r11);
vmovdqu RL4, (7*16)(%r11);
+ popq %r15;
popq %r12;
-
FRAME_END
ret;
ENDPROC(cast5_ctr_16way)
/**********************************************************************
8-way AVX cast6
**********************************************************************/
-#define CTX %rdi
+#define CTX %r15
#define RA1 %xmm0
#define RB1 %xmm1
#define RTMP %xmm15
-#define RID1 %rbp
-#define RID1d %ebp
+#define RID1 %rdi
+#define RID1d %edi
#define RID2 %rsi
#define RID2d %esi
.align 8
__cast6_enc_blk8:
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
* output:
* RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
*/
- pushq %rbp;
+ pushq %r15;
pushq %rbx;
+ movq %rdi, CTX;
+
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
QBAR(11);
popq %rbx;
- popq %rbp;
+ popq %r15;
vmovdqa .Lbswap_mask, RKM;
.align 8
__cast6_dec_blk8:
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
* output:
* RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
*/
- pushq %rbp;
+ pushq %r15;
pushq %rbx;
+ movq %rdi, CTX;
+
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
QBAR(0);
popq %rbx;
- popq %rbp;
+ popq %r15;
vmovdqa .Lbswap_mask, RKM;
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
ENTRY(cast6_ecb_enc_8way)
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* %rsi: dst
* %rdx: src
*/
FRAME_BEGIN
+ pushq %r15;
+ movq %rdi, CTX;
movq %rsi, %r11;
load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ popq %r15;
FRAME_END
ret;
ENDPROC(cast6_ecb_enc_8way)
ENTRY(cast6_ecb_dec_8way)
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* %rsi: dst
* %rdx: src
*/
FRAME_BEGIN
+ pushq %r15;
+ movq %rdi, CTX;
movq %rsi, %r11;
load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ popq %r15;
FRAME_END
ret;
ENDPROC(cast6_ecb_dec_8way)
ENTRY(cast6_cbc_dec_8way)
/* input:
- * %rdi: ctx, CTX
+ * %rdi: ctx
* %rsi: dst
* %rdx: src
*/
FRAME_BEGIN
-
pushq %r12;
+ pushq %r15;
+ movq %rdi, CTX;
movq %rsi, %r11;
movq %rdx, %r12;
store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ popq %r15;
popq %r12;
-
FRAME_END
ret;
ENDPROC(cast6_cbc_dec_8way)
* %rcx: iv (little endian, 128bit)
*/
FRAME_BEGIN
-
pushq %r12;
+	pushq %r15;
+ movq %rdi, CTX;
movq %rsi, %r11;
movq %rdx, %r12;
store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ popq %r15;
popq %r12;
-
FRAME_END
ret;
ENDPROC(cast6_ctr_8way)
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
FRAME_BEGIN
+ pushq %r15;
+	movq %rdi, CTX;
movq %rsi, %r11;
/* regs <= src, dst <= IVs, regs <= regs xor IVs */
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ popq %r15;
FRAME_END
ret;
ENDPROC(cast6_xts_enc_8way)
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
FRAME_BEGIN
+ pushq %r15;
+	movq %rdi, CTX;
movq %rsi, %r11;
/* regs <= src, dst <= IVs, regs <= regs xor IVs */
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ popq %r15;
FRAME_END
ret;
ENDPROC(cast6_xts_dec_8way)
#define RW2bh %ch
#define RT0 %r15
-#define RT1 %rbp
+#define RT1 %rsi
#define RT2 %r14
#define RT3 %rdx
#define RT0d %r15d
-#define RT1d %ebp
+#define RT1d %esi
#define RT2d %r14d
#define RT3d %edx
* %rsi: dst
* %rdx: src
*/
- pushq %rbp;
pushq %rbx;
pushq %r12;
pushq %r13;
pushq %r14;
pushq %r15;
+ pushq %rsi; /* dst */
+
read_block(%rdx, RL0, RR0);
initial_permutation(RL0, RR0);
round1(32+15, RL0, RR0, dummy2);
final_permutation(RR0, RL0);
+
+	popq %rsi; /* dst */
write_block(%rsi, RR0, RL0);
popq %r15;
popq %r13;
popq %r12;
popq %rbx;
- popq %rbp;
ret;
ENDPROC(des3_ede_x86_64_crypt_blk)
* %rdx: src (3 blocks)
*/
- pushq %rbp;
pushq %rbx;
pushq %r12;
pushq %r13;
pushq %r14;
pushq %r15;
+	pushq %rsi; /* dst */
+
/* load input */
movl 0 * 4(%rdx), RL0d;
movl 1 * 4(%rdx), RR0d;
bswapl RR2d;
bswapl RL2d;
+	popq %rsi; /* dst */
movl RR0d, 0 * 4(%rsi);
movl RL0d, 1 * 4(%rsi);
movl RR1d, 2 * 4(%rsi);
popq %r13;
popq %r12;
popq %rbx;
- popq %rbp;
ret;
ENDPROC(des3_ede_x86_64_crypt_blk_3way)
#define REG_RE %rdx
#define REG_RTA %r12
#define REG_RTB %rbx
-#define REG_T1 %ebp
+#define REG_T1 %r11d
#define xmm_mov vmovups
#define avx2_zeroupper vzeroupper
#define RND_F1 1
ENTRY(\name)
push %rbx
- push %rbp
push %r12
push %r13
push %r14
pop %r14
pop %r13
pop %r12
- pop %rbp
pop %rbx
ret
#define REG_A %ecx
#define REG_B %esi
#define REG_C %edi
-#define REG_D %ebp
+#define REG_D %r12d
#define REG_E %edx
#define REG_T1 %eax
ENTRY(\name)
push %rbx
- push %rbp
push %r12
-	mov	%rsp, %r12
+	push	%rbp
+	mov	%rsp, %rbp
sub $64, %rsp # allocate workspace
and $~15, %rsp # align stack
xor %rax, %rax
rep stosq
- mov %r12, %rsp # deallocate workspace
-
- pop %r12
+ mov %rbp, %rsp # deallocate workspace
pop %rbp
+ pop %r12
pop %rbx
ret
c = %ecx
d = %r8d
e = %edx
-TBL = %rbp
+TBL = %r12
a = %eax
b = %ebx
ENTRY(sha256_transform_avx)
.align 32
pushq %rbx
- pushq %rbp
+ pushq %r12
pushq %r13
pushq %r14
pushq %r15
- pushq %r12
-	mov	%rsp, %r12
+	pushq	%rbp
+	movq	%rsp, %rbp
subq $STACK_SIZE, %rsp # allocate stack space
and $~15, %rsp # align stack pointer
done_hash:
- mov %r12, %rsp
-
- popq %r12
+ mov %rbp, %rsp
+ popq %rbp
popq %r15
popq %r14
popq %r13
- popq %rbp
+ popq %r12
popq %rbx
ret
ENDPROC(sha256_transform_avx)
e = %edx # clobbers NUM_BLKS
y3 = %esi # clobbers INP
-
-TBL = %rbp
SRND = CTX # SRND is same register as CTX
a = %eax
ENTRY(sha256_transform_rorx)
.align 32
pushq %rbx
- pushq %rbp
pushq %r12
pushq %r13
pushq %r14
mov CTX, _CTX(%rsp)
loop0:
- lea K256(%rip), TBL
-
## Load first 16 dwords from two blocks
VMOVDQ 0*32(INP),XTMP0
VMOVDQ 1*32(INP),XTMP1
.align 16
loop1:
- vpaddd 0*32(TBL, SRND), X0, XFER
+ vpaddd K256+0*32(SRND), X0, XFER
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 0*32
- vpaddd 1*32(TBL, SRND), X0, XFER
+ vpaddd K256+1*32(SRND), X0, XFER
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 1*32
- vpaddd 2*32(TBL, SRND), X0, XFER
+ vpaddd K256+2*32(SRND), X0, XFER
vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 2*32
- vpaddd 3*32(TBL, SRND), X0, XFER
+ vpaddd K256+3*32(SRND), X0, XFER
vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 3*32
loop2:
## Do last 16 rounds with no scheduling
- vpaddd 0*32(TBL, SRND), X0, XFER
+ vpaddd K256+0*32(SRND), X0, XFER
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
DO_4ROUNDS _XFER + 0*32
- vpaddd 1*32(TBL, SRND), X1, XFER
+
+ vpaddd K256+1*32(SRND), X1, XFER
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
DO_4ROUNDS _XFER + 1*32
add $2*32, SRND
ja done_hash
do_last_block:
- #### do last block
- lea K256(%rip), TBL
-
VMOVDQ 0*16(INP),XWORD0
VMOVDQ 1*16(INP),XWORD1
VMOVDQ 2*16(INP),XWORD2
popq %r14
popq %r13
popq %r12
- popq %rbp
popq %rbx
ret
ENDPROC(sha256_transform_rorx)
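Folding the table address into the displacement is what eliminates the TBL register here: `vpaddd K256+1*32(SRND), X0, XFER` computes K256 + 32 + SRND, so SRND is the only moving index and %rbp (the old TBL) never has to hold the table pointer across the loop.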
c = %ecx
d = %r8d
e = %edx
-TBL = %rbp
+TBL = %r12
a = %eax
b = %ebx
ENTRY(sha256_transform_ssse3)
.align 32
pushq %rbx
- pushq %rbp
+ pushq %r12
pushq %r13
pushq %r14
pushq %r15
- pushq %r12
-	mov	%rsp, %r12
+	pushq	%rbp
+	mov	%rsp, %rbp
subq $STACK_SIZE, %rsp
and $~15, %rsp
done_hash:
- mov %r12, %rsp
-
- popq %r12
+ mov %rbp, %rsp
+ popq %rbp
popq %r15
popq %r14
popq %r13
- popq %rbp
+ popq %r12
popq %rbx
ret
BYTE_FLIP_MASK = %ymm9
-# 1st arg
-CTX = %rdi
+# 1st arg is %rdi, which is saved to the stack and accessed later via %r12
+CTX1 = %rdi
+CTX2 = %r12
# 2nd arg
INP = %rsi
# 3rd arg
e = %rdx
y3 = %rsi
-TBL = %rbp
+TBL = %rdi # clobbers CTX1
a = %rax
b = %rbx
h = %r11
old_h = %r11
-T1 = %r12
+T1 = %r12 # clobbers CTX2
y0 = %r13
y1 = %r14
y2 = %r15
-y4 = %r12
-
# Local variables (stack frame)
XFER_SIZE = 4*8
SRND_SIZE = 1*8
INP_SIZE = 1*8
INPEND_SIZE = 1*8
+CTX_SIZE = 1*8
RSPSAVE_SIZE = 1*8
-GPRSAVE_SIZE = 6*8
+GPRSAVE_SIZE = 5*8
frame_XFER = 0
frame_SRND = frame_XFER + XFER_SIZE
frame_INP = frame_SRND + SRND_SIZE
frame_INPEND = frame_INP + INP_SIZE
-frame_RSPSAVE = frame_INPEND + INPEND_SIZE
+frame_CTX = frame_INPEND + INPEND_SIZE
+frame_RSPSAVE = frame_CTX + CTX_SIZE
frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE
frame_size = frame_GPRSAVE + GPRSAVE_SIZE
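With the constants above the offsets work out to frame_XFER = 0, frame_SRND = 32, frame_INP = 40, frame_INPEND = 48, frame_CTX = 56, frame_RSPSAVE = 64, and frame_GPRSAVE = 72, for an unchanged frame_size of 112 bytes: the new 8-byte CTX slot is paid for by saving five GPRs instead of six, since %rbp no longer needs a slot.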
mov %rax, frame_RSPSAVE(%rsp)
# Save GPRs
- mov %rbp, frame_GPRSAVE(%rsp)
- mov %rbx, 8*1+frame_GPRSAVE(%rsp)
- mov %r12, 8*2+frame_GPRSAVE(%rsp)
- mov %r13, 8*3+frame_GPRSAVE(%rsp)
- mov %r14, 8*4+frame_GPRSAVE(%rsp)
- mov %r15, 8*5+frame_GPRSAVE(%rsp)
+ mov %rbx, 8*0+frame_GPRSAVE(%rsp)
+ mov %r12, 8*1+frame_GPRSAVE(%rsp)
+ mov %r13, 8*2+frame_GPRSAVE(%rsp)
+ mov %r14, 8*3+frame_GPRSAVE(%rsp)
+ mov %r15, 8*4+frame_GPRSAVE(%rsp)
shl $7, NUM_BLKS # convert to bytes
jz done_hash
mov NUM_BLKS, frame_INPEND(%rsp)
## load initial digest
- mov 8*0(CTX),a
- mov 8*1(CTX),b
- mov 8*2(CTX),c
- mov 8*3(CTX),d
- mov 8*4(CTX),e
- mov 8*5(CTX),f
- mov 8*6(CTX),g
- mov 8*7(CTX),h
+ mov 8*0(CTX1), a
+ mov 8*1(CTX1), b
+ mov 8*2(CTX1), c
+ mov 8*3(CTX1), d
+ mov 8*4(CTX1), e
+ mov 8*5(CTX1), f
+ mov 8*6(CTX1), g
+ mov 8*7(CTX1), h
+
+ # save %rdi (CTX) before it gets clobbered
+ mov %rdi, frame_CTX(%rsp)
vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
subq $1, frame_SRND(%rsp)
jne loop2
- addm 8*0(CTX),a
- addm 8*1(CTX),b
- addm 8*2(CTX),c
- addm 8*3(CTX),d
- addm 8*4(CTX),e
- addm 8*5(CTX),f
- addm 8*6(CTX),g
- addm 8*7(CTX),h
+ mov frame_CTX(%rsp), CTX2
+ addm 8*0(CTX2), a
+ addm 8*1(CTX2), b
+ addm 8*2(CTX2), c
+ addm 8*3(CTX2), d
+ addm 8*4(CTX2), e
+ addm 8*5(CTX2), f
+ addm 8*6(CTX2), g
+ addm 8*7(CTX2), h
mov frame_INP(%rsp), INP
add $128, INP
done_hash:
# Restore GPRs
- mov frame_GPRSAVE(%rsp) ,%rbp
- mov 8*1+frame_GPRSAVE(%rsp) ,%rbx
- mov 8*2+frame_GPRSAVE(%rsp) ,%r12
- mov 8*3+frame_GPRSAVE(%rsp) ,%r13
- mov 8*4+frame_GPRSAVE(%rsp) ,%r14
- mov 8*5+frame_GPRSAVE(%rsp) ,%r15
+ mov 8*0+frame_GPRSAVE(%rsp), %rbx
+ mov 8*1+frame_GPRSAVE(%rsp), %r12
+ mov 8*2+frame_GPRSAVE(%rsp), %r13
+ mov 8*3+frame_GPRSAVE(%rsp), %r14
+ mov 8*4+frame_GPRSAVE(%rsp), %r15
# Restore Stack Pointer
mov frame_RSPSAVE(%rsp), %rsp
#define RT %xmm14
#define RR %xmm15
-#define RID1 %rbp
-#define RID1d %ebp
+#define RID1 %r13
+#define RID1d %r13d
#define RID2 %rsi
#define RID2d %esi
vmovdqu w(CTX), RK1;
- pushq %rbp;
+ pushq %r13;
pushq %rbx;
pushq %rcx;
popq %rcx;
popq %rbx;
- popq %rbp;
+ popq %r13;
outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
vmovdqu (w+4*4)(CTX), RK1;
- pushq %rbp;
+ pushq %r13;
pushq %rbx;
inpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
vmovdqu (w)(CTX), RK1;
popq %rbx;
- popq %rbp;
+ popq %r13;
outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
ksig->ka.sa.sa_restorer)
sp = (unsigned long) ksig->ka.sa.sa_restorer;
- if (fpu->fpstate_active) {
+ if (fpu->initialized) {
unsigned long fx_aligned, math_size;
sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
output, input...) \
{ \
- register void *__sp asm(_ASM_SP); \
asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
"call %P[new2]", feature2) \
- : output, "+r" (__sp) \
+ : output, ASM_CALL_CONSTRAINT \
: [old] "i" (oldfunc), [new1] "i" (newfunc1), \
[new2] "i" (newfunc2), ## input); \
}
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
+#ifndef __ASSEMBLY__
+/*
+ * This output constraint should be used for any inline asm which has a "call"
+ * instruction. Otherwise the asm may be inserted before the frame pointer
+ * gets set up by the containing function. If you forget to do this, objtool
+ * may print a "call without frame pointer save/setup" warning.
+ */
+register unsigned int __asm_call_sp asm("esp");
+#define ASM_CALL_CONSTRAINT "+r" (__asm_call_sp)
+#endif
+
#endif /* _ASM_X86_ASM_H */
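A usage sketch: any inline asm that contains a `call` lists the constraint among its outputs, which forces the compiler to establish the frame pointer before placing the asm. Here `my_helper` is a made-up assembly routine, the example assumes x86-64 argument passing, and the clobber list is abbreviated:

    static inline unsigned long call_my_helper(unsigned long arg)
    {
        unsigned long ret;

        asm volatile("call my_helper"
                     : "=a" (ret), ASM_CALL_CONSTRAINT
                     : "D" (arg)        /* first argument in %rdi */
                     : "memory");
        return ret;
    }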
/*
* High level FPU state handling functions:
*/
-extern void fpu__activate_curr(struct fpu *fpu);
-extern void fpu__activate_fpstate_read(struct fpu *fpu);
-extern void fpu__activate_fpstate_write(struct fpu *fpu);
-extern void fpu__current_fpstate_write_begin(void);
-extern void fpu__current_fpstate_write_end(void);
+extern void fpu__initialize(struct fpu *fpu);
+extern void fpu__prepare_read(struct fpu *fpu);
+extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
err; \
})
-#define check_insn(insn, output, input...) \
-({ \
- int err; \
+#define kernel_insn(insn, output, input...) \
asm volatile("1:" #insn "\n\t" \
"2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl $-1,%[err]\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
- : [err] "=r" (err), output \
- : "0"(0), input); \
- err; \
-})
+ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \
+ : output : input)
static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
- int err;
-
if (IS_ENABLED(CONFIG_X86_32)) {
- err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+ kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
- err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+ kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
/* See comment in copy_fxregs_to_kernel() below. */
- err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
+ kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
}
}
- /* Copying from a kernel buffer to FPU registers should never fail: */
- WARN_ON_FPU(err);
}
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
- int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-
- WARN_ON_FPU(err);
+ kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline int copy_user_to_fregs(struct fregs_state __user *fx)
* Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
* XSAVE area format.
*/
-#define XSTATE_XRESTORE(st, lmask, hmask, err) \
+#define XSTATE_XRESTORE(st, lmask, hmask) \
asm volatile(ALTERNATIVE(XRSTOR, \
XRSTORS, X86_FEATURE_XSAVES) \
"\n" \
- "xor %[err], %[err]\n" \
"3:\n" \
- ".pushsection .fixup,\"ax\"\n" \
- "4: movl $-2, %[err]\n" \
- "jmp 3b\n" \
- ".popsection\n" \
- _ASM_EXTABLE(661b, 4b) \
- : [err] "=r" (err) \
+ _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
+ : \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
else
XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
- /* We should never fault when copying from a kernel buffer: */
+ /*
+ * We should never fault when copying from a kernel buffer, and the FPU
+ * state we set at boot time should be valid.
+ */
WARN_ON_FPU(err);
}
u32 hmask = mask >> 32;
int err;
- WARN_ON(!alternatives_patched);
+ WARN_ON_FPU(!alternatives_patched);
XSTATE_XSAVE(xstate, lmask, hmask, err);
{
u32 lmask = mask;
u32 hmask = mask >> 32;
- int err;
-
- XSTATE_XRESTORE(xstate, lmask, hmask, err);
- /* We should never fault when copying from a kernel buffer: */
- WARN_ON_FPU(err);
+ XSTATE_XRESTORE(xstate, lmask, hmask);
}
/*
*/
static inline void fpregs_deactivate(struct fpu *fpu)
{
- WARN_ON_FPU(!fpu->fpregs_active);
-
- fpu->fpregs_active = 0;
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
trace_x86_fpu_regs_deactivated(fpu);
}
static inline void fpregs_activate(struct fpu *fpu)
{
- WARN_ON_FPU(fpu->fpregs_active);
-
- fpu->fpregs_active = 1;
this_cpu_write(fpu_fpregs_owner_ctx, fpu);
trace_x86_fpu_regs_activated(fpu);
}
-/*
- * The question "does this thread have fpu access?"
- * is slightly racy, since preemption could come in
- * and revoke it immediately after the test.
- *
- * However, even in that very unlikely scenario,
- * we can just assume we have FPU access - typically
- * to save the FP state - we'll just take a #NM
- * fault and get the FPU access back.
- */
-static inline int fpregs_active(void)
-{
- return current->thread.fpu.fpregs_active;
-}
-
/*
* FPU state switching for scheduling.
*
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
- if (old_fpu->fpregs_active) {
+ if (old_fpu->initialized) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
else
old_fpu->last_cpu = cpu;
/* But leave fpu_fpregs_owner_ctx! */
- old_fpu->fpregs_active = 0;
trace_x86_fpu_regs_deactivated(old_fpu);
} else
old_fpu->last_cpu = -1;
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
bool preload = static_cpu_has(X86_FEATURE_FPU) &&
- new_fpu->fpstate_active;
+ new_fpu->initialized;
if (preload) {
if (!fpregs_state_valid(new_fpu, cpu))
	struct fpu *fpu = &current->thread.fpu;
preempt_disable();
- if (!fpregs_active())
- fpregs_activate(fpu);
+ fpregs_activate(fpu);
preempt_enable();
}
/* Default value for fxregs_state.mxcsr: */
#define MXCSR_DEFAULT 0x1f80
+/* Copy both mxcsr & mxcsr_flags with a single u64 memcpy: */
+#define MXCSR_AND_FLAGS_SIZE sizeof(u64)
+
/*
* Software based FPU emulation state. This is arbitrary really,
* it matches the x87 format to make it easier to understand:
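The MXCSR_AND_FLAGS_SIZE constant above works because mxcsr and the 32-bit word that follows it are adjacent in struct fxregs_state, so both travel in a single 8-byte copy. A sketch of the intended use, assuming kbuf + offset points at the mxcsr position of a destination blob:

    memcpy(kbuf + offset, &xsave->i387.mxcsr, MXCSR_AND_FLAGS_SIZE);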
unsigned int last_cpu;
/*
- * @fpstate_active:
+ * @initialized:
*
- * This flag indicates whether this context is active: if the task
+ * This flag indicates whether this context is initialized: if the task
* is not running then we can restore from this context, if the task
* is running then we should save into this context.
*/
- unsigned char fpstate_active;
-
- /*
- * @fpregs_active:
- *
- * This flag determines whether a given context is actively
- * loaded into the FPU's registers and that those registers
- * represent the task's current FPU state.
- *
- * Note the interaction with fpstate_active:
- *
- * # task does not use the FPU:
- * fpstate_active == 0
- *
- * # task uses the FPU and regs are active:
- * fpstate_active == 1 && fpregs_active == 1
- *
- * # the regs are inactive but still match fpstate:
- * fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
- *
- * The third state is what we use for the lazy restore optimization
- * on lazy-switching CPUs.
- */
- unsigned char fpregs_active;
+ unsigned char initialized;
/*
* @state:
void *get_xsave_addr(struct xregs_state *xsave, int xstate);
const void *get_xsave_field_ptr(int xstate_field);
int using_compacted_format(void);
-int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
- void __user *ubuf, struct xregs_state *xsave);
-int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
- struct xregs_state *xsave);
+int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
+int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
+int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
+int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
+
+/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
+extern int validate_xstate_header(const struct xstate_header *hdr);
+
#endif
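The explicit offset/size parameters let regset code copy out an arbitrary window of the xstate. A sketch of a ptrace-style read path built on the new helpers (function name and error handling are abbreviated; pos/count follow the regset convention):

    static int xstate_read(struct xregs_state *xsave, void *kbuf,
                           void __user *ubuf, unsigned int pos,
                           unsigned int count)
    {
        if (kbuf)
            return copy_xstate_to_kernel(kbuf, xsave, pos, count);
        return copy_xstate_to_user(ubuf, xsave, pos, count);
    }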
return __pkru_allows_pkey(vma_pkey(vma), write);
}
+/*
+ * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
+ * bits. This serves two purposes. It prevents a nasty situation in
+ * which PCID-unaware code saves CR3, loads some other value (with PCID
+ * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
+ * the saved ASID was nonzero. It also means that any bugs involving
+ * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
+ * deterministically.
+ */
+
+static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
+{
+ if (static_cpu_has(X86_FEATURE_PCID)) {
+ VM_WARN_ON_ONCE(asid > 4094);
+ return __sme_pa(mm->pgd) | (asid + 1);
+ } else {
+ VM_WARN_ON_ONCE(asid != 0);
+ return __sme_pa(mm->pgd);
+ }
+}
+
+static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
+{
+ VM_WARN_ON_ONCE(asid > 4094);
+ return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
+}
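Concretely, ASID 0 is loaded as PCID 1 and ASID 4094 as PCID 4095, the architectural maximum, which is why both helpers warn on asid > 4094. The context-switch path then picks one of the two, roughly as follows (a sketch; next, new_asid and need_flush come from the caller's ASID bookkeeping):

    if (need_flush)
        write_cr3(build_cr3(next, new_asid));
    else
        write_cr3(build_cr3_noflush(next, new_asid));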
/*
* This can be used from process context to figure out what the value of
*/
static inline unsigned long __get_current_cr3_fast(void)
{
- unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
-
- if (static_cpu_has(X86_FEATURE_PCID))
- cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+ unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
+ this_cpu_read(cpu_tlbstate.loaded_mm_asid));
/* For now, be very restrictive about when this can be called. */
VM_WARN_ON(in_nmi() || preemptible());
u64 input_address = input ? virt_to_phys(input) : 0;
u64 output_address = output ? virt_to_phys(output) : 0;
u64 hv_status;
- register void *__sp asm(_ASM_SP);
#ifdef CONFIG_X86_64
if (!hv_hypercall_pg)
__asm__ __volatile__("mov %4, %%r8\n"
"call *%5"
- : "=a" (hv_status), "+r" (__sp),
+ : "=a" (hv_status), ASM_CALL_CONSTRAINT,
"+c" (control), "+d" (input_address)
: "r" (output_address), "m" (hv_hypercall_pg)
: "cc", "memory", "r8", "r9", "r10", "r11");
__asm__ __volatile__("call *%7"
: "=A" (hv_status),
- "+c" (input_address_lo), "+r" (__sp)
+ "+c" (input_address_lo), ASM_CALL_CONSTRAINT
: "A" (control),
"b" (input_address_hi),
"D"(output_address_hi), "S"(output_address_lo),
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
- register void *__sp asm(_ASM_SP);
#ifdef CONFIG_X86_64
{
__asm__ __volatile__("call *%4"
- : "=a" (hv_status), "+r" (__sp),
+ : "=a" (hv_status), ASM_CALL_CONSTRAINT,
"+c" (control), "+d" (input1)
: "m" (hv_hypercall_pg)
: "cc", "r8", "r9", "r10", "r11");
__asm__ __volatile__ ("call *%5"
: "=A"(hv_status),
"+c"(input1_lo),
- "+r"(__sp)
+ ASM_CALL_CONSTRAINT
: "A" (control),
"b" (input1_hi),
"m" (hv_hypercall_pg)
*/
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS \
- unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; \
- register void *__sp asm("esp")
+ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;
+
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS \
unsigned long __edi = __edi, __esi = __esi, \
- __edx = __edx, __ecx = __ecx, __eax = __eax; \
- register void *__sp asm("rsp")
+ __edx = __edx, __ecx = __ecx, __eax = __eax;
+
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
- : call_clbr, "+r" (__sp) \
+ : call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
- : call_clbr, "+r" (__sp) \
+ : call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
- : call_clbr, "+r" (__sp) \
+ : call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \