Merge tag 's390-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 23 Oct 2018 10:14:47 +0000 (11:14 +0100)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 23 Oct 2018 10:14:47 +0000 (11:14 +0100)
Pull s390 updates from Martin Schwidefsky:

 - Improved access control for the zcrypt driver, multiple device nodes
   can now be created with different access control lists

 - Extend the pkey API to provide random protected keys; this is useful
   for encrypted swap devices with ephemeral protected keys

 - Add support for virtually mapped kernel stacks

 - Rework the early boot code; this moves the memory detection into the
   boot code that runs prior to decompression.

 - Add KASAN support

 - Bug fixes and cleanups

* tag 's390-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (83 commits)
  s390/pkey: move pckmo subfunction available checks away from module init
  s390/kasan: support preemptible kernel build
  s390/pkey: Load pkey kernel module automatically
  s390/perf: Return error when debug_register fails
  s390/sthyi: Fix machine name validity indication
  s390/zcrypt: fix broken zcrypt_send_cprb in-kernel api function
  s390/vmalloc: fix VMALLOC_START calculation
  s390/mem_detect: add missing include
  s390/dumpstack: print psw mask and address again
  s390/crypto: Enhance paes cipher to accept variable length key material
  s390/pkey: Introduce new API for transforming key blobs
  s390/pkey: Introduce new API for random protected key verification
  s390/pkey: Add sysfs attributes to emit secure key blobs
  s390/pkey: Add sysfs attributes to emit protected key blobs
  s390/pkey: Define protected key blob format
  s390/pkey: Introduce new API for random protected key generation
  s390/zcrypt: add ap_adapter_mask sysfs attribute
  s390/zcrypt: provide apfs failure code on type 86 error reply
  s390/zcrypt: zcrypt device driver cleanup
  s390/kasan: add support for mem= kernel parameter
  ...

123 files changed:
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/appldata/appldata_base.c
arch/s390/boot/.gitignore
arch/s390/boot/Makefile
arch/s390/boot/boot.h [new file with mode: 0644]
arch/s390/boot/cmdline.c [new file with mode: 0644]
arch/s390/boot/compressed/Makefile
arch/s390/boot/compressed/decompressor.c [new file with mode: 0644]
arch/s390/boot/compressed/decompressor.h [new file with mode: 0644]
arch/s390/boot/compressed/head.S [deleted file]
arch/s390/boot/compressed/misc.c [deleted file]
arch/s390/boot/compressed/vmlinux.lds.S
arch/s390/boot/compressed/vmlinux.scr.lds.S [deleted file]
arch/s390/boot/ctype.c [new file with mode: 0644]
arch/s390/boot/head.S
arch/s390/boot/ipl_parm.c [new file with mode: 0644]
arch/s390/boot/ipl_vmparm.c [new file with mode: 0644]
arch/s390/boot/mem_detect.c [new file with mode: 0644]
arch/s390/boot/startup.c [new file with mode: 0644]
arch/s390/boot/string.c [new file with mode: 0644]
arch/s390/crypto/paes_s390.c
arch/s390/defconfig
arch/s390/hypfs/hypfs_sprp.c
arch/s390/include/asm/appldata.h
arch/s390/include/asm/boot_data.h [new file with mode: 0644]
arch/s390/include/asm/ccwgroup.h
arch/s390/include/asm/facility.h
arch/s390/include/asm/ipl.h
arch/s390/include/asm/kasan.h [new file with mode: 0644]
arch/s390/include/asm/lowcore.h
arch/s390/include/asm/mem_detect.h [new file with mode: 0644]
arch/s390/include/asm/mmu.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/page.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/pkey.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/qdio.h
arch/s390/include/asm/sclp.h
arch/s390/include/asm/sections.h
arch/s390/include/asm/setup.h
arch/s390/include/asm/string.h
arch/s390/include/asm/thread_info.h
arch/s390/include/asm/vmlinux.lds.h [new file with mode: 0644]
arch/s390/include/uapi/asm/pkey.h
arch/s390/include/uapi/asm/zcrypt.h
arch/s390/kernel/Makefile
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/base.S
arch/s390/kernel/dumpstack.c
arch/s390/kernel/early.c
arch/s390/kernel/early_nobss.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry.h
arch/s390/kernel/head64.S
arch/s390/kernel/ipl.c
arch/s390/kernel/ipl_vmparm.c [new file with mode: 0644]
arch/s390/kernel/irq.c
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/module.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/sthyi.c
arch/s390/kernel/swsusp.S
arch/s390/kernel/vdso.c
arch/s390/kernel/vdso32/Makefile
arch/s390/kernel/vdso32/clock_gettime.S
arch/s390/kernel/vdso32/gettimeofday.S
arch/s390/kernel/vdso64/Makefile
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/kernel/vdso64/gettimeofday.S
arch/s390/kernel/vmlinux.lds.S
arch/s390/lib/Makefile
arch/s390/lib/mem.S
arch/s390/mm/Makefile
arch/s390/mm/dump_pagetables.c
arch/s390/mm/fault.c
arch/s390/mm/init.c
arch/s390/mm/kasan_init.c [new file with mode: 0644]
arch/s390/mm/maccess.c
arch/s390/mm/mem_detect.c [deleted file]
arch/s390/purgatory/head.S
drivers/crypto/Kconfig
drivers/s390/block/dasd.c
drivers/s390/char/Makefile
drivers/s390/char/monwriter.c
drivers/s390/char/sclp.h
drivers/s390/char/sclp_cmd.c
drivers/s390/char/sclp_early.c
drivers/s390/char/sclp_early_core.c
drivers/s390/char/sclp_pci.c
drivers/s390/char/tape_3590.c
drivers/s390/char/vmlogrdr.c
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/qdio_main.c
drivers/s390/cio/qdio_setup.c
drivers/s390/crypto/Makefile
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/pkey_api.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/crypto/zcrypt_api.h
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_cca_key.h
drivers/s390/crypto/zcrypt_cex2a.c
drivers/s390/crypto/zcrypt_cex2a.h
drivers/s390/crypto/zcrypt_cex2c.c [new file with mode: 0644]
drivers/s390/crypto/zcrypt_cex2c.h [new file with mode: 0644]
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/crypto/zcrypt_error.h
drivers/s390/crypto/zcrypt_msgtype50.c
drivers/s390/crypto/zcrypt_msgtype50.h
drivers/s390/crypto/zcrypt_msgtype6.c
drivers/s390/crypto/zcrypt_msgtype6.h
drivers/s390/crypto/zcrypt_pcixcc.c [deleted file]
drivers/s390/crypto/zcrypt_pcixcc.h [deleted file]
drivers/s390/crypto/zcrypt_queue.c
include/linux/compiler-gcc.h
include/linux/start_kernel.h
init/main.c
lib/Kconfig.kasan

index 9a9c7a6fe925915f561dd48c454e1e02d5ee7a51..cc8313550493a47b15c76700e9fb35e94a28eb0d 100644 (file)
@@ -56,6 +56,12 @@ config PCI_QUIRKS
 config ARCH_SUPPORTS_UPROBES
        def_bool y
 
+config KASAN_SHADOW_OFFSET
+       hex
+       depends on KASAN
+       default 0x18000000000000 if KASAN_S390_4_LEVEL_PAGING
+       default 0x30000000000
+
 config S390
        def_bool y
        select ARCH_BINFMT_ELF_STATE
@@ -120,11 +126,13 @@ config S390
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_JUMP_LABEL
+       select HAVE_ARCH_KASAN
        select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_SOFT_DIRTY
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       select HAVE_ARCH_VMAP_STACK
        select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
        select HAVE_CMPXCHG_DOUBLE
        select HAVE_CMPXCHG_LOCAL
@@ -649,6 +657,7 @@ config PACK_STACK
 
 config CHECK_STACK
        def_bool y
+       depends on !VMAP_STACK
        prompt "Detect kernel stack overflow"
        help
          This option enables the compiler option -mstack-guard and
index ee65185bbc807284b8729b5c9d52c129fafba30e..0b33577932c3bd9c552c62cfe473979987c97313 100644 (file)
@@ -27,7 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
 UTS_MACHINE    := s390x
-STACK_SIZE     := 16384
+STACK_SIZE     := $(if $(CONFIG_KASAN),32768,16384)
 CHECKFLAGS     += -D__s390__ -D__s390x__
 
 export LD_BFD
index 9bf8489df6e62d00c804189285f71269cb53a634..e4b58240ec5370791d1174132fbd164cff63c26b 100644 (file)
@@ -137,6 +137,14 @@ static void appldata_work_fn(struct work_struct *work)
        mutex_unlock(&appldata_ops_mutex);
 }
 
+static struct appldata_product_id appldata_id = {
+       .prod_nr    = {0xD3, 0xC9, 0xD5, 0xE4,
+                      0xE7, 0xD2, 0xD9},       /* "LINUXKR" */
+       .prod_fn    = 0xD5D3,                   /* "NL" */
+       .version_nr = 0xF2F6,                   /* "26" */
+       .release_nr = 0xF0F1,                   /* "01" */
+};
+
 /*
  * appldata_diag()
  *
@@ -145,17 +153,22 @@ static void appldata_work_fn(struct work_struct *work)
 int appldata_diag(char record_nr, u16 function, unsigned long buffer,
                        u16 length, char *mod_lvl)
 {
-       struct appldata_product_id id = {
-               .prod_nr    = {0xD3, 0xC9, 0xD5, 0xE4,
-                              0xE7, 0xD2, 0xD9},       /* "LINUXKR" */
-               .prod_fn    = 0xD5D3,                   /* "NL" */
-               .version_nr = 0xF2F6,                   /* "26" */
-               .release_nr = 0xF0F1,                   /* "01" */
-       };
+       struct appldata_parameter_list *parm_list;
+       struct appldata_product_id *id;
+       int rc;
 
-       id.record_nr = record_nr;
-       id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
-       return appldata_asm(&id, function, (void *) buffer, length);
+       parm_list = kmalloc(sizeof(*parm_list), GFP_KERNEL);
+       id = kmemdup(&appldata_id, sizeof(appldata_id), GFP_KERNEL);
+       rc = -ENOMEM;
+       if (parm_list && id) {
+               id->record_nr = record_nr;
+               id->mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
+               rc = appldata_asm(parm_list, id, function,
+                                 (void *) buffer, length);
+       }
+       kfree(id);
+       kfree(parm_list);
+       return rc;
 }
 /************************ timer, work, DIAG <END> ****************************/
 
index 017d5912ad2d59a1a85024c20732298b7ad347a4..16ff906e46103b99ddcc4ae65d4ef93108f77419 100644 (file)
@@ -1,2 +1,3 @@
 image
 bzImage
+section_cmp.*
index 9e6668ee93de83122fbfc1c3ade9ab9b0519d87c..d5ad724f5c9621665219f0a13786a4de94188fd7 100644 (file)
@@ -6,6 +6,7 @@
 KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
 
 KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
 KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
@@ -27,15 +28,32 @@ endif
 
 CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
 
-obj-y  := head.o als.o ebcdic.o sclp_early_core.o mem.o
-targets        := bzImage startup.a $(obj-y)
+obj-y  := head.o als.o startup.o mem_detect.o ipl_parm.o string.o ebcdic.o
+obj-y  += sclp_early_core.o mem.o ipl_vmparm.o cmdline.o ctype.o
+targets        := bzImage startup.a section_cmp.boot.data $(obj-y)
 subdir-        := compressed
 
 OBJECTS := $(addprefix $(obj)/,$(obj-y))
 
-$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
+quiet_cmd_section_cmp = SECTCMP $*
+define cmd_section_cmp
+       s1=`$(OBJDUMP) -t -j "$*" "$<" | sort | \
+               sed -n "/0000000000000000/! s/.*\s$*\s\+//p" | sha256sum`; \
+       s2=`$(OBJDUMP) -t -j "$*" "$(word 2,$^)" | sort | \
+               sed -n "/0000000000000000/! s/.*\s$*\s\+//p" | sha256sum`; \
+       if [ "$$s1" != "$$s2" ]; then \
+               echo "error: section $* differs between $< and $(word 2,$^)" >&2; \
+               exit 1; \
+       fi; \
+       touch $@
+endef
+
+$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data FORCE
        $(call if_changed,objcopy)
 
+$(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE
+       $(call if_changed,section_cmp)
+
 $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
        $(Q)$(MAKE) $(build)=$(obj)/compressed $@
 
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
new file mode 100644 (file)
index 0000000..fc41e22
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BOOT_BOOT_H
+#define BOOT_BOOT_H
+
+void startup_kernel(void);
+void detect_memory(void);
+void store_ipl_parmblock(void);
+void setup_boot_command_line(void);
+void setup_memory_end(void);
+
+#endif /* BOOT_BOOT_H */
diff --git a/arch/s390/boot/cmdline.c b/arch/s390/boot/cmdline.c
new file mode 100644 (file)
index 0000000..73d826c
--- /dev/null
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../lib/cmdline.c"
index 04609478d18b99303909a01d7b313df8c799873b..593039620487a6cdad8e076272b8e97cacff0153 100644 (file)
@@ -8,14 +8,16 @@
 KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
 
-obj-y  := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,head.o misc.o) piggy.o
+obj-y  := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) piggy.o info.o
 targets        := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
-targets += vmlinux.scr.lds $(obj-y) $(if $(CONFIG_KERNEL_UNCOMPRESSED),,sizes.h)
+targets += info.bin $(obj-y)
 
 KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
 KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
+OBJCOPYFLAGS :=
 
 OBJECTS := $(addprefix $(obj)/,$(obj-y))
 
@@ -23,23 +25,16 @@ LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
 $(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
        $(call if_changed,ld)
 
-# extract required uncompressed vmlinux symbols and adjust them to reflect offsets inside vmlinux.bin
-sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - 0x100000)/p'
-
-quiet_cmd_sizes = GEN     $@
-      cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
-
-$(obj)/sizes.h: vmlinux
-       $(call if_changed,sizes)
-
-AFLAGS_head.o += -I$(objtree)/$(obj)
-$(obj)/head.o: $(obj)/sizes.h
+OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info
+$(obj)/info.bin: vmlinux FORCE
+       $(call if_changed,objcopy)
 
-CFLAGS_misc.o += -I$(objtree)/$(obj)
-$(obj)/misc.o: $(obj)/sizes.h
+OBJCOPYFLAGS_info.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.info
+$(obj)/info.o: $(obj)/info.bin FORCE
+       $(call if_changed,objcopy)
 
-OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
-$(obj)/vmlinux.bin: vmlinux
+OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section=.vmlinux.info -S
+$(obj)/vmlinux.bin: vmlinux FORCE
        $(call if_changed,objcopy)
 
 vmlinux.bin.all-y := $(obj)/vmlinux.bin
@@ -64,10 +59,10 @@ $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
 $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
        $(call if_changed,xzkern)
 
-LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
-$(obj)/piggy.o: $(obj)/vmlinux.scr.lds $(obj)/vmlinux.bin$(suffix-y)
-       $(call if_changed,ld)
+OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
+$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE
+       $(call if_changed,objcopy)
 
-chkbss := $(filter-out $(obj)/misc.o $(obj)/piggy.o,$(OBJECTS))
+chkbss := $(filter-out $(obj)/piggy.o $(obj)/info.o,$(OBJECTS))
 chkbss-target := $(obj)/vmlinux.bin
 include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c
new file mode 100644 (file)
index 0000000..4504663
--- /dev/null
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Definitions and wrapper functions for kernel decompressor
+ *
+ * Copyright IBM Corp. 2010
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include "decompressor.h"
+
+/*
+ * gzip declarations
+ */
+#define STATIC static
+#define STATIC_RW_DATA static __section(.data)
+
+#undef memset
+#undef memcpy
+#undef memmove
+#define memmove memmove
+#define memzero(s, n) memset((s), 0, (n))
+
+/* Symbols defined by linker scripts */
+extern char _end[];
+extern unsigned char _compressed_start[];
+extern unsigned char _compressed_end[];
+
+#ifdef CONFIG_HAVE_KERNEL_BZIP2
+#define HEAP_SIZE      0x400000
+#else
+#define HEAP_SIZE      0x10000
+#endif
+
+static unsigned long free_mem_ptr = (unsigned long) _end;
+static unsigned long free_mem_end_ptr = (unsigned long) _end + HEAP_SIZE;
+
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
+#define decompress_offset ALIGN((unsigned long)_end + HEAP_SIZE, PAGE_SIZE)
+
+unsigned long mem_safe_offset(void)
+{
+       /*
+        * due to 4MB HEAD_SIZE for bzip2
+        * 'decompress_offset + vmlinux.image_size' could be larger than
+        * kernel at final position + its .bss, so take the larger of two
+        */
+       return max(decompress_offset + vmlinux.image_size,
+                  vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size);
+}
+
+void *decompress_kernel(void)
+{
+       void *output = (void *)decompress_offset;
+
+       __decompress(_compressed_start, _compressed_end - _compressed_start,
+                    NULL, NULL, output, 0, NULL, error);
+       return output;
+}
diff --git a/arch/s390/boot/compressed/decompressor.h b/arch/s390/boot/compressed/decompressor.h
new file mode 100644 (file)
index 0000000..e1c1f2e
--- /dev/null
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BOOT_COMPRESSED_DECOMPRESSOR_H
+#define BOOT_COMPRESSED_DECOMPRESSOR_H
+
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
+static inline void *decompress_kernel(void) {}
+#else
+void *decompress_kernel(void);
+#endif
+unsigned long mem_safe_offset(void);
+void error(char *m);
+
+struct vmlinux_info {
+       unsigned long default_lma;
+       void (*entry)(void);
+       unsigned long image_size;       /* does not include .bss */
+       unsigned long bss_size;         /* uncompressed image .bss size */
+       unsigned long bootdata_off;
+       unsigned long bootdata_size;
+};
+
+extern char _vmlinux_info[];
+#define vmlinux (*(struct vmlinux_info *)_vmlinux_info)
+
+#endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
deleted file mode 100644 (file)
index df8dbbc..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Startup glue code to uncompress the kernel
- *
- * Copyright IBM Corp. 2010
- *
- *   Author(s):        Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-#include "sizes.h"
-
-__HEAD
-ENTRY(startup_decompressor)
-       basr    %r13,0                  # get base
-.LPG1:
-       # setup stack
-       lg      %r15,.Lstack-.LPG1(%r13)
-       aghi    %r15,-160
-       brasl   %r14,decompress_kernel
-       # Set up registers for memory mover. We move the decompressed image to
-       # 0x100000, where startup_continue of the decompressed image is supposed
-       # to be.
-       lgr     %r4,%r2
-       lg      %r2,.Loffset-.LPG1(%r13)
-       lg      %r3,.Lmvsize-.LPG1(%r13)
-       lgr     %r5,%r3
-       # Move the memory mover someplace safe so it doesn't overwrite itself.
-       la      %r1,0x200
-       mvc     0(mover_end-mover,%r1),mover-.LPG1(%r13)
-       # When the memory mover is done we pass control to
-       # arch/s390/kernel/head64.S:startup_continue which lives at 0x100000 in
-       # the decompressed image.
-       lgr     %r6,%r2
-       br      %r1
-mover:
-       mvcle   %r2,%r4,0
-       jo      mover
-       br      %r6
-mover_end:
-
-       .align  8
-.Lstack:
-       .quad   0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
-.Loffset:
-       .quad   0x100000
-.Lmvsize:
-       .quad   SZ__bss_start
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
deleted file mode 100644 (file)
index f66ad73..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Definitions and wrapper functions for kernel decompressor
- *
- * Copyright IBM Corp. 2010
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <linux/uaccess.h>
-#include <asm/page.h>
-#include <asm/sclp.h>
-#include <asm/ipl.h>
-#include "sizes.h"
-
-/*
- * gzip declarations
- */
-#define STATIC static
-
-#undef memset
-#undef memcpy
-#undef memmove
-#define memmove memmove
-#define memzero(s, n) memset((s), 0, (n))
-
-/* Symbols defined by linker scripts */
-extern char input_data[];
-extern int input_len;
-extern char _end[];
-extern char _bss[], _ebss[];
-
-static void error(char *m);
-
-static unsigned long free_mem_ptr;
-static unsigned long free_mem_end_ptr;
-
-#ifdef CONFIG_HAVE_KERNEL_BZIP2
-#define HEAP_SIZE      0x400000
-#else
-#define HEAP_SIZE      0x10000
-#endif
-
-#ifdef CONFIG_KERNEL_GZIP
-#include "../../../../lib/decompress_inflate.c"
-#endif
-
-#ifdef CONFIG_KERNEL_BZIP2
-#include "../../../../lib/decompress_bunzip2.c"
-#endif
-
-#ifdef CONFIG_KERNEL_LZ4
-#include "../../../../lib/decompress_unlz4.c"
-#endif
-
-#ifdef CONFIG_KERNEL_LZMA
-#include "../../../../lib/decompress_unlzma.c"
-#endif
-
-#ifdef CONFIG_KERNEL_LZO
-#include "../../../../lib/decompress_unlzo.c"
-#endif
-
-#ifdef CONFIG_KERNEL_XZ
-#include "../../../../lib/decompress_unxz.c"
-#endif
-
-static int puts(const char *s)
-{
-       sclp_early_printk(s);
-       return 0;
-}
-
-static void error(char *x)
-{
-       unsigned long long psw = 0x000a0000deadbeefULL;
-
-       puts("\n\n");
-       puts(x);
-       puts("\n\n -- System halted");
-
-       asm volatile("lpsw %0" : : "Q" (psw));
-}
-
-unsigned long decompress_kernel(void)
-{
-       void *output, *kernel_end;
-
-       output = (void *) ALIGN((unsigned long) _end + HEAP_SIZE, PAGE_SIZE);
-       kernel_end = output + SZ__bss_start;
-
-#ifdef CONFIG_BLK_DEV_INITRD
-       /*
-        * Move the initrd right behind the end of the decompressed
-        * kernel image. This also prevents initrd corruption caused by
-        * bss clearing since kernel_end will always be located behind the
-        * current bss section..
-        */
-       if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
-               memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
-               INITRD_START = (unsigned long) kernel_end;
-       }
-#endif
-
-       /*
-        * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
-        * initialized afterwards since they reside in bss.
-        */
-       memset(_bss, 0, _ebss - _bss);
-       free_mem_ptr = (unsigned long) _end;
-       free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-
-       __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
-       return (unsigned long) output;
-}
-
index b16ac8b3c439390e35fa83ccb8ba17e35d3e2901..7efc3938f5955dae79720d5323636def4138d9df 100644 (file)
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
 
 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
 OUTPUT_ARCH(s390:64-bit)
@@ -8,9 +9,6 @@ ENTRY(startup)
 
 SECTIONS
 {
-       /* Be careful parts of head_64.S assume startup_32 is at
-        * address 0.
-        */
        . = 0;
        .head.text : {
                _head = . ;
@@ -26,7 +24,7 @@ SECTIONS
        .rodata : {
                _rodata = . ;
                *(.rodata)       /* read-only data */
-               *(EXCLUDE_FILE (*piggy.o) .rodata.compressed)
+               *(.rodata.*)
                _erodata = . ;
        }
        .data : {
@@ -35,14 +33,28 @@ SECTIONS
                *(.data.*)
                _edata = . ;
        }
-       startup_continue = 0x100000;
+       BOOT_DATA
+
+       /*
+        * uncompressed image info used by the decompressor it should match
+        * struct vmlinux_info. It comes from .vmlinux.info section of
+        * uncompressed vmlinux in a form of info.o
+        */
+       . = ALIGN(8);
+       .vmlinux.info : {
+               _vmlinux_info = .;
+               *(.vmlinux.info)
+       }
+
 #ifdef CONFIG_KERNEL_UNCOMPRESSED
        . = 0x100000;
 #else
        . = ALIGN(8);
 #endif
        .rodata.compressed : {
-               *(.rodata.compressed)
+               _compressed_start = .;
+               *(.vmlinux.bin.compressed)
+               _compressed_end = .;
        }
        . = ALIGN(256);
        .bss : {
diff --git a/arch/s390/boot/compressed/vmlinux.scr.lds.S b/arch/s390/boot/compressed/vmlinux.scr.lds.S
deleted file mode 100644 (file)
index ff01d18..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-SECTIONS
-{
-  .rodata.compressed : {
-#ifndef CONFIG_KERNEL_UNCOMPRESSED
-       input_len = .;
-       LONG(input_data_end - input_data) input_data = .;
-#endif
-       *(.data)
-#ifndef CONFIG_KERNEL_UNCOMPRESSED
-       output_len = . - 4;
-       input_data_end = .;
-#endif
-       }
-}
diff --git a/arch/s390/boot/ctype.c b/arch/s390/boot/ctype.c
new file mode 100644 (file)
index 0000000..2495810
--- /dev/null
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../lib/ctype.c"
index f721913b73f10c10c7a89d9a400dacb8ff42902a..ce2cbbc417428f6a0a813c04d53cc4037f493cf6 100644 (file)
@@ -60,6 +60,9 @@ __HEAD
        .long   0x02000690,0x60000050
        .long   0x020006e0,0x20000050
 
+       .org    0x1a0
+       .quad   0,iplstart
+
        .org    0x200
 
 #
@@ -308,16 +311,11 @@ ENTRY(startup_kdump)
        spt     6f-.LPG0(%r13)
        mvc     __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
        l       %r15,.Lstack-.LPG0(%r13)
-       ahi     %r15,-STACK_FRAME_OVERHEAD
        brasl   %r14,verify_facilities
-#ifdef CONFIG_KERNEL_UNCOMPRESSED
-       jg      startup_continue
-#else
-       jg      startup_decompressor
-#endif
+       brasl   %r14,startup_kernel
 
 .Lstack:
-       .long   0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
+       .long   0x8000 + (1<<(PAGE_SHIFT+BOOT_STACK_ORDER)) - STACK_FRAME_OVERHEAD
        .align  8
 6:     .long   0x7fffffff,0xffffffff
 
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
new file mode 100644 (file)
index 0000000..9dab596
--- /dev/null
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+#include <asm/sections.h>
+#include <asm/boot_data.h>
+#include "boot.h"
+
+char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
+struct ipl_parameter_block __bootdata(early_ipl_block);
+int __bootdata(early_ipl_block_valid);
+
+unsigned long __bootdata(memory_end);
+int __bootdata(memory_end_set);
+int __bootdata(noexec_disabled);
+
+static inline int __diag308(unsigned long subcode, void *addr)
+{
+       register unsigned long _addr asm("0") = (unsigned long)addr;
+       register unsigned long _rc asm("1") = 0;
+       unsigned long reg1, reg2;
+       psw_t old = S390_lowcore.program_new_psw;
+
+       asm volatile(
+               "       epsw    %0,%1\n"
+               "       st      %0,%[psw_pgm]\n"
+               "       st      %1,%[psw_pgm]+4\n"
+               "       larl    %0,1f\n"
+               "       stg     %0,%[psw_pgm]+8\n"
+               "       diag    %[addr],%[subcode],0x308\n"
+               "1:     nopr    %%r7\n"
+               : "=&d" (reg1), "=&a" (reg2),
+                 [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
+                 [addr] "+d" (_addr), "+d" (_rc)
+               : [subcode] "d" (subcode)
+               : "cc", "memory");
+       S390_lowcore.program_new_psw = old;
+       return _rc;
+}
+
+void store_ipl_parmblock(void)
+{
+       int rc;
+
+       rc = __diag308(DIAG308_STORE, &early_ipl_block);
+       if (rc == DIAG308_RC_OK &&
+           early_ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
+               early_ipl_block_valid = 1;
+}
+
+static size_t scpdata_length(const char *buf, size_t count)
+{
+       while (count) {
+               if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
+                       break;
+               count--;
+       }
+       return count;
+}
+
+static size_t ipl_block_get_ascii_scpdata(char *dest, size_t size,
+                                         const struct ipl_parameter_block *ipb)
+{
+       size_t count;
+       size_t i;
+       int has_lowercase;
+
+       count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
+                                            ipb->ipl_info.fcp.scp_data_len));
+       if (!count)
+               goto out;
+
+       has_lowercase = 0;
+       for (i = 0; i < count; i++) {
+               if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
+                       count = 0;
+                       goto out;
+               }
+               if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
+                       has_lowercase = 1;
+       }
+
+       if (has_lowercase)
+               memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
+       else
+               for (i = 0; i < count; i++)
+                       dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
+out:
+       dest[count] = '\0';
+       return count;
+}
+
+static void append_ipl_block_parm(void)
+{
+       char *parm, *delim;
+       size_t len, rc = 0;
+
+       len = strlen(early_command_line);
+
+       delim = early_command_line + len;    /* '\0' character position */
+       parm = early_command_line + len + 1; /* append right after '\0' */
+
+       switch (early_ipl_block.hdr.pbt) {
+       case DIAG308_IPL_TYPE_CCW:
+               rc = ipl_block_get_ascii_vmparm(
+                       parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block);
+               break;
+       case DIAG308_IPL_TYPE_FCP:
+               rc = ipl_block_get_ascii_scpdata(
+                       parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block);
+               break;
+       }
+       if (rc) {
+               if (*parm == '=')
+                       memmove(early_command_line, parm + 1, rc);
+               else
+                       *delim = ' '; /* replace '\0' with space */
+       }
+}
+
+static inline int has_ebcdic_char(const char *str)
+{
+       int i;
+
+       for (i = 0; str[i]; i++)
+               if (str[i] & 0x80)
+                       return 1;
+       return 0;
+}
+
+void setup_boot_command_line(void)
+{
+       COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
+       /* convert arch command line to ascii if necessary */
+       if (has_ebcdic_char(COMMAND_LINE))
+               EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
+       /* copy arch command line */
+       strcpy(early_command_line, strim(COMMAND_LINE));
+
+       /* append IPL PARM data to the boot command line */
+       if (early_ipl_block_valid)
+               append_ipl_block_parm();
+}
+
+static char command_line_buf[COMMAND_LINE_SIZE] __section(.data);
+static void parse_mem_opt(void)
+{
+       char *param, *val;
+       bool enabled;
+       char *args;
+       int rc;
+
+       args = strcpy(command_line_buf, early_command_line);
+       while (*args) {
+               args = next_arg(args, &param, &val);
+
+               if (!strcmp(param, "mem")) {
+                       memory_end = memparse(val, NULL);
+                       memory_end_set = 1;
+               }
+
+               if (!strcmp(param, "noexec")) {
+                       rc = kstrtobool(val, &enabled);
+                       if (!rc && !enabled)
+                               noexec_disabled = 1;
+               }
+       }
+}
+
+void setup_memory_end(void)
+{
+       parse_mem_opt();
+#ifdef CONFIG_CRASH_DUMP
+       if (!OLDMEM_BASE && early_ipl_block_valid &&
+           early_ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP &&
+           early_ipl_block.ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) {
+               if (!sclp_early_get_hsa_size(&memory_end) && memory_end)
+                       memory_end_set = 1;
+       }
+#endif
+}
diff --git a/arch/s390/boot/ipl_vmparm.c b/arch/s390/boot/ipl_vmparm.c
new file mode 100644 (file)
index 0000000..8dacd5f
--- /dev/null
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../kernel/ipl_vmparm.c"
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
new file mode 100644 (file)
index 0000000..4cb771b
--- /dev/null
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/sclp.h>
+#include <asm/sections.h>
+#include <asm/mem_detect.h>
+#include <asm/sparsemem.h>
+#include "compressed/decompressor.h"
+#include "boot.h"
+
+unsigned long __bootdata(max_physmem_end);
+struct mem_detect_info __bootdata(mem_detect);
+
+/* up to 256 storage elements, 1020 subincrements each */
+#define ENTRIES_EXTENDED_MAX                                                  \
+       (256 * (1020 / 2) * sizeof(struct mem_detect_block))
+
+/*
+ * To avoid corrupting old kernel memory during dump, find lowest memory
+ * chunk possible either right after the kernel end (decompressed kernel) or
+ * after initrd (if it is present and there is no hole between the kernel end
+ * and initrd)
+ */
+static void *mem_detect_alloc_extended(void)
+{
+       unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
+
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
+           INITRD_START < offset + ENTRIES_EXTENDED_MAX)
+               offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
+
+       return (void *)offset;
+}
+
+static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
+{
+       if (n < MEM_INLINED_ENTRIES)
+               return &mem_detect.entries[n];
+       if (unlikely(!mem_detect.entries_extended))
+               mem_detect.entries_extended = mem_detect_alloc_extended();
+       return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
+}
+
+/*
+ * Sequential calls to add_mem_detect_block() with adjacent memory areas
+ * are merged together into a single memory block.
+ */
+void add_mem_detect_block(u64 start, u64 end)
+{
+       struct mem_detect_block *block;
+
+       if (mem_detect.count) {
+               block = __get_mem_detect_block_ptr(mem_detect.count - 1);
+               if (block->end == start) {
+                       block->end = end;
+                       return;
+               }
+       }
+
+       block = __get_mem_detect_block_ptr(mem_detect.count);
+       block->start = start;
+       block->end = end;
+       mem_detect.count++;
+}
+
+static unsigned long get_mem_detect_end(void)
+{
+       if (mem_detect.count)
+               return __get_mem_detect_block_ptr(mem_detect.count - 1)->end;
+       return 0;
+}
+
+static int __diag260(unsigned long rx1, unsigned long rx2)
+{
+       register unsigned long _rx1 asm("2") = rx1;
+       register unsigned long _rx2 asm("3") = rx2;
+       register unsigned long _ry asm("4") = 0x10; /* storage configuration */
+       int rc = -1;                                /* fail */
+       unsigned long reg1, reg2;
+       psw_t old = S390_lowcore.program_new_psw;
+
+       asm volatile(
+               "       epsw    %0,%1\n"
+               "       st      %0,%[psw_pgm]\n"
+               "       st      %1,%[psw_pgm]+4\n"
+               "       larl    %0,1f\n"
+               "       stg     %0,%[psw_pgm]+8\n"
+               "       diag    %[rx],%[ry],0x260\n"
+               "       ipm     %[rc]\n"
+               "       srl     %[rc],28\n"
+               "1:\n"
+               : "=&d" (reg1), "=&a" (reg2),
+                 [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
+                 [rc] "+&d" (rc), [ry] "+d" (_ry)
+               : [rx] "d" (_rx1), "d" (_rx2)
+               : "cc", "memory");
+       S390_lowcore.program_new_psw = old;
+       return rc == 0 ? _ry : -1;
+}
+
+static int diag260(void)
+{
+       int rc, i;
+
+       struct {
+               unsigned long start;
+               unsigned long end;
+       } storage_extents[8] __aligned(16); /* VM supports up to 8 extents */
+
+       memset(storage_extents, 0, sizeof(storage_extents));
+       rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
+       if (rc == -1)
+               return -1;
+
+       for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
+               add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
+       return 0;
+}
+
+static int tprot(unsigned long addr)
+{
+       unsigned long pgm_addr;
+       int rc = -EFAULT;
+       psw_t old = S390_lowcore.program_new_psw;
+
+       S390_lowcore.program_new_psw.mask = __extract_psw();
+       asm volatile(
+               "       larl    %[pgm_addr],1f\n"
+               "       stg     %[pgm_addr],%[psw_pgm_addr]\n"
+               "       tprot   0(%[addr]),0\n"
+               "       ipm     %[rc]\n"
+               "       srl     %[rc],28\n"
+               "1:\n"
+               : [pgm_addr] "=&d"(pgm_addr),
+                 [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
+                 [rc] "+&d"(rc)
+               : [addr] "a"(addr)
+               : "cc", "memory");
+       S390_lowcore.program_new_psw = old;
+       return rc;
+}
+
+static void search_mem_end(void)
+{
+       unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
+       unsigned long offset = 0;
+       unsigned long pivot;
+
+       while (range > 1) {
+               range >>= 1;
+               pivot = offset + range;
+               if (!tprot(pivot << 20))
+                       offset = pivot;
+       }
+
+       add_mem_detect_block(0, (offset + 1) << 20);
+}
+
+void detect_memory(void)
+{
+       sclp_early_get_memsize(&max_physmem_end);
+
+       if (!sclp_early_read_storage_info()) {
+               mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
+               return;
+       }
+
+       if (!diag260()) {
+               mem_detect.info_source = MEM_DETECT_DIAG260;
+               return;
+       }
+
+       if (max_physmem_end) {
+               add_mem_detect_block(0, max_physmem_end);
+               mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
+               return;
+       }
+
+       search_mem_end();
+       mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
+       max_physmem_end = get_mem_detect_end();
+}
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
new file mode 100644 (file)
index 0000000..4d44131
--- /dev/null
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/string.h>
+#include <asm/setup.h>
+#include <asm/sclp.h>
+#include "compressed/decompressor.h"
+#include "boot.h"
+
+extern char __boot_data_start[], __boot_data_end[];
+
+void error(char *x)
+{
+       sclp_early_printk("\n\n");
+       sclp_early_printk(x);
+       sclp_early_printk("\n\n -- System halted");
+
+       disabled_wait(0xdeadbeef);
+}
+
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
+unsigned long mem_safe_offset(void)
+{
+       return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
+}
+#endif
+
+static void rescue_initrd(void)
+{
+       unsigned long min_initrd_addr;
+
+       if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
+               return;
+       if (!INITRD_START || !INITRD_SIZE)
+               return;
+       min_initrd_addr = mem_safe_offset();
+       if (min_initrd_addr <= INITRD_START)
+               return;
+       memmove((void *)min_initrd_addr, (void *)INITRD_START, INITRD_SIZE);
+       INITRD_START = min_initrd_addr;
+}
+
+static void copy_bootdata(void)
+{
+       if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
+               error(".boot.data section size mismatch");
+       memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
+}
+
+void startup_kernel(void)
+{
+       void *img;
+
+       rescue_initrd();
+       sclp_early_read_info();
+       store_ipl_parmblock();
+       setup_boot_command_line();
+       setup_memory_end();
+       detect_memory();
+       if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
+               img = decompress_kernel();
+               memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
+       }
+       copy_bootdata();
+       vmlinux.entry();
+}
diff --git a/arch/s390/boot/string.c b/arch/s390/boot/string.c
new file mode 100644 (file)
index 0000000..25aca07
--- /dev/null
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include "../lib/string.c"
+
+int strncmp(const char *cs, const char *ct, size_t count)
+{
+       unsigned char c1, c2;
+
+       while (count) {
+               c1 = *cs++;
+               c2 = *ct++;
+               if (c1 != c2)
+                       return c1 < c2 ? -1 : 1;
+               if (!c1)
+                       break;
+               count--;
+       }
+       return 0;
+}
+
+char *skip_spaces(const char *str)
+{
+       while (isspace(*str))
+               ++str;
+       return (char *)str;
+}
+
+char *strim(char *s)
+{
+       size_t size;
+       char *end;
+
+       size = strlen(s);
+       if (!size)
+               return s;
+
+       end = s + size - 1;
+       while (end >= s && isspace(*end))
+               end--;
+       *(end + 1) = '\0';
+
+       return skip_spaces(s);
+}
+
+/* Works only for digits and letters, but small and fast */
+#define TOLOWER(x) ((x) | 0x20)
+
+static unsigned int simple_guess_base(const char *cp)
+{
+       if (cp[0] == '0') {
+               if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2]))
+                       return 16;
+               else
+                       return 8;
+       } else {
+               return 10;
+       }
+}
+
+/**
+ * simple_strtoull - convert a string to an unsigned long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+
+unsigned long long simple_strtoull(const char *cp, char **endp,
+                                  unsigned int base)
+{
+       unsigned long long result = 0;
+
+       if (!base)
+               base = simple_guess_base(cp);
+
+       if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
+               cp += 2;
+
+       while (isxdigit(*cp)) {
+               unsigned int value;
+
+               value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
+               if (value >= base)
+                       break;
+               result = result * base + value;
+               cp++;
+       }
+       if (endp)
+               *endp = (char *)cp;
+
+       return result;
+}
+
+long simple_strtol(const char *cp, char **endp, unsigned int base)
+{
+       if (*cp == '-')
+               return -simple_strtoull(cp + 1, endp, base);
+
+       return simple_strtoull(cp, endp, base);
+}
+
+int kstrtobool(const char *s, bool *res)
+{
+       if (!s)
+               return -EINVAL;
+
+       switch (s[0]) {
+       case 'y':
+       case 'Y':
+       case '1':
+               *res = true;
+               return 0;
+       case 'n':
+       case 'N':
+       case '0':
+               *res = false;
+               return 0;
+       case 'o':
+       case 'O':
+               switch (s[1]) {
+               case 'n':
+               case 'N':
+                       *res = true;
+                       return 0;
+               case 'f':
+               case 'F':
+                       *res = false;
+                       return 0;
+               default:
+                       break;
+               }
+       default:
+               break;
+       }
+
+       return -EINVAL;
+}
index ab9a0ebecc199b52507246b47db7b79dd0420058..e8d9fa54569cd9254541d6c28c230ff86548949d 100644 (file)
@@ -30,26 +30,31 @@ static DEFINE_SPINLOCK(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
 
+struct key_blob {
+       __u8 key[MAXKEYBLOBSIZE];
+       unsigned int keylen;
+};
+
 struct s390_paes_ctx {
-       struct pkey_seckey sk;
+       struct key_blob kb;
        struct pkey_protkey pk;
        unsigned long fc;
 };
 
 struct s390_pxts_ctx {
-       struct pkey_seckey sk[2];
+       struct key_blob kb[2];
        struct pkey_protkey pk[2];
        unsigned long fc;
 };
 
-static inline int __paes_convert_key(struct pkey_seckey *sk,
+static inline int __paes_convert_key(struct key_blob *kb,
                                     struct pkey_protkey *pk)
 {
        int i, ret;
 
        /* try three times in case of failure */
        for (i = 0; i < 3; i++) {
-               ret = pkey_skey2pkey(sk, pk);
+               ret = pkey_keyblob2pkey(kb->key, kb->keylen, pk);
                if (ret == 0)
                        break;
        }
@@ -61,7 +66,7 @@ static int __paes_set_key(struct s390_paes_ctx *ctx)
 {
        unsigned long fc;
 
-       if (__paes_convert_key(&ctx->sk, &ctx->pk))
+       if (__paes_convert_key(&ctx->kb, &ctx->pk))
                return -EINVAL;
 
        /* Pick the correct function code based on the protected key type */
@@ -80,10 +85,8 @@ static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 {
        struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       if (key_len != SECKEYBLOBSIZE)
-               return -EINVAL;
-
-       memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
+       memcpy(ctx->kb.key, in_key, key_len);
+       ctx->kb.keylen = key_len;
        if (__paes_set_key(ctx)) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
@@ -147,8 +150,8 @@ static struct crypto_alg ecb_paes_alg = {
        .cra_list               =       LIST_HEAD_INIT(ecb_paes_alg.cra_list),
        .cra_u                  =       {
                .blkcipher = {
-                       .min_keysize            =       SECKEYBLOBSIZE,
-                       .max_keysize            =       SECKEYBLOBSIZE,
+                       .min_keysize            =       MINKEYBLOBSIZE,
+                       .max_keysize            =       MAXKEYBLOBSIZE,
                        .setkey                 =       ecb_paes_set_key,
                        .encrypt                =       ecb_paes_encrypt,
                        .decrypt                =       ecb_paes_decrypt,
@@ -160,7 +163,7 @@ static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
 {
        unsigned long fc;
 
-       if (__paes_convert_key(&ctx->sk, &ctx->pk))
+       if (__paes_convert_key(&ctx->kb, &ctx->pk))
                return -EINVAL;
 
        /* Pick the correct function code based on the protected key type */
@@ -179,7 +182,8 @@ static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 {
        struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
+       memcpy(ctx->kb.key, in_key, key_len);
+       ctx->kb.keylen = key_len;
        if (__cbc_paes_set_key(ctx)) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
@@ -250,8 +254,8 @@ static struct crypto_alg cbc_paes_alg = {
        .cra_list               =       LIST_HEAD_INIT(cbc_paes_alg.cra_list),
        .cra_u                  =       {
                .blkcipher = {
-                       .min_keysize            =       SECKEYBLOBSIZE,
-                       .max_keysize            =       SECKEYBLOBSIZE,
+                       .min_keysize            =       MINKEYBLOBSIZE,
+                       .max_keysize            =       MAXKEYBLOBSIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       cbc_paes_set_key,
                        .encrypt                =       cbc_paes_encrypt,
@@ -264,8 +268,8 @@ static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
 {
        unsigned long fc;
 
-       if (__paes_convert_key(&ctx->sk[0], &ctx->pk[0]) ||
-           __paes_convert_key(&ctx->sk[1], &ctx->pk[1]))
+       if (__paes_convert_key(&ctx->kb[0], &ctx->pk[0]) ||
+           __paes_convert_key(&ctx->kb[1], &ctx->pk[1]))
                return -EINVAL;
 
        if (ctx->pk[0].type != ctx->pk[1].type)
@@ -287,10 +291,16 @@ static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 {
        struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
        u8 ckey[2 * AES_MAX_KEY_SIZE];
-       unsigned int ckey_len;
+       unsigned int ckey_len, keytok_len;
+
+       if (key_len % 2)
+               return -EINVAL;
 
-       memcpy(ctx->sk[0].seckey, in_key, SECKEYBLOBSIZE);
-       memcpy(ctx->sk[1].seckey, in_key + SECKEYBLOBSIZE, SECKEYBLOBSIZE);
+       keytok_len = key_len / 2;
+       memcpy(ctx->kb[0].key, in_key, keytok_len);
+       ctx->kb[0].keylen = keytok_len;
+       memcpy(ctx->kb[1].key, in_key + keytok_len, keytok_len);
+       ctx->kb[1].keylen = keytok_len;
        if (__xts_paes_set_key(ctx)) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
@@ -386,8 +396,8 @@ static struct crypto_alg xts_paes_alg = {
        .cra_list               =       LIST_HEAD_INIT(xts_paes_alg.cra_list),
        .cra_u                  =       {
                .blkcipher = {
-                       .min_keysize            =       2 * SECKEYBLOBSIZE,
-                       .max_keysize            =       2 * SECKEYBLOBSIZE,
+                       .min_keysize            =       2 * MINKEYBLOBSIZE,
+                       .max_keysize            =       2 * MAXKEYBLOBSIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       xts_paes_set_key,
                        .encrypt                =       xts_paes_encrypt,
@@ -400,7 +410,7 @@ static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
 {
        unsigned long fc;
 
-       if (__paes_convert_key(&ctx->sk, &ctx->pk))
+       if (__paes_convert_key(&ctx->kb, &ctx->pk))
                return -EINVAL;
 
        /* Pick the correct function code based on the protected key type */
@@ -420,7 +430,8 @@ static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 {
        struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       memcpy(ctx->sk.seckey, in_key, key_len);
+       memcpy(ctx->kb.key, in_key, key_len);
+       ctx->kb.keylen = key_len;
        if (__ctr_paes_set_key(ctx)) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
@@ -532,8 +543,8 @@ static struct crypto_alg ctr_paes_alg = {
        .cra_list               =       LIST_HEAD_INIT(ctr_paes_alg.cra_list),
        .cra_u                  =       {
                .blkcipher = {
-                       .min_keysize            =       SECKEYBLOBSIZE,
-                       .max_keysize            =       SECKEYBLOBSIZE,
+                       .min_keysize            =       MINKEYBLOBSIZE,
+                       .max_keysize            =       MAXKEYBLOBSIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       ctr_paes_set_key,
                        .encrypt                =       ctr_paes_encrypt,
index f40600eb17628cbeaa44857a479544b60c1a2068..20add000dd6d600b3baccd5656d78e8f188b3991 100644 (file)
@@ -232,6 +232,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_ZCRYPT=m
+CONFIG_ZCRYPT_MULTIDEVNODES=y
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
index 5d85a039391c6193454b40addda2319ea66a6ffe..601b70786dc857804d5fe263a9ef2e7c9b710502 100644 (file)
@@ -68,40 +68,44 @@ static int hypfs_sprp_create(void **data_ptr, void **free_ptr, size_t *size)
 
 static int __hypfs_sprp_ioctl(void __user *user_area)
 {
-       struct hypfs_diag304 diag304;
+       struct hypfs_diag304 *diag304;
        unsigned long cmd;
        void __user *udata;
        void *data;
        int rc;
 
-       if (copy_from_user(&diag304, user_area, sizeof(diag304)))
-               return -EFAULT;
-       if ((diag304.args[0] >> 8) != 0 || diag304.args[1] > DIAG304_CMD_MAX)
-               return -EINVAL;
-
+       rc = -ENOMEM;
        data = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
-       if (!data)
-               return -ENOMEM;
-
-       udata = (void __user *)(unsigned long) diag304.data;
-       if (diag304.args[1] == DIAG304_SET_WEIGHTS ||
-           diag304.args[1] == DIAG304_SET_CAPPING)
-               if (copy_from_user(data, udata, PAGE_SIZE)) {
-                       rc = -EFAULT;
+       diag304 = kzalloc(sizeof(*diag304), GFP_KERNEL);
+       if (!data || !diag304)
+               goto out;
+
+       rc = -EFAULT;
+       if (copy_from_user(diag304, user_area, sizeof(*diag304)))
+               goto out;
+       rc = -EINVAL;
+       if ((diag304->args[0] >> 8) != 0 || diag304->args[1] > DIAG304_CMD_MAX)
+               goto out;
+
+       rc = -EFAULT;
+       udata = (void __user *)(unsigned long) diag304->data;
+       if (diag304->args[1] == DIAG304_SET_WEIGHTS ||
+           diag304->args[1] == DIAG304_SET_CAPPING)
+               if (copy_from_user(data, udata, PAGE_SIZE))
                        goto out;
-               }
 
-       cmd = *(unsigned long *) &diag304.args[0];
-       diag304.rc = hypfs_sprp_diag304(data, cmd);
+       cmd = *(unsigned long *) &diag304->args[0];
+       diag304->rc = hypfs_sprp_diag304(data, cmd);
 
-       if (diag304.args[1] == DIAG304_QUERY_PRP)
+       if (diag304->args[1] == DIAG304_QUERY_PRP)
                if (copy_to_user(udata, data, PAGE_SIZE)) {
                        rc = -EFAULT;
                        goto out;
                }
 
-       rc = copy_to_user(user_area, &diag304, sizeof(diag304)) ? -EFAULT : 0;
+       rc = copy_to_user(user_area, diag304, sizeof(*diag304)) ? -EFAULT : 0;
 out:
+       kfree(diag304);
        free_page((unsigned long) data);
        return rc;
 }
index 4afbb5938726e568e3c23a88dd2bcaeaa0fe2c68..c5bd9f4437e59d0754cbc72a6784a693312df6cf 100644 (file)
@@ -40,26 +40,27 @@ struct appldata_product_id {
        u16  mod_lvl;           /* modification level */
 } __attribute__ ((packed));
 
-static inline int appldata_asm(struct appldata_product_id *id,
+
+static inline int appldata_asm(struct appldata_parameter_list *parm_list,
+                              struct appldata_product_id *id,
                               unsigned short fn, void *buffer,
                               unsigned short length)
 {
-       struct appldata_parameter_list parm_list;
        int ry;
 
        if (!MACHINE_IS_VM)
                return -EOPNOTSUPP;
-       parm_list.diag = 0xdc;
-       parm_list.function = fn;
-       parm_list.parlist_length = sizeof(parm_list);
-       parm_list.buffer_length = length;
-       parm_list.product_id_addr = (unsigned long) id;
-       parm_list.buffer_addr = virt_to_phys(buffer);
+       parm_list->diag = 0xdc;
+       parm_list->function = fn;
+       parm_list->parlist_length = sizeof(*parm_list);
+       parm_list->buffer_length = length;
+       parm_list->product_id_addr = (unsigned long) id;
+       parm_list->buffer_addr = virt_to_phys(buffer);
        diag_stat_inc(DIAG_STAT_X0DC);
        asm volatile(
                "       diag    %1,%0,0xdc"
                : "=d" (ry)
-               : "d" (&parm_list), "m" (parm_list), "m" (*id)
+               : "d" (parm_list), "m" (*parm_list), "m" (*id)
                : "cc");
        return ry;
 }
diff --git a/arch/s390/include/asm/boot_data.h b/arch/s390/include/asm/boot_data.h
new file mode 100644 (file)
index 0000000..2d999cc
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_BOOT_DATA_H
+#define _ASM_S390_BOOT_DATA_H
+#include <asm/setup.h>
+#include <asm/ipl.h>
+
+extern char early_command_line[COMMAND_LINE_SIZE];
+extern struct ipl_parameter_block early_ipl_block;
+extern int early_ipl_block_valid;
+
+#endif /* _ASM_S390_BOOT_DATA_H */
index 860cab7479c3bc89a0bd9aef2b386f94cce331a6..7293c139dd79d3cc306ac01cd891dd89d1e1112f 100644 (file)
@@ -64,6 +64,8 @@ extern int  ccwgroup_driver_register   (struct ccwgroup_driver *cdriver);
 extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver);
 int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
                        int num_devices, const char *buf);
+struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
+                                                char *bus_id);
 
 extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
 extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
index 99c8ce30b3cd1a4f70540ffa0d03cbb99e35df4d..e78cda94456bcdc7586c64d14fae059d117ff0ec 100644 (file)
@@ -64,11 +64,10 @@ static inline int test_facility(unsigned long nr)
  * @stfle_fac_list: array where facility list can be stored
  * @size: size of passed in array in double words
  */
-static inline void stfle(u64 *stfle_fac_list, int size)
+static inline void __stfle(u64 *stfle_fac_list, int size)
 {
        unsigned long nr;
 
-       preempt_disable();
        asm volatile(
                "       stfl    0(0)\n"
                : "=m" (S390_lowcore.stfl_fac_list));
@@ -85,6 +84,12 @@ static inline void stfle(u64 *stfle_fac_list, int size)
                nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
        }
        memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+}
+
+static inline void stfle(u64 *stfle_fac_list, int size)
+{
+       preempt_disable();
+       __stfle(stfle_fac_list, size);
        preempt_enable();
 }
 
index ae5135704616934fa0028754024aeacfaa1df578..a8389e2d2f034f36af377a8bf04b2dbcad590b50 100644 (file)
@@ -89,8 +89,8 @@ void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
 
 extern void s390_reset_system(void);
 extern void ipl_store_parameters(void);
-extern size_t append_ipl_vmparm(char *, size_t);
-extern size_t append_ipl_scpdata(char *, size_t);
+extern size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
+                                        const struct ipl_parameter_block *ipb);
 
 enum ipl_type {
        IPL_TYPE_UNKNOWN        = 1,
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
new file mode 100644 (file)
index 0000000..70930fe
--- /dev/null
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_KASAN
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#ifdef CONFIG_KASAN_S390_4_LEVEL_PAGING
+#define KASAN_SHADOW_SIZE                                                     \
+       (_AC(1, UL) << (_REGION1_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
+#else
+#define KASAN_SHADOW_SIZE                                                     \
+       (_AC(1, UL) << (_REGION2_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
+#endif
+#define KASAN_SHADOW_OFFSET    _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_START     KASAN_SHADOW_OFFSET
+#define KASAN_SHADOW_END       (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+
+extern void kasan_early_init(void);
+extern void kasan_copy_shadow(pgd_t *dst);
+extern void kasan_free_early_identity(void);
+#else
+static inline void kasan_early_init(void) { }
+static inline void kasan_copy_shadow(pgd_t *dst) { }
+static inline void kasan_free_early_identity(void) { }
+#endif
+
+#endif
index 406d940173ab7ad229076e55291e05975b6da0a5..cc0947e08b6ffef09419a52eb04f817535016127 100644 (file)
@@ -102,9 +102,9 @@ struct lowcore {
        __u64   current_task;                   /* 0x0338 */
        __u64   kernel_stack;                   /* 0x0340 */
 
-       /* Interrupt, panic and restart stack. */
+       /* Interrupt, DAT-off and restart stack. */
        __u64   async_stack;                    /* 0x0348 */
-       __u64   panic_stack;                    /* 0x0350 */
+       __u64   nodat_stack;                    /* 0x0350 */
        __u64   restart_stack;                  /* 0x0358 */
 
        /* Restart function and parameter. */
diff --git a/arch/s390/include/asm/mem_detect.h b/arch/s390/include/asm/mem_detect.h
new file mode 100644 (file)
index 0000000..6114b92
--- /dev/null
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_MEM_DETECT_H
+#define _ASM_S390_MEM_DETECT_H
+
+#include <linux/types.h>
+
+enum mem_info_source {
+       MEM_DETECT_NONE = 0,
+       MEM_DETECT_SCLP_STOR_INFO,
+       MEM_DETECT_DIAG260,
+       MEM_DETECT_SCLP_READ_INFO,
+       MEM_DETECT_BIN_SEARCH
+};
+
+struct mem_detect_block {
+       u64 start;
+       u64 end;
+};
+
+/*
+ * Storage element id is defined as 1 byte (up to 256 storage elements).
+ * In practice, only storage element ids 0 and 1 are used.
+ * According to architecture one storage element could have as much as
+ * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
+ * If more mem_detect_blocks are required, a block of memory from already
+ * known mem_detect_block is taken (entries_extended points to it).
+ */
+#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
+
+struct mem_detect_info {
+       u32 count;
+       u8 info_source;
+       struct mem_detect_block entries[MEM_INLINED_ENTRIES];
+       struct mem_detect_block *entries_extended;
+};
+extern struct mem_detect_info mem_detect;
+
+void add_mem_detect_block(u64 start, u64 end);
+
+static inline int __get_mem_detect_block(u32 n, unsigned long *start,
+                                        unsigned long *end)
+{
+       if (n >= mem_detect.count) {
+               *start = 0;
+               *end = 0;
+               return -1;
+       }
+
+       if (n < MEM_INLINED_ENTRIES) {
+               *start = (unsigned long)mem_detect.entries[n].start;
+               *end = (unsigned long)mem_detect.entries[n].end;
+       } else {
+               *start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
+               *end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
+       }
+       return 0;
+}
+
+/**
+ * for_each_mem_detect_block - early online memory range iterator
+ * @i: an integer used as loop variable
+ * @p_start: ptr to unsigned long for start address of the range
+ * @p_end: ptr to unsigned long for end address of the range
+ *
+ * Walks over detected online memory ranges.
+ */
+#define for_each_mem_detect_block(i, p_start, p_end)                   \
+       for (i = 0, __get_mem_detect_block(i, p_start, p_end);          \
+            i < mem_detect.count;                                      \
+            i++, __get_mem_detect_block(i, p_start, p_end))
+
+static inline void get_mem_detect_reserved(unsigned long *start,
+                                          unsigned long *size)
+{
+       *start = (unsigned long)mem_detect.entries_extended;
+       if (mem_detect.count > MEM_INLINED_ENTRIES)
+               *size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
+       else
+               *size = 0;
+}
+
+#endif
index a8418e1379eb7ee08c92acd034eae000cb19c695..bcfb6371086f2319f6901d2cc52a1d8c44fd0a1a 100644 (file)
@@ -32,6 +32,8 @@ typedef struct {
        unsigned int uses_cmm:1;
        /* The gmaps associated with this context are allowed to use huge pages. */
        unsigned int allow_gmap_hpage_1m:1;
+       /* The mmu context is for compat task */
+       unsigned int compat_mm:1;
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name)                                             \
index 0717ee76885d634cfc10dd0ce790004639737dd2..dbd689d556ce5dd9368392a1e0676c18163acc3c 100644 (file)
@@ -25,6 +25,7 @@ static inline int init_new_context(struct task_struct *tsk,
        atomic_set(&mm->context.flush_count, 0);
        mm->context.gmap_asce = 0;
        mm->context.flush_mm = 0;
+       mm->context.compat_mm = 0;
 #ifdef CONFIG_PGSTE
        mm->context.alloc_pgste = page_table_allocate_pgste ||
                test_thread_flag(TIF_PGSTE) ||
index 41e3908b397f8f2faa5bab59266fec25c635a6a7..a4d38092530abacb2295a3fe08589a8a3b1f7844 100644 (file)
@@ -161,6 +161,7 @@ static inline int devmem_is_allowed(unsigned long pfn)
 
 #define virt_to_pfn(kaddr)     (__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_virt(pfn)       __va((pfn) << PAGE_SHIFT)
+#define pfn_to_kaddr(pfn)      pfn_to_virt(pfn)
 
 #define virt_to_page(kaddr)    pfn_to_page(virt_to_pfn(kaddr))
 #define page_to_virt(page)     pfn_to_virt(page_to_pfn(page))
index 0e7cb0dc9c33b7f5a8187df912aefb8b15c100fc..411d435e7a7d2a5a8c650c812017d66f9738710a 100644 (file)
@@ -341,6 +341,8 @@ static inline int is_module_addr(void *addr)
 #define PTRS_PER_P4D   _CRST_ENTRIES
 #define PTRS_PER_PGD   _CRST_ENTRIES
 
+#define MAX_PTRS_PER_P4D       PTRS_PER_P4D
+
 /*
  * Segment table and region3 table entry encoding
  * (R = read-only, I = invalid, y = young bit):
@@ -466,6 +468,12 @@ static inline int is_module_addr(void *addr)
                                 _SEGMENT_ENTRY_YOUNG | \
                                 _SEGMENT_ENTRY_PROTECT | \
                                 _SEGMENT_ENTRY_NOEXEC)
+#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |  \
+                                _SEGMENT_ENTRY_LARGE | \
+                                _SEGMENT_ENTRY_READ |  \
+                                _SEGMENT_ENTRY_WRITE | \
+                                _SEGMENT_ENTRY_YOUNG | \
+                                _SEGMENT_ENTRY_DIRTY)
 
 /*
  * Region3 entry (large page) protection definitions.
@@ -599,6 +607,14 @@ static inline int pgd_bad(pgd_t pgd)
        return (pgd_val(pgd) & mask) != 0;
 }
 
+static inline unsigned long pgd_pfn(pgd_t pgd)
+{
+       unsigned long origin_mask;
+
+       origin_mask = _REGION_ENTRY_ORIGIN;
+       return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
+}
+
 static inline int p4d_folded(p4d_t p4d)
 {
        return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
@@ -1171,6 +1187,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 
 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
 
 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
@@ -1210,7 +1227,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 
 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
 #define pud_page(pud) pfn_to_page(pud_pfn(pud))
-#define p4d_page(pud) pfn_to_page(p4d_pfn(p4d))
+#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
+#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
 
 /* Find an entry in the lowest level page table.. */
 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
index 053117ba7328ca72322a0122f2b99930e8ea868f..9b6e79077866b253f211820a7e64074ffee81f50 100644 (file)
@@ -109,4 +109,30 @@ int pkey_verifykey(const struct pkey_seckey *seckey,
                   u16 *pcardnr, u16 *pdomain,
                   u16 *pkeysize, u32 *pattributes);
 
+/*
+ * In-kernel API: Generate (AES) random protected key.
+ * @param keytype one of the PKEY_KEYTYPE values
+ * @param protkey pointer to buffer receiving the protected key
+ * @return 0 on success, negative errno value on failure
+ */
+int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey);
+
+/*
+ * In-kernel API: Verify an (AES) protected key.
+ * @param protkey pointer to buffer containing the protected key to verify
+ * @return 0 on success, negative errno value on failure. In case the protected
+ * key is not valid -EKEYREJECTED is returned
+ */
+int pkey_verifyprotkey(const struct pkey_protkey *protkey);
+
+/*
+ * In-kernel API: Transform a key blob (of any type) into a protected key.
+ * @param key pointer to a buffer containing the key blob
+ * @param keylen size of the key blob in bytes
+ * @param protkey pointer to buffer receiving the protected key
+ * @return 0 on success, negative errno value on failure
+ */
+int pkey_keyblob2pkey(const __u8 *key, __u32 keylen,
+                     struct pkey_protkey *protkey);
+
 #endif /* _KAPI_PKEY_H */
index 7f2953c15c37b1577039b9a9d7217727797b7823..34768e6ef4fb7c963b5d3f397d6a7535115eb8eb 100644 (file)
@@ -242,7 +242,7 @@ static inline unsigned long current_stack_pointer(void)
        return sp;
 }
 
-static inline unsigned short stap(void)
+static __no_sanitize_address_or_inline unsigned short stap(void)
 {
        unsigned short cpu_address;
 
@@ -250,6 +250,55 @@ static inline unsigned short stap(void)
        return cpu_address;
 }
 
+#define CALL_ARGS_0()                                                  \
+       register unsigned long r2 asm("2")
+#define CALL_ARGS_1(arg1)                                              \
+       register unsigned long r2 asm("2") = (unsigned long)(arg1)
+#define CALL_ARGS_2(arg1, arg2)                                                \
+       CALL_ARGS_1(arg1);                                              \
+       register unsigned long r3 asm("3") = (unsigned long)(arg2)
+#define CALL_ARGS_3(arg1, arg2, arg3)                                  \
+       CALL_ARGS_2(arg1, arg2);                                        \
+       register unsigned long r4 asm("4") = (unsigned long)(arg3)
+#define CALL_ARGS_4(arg1, arg2, arg3, arg4)                            \
+       CALL_ARGS_3(arg1, arg2, arg3);                                  \
+       register unsigned long r5 asm("5") = (unsigned long)(arg4)
+#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5)                      \
+       CALL_ARGS_4(arg1, arg2, arg3, arg4);                            \
+       register unsigned long r6 asm("6") = (unsigned long)(arg5)
+
+#define CALL_FMT_0
+#define CALL_FMT_1 CALL_FMT_0, "0" (r2)
+#define CALL_FMT_2 CALL_FMT_1, "d" (r3)
+#define CALL_FMT_3 CALL_FMT_2, "d" (r4)
+#define CALL_FMT_4 CALL_FMT_3, "d" (r5)
+#define CALL_FMT_5 CALL_FMT_4, "d" (r6)
+
+#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
+#define CALL_CLOBBER_4 CALL_CLOBBER_5
+#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
+#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
+#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
+#define CALL_CLOBBER_0 CALL_CLOBBER_1
+
+#define CALL_ON_STACK(fn, stack, nr, args...)                          \
+({                                                                     \
+       CALL_ARGS_##nr(args);                                           \
+       unsigned long prev;                                             \
+                                                                       \
+       asm volatile(                                                   \
+               "       la      %[_prev],0(15)\n"                       \
+               "       la      15,0(%[_stack])\n"                      \
+               "       stg     %[_prev],%[_bc](15)\n"                  \
+               "       brasl   14,%[_fn]\n"                            \
+               "       la      15,0(%[_prev])\n"                       \
+               : "+&d" (r2), [_prev] "=&a" (prev)                      \
+               : [_stack] "a" (stack),                                 \
+                 [_bc] "i" (offsetof(struct stack_frame, back_chain)), \
+                 [_fn] "X" (fn) CALL_FMT_##nr : CALL_CLOBBER_##nr);    \
+       r2;                                                             \
+})
+
 /*
  * Give up the time slice of the virtual PU.
  */
@@ -287,7 +336,7 @@ static inline void __load_psw(psw_t psw)
  * Set PSW mask to specified value, while leaving the
  * PSW addr pointing to the next instruction.
  */
-static inline void __load_psw_mask(unsigned long mask)
+static __no_sanitize_address_or_inline void __load_psw_mask(unsigned long mask)
 {
        unsigned long addr;
        psw_t psw;
index 9c9970a5dfb10798ddd459dbcff7beae2d9ea42c..d46edde7e4587e96d4bd148bf6c11f6bce963ee7 100644 (file)
@@ -252,13 +252,11 @@ struct slsb {
  *   (for communication with upper layer programs)
  *   (only required for use with completion queues)
  * @flags: flags indicating state of buffer
- * @aob: pointer to QAOB used for the particular SBAL
  * @user: pointer to upper layer program's state information related to SBAL
  *        (stored in user1 data of QAOB)
  */
 struct qdio_outbuf_state {
        u8 flags;
-       struct qaob *aob;
        void *user;
 };
 
index e44a8d7959f513ebbd2982314369a5706bda3528..0cd4bda85eb1e0d281d3036ca745faa28f992eed 100644 (file)
@@ -95,6 +95,7 @@ extern struct sclp_info sclp;
 struct zpci_report_error_header {
        u8 version;     /* Interface version byte */
        u8 action;      /* Action qualifier byte
+                        * 0: Adapter Reset Request
                         * 1: Deconfigure and repair action requested
                         *      (OpenCrypto Problem Call Home)
                         * 2: Informational Report
@@ -104,6 +105,8 @@ struct zpci_report_error_header {
        u8 data[0];     /* Subsequent Data passed verbatim to SCLP ET 24 */
 } __packed;
 
+int sclp_early_read_info(void);
+int sclp_early_read_storage_info(void);
 int sclp_early_get_core_info(struct sclp_core_info *info);
 void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
 void sclp_early_detect(void);
@@ -111,6 +114,8 @@ void sclp_early_printk(const char *s);
 void sclp_early_printk_force(const char *s);
 void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
 
+int sclp_early_get_memsize(unsigned long *mem);
+int sclp_early_get_hsa_size(unsigned long *hsa_size);
 int _sclp_get_core_info(struct sclp_core_info *info);
 int sclp_core_configure(u8 core);
 int sclp_core_deconfigure(u8 core);
index 724faede8ac52d565db7b4f4d5dd40391030e9d6..7afe4620685c93aac9b4bb9efaf196aacd4b055c 100644 (file)
@@ -4,4 +4,16 @@
 
 #include <asm-generic/sections.h>
 
+/*
+ * .boot.data section contains variables "shared" between the decompressor and
+ * the decompressed kernel. The decompressor will store values in them, and
+ * copy over to the decompressed image before starting it.
+ *
+ * Each variable ends up in its own intermediate section .boot.data.<var name>,
+ * those sections are later sorted by alignment + name and merged together into
+ * final .boot.data section, which should be identical in the decompressor and
+ * the decompressed kernel (that is checked during the build).
+ */
+#define __bootdata(var) __section(.boot.data.var) var
+
 #endif
index 1d66016f417020ee99324db641fc813c56b0cfc5..efda97804aa4a5dcedba8a2512c3c890411c587d 100644 (file)
 #define OLDMEM_SIZE    (*(unsigned long *)  (OLDMEM_SIZE_OFFSET))
 #define COMMAND_LINE   ((char *)            (COMMAND_LINE_OFFSET))
 
+extern int noexec_disabled;
 extern int memory_end_set;
 extern unsigned long memory_end;
 extern unsigned long max_physmem_end;
 
-extern void detect_memory_memblock(void);
-
 #define MACHINE_IS_VM          (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM         (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
 #define MACHINE_IS_LPAR                (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
index 50f26fc9acb27f72e390f9cc83e2bf6a792bac8b..116cc15a4b8a793ccff0d06a18508221f8c0960f 100644 (file)
@@ -53,6 +53,27 @@ char *strstr(const char *s1, const char *s2);
 #undef __HAVE_ARCH_STRSEP
 #undef __HAVE_ARCH_STRSPN
 
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+extern void *__memcpy(void *dest, const void *src, size_t n);
+extern void *__memset(void *s, int c, size_t n);
+extern void *__memmove(void *dest, const void *src, size_t n);
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */
+
 void *__memset16(uint16_t *s, uint16_t v, size_t count);
 void *__memset32(uint32_t *s, uint32_t v, size_t count);
 void *__memset64(uint64_t *s, uint64_t v, size_t count);
index 3c883c368eb0587daef70a03c74010b6579f9084..27248f42a03c4561a9e1481fbea205b3b866f928 100644 (file)
 #include <linux/const.h>
 
 /*
- * Size of kernel stack for each process
+ * General size of kernel stacks
  */
+#ifdef CONFIG_KASAN
+#define THREAD_SIZE_ORDER 3
+#else
 #define THREAD_SIZE_ORDER 2
-#define ASYNC_ORDER  2
-
+#endif
+#define BOOT_STACK_ORDER  2
 #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
-#define ASYNC_SIZE  (PAGE_SIZE << ASYNC_ORDER)
 
 #ifndef __ASSEMBLY__
 #include <asm/lowcore.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 
+#define STACK_INIT_OFFSET \
+       (THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))
+
 /*
  * low level task data that entry.S needs immediate access to
  * - this struct should fit entirely inside of one cache line
diff --git a/arch/s390/include/asm/vmlinux.lds.h b/arch/s390/include/asm/vmlinux.lds.h
new file mode 100644 (file)
index 0000000..2d127f9
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/page.h>
+
+/*
+ * .boot.data section is shared between the decompressor code and the
+ * decompressed kernel. The decompressor will store values in it, and copy
+ * over to the decompressed image before starting it.
+ *
+ * .boot.data variables are kept in separate .boot.data.<var name> sections,
+ * which are sorted by alignment first, then by name before being merged
+ * into a single .boot.data section. This way big holes caused by page aligned
+ * structs are avoided and linker produces consistent result.
+ */
+#define BOOT_DATA                                                      \
+       . = ALIGN(PAGE_SIZE);                                           \
+       .boot.data : {                                                  \
+               __boot_data_start = .;                                  \
+               *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.boot.data*)))         \
+               __boot_data_end = .;                                    \
+       }
index 6f84a53c3270eebb1bf4bbb88b1b79595b6a6e2e..c0e86ce4a00b095048c6402f355e2cd2b0afcee6 100644 (file)
 #define PKEY_IOCTL_MAGIC 'p'
 
 #define SECKEYBLOBSIZE 64     /* secure key blob size is always 64 bytes */
+#define PROTKEYBLOBSIZE 80  /* protected key blob size is always 80 bytes */
 #define MAXPROTKEYSIZE 64  /* a protected key blob may be up to 64 bytes */
 #define MAXCLRKEYSIZE  32     /* a clear key value may be up to 32 bytes */
 
+#define MINKEYBLOBSIZE SECKEYBLOBSIZE      /* Minimum size of a key blob */
+#define MAXKEYBLOBSIZE PROTKEYBLOBSIZE     /* Maximum size of a key blob */
+
 /* defines for the type field within the pkey_protkey struct */
 #define PKEY_KEYTYPE_AES_128  1
 #define PKEY_KEYTYPE_AES_192  2
@@ -129,4 +133,34 @@ struct pkey_verifykey {
 #define PKEY_VERIFY_ATTR_AES      0x00000001  /* key is an AES key */
 #define PKEY_VERIFY_ATTR_OLD_MKVP  0x00000100  /* key has old MKVP value */
 
+/*
+ * Generate (AES) random protected key.
+ */
+struct pkey_genprotk {
+       __u32 keytype;                         /* in: key type to generate */
+       struct pkey_protkey protkey;           /* out: the protected key   */
+};
+
+#define PKEY_GENPROTK _IOWR(PKEY_IOCTL_MAGIC, 0x08, struct pkey_genprotk)
+
+/*
+ * Verify an (AES) protected key.
+ */
+struct pkey_verifyprotk {
+       struct pkey_protkey protkey;    /* in: the protected key to verify */
+};
+
+#define PKEY_VERIFYPROTK _IOW(PKEY_IOCTL_MAGIC, 0x09, struct pkey_verifyprotk)
+
+/*
+ * Transform a key blob (of any type) into a protected key
+ */
+struct pkey_kblob2pkey {
+       __u8 __user *key;               /* in: the key blob        */
+       __u32 keylen;                   /* in: the key blob length */
+       struct pkey_protkey protkey;    /* out: the protected key  */
+};
+
+#define PKEY_KBLOB2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x0A, struct pkey_kblob2pkey)
+
 #endif /* _UAPI_PKEY_H */
index 2bb1f3bb98ac5cc6f3c7bb0ac74dedda5e202058..42c81a95e97ba8aee708388f1c76bcdde12a6e78 100644 (file)
@@ -2,9 +2,9 @@
 /*
  *  include/asm-s390/zcrypt.h
  *
- *  zcrypt 2.1.0 (user-visible header)
+ *  zcrypt 2.2.1 (user-visible header)
  *
- *  Copyright IBM Corp. 2001, 2006
+ *  Copyright IBM Corp. 2001, 2018
  *  Author(s): Robert Burroughs
  *            Eric Rossman (edrossma@us.ibm.com)
  *
 #define __ASM_S390_ZCRYPT_H
 
 #define ZCRYPT_VERSION 2
-#define ZCRYPT_RELEASE 1
+#define ZCRYPT_RELEASE 2
 #define ZCRYPT_VARIANT 1
 
 #include <linux/ioctl.h>
 #include <linux/compiler.h>
 
+/* Name of the zcrypt device driver. */
+#define ZCRYPT_NAME "zcrypt"
+
 /**
  * struct ica_rsa_modexpo
  *
@@ -309,6 +312,16 @@ struct zcrypt_device_matrix_ext {
 #define ZCRYPT_QDEPTH_MASK   _IOR(ZCRYPT_IOCTL_MAGIC, 0x59, char[MAX_ZDEV_CARDIDS_EXT])
 #define ZCRYPT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x5a, int[MAX_ZDEV_CARDIDS_EXT])
 
+/*
+ * Support for multiple zcrypt device nodes.
+ */
+
+/* Nr of minor device node numbers to allocate. */
+#define ZCRYPT_MAX_MINOR_NODES 256
+
+/* Max amount of possible ioctls */
+#define MAX_ZDEV_IOCTLS (1 << _IOC_NRBITS)
+
 /*
  * Only deprecated defines, structs and ioctls below this line.
  */
index dbfd1730e631acfb71d8688ca4383ff98385a106..386b1abb217bca267a6be64dacdf378ffeb0acbc 100644 (file)
@@ -23,6 +23,10 @@ KCOV_INSTRUMENT_early_nobss.o        := n
 UBSAN_SANITIZE_early.o         := n
 UBSAN_SANITIZE_early_nobss.o   := n
 
+KASAN_SANITIZE_early_nobss.o   := n
+KASAN_SANITIZE_ipl.o           := n
+KASAN_SANITIZE_machine_kexec.o := n
+
 #
 # Passing null pointers is ok for smp code, since we access the lowcore here.
 #
@@ -47,7 +51,7 @@ obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
 obj-y  += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
 obj-y  += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
 obj-y  += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
-obj-y  += nospec-branch.o
+obj-y  += nospec-branch.o ipl_vmparm.o
 
 extra-y                                += head64.o vmlinux.lds
 
index 66e830f1c7bfefc2711305172cbdacaac48e0527..164bec175628ad3356bfa7bc1ab1ca849e5f0935 100644 (file)
@@ -159,7 +159,7 @@ int main(void)
        OFFSET(__LC_CURRENT, lowcore, current_task);
        OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
        OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
-       OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
+       OFFSET(__LC_NODAT_STACK, lowcore, nodat_stack);
        OFFSET(__LC_RESTART_STACK, lowcore, restart_stack);
        OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
        OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
index b65874b0b412e40ea1baea814fb1169d04f02104..f268fca67e822a1e4b9d1547400aa1353c10af5e 100644 (file)
@@ -18,7 +18,7 @@
 
 ENTRY(s390_base_mcck_handler)
        basr    %r13,0
-0:     lg      %r15,__LC_PANIC_STACK   # load panic stack
+0:     lg      %r15,__LC_NODAT_STACK   # load nodat (panic) stack
        aghi    %r15,-STACK_FRAME_OVERHEAD
        larl    %r1,s390_base_mcck_handler_fn
        lg      %r9,0(%r1)
index 5b23c4f6e50cd452177477105b914774d67898ad..cb7f55bbe06e87eeb16a6e3f38d54fdc9be63824 100644 (file)
@@ -30,7 +30,7 @@
  * The stack trace can start at any of the three stacks and can potentially
  * touch all of them. The order is: panic stack, async stack, sync stack.
  */
-static unsigned long
+static unsigned long __no_sanitize_address
 __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
             unsigned long low, unsigned long high)
 {
@@ -77,11 +77,11 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
        frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
 #ifdef CONFIG_CHECK_STACK
        sp = __dump_trace(func, data, sp,
-                         S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
-                         S390_lowcore.panic_stack + frame_size);
+                         S390_lowcore.nodat_stack + frame_size - THREAD_SIZE,
+                         S390_lowcore.nodat_stack + frame_size);
 #endif
        sp = __dump_trace(func, data, sp,
-                         S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+                         S390_lowcore.async_stack + frame_size - THREAD_SIZE,
                          S390_lowcore.async_stack + frame_size);
        task = task ?: current;
        __dump_trace(func, data, sp,
@@ -124,7 +124,7 @@ void show_registers(struct pt_regs *regs)
        char *mode;
 
        mode = user_mode(regs) ? "User" : "Krnl";
-       printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
+       printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
        if (!user_mode(regs))
                pr_cont(" (%pSR)", (void *)regs->psw.addr);
        pr_cont("\n");
index 5b28b434f8a153d27ca8a7124d156ec05897e71e..af5c2b3f706567f5f9927686ccfd210b50366221 100644 (file)
 #include <asm/cpcmd.h>
 #include <asm/sclp.h>
 #include <asm/facility.h>
+#include <asm/boot_data.h>
 #include "entry.h"
 
-static void __init setup_boot_command_line(void);
-
 /*
  * Initialize storage key for kernel pages
  */
@@ -284,51 +283,11 @@ static int __init cad_setup(char *str)
 }
 early_param("cad", cad_setup);
 
-/* Set up boot command line */
-static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
-{
-       char *parm, *delim;
-       size_t rc, len;
-
-       len = strlen(boot_command_line);
-
-       delim = boot_command_line + len;        /* '\0' character position */
-       parm  = boot_command_line + len + 1;    /* append right after '\0' */
-
-       rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
-       if (rc) {
-               if (*parm == '=')
-                       memmove(boot_command_line, parm + 1, rc);
-               else
-                       *delim = ' ';           /* replace '\0' with space */
-       }
-}
-
-static inline int has_ebcdic_char(const char *str)
-{
-       int i;
-
-       for (i = 0; str[i]; i++)
-               if (str[i] & 0x80)
-                       return 1;
-       return 0;
-}
-
+char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
 static void __init setup_boot_command_line(void)
 {
-       COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
-       /* convert arch command line to ascii if necessary */
-       if (has_ebcdic_char(COMMAND_LINE))
-               EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
        /* copy arch command line */
-       strlcpy(boot_command_line, strstrip(COMMAND_LINE),
-               ARCH_COMMAND_LINE_SIZE);
-
-       /* append IPL PARM data to the boot command line */
-       if (MACHINE_IS_VM)
-               append_to_cmdline(append_ipl_vmparm);
-
-       append_to_cmdline(append_ipl_scpdata);
+       strlcpy(boot_command_line, early_command_line, ARCH_COMMAND_LINE_SIZE);
 }
 
 static void __init check_image_bootable(void)
index 2d84fc48df3a7d44caaa08fb70157f58442f621a..8d73f7fae16e00422fa4ea9eb54c39ad0901fa89 100644 (file)
@@ -13,8 +13,8 @@
 #include <linux/string.h>
 #include <asm/sections.h>
 #include <asm/lowcore.h>
-#include <asm/setup.h>
 #include <asm/timex.h>
+#include <asm/kasan.h>
 #include "entry.h"
 
 static void __init reset_tod_clock(void)
@@ -32,26 +32,6 @@ static void __init reset_tod_clock(void)
        S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
 }
 
-static void __init rescue_initrd(void)
-{
-       unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
-
-       /*
-        * Just like in case of IPL from VM reader we make sure there is a
-        * gap of 4MB between end of kernel and start of initrd.
-        * That way we can also be sure that saving an NSS will succeed,
-        * which however only requires different segments.
-        */
-       if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
-               return;
-       if (!INITRD_START || !INITRD_SIZE)
-               return;
-       if (INITRD_START >= min_initrd_addr)
-               return;
-       memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
-       INITRD_START = min_initrd_addr;
-}
-
 static void __init clear_bss_section(void)
 {
        memset(__bss_start, 0, __bss_stop - __bss_start);
@@ -60,6 +40,6 @@ static void __init clear_bss_section(void)
 void __init startup_init_nobss(void)
 {
        reset_tod_clock();
-       rescue_initrd();
        clear_bss_section();
+       kasan_early_init();
 }
index 150130c897c39938d03d04e497100cca77d0a353..724fba4d09d2df3a35c372224ddc944c9def3ace 100644 (file)
@@ -85,14 +85,34 @@ _LPP_OFFSET = __LC_LPP
 #endif
        .endm
 
-       .macro  CHECK_STACK stacksize,savearea
+       .macro  CHECK_STACK savearea
 #ifdef CONFIG_CHECK_STACK
-       tml     %r15,\stacksize - CONFIG_STACK_GUARD
+       tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
        lghi    %r14,\savearea
        jz      stack_overflow
 #endif
        .endm
 
+       .macro  CHECK_VMAP_STACK savearea,oklabel
+#ifdef CONFIG_VMAP_STACK
+       lgr     %r14,%r15
+       nill    %r14,0x10000 - STACK_SIZE
+       oill    %r14,STACK_INIT
+       clg     %r14,__LC_KERNEL_STACK
+       je      \oklabel
+       clg     %r14,__LC_ASYNC_STACK
+       je      \oklabel
+       clg     %r14,__LC_NODAT_STACK
+       je      \oklabel
+       clg     %r14,__LC_RESTART_STACK
+       je      \oklabel
+       lghi    %r14,\savearea
+       j       stack_overflow
+#else
+       j       \oklabel
+#endif
+       .endm
+
        .macro  SWITCH_ASYNC savearea,timer
        tmhh    %r8,0x0001              # interrupting from user ?
        jnz     1f
@@ -104,11 +124,11 @@ _LPP_OFFSET       = __LC_LPP
        brasl   %r14,cleanup_critical
        tmhh    %r8,0x0001              # retest problem state after cleanup
        jnz     1f
-0:     lg      %r14,__LC_ASYNC_STACK   # are we already on the async stack?
+0:     lg      %r14,__LC_ASYNC_STACK   # are we already on the target stack?
        slgr    %r14,%r15
        srag    %r14,%r14,STACK_SHIFT
        jnz     2f
-       CHECK_STACK 1<<STACK_SHIFT,\savearea
+       CHECK_STACK \savearea
        aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        j       3f
 1:     UPDATE_VTIME %r14,%r15,\timer
@@ -600,9 +620,10 @@ ENTRY(pgm_check_handler)
        jnz     1f                      # -> enabled, can't be a double fault
        tm      __LC_PGM_ILC+3,0x80     # check for per exception
        jnz     .Lpgm_svcper            # -> single stepped svc
-1:     CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+1:     CHECK_STACK __LC_SAVE_AREA_SYNC
        aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-       j       4f
+       # CHECK_VMAP_STACK branches to stack_overflow or 4f
+       CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
 2:     UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
        BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
        lg      %r15,__LC_KERNEL_STACK
@@ -1136,7 +1157,8 @@ ENTRY(mcck_int_handler)
        jnz     4f
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
        jno     .Lmcck_panic
-4:     SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
+4:     ssm     __LC_PGM_NEW_PSW        # turn dat on, keep irqs off
+       SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
 .Lmcck_skip:
        lghi    %r14,__LC_GPREGS_SAVE_AREA+64
        stmg    %r0,%r7,__PT_R0(%r11)
@@ -1163,7 +1185,6 @@ ENTRY(mcck_int_handler)
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
        la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lgr     %r15,%r1
-       ssm     __LC_PGM_NEW_PSW        # turn dat on, keep irqs off
        TSTMSK  __LC_CPU_FLAGS,_CIF_MCCK_PENDING
        jno     .Lmcck_return
        TRACE_IRQS_OFF
@@ -1182,7 +1203,7 @@ ENTRY(mcck_int_handler)
        lpswe   __LC_RETURN_MCCK_PSW
 
 .Lmcck_panic:
-       lg      %r15,__LC_PANIC_STACK
+       lg      %r15,__LC_NODAT_STACK
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        j       .Lmcck_skip
 
@@ -1193,12 +1214,10 @@ ENTRY(restart_int_handler)
        ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
        stg     %r15,__LC_SAVE_AREA_RESTART
        lg      %r15,__LC_RESTART_STACK
-       aghi    %r15,-__PT_SIZE                 # create pt_regs on stack
-       xc      0(__PT_SIZE,%r15),0(%r15)
-       stmg    %r0,%r14,__PT_R0(%r15)
-       mvc     __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
-       mvc     __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
-       aghi    %r15,-STACK_FRAME_OVERHEAD      # create stack frame on stack
+       xc      STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
+       stmg    %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
+       mvc     STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
+       mvc     STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
        xc      0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
        lg      %r1,__LC_RESTART_FN             # load fn, parm & source cpu
        lg      %r2,__LC_RESTART_DATA
@@ -1216,14 +1235,14 @@ ENTRY(restart_int_handler)
 
        .section .kprobes.text, "ax"
 
-#ifdef CONFIG_CHECK_STACK
+#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
 /*
  * The synchronous or the asynchronous stack overflowed. We are dead.
  * No need to properly save the registers, we are going to panic anyway.
  * Setup a pt_regs so that show_trace can provide a good call trace.
  */
 stack_overflow:
-       lg      %r15,__LC_PANIC_STACK   # change to panic stack
+       lg      %r15,__LC_NODAT_STACK   # change to panic stack
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r7,__PT_R0(%r11)
        stmg    %r8,%r9,__PT_PSW(%r11)
index 472fa2f1a4a593f9ac96dfc99089b5bcab732e51..c3816ae108b085afca4a9326ac2d0eb9c3f3b6a2 100644 (file)
@@ -86,4 +86,7 @@ DECLARE_PER_CPU(u64, mt_cycles[8]);
 void gs_load_bc_cb(struct pt_regs *regs);
 void set_fs_fixup(void);
 
+unsigned long stack_alloc(void);
+void stack_free(unsigned long stack);
+
 #endif /* _ENTRY_H */
index 6d14ad42ba883b0e1c7a9a3065633aa2b2d40240..57bba24b1c278c769552ad313a8192087ab37b57 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 
 __HEAD
 ENTRY(startup_continue)
@@ -35,10 +36,7 @@ ENTRY(startup_continue)
 #
        larl    %r14,init_task
        stg     %r14,__LC_CURRENT
-       larl    %r15,init_thread_union
-       aghi    %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE
-       stg     %r15,__LC_KERNEL_STACK  # set end of kernel stack
-       aghi    %r15,-160
+       larl    %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD
 #
 # Early setup functions that may not rely on an initialized bss section,
 # like moving the initrd. Returns with an initialized bss section.
index 4296d7e61fb6a94d59264d2957c22eb52d3bda79..18a5d6317accd69f2e0c594bfeceed7433fb31ef 100644 (file)
@@ -29,6 +29,8 @@
 #include <asm/checksum.h>
 #include <asm/debug.h>
 #include <asm/os_info.h>
+#include <asm/sections.h>
+#include <asm/boot_data.h>
 #include "entry.h"
 
 #define IPL_PARM_BLOCK_VERSION 0
@@ -117,6 +119,9 @@ static char *dump_type_str(enum dump_type type)
        }
 }
 
+struct ipl_parameter_block __bootdata(early_ipl_block);
+int __bootdata(early_ipl_block_valid);
+
 static int ipl_block_valid;
 static struct ipl_parameter_block ipl_block;
 
@@ -151,6 +156,8 @@ static inline int __diag308(unsigned long subcode, void *addr)
 
 int diag308(unsigned long subcode, void *addr)
 {
+       if (IS_ENABLED(CONFIG_KASAN))
+               __arch_local_irq_stosm(0x04); /* enable DAT */
        diag_stat_inc(DIAG_STAT_X308);
        return __diag308(subcode, addr);
 }
@@ -262,115 +269,16 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
 
 static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
 
-/* VM IPL PARM routines */
-static size_t reipl_get_ascii_vmparm(char *dest, size_t size,
-                                    const struct ipl_parameter_block *ipb)
-{
-       int i;
-       size_t len;
-       char has_lowercase = 0;
-
-       len = 0;
-       if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
-           (ipb->ipl_info.ccw.vm_parm_len > 0)) {
-
-               len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
-               memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
-               /* If at least one character is lowercase, we assume mixed
-                * case; otherwise we convert everything to lowercase.
-                */
-               for (i = 0; i < len; i++)
-                       if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
-                           (dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
-                           (dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
-                               has_lowercase = 1;
-                               break;
-                       }
-               if (!has_lowercase)
-                       EBC_TOLOWER(dest, len);
-               EBCASC(dest, len);
-       }
-       dest[len] = 0;
-
-       return len;
-}
-
-size_t append_ipl_vmparm(char *dest, size_t size)
-{
-       size_t rc;
-
-       rc = 0;
-       if (ipl_block_valid && ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)
-               rc = reipl_get_ascii_vmparm(dest, size, &ipl_block);
-       else
-               dest[0] = 0;
-       return rc;
-}
-
 static ssize_t ipl_vm_parm_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *page)
 {
        char parm[DIAG308_VMPARM_SIZE + 1] = {};
 
-       append_ipl_vmparm(parm, sizeof(parm));
+       if (ipl_block_valid && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
+               ipl_block_get_ascii_vmparm(parm, sizeof(parm), &ipl_block);
        return sprintf(page, "%s\n", parm);
 }
 
-static size_t scpdata_length(const char* buf, size_t count)
-{
-       while (count) {
-               if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
-                       break;
-               count--;
-       }
-       return count;
-}
-
-static size_t reipl_append_ascii_scpdata(char *dest, size_t size,
-                                        const struct ipl_parameter_block *ipb)
-{
-       size_t count;
-       size_t i;
-       int has_lowercase;
-
-       count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
-                                            ipb->ipl_info.fcp.scp_data_len));
-       if (!count)
-               goto out;
-
-       has_lowercase = 0;
-       for (i = 0; i < count; i++) {
-               if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
-                       count = 0;
-                       goto out;
-               }
-               if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
-                       has_lowercase = 1;
-       }
-
-       if (has_lowercase)
-               memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
-       else
-               for (i = 0; i < count; i++)
-                       dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
-out:
-       dest[count] = '\0';
-       return count;
-}
-
-size_t append_ipl_scpdata(char *dest, size_t len)
-{
-       size_t rc;
-
-       rc = 0;
-       if (ipl_block_valid && ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP)
-               rc = reipl_append_ascii_scpdata(dest, len, &ipl_block);
-       else
-               dest[0] = 0;
-       return rc;
-}
-
-
 static struct kobj_attribute sys_ipl_vm_parm_attr =
        __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
 
@@ -564,7 +472,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
 {
        char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
 
-       reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
+       ipl_block_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
        return sprintf(page, "%s\n", vmparm);
 }
 
@@ -1769,11 +1677,10 @@ void __init setup_ipl(void)
 
 void __init ipl_store_parameters(void)
 {
-       int rc;
-
-       rc = diag308(DIAG308_STORE, &ipl_block);
-       if (rc == DIAG308_RC_OK && ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
+       if (early_ipl_block_valid) {
+               memcpy(&ipl_block, &early_ipl_block, sizeof(ipl_block));
                ipl_block_valid = 1;
+       }
 }
 
 void s390_reset_system(void)
diff --git a/arch/s390/kernel/ipl_vmparm.c b/arch/s390/kernel/ipl_vmparm.c
new file mode 100644 (file)
index 0000000..411838c
--- /dev/null
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <asm/ebcdic.h>
+#include <asm/ipl.h>
+
+/* VM IPL PARM routines */
+size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
+                                 const struct ipl_parameter_block *ipb)
+{
+       int i;
+       size_t len;
+       char has_lowercase = 0;
+
+       len = 0;
+       if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
+           (ipb->ipl_info.ccw.vm_parm_len > 0)) {
+
+               len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
+               memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
+               /* If at least one character is lowercase, we assume mixed
+                * case; otherwise we convert everything to lowercase.
+                */
+               for (i = 0; i < len; i++)
+                       if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
+                           (dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
+                           (dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
+                               has_lowercase = 1;
+                               break;
+                       }
+               if (!has_lowercase)
+                       EBC_TOLOWER(dest, len);
+               EBCASC(dest, len);
+       }
+       dest[len] = 0;
+
+       return len;
+}
index 3d17c41074ca55d59fbe156c5967605912af9734..0e8d68bac82c29356886e24b24088d0463c50880 100644 (file)
@@ -172,15 +172,7 @@ void do_softirq_own_stack(void)
        /* Check against async. stack address range. */
        new = S390_lowcore.async_stack;
        if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
-               /* Need to switch to the async. stack. */
-               new -= STACK_FRAME_OVERHEAD;
-               ((struct stack_frame *) new)->back_chain = old;
-               asm volatile("   la    15,0(%0)\n"
-                            "   brasl 14,__do_softirq\n"
-                            "   la    15,0(%1)\n"
-                            : : "a" (new), "a" (old)
-                            : "0", "1", "2", "3", "4", "5", "14",
-                              "cc", "memory" );
+               CALL_ON_STACK(__do_softirq, new, 0);
        } else {
                /* We are already on the async stack. */
                __do_softirq();
index b7020e721ae3182c371bbd33825853a534e41735..cb582649aba6b491a687c758a7460de90a23af3b 100644 (file)
@@ -142,18 +142,27 @@ static noinline void __machine_kdump(void *image)
 }
 #endif
 
+static unsigned long do_start_kdump(unsigned long addr)
+{
+       struct kimage *image = (struct kimage *) addr;
+       int (*start_kdump)(int) = (void *)image->start;
+       int rc;
+
+       __arch_local_irq_stnsm(0xfb); /* disable DAT */
+       rc = start_kdump(0);
+       __arch_local_irq_stosm(0x04); /* enable DAT */
+       return rc;
+}
+
 /*
  * Check if kdump checksums are valid: We call purgatory with parameter "0"
  */
 static bool kdump_csum_valid(struct kimage *image)
 {
 #ifdef CONFIG_CRASH_DUMP
-       int (*start_kdump)(int) = (void *)image->start;
        int rc;
 
-       __arch_local_irq_stnsm(0xfb); /* disable DAT */
-       rc = start_kdump(0);
-       __arch_local_irq_stosm(0x04); /* enable DAT */
+       rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
        return rc == 0;
 #else
        return false;
index d298d3cb46d0e716c7093324f0e04bc6b5f7b593..31889db609e904cbafbc2bdab9aa9500fe793b09 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/kasan.h>
 #include <linux/moduleloader.h>
 #include <linux/bug.h>
 #include <asm/alternative.h>
 
 void *module_alloc(unsigned long size)
 {
+       void *p;
+
        if (PAGE_ALIGN(size) > MODULES_LEN)
                return NULL;
-       return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                                   GFP_KERNEL, PAGE_KERNEL_EXEC,
-                                   0, NUMA_NO_NODE,
-                                   __builtin_return_address(0));
+       p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
+                                GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+                                __builtin_return_address(0));
+       if (p && (kasan_module_alloc(p, size) < 0)) {
+               vfree(p);
+               return NULL;
+       }
+       return p;
 }
 
 void module_arch_freeing_init(struct module *mod)
index 5c53e977be62710ad9ed2001e739ca863e2ca02e..7bf604ff50a1bd082024c85fb5d32e06cca9c4f8 100644 (file)
@@ -2045,14 +2045,17 @@ static int __init init_cpum_sampling_pmu(void)
        }
 
        sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
-       if (!sfdbg)
+       if (!sfdbg) {
                pr_err("Registering for s390dbf failed\n");
+               return -ENOMEM;
+       }
        debug_register_view(sfdbg, &debug_sprintf_view);
 
        err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
                                    cpumf_measurement_alert);
        if (err) {
                pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
+               debug_unregister(sfdbg);
                goto out;
        }
 
@@ -2061,6 +2064,7 @@ static int __init init_cpum_sampling_pmu(void)
                pr_cpumsf_err(RS_INIT_FAILURE_PERF);
                unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
                                        cpumf_measurement_alert);
+               debug_unregister(sfdbg);
                goto out;
        }
 
index c637c12f9e37ccef3c0ab9a35bbe312259f75414..a2e952b662487453b8baedefc53402eb04d70a83 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/crash_dump.h>
 #include <linux/memory.h>
 #include <linux/compat.h>
+#include <linux/start_kernel.h>
 
 #include <asm/ipl.h>
 #include <asm/facility.h>
@@ -69,6 +70,7 @@
 #include <asm/numa.h>
 #include <asm/alternative.h>
 #include <asm/nospec-branch.h>
+#include <asm/mem_detect.h>
 #include "entry.h"
 
 /*
@@ -88,9 +90,11 @@ char elf_platform[ELF_PLATFORM_SIZE];
 
 unsigned long int_hwcap = 0;
 
-int __initdata memory_end_set;
-unsigned long __initdata memory_end;
-unsigned long __initdata max_physmem_end;
+int __bootdata(noexec_disabled);
+int __bootdata(memory_end_set);
+unsigned long __bootdata(memory_end);
+unsigned long __bootdata(max_physmem_end);
+struct mem_detect_info __bootdata(mem_detect);
 
 unsigned long VMALLOC_START;
 EXPORT_SYMBOL(VMALLOC_START);
@@ -283,15 +287,6 @@ void machine_power_off(void)
 void (*pm_power_off)(void) = machine_power_off;
 EXPORT_SYMBOL_GPL(pm_power_off);
 
-static int __init early_parse_mem(char *p)
-{
-       memory_end = memparse(p, &p);
-       memory_end &= PAGE_MASK;
-       memory_end_set = 1;
-       return 0;
-}
-early_param("mem", early_parse_mem);
-
 static int __init parse_vmalloc(char *arg)
 {
        if (!arg)
@@ -303,6 +298,78 @@ early_param("vmalloc", parse_vmalloc);
 
 void *restart_stack __section(.data);
 
+unsigned long stack_alloc(void)
+{
+#ifdef CONFIG_VMAP_STACK
+       return (unsigned long)
+               __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
+                                    VMALLOC_START, VMALLOC_END,
+                                    THREADINFO_GFP,
+                                    PAGE_KERNEL, 0, NUMA_NO_NODE,
+                                    __builtin_return_address(0));
+#else
+       return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+#endif
+}
+
+void stack_free(unsigned long stack)
+{
+#ifdef CONFIG_VMAP_STACK
+       vfree((void *) stack);
+#else
+       free_pages(stack, THREAD_SIZE_ORDER);
+#endif
+}
+
+int __init arch_early_irq_init(void)
+{
+       unsigned long stack;
+
+       stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+       if (!stack)
+               panic("Couldn't allocate async stack");
+       S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
+       return 0;
+}
+
+static int __init async_stack_realloc(void)
+{
+       unsigned long old, new;
+
+       old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
+       new = stack_alloc();
+       if (!new)
+               panic("Couldn't allocate async stack");
+       S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
+       free_pages(old, THREAD_SIZE_ORDER);
+       return 0;
+}
+early_initcall(async_stack_realloc);
+
+void __init arch_call_rest_init(void)
+{
+       struct stack_frame *frame;
+       unsigned long stack;
+
+       stack = stack_alloc();
+       if (!stack)
+               panic("Couldn't allocate kernel stack");
+       current->stack = (void *) stack;
+#ifdef CONFIG_VMAP_STACK
+       current->stack_vm_area = (void *) stack;
+#endif
+       set_task_stack_end_magic(current);
+       stack += STACK_INIT_OFFSET;
+       S390_lowcore.kernel_stack = stack;
+       frame = (struct stack_frame *) stack;
+       memset(frame, 0, sizeof(*frame));
+       /* Branch to rest_init on the new stack, never returns */
+       asm volatile(
+               "       la      15,0(%[_frame])\n"
+               "       jg      rest_init\n"
+               : : [_frame] "a" (frame));
+}
+
 static void __init setup_lowcore(void)
 {
        struct lowcore *lc;
@@ -329,14 +396,8 @@ static void __init setup_lowcore(void)
                PSW_MASK_DAT | PSW_MASK_MCHECK;
        lc->io_new_psw.addr = (unsigned long) io_int_handler;
        lc->clock_comparator = clock_comparator_max;
-       lc->kernel_stack = ((unsigned long) &init_thread_union)
+       lc->nodat_stack = ((unsigned long) &init_thread_union)
                + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-       lc->async_stack = (unsigned long)
-               memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
-               + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-       lc->panic_stack = (unsigned long)
-               memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
-               + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
        lc->current_task = (unsigned long)&init_task;
        lc->lpp = LPP_MAGIC;
        lc->machine_flags = S390_lowcore.machine_flags;
@@ -357,8 +418,12 @@ static void __init setup_lowcore(void)
        lc->last_update_timer = S390_lowcore.last_update_timer;
        lc->last_update_clock = S390_lowcore.last_update_clock;
 
-       restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
-       restart_stack += ASYNC_SIZE;
+       /*
+        * Allocate the global restart stack which is the same for
+        * all CPUs in case *one* of them does a PSW restart.
+        */
+       restart_stack = memblock_virt_alloc(THREAD_SIZE, THREAD_SIZE);
+       restart_stack += STACK_INIT_OFFSET;
 
        /*
         * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
@@ -467,19 +532,26 @@ static void __init setup_memory_end(void)
 {
        unsigned long vmax, vmalloc_size, tmp;
 
-       /* Choose kernel address space layout: 2, 3, or 4 levels. */
+       /* Choose kernel address space layout: 3 or 4 levels. */
        vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
-       tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
-       tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
-       if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
-               vmax = _REGION2_SIZE; /* 3-level kernel page table */
-       else
-               vmax = _REGION1_SIZE; /* 4-level kernel page table */
+       if (IS_ENABLED(CONFIG_KASAN)) {
+               vmax = IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)
+                          ? _REGION1_SIZE
+                          : _REGION2_SIZE;
+       } else {
+               tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
+               tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
+               if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
+                       vmax = _REGION2_SIZE; /* 3-level kernel page table */
+               else
+                       vmax = _REGION1_SIZE; /* 4-level kernel page table */
+       }
+
        /* module area is at the end of the kernel address space. */
        MODULES_END = vmax;
        MODULES_VADDR = MODULES_END - MODULES_LEN;
        VMALLOC_END = MODULES_VADDR;
-       VMALLOC_START = vmax - vmalloc_size;
+       VMALLOC_START = VMALLOC_END - vmalloc_size;
 
        /* Split remaining virtual space between 1:1 mapping & vmemmap array */
        tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
@@ -491,7 +563,12 @@ static void __init setup_memory_end(void)
        vmemmap = (struct page *) tmp;
 
        /* Take care that memory_end is set and <= vmemmap */
-       memory_end = min(memory_end ?: max_physmem_end, tmp);
+       memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap);
+#ifdef CONFIG_KASAN
+       /* fit in kasan shadow memory region between 1:1 and vmemmap */
+       memory_end = min(memory_end, KASAN_SHADOW_START);
+       vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
+#endif
        max_pfn = max_low_pfn = PFN_DOWN(memory_end);
        memblock_remove(memory_end, ULONG_MAX);
 
@@ -532,17 +609,8 @@ static struct notifier_block kdump_mem_nb = {
  */
 static void reserve_memory_end(void)
 {
-#ifdef CONFIG_CRASH_DUMP
-       if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
-           !OLDMEM_BASE && sclp.hsa_size) {
-               memory_end = sclp.hsa_size;
-               memory_end &= PAGE_MASK;
-               memory_end_set = 1;
-       }
-#endif
-       if (!memory_end_set)
-               return;
-       memblock_reserve(memory_end, ULONG_MAX);
+       if (memory_end_set)
+               memblock_reserve(memory_end, ULONG_MAX);
 }
 
 /*
@@ -649,6 +717,62 @@ static void __init reserve_initrd(void)
 #endif
 }
 
+static void __init reserve_mem_detect_info(void)
+{
+       unsigned long start, size;
+
+       get_mem_detect_reserved(&start, &size);
+       if (size)
+               memblock_reserve(start, size);
+}
+
+static void __init free_mem_detect_info(void)
+{
+       unsigned long start, size;
+
+       get_mem_detect_reserved(&start, &size);
+       if (size)
+               memblock_free(start, size);
+}
+
+static void __init memblock_physmem_add(phys_addr_t start, phys_addr_t size)
+{
+       memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
+                    start, start + size - 1);
+       memblock_add_range(&memblock.memory, start, size, 0, 0);
+       memblock_add_range(&memblock.physmem, start, size, 0, 0);
+}
+
+static const char * __init get_mem_info_source(void)
+{
+       switch (mem_detect.info_source) {
+       case MEM_DETECT_SCLP_STOR_INFO:
+               return "sclp storage info";
+       case MEM_DETECT_DIAG260:
+               return "diag260";
+       case MEM_DETECT_SCLP_READ_INFO:
+               return "sclp read info";
+       case MEM_DETECT_BIN_SEARCH:
+               return "binary search";
+       }
+       return "none";
+}
+
+static void __init memblock_add_mem_detect_info(void)
+{
+       unsigned long start, end;
+       int i;
+
+       memblock_dbg("physmem info source: %s (%hhd)\n",
+                    get_mem_info_source(), mem_detect.info_source);
+       /* keep memblock lists close to the kernel */
+       memblock_set_bottom_up(true);
+       for_each_mem_detect_block(i, &start, &end)
+               memblock_physmem_add(start, end - start);
+       memblock_set_bottom_up(false);
+       memblock_dump_all();
+}
+
 /*
  * Check for initrd being in usable memory
  */
@@ -913,11 +1037,13 @@ void __init setup_arch(char **cmdline_p)
        reserve_oldmem();
        reserve_kernel();
        reserve_initrd();
+       reserve_mem_detect_info();
        memblock_allow_resize();
 
        /* Get information about *all* installed memory */
-       detect_memory_memblock();
+       memblock_add_mem_detect_info();
 
+       free_mem_detect_info();
        remove_oldmem();
 
        /*
index 2f8f7d7dd9a8387b2152999e5331f478bebabbbb..1b3188f57b58f6dc1995f3cb9ae1134506450d27 100644 (file)
@@ -186,36 +186,34 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
        pcpu_sigp_retry(pcpu, order, 0);
 }
 
-#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
-#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
-
 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 {
-       unsigned long async_stack, panic_stack;
+       unsigned long async_stack, nodat_stack;
        struct lowcore *lc;
 
        if (pcpu != &pcpu_devices[0]) {
                pcpu->lowcore = (struct lowcore *)
                        __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
-               async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-               panic_stack = __get_free_page(GFP_KERNEL);
-               if (!pcpu->lowcore || !panic_stack || !async_stack)
+               nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+               if (!pcpu->lowcore || !nodat_stack)
                        goto out;
        } else {
-               async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
-               panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
+               nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
        }
+       async_stack = stack_alloc();
+       if (!async_stack)
+               goto out;
        lc = pcpu->lowcore;
        memcpy(lc, &S390_lowcore, 512);
        memset((char *) lc + 512, 0, sizeof(*lc) - 512);
-       lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
-       lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
+       lc->async_stack = async_stack + STACK_INIT_OFFSET;
+       lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
        lc->cpu_nr = cpu;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->spinlock_index = 0;
        lc->br_r1_trampoline = 0x07f1;  /* br %r1 */
        if (nmi_alloc_per_cpu(lc))
-               goto out;
+               goto out_async;
        if (vdso_alloc_per_cpu(lc))
                goto out_mcesa;
        lowcore_ptr[cpu] = lc;
@@ -224,10 +222,11 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 
 out_mcesa:
        nmi_free_per_cpu(lc);
+out_async:
+       stack_free(async_stack);
 out:
        if (pcpu != &pcpu_devices[0]) {
-               free_page(panic_stack);
-               free_pages(async_stack, ASYNC_ORDER);
+               free_pages(nodat_stack, THREAD_SIZE_ORDER);
                free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
        }
        return -ENOMEM;
@@ -237,15 +236,21 @@ out:
 
 static void pcpu_free_lowcore(struct pcpu *pcpu)
 {
+       unsigned long async_stack, nodat_stack, lowcore;
+
+       nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
+       async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
+       lowcore = (unsigned long) pcpu->lowcore;
+
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
        lowcore_ptr[pcpu - pcpu_devices] = NULL;
        vdso_free_per_cpu(pcpu->lowcore);
        nmi_free_per_cpu(pcpu->lowcore);
+       stack_free(async_stack);
        if (pcpu == &pcpu_devices[0])
                return;
-       free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
-       free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
-       free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
+       free_pages(nodat_stack, THREAD_SIZE_ORDER);
+       free_pages(lowcore, LC_ORDER);
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -293,7 +298,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 {
        struct lowcore *lc = pcpu->lowcore;
 
-       lc->restart_stack = lc->kernel_stack;
+       lc->restart_stack = lc->nodat_stack;
        lc->restart_fn = (unsigned long) func;
        lc->restart_data = (unsigned long) data;
        lc->restart_source = -1UL;
@@ -303,15 +308,21 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 /*
  * Call function via PSW restart on pcpu and stop the current cpu.
  */
-static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
-                         void *data, unsigned long stack)
+static void __pcpu_delegate(void (*func)(void*), void *data)
+{
+       func(data);     /* should not return */
+}
+
+static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
+                                               void (*func)(void *),
+                                               void *data, unsigned long stack)
 {
        struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
        unsigned long source_cpu = stap();
 
-       __load_psw_mask(PSW_KERNEL_BITS);
+       __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        if (pcpu->address == source_cpu)
-               func(data);     /* should not return */
+               CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
        /* Stop target cpu (if func returns this stops the current cpu). */
        pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
        /* Restart func on the target cpu and stop the current cpu. */
@@ -372,8 +383,7 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
        pcpu_delegate(&pcpu_devices[0], func, data,
-                     pcpu_devices->lowcore->panic_stack -
-                     PANIC_FRAME_OFFSET + PAGE_SIZE);
+                     pcpu_devices->lowcore->nodat_stack);
 }
 
 int smp_find_processor_id(u16 address)
@@ -791,37 +801,42 @@ void __init smp_detect_cpus(void)
        memblock_free_early((unsigned long)info, sizeof(*info));
 }
 
-/*
- *     Activate a secondary processor.
- */
-static void smp_start_secondary(void *cpuvoid)
+static void smp_init_secondary(void)
 {
        int cpu = smp_processor_id();
 
        S390_lowcore.last_update_clock = get_tod_clock();
-       S390_lowcore.restart_stack = (unsigned long) restart_stack;
-       S390_lowcore.restart_fn = (unsigned long) do_restart;
-       S390_lowcore.restart_data = 0;
-       S390_lowcore.restart_source = -1UL;
        restore_access_regs(S390_lowcore.access_regs_save_area);
-       __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
-       __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        cpu_init();
        preempt_disable();
        init_cpu_timer();
        vtime_init();
        pfault_init();
-       notify_cpu_starting(cpu);
+       notify_cpu_starting(smp_processor_id());
        if (topology_cpu_dedicated(cpu))
                set_cpu_flag(CIF_DEDICATED_CPU);
        else
                clear_cpu_flag(CIF_DEDICATED_CPU);
-       set_cpu_online(cpu, true);
+       set_cpu_online(smp_processor_id(), true);
        inc_irq_stat(CPU_RST);
        local_irq_enable();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
+/*
+ *     Activate a secondary processor.
+ */
+static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
+{
+       S390_lowcore.restart_stack = (unsigned long) restart_stack;
+       S390_lowcore.restart_fn = (unsigned long) do_restart;
+       S390_lowcore.restart_data = 0;
+       S390_lowcore.restart_source = -1UL;
+       __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
+       __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
+       CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
+}
+
 /* Upping and downing of CPUs */
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
index 0859cde36f7520616e62df133c2f88df69a47e27..888cc2f166db726d8e5c967c4082e781cc6137f4 100644 (file)
@@ -183,17 +183,19 @@ static void fill_hdr(struct sthyi_sctns *sctns)
 static void fill_stsi_mac(struct sthyi_sctns *sctns,
                          struct sysinfo_1_1_1 *sysinfo)
 {
+       sclp_ocf_cpc_name_copy(sctns->mac.infmname);
+       if (*(u64 *)sctns->mac.infmname != 0)
+               sctns->mac.infmval1 |= MAC_NAME_VLD;
+
        if (stsi(sysinfo, 1, 1, 1))
                return;
 
-       sclp_ocf_cpc_name_copy(sctns->mac.infmname);
-
        memcpy(sctns->mac.infmtype, sysinfo->type, sizeof(sctns->mac.infmtype));
        memcpy(sctns->mac.infmmanu, sysinfo->manufacturer, sizeof(sctns->mac.infmmanu));
        memcpy(sctns->mac.infmpman, sysinfo->plant, sizeof(sctns->mac.infmpman));
        memcpy(sctns->mac.infmseq, sysinfo->sequence, sizeof(sctns->mac.infmseq));
 
-       sctns->mac.infmval1 |= MAC_ID_VLD | MAC_NAME_VLD;
+       sctns->mac.infmval1 |= MAC_ID_VLD;
 }
 
 static void fill_stsi_par(struct sthyi_sctns *sctns,
index c1a080b11ae97743d2553f8e07a99a956ce22763..537f97fde37f977c4d6b3af0d45aab00926644d5 100644 (file)
 
        .section .text
 ENTRY(swsusp_arch_suspend)
-       stmg    %r6,%r15,__SF_GPRS(%r15)
+       lg      %r1,__LC_NODAT_STACK
+       aghi    %r1,-STACK_FRAME_OVERHEAD
+       stmg    %r6,%r15,__SF_GPRS(%r1)
+       stg     %r15,__SF_BACKCHAIN(%r1)
        lgr     %r1,%r15
-       aghi    %r15,-STACK_FRAME_OVERHEAD
-       stg     %r1,__SF_BACKCHAIN(%r15)
 
        /* Store FPU registers */
        brasl   %r14,save_fpu_regs
@@ -197,9 +198,7 @@ pgm_check_entry:
        brc     2,3b                            /* busy, try again */
 
        /* Suspend CPU not available -> panic */
-       larl    %r15,init_thread_union
-       aghi    %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
-       aghi    %r15,-STACK_FRAME_OVERHEAD
+       larl    %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD
        larl    %r2,.Lpanic_string
        brasl   %r14,sclp_early_printk_force
        larl    %r3,.Ldisabled_wait_31
index 3031cc6dd0ab48de8ebf3797a2bc748995d67c49..ec31b48a42a52798bf31d5b55a7ef566b0ab3766 100644 (file)
@@ -56,7 +56,7 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
        vdso_pagelist = vdso64_pagelist;
        vdso_pages = vdso64_pages;
 #ifdef CONFIG_COMPAT
-       if (is_compat_task()) {
+       if (vma->vm_mm->context.compat_mm) {
                vdso_pagelist = vdso32_pagelist;
                vdso_pages = vdso32_pages;
        }
@@ -77,7 +77,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
 
        vdso_pages = vdso64_pages;
 #ifdef CONFIG_COMPAT
-       if (is_compat_task())
+       if (vma->vm_mm->context.compat_mm)
                vdso_pages = vdso32_pages;
 #endif
 
@@ -224,8 +224,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 
        vdso_pages = vdso64_pages;
 #ifdef CONFIG_COMPAT
-       if (is_compat_task())
+       if (is_compat_task()) {
                vdso_pages = vdso32_pages;
+               mm->context.compat_mm = 1;
+       }
 #endif
        /*
         * vDSO has a problem and was disabled, just don't "enable" it for
index c5c856f320bca47e9b64c0c35726881fb2394d1d..eb8aebea3ea7bd7a6967136b6cb9aee3e25473aa 100644 (file)
@@ -28,9 +28,10 @@ obj-y += vdso32_wrapper.o
 extra-y += vdso32.lds
 CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
 
-# Disable gcov profiling and ubsan for VDSO code
+# Disable gcov profiling, ubsan and kasan for VDSO code
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
 
 # Force dependency (incbin is bad)
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
index a9418bf975db5a32db1c88b07d59f71454ce550b..ada5c11a16e5adb20cfcd7f9908296eb1ac6cbb9 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/dwarf.h>
+#include <asm/ptrace.h>
 
        .text
        .align 4
@@ -18,8 +19,8 @@
 __kernel_clock_gettime:
        CFI_STARTPROC
        ahi     %r15,-16
-       CFI_DEF_CFA_OFFSET 176
-       CFI_VAL_OFFSET 15, -160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+       CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
        basr    %r5,0
 0:     al      %r5,21f-0b(%r5)                 /* get &_vdso_data */
        chi     %r2,__CLOCK_REALTIME_COARSE
@@ -72,13 +73,13 @@ __kernel_clock_gettime:
        st      %r1,4(%r3)                      /* store tp->tv_nsec */
        lhi     %r2,0
        ahi     %r15,16
-       CFI_DEF_CFA_OFFSET 160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
        CFI_RESTORE 15
        br      %r14
 
        /* CLOCK_MONOTONIC_COARSE */
-       CFI_DEF_CFA_OFFSET 176
-       CFI_VAL_OFFSET 15, -160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+       CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
 9:     l       %r4,__VDSO_UPD_COUNT+4(%r5)     /* load update counter */
        tml     %r4,0x0001                      /* pending update ? loop */
        jnz     9b
@@ -158,17 +159,17 @@ __kernel_clock_gettime:
        st      %r1,4(%r3)                      /* store tp->tv_nsec */
        lhi     %r2,0
        ahi     %r15,16
-       CFI_DEF_CFA_OFFSET 160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
        CFI_RESTORE 15
        br      %r14
 
        /* Fallback to system call */
-       CFI_DEF_CFA_OFFSET 176
-       CFI_VAL_OFFSET 15, -160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+       CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
 19:    lhi     %r1,__NR_clock_gettime
        svc     0
        ahi     %r15,16
-       CFI_DEF_CFA_OFFSET 160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
        CFI_RESTORE 15
        br      %r14
        CFI_ENDPROC
index 3c0db0fa6ad90304929e7263ea2ca07bbe077eca..b23063fbc892cd91b1e08fabc52a52b26f968e98 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/dwarf.h>
+#include <asm/ptrace.h>
 
        .text
        .align 4
@@ -19,7 +20,7 @@ __kernel_gettimeofday:
        CFI_STARTPROC
        ahi     %r15,-16
        CFI_ADJUST_CFA_OFFSET 16
-       CFI_VAL_OFFSET 15, -160
+       CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
        basr    %r5,0
 0:     al      %r5,13f-0b(%r5)                 /* get &_vdso_data */
 1:     ltr     %r3,%r3                         /* check if tz is NULL */
index 15b1ceafc4c18fd2cf7fc52920e6c259212b1708..a22b2cf86eec985d7f3bf32da11f5f0c220c28e7 100644 (file)
@@ -28,9 +28,10 @@ obj-y += vdso64_wrapper.o
 extra-y += vdso64.lds
 CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
 
-# Disable gcov profiling and ubsan for VDSO code
+# Disable gcov profiling, ubsan and kasan for VDSO code
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
 
 # Force dependency (incbin is bad)
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
index fac3ab5ec83a9c3a73f9201b5e094309dda3a1a9..9d2ee79b90f250afeedaeb22e6646ef55bfce056 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/dwarf.h>
+#include <asm/ptrace.h>
 
        .text
        .align 4
@@ -18,8 +19,8 @@
 __kernel_clock_gettime:
        CFI_STARTPROC
        aghi    %r15,-16
-       CFI_DEF_CFA_OFFSET 176
-       CFI_VAL_OFFSET 15, -160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+       CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
        larl    %r5,_vdso_data
        cghi    %r2,__CLOCK_REALTIME_COARSE
        je      4f
@@ -56,13 +57,13 @@ __kernel_clock_gettime:
        stg     %r1,8(%r3)                      /* store tp->tv_nsec */
        lghi    %r2,0
        aghi    %r15,16
-       CFI_DEF_CFA_OFFSET 160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
        CFI_RESTORE 15
        br      %r14
 
        /* CLOCK_MONOTONIC_COARSE */
-       CFI_DEF_CFA_OFFSET 176
-       CFI_VAL_OFFSET 15, -160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+       CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
 3:     lg      %r4,__VDSO_UPD_COUNT(%r5)       /* load update counter */
        tmll    %r4,0x0001                      /* pending update ? loop */
        jnz     3b
@@ -115,13 +116,13 @@ __kernel_clock_gettime:
        stg     %r1,8(%r3)                      /* store tp->tv_nsec */
        lghi    %r2,0
        aghi    %r15,16
-       CFI_DEF_CFA_OFFSET 160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
        CFI_RESTORE 15
        br      %r14
 
        /* CPUCLOCK_VIRT for this thread */
-       CFI_DEF_CFA_OFFSET 176
-       CFI_VAL_OFFSET 15, -160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+       CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
 9:     lghi    %r4,0
        icm     %r0,15,__VDSO_ECTG_OK(%r5)
        jz      12f
@@ -142,17 +143,17 @@ __kernel_clock_gettime:
        stg     %r4,8(%r3)
        lghi    %r2,0
        aghi    %r15,16
-       CFI_DEF_CFA_OFFSET 160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
        CFI_RESTORE 15
        br      %r14
 
        /* Fallback to system call */
-       CFI_DEF_CFA_OFFSET 176
-       CFI_VAL_OFFSET 15, -160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+       CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
 12:    lghi    %r1,__NR_clock_gettime
        svc     0
        aghi    %r15,16
-       CFI_DEF_CFA_OFFSET 160
+       CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
        CFI_RESTORE 15
        br      %r14
        CFI_ENDPROC
index 6e1f0b421695ac5c4b4bee16adee3690bf89b705..aebe10dc7c99a13498edd6ffddf99d822cc37d23 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/dwarf.h>
+#include <asm/ptrace.h>
 
        .text
        .align 4
@@ -19,7 +20,7 @@ __kernel_gettimeofday:
        CFI_STARTPROC
        aghi    %r15,-16
        CFI_ADJUST_CFA_OFFSET 16
-       CFI_VAL_OFFSET 15, -160
+       CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
        larl    %r5,_vdso_data
 0:     ltgr    %r3,%r3                         /* check if tz is NULL */
        je      1f
index b43f8d33a3697de32e7c9f7dae4cbf2e7cf3bc46..cc3cbdc93d35b4ef21e728977a380eba82693562 100644 (file)
@@ -16,6 +16,7 @@
 #define RO_AFTER_INIT_DATA
 
 #include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
 
 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
 OUTPUT_ARCH(s390:64-bit)
@@ -134,6 +135,8 @@ SECTIONS
                __nospec_return_end = . ;
        }
 
+       BOOT_DATA
+
        /* early.c uses stsi, which requires page aligned data. */
        . = ALIGN(PAGE_SIZE);
        INIT_DATA_SECTION(0x100)
@@ -146,6 +149,19 @@ SECTIONS
 
        _end = . ;
 
+       /*
+        * Uncompressed image info used by the decompressor;
+        * it should match struct vmlinux_info.
+        */
+       .vmlinux.info 0 : {
+               QUAD(_stext)                                    /* default_lma */
+               QUAD(startup_continue)                          /* entry */
+               QUAD(__bss_start - _stext)                      /* image_size */
+               QUAD(__bss_stop - __bss_start)                  /* bss_size */
+               QUAD(__boot_data_start)                         /* bootdata_off */
+               QUAD(__boot_data_end - __boot_data_start)       /* bootdata_size */
+       }
+
        /* Debugging sections.  */
        STABS_DEBUG
        DWARF_DEBUG
index 57ab40188d4bddab071505f1d5a204a82dca3ce5..5418d10dc2a819b030d01c985a5e8129d5b1e3ce 100644 (file)
@@ -9,5 +9,9 @@ lib-$(CONFIG_SMP) += spinlock.o
 lib-$(CONFIG_KPROBES) += probes.o
 lib-$(CONFIG_UPROBES) += probes.o
 
+# Instrumenting memory accesses to __user data (in a different address space)
+# produces false positives
+KASAN_SANITIZE_uaccess.o := n
+
 chkbss := mem.o
 include $(srctree)/arch/s390/scripts/Makefile.chkbss
index 40c4d59c926e52d8a7f3e7c3870dbb69e8091c8b..53008da0519076fd57f32ca057ac21333cdf51c6 100644 (file)
@@ -14,7 +14,8 @@
 /*
  * void *memmove(void *dest, const void *src, size_t n)
  */
-ENTRY(memmove)
+WEAK(memmove)
+ENTRY(__memmove)
        ltgr    %r4,%r4
        lgr     %r1,%r2
        jz      .Lmemmove_exit
@@ -47,6 +48,7 @@ ENTRY(memmove)
        BR_EX   %r14
 .Lmemmove_mvc:
        mvc     0(1,%r1),0(%r3)
+ENDPROC(__memmove)
 EXPORT_SYMBOL(memmove)
 
 /*
@@ -64,7 +66,8 @@ EXPORT_SYMBOL(memmove)
  *     return __builtin_memset(s, c, n);
  * }
  */
-ENTRY(memset)
+WEAK(memset)
+ENTRY(__memset)
        ltgr    %r4,%r4
        jz      .Lmemset_exit
        ltgr    %r3,%r3
@@ -108,6 +111,7 @@ ENTRY(memset)
        xc      0(1,%r1),0(%r1)
 .Lmemset_mvc:
        mvc     1(1,%r1),0(%r1)
+ENDPROC(__memset)
 EXPORT_SYMBOL(memset)
 
 /*
@@ -115,7 +119,8 @@ EXPORT_SYMBOL(memset)
  *
  * void *memcpy(void *dest, const void *src, size_t n)
  */
-ENTRY(memcpy)
+WEAK(memcpy)
+ENTRY(__memcpy)
        ltgr    %r4,%r4
        jz      .Lmemcpy_exit
        aghi    %r4,-1
@@ -136,6 +141,7 @@ ENTRY(memcpy)
        j       .Lmemcpy_remainder
 .Lmemcpy_mvc:
        mvc     0(1,%r1),0(%r3)
+ENDPROC(__memcpy)
 EXPORT_SYMBOL(memcpy)
 
 /*
index 33fe418506bc7743606a7bf2299e8a715741c09c..f5880bfd1b0cb1bb67ae7e8f2ac68c6404c88224 100644 (file)
@@ -4,10 +4,12 @@
 #
 
 obj-y          := init.o fault.o extmem.o mmap.o vmem.o maccess.o
-obj-y          += page-states.o gup.o pageattr.o mem_detect.o
-obj-y          += pgtable.o pgalloc.o
+obj-y          += page-states.o gup.o pageattr.o pgtable.o pgalloc.o
 
 obj-$(CONFIG_CMM)              += cmm.o
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
 obj-$(CONFIG_S390_PTDUMP)      += dump_pagetables.o
 obj-$(CONFIG_PGSTE)            += gmap.o
+
+KASAN_SANITIZE_kasan_init.o    := n
+obj-$(CONFIG_KASAN)            += kasan_init.o
index 7cdea2ec51e96c2b9ccfc2157dcc49f51ac99ed3..363f6470d742e5ee3a198aab507362207cc5f16b 100644 (file)
@@ -3,6 +3,8 @@
 #include <linux/debugfs.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/kasan.h>
+#include <asm/kasan.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
 
@@ -17,18 +19,26 @@ enum address_markers_idx {
        IDENTITY_NR = 0,
        KERNEL_START_NR,
        KERNEL_END_NR,
+#ifdef CONFIG_KASAN
+       KASAN_SHADOW_START_NR,
+       KASAN_SHADOW_END_NR,
+#endif
        VMEMMAP_NR,
        VMALLOC_NR,
        MODULES_NR,
 };
 
 static struct addr_marker address_markers[] = {
-       [IDENTITY_NR]     = {0, "Identity Mapping"},
-       [KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
-       [KERNEL_END_NR]   = {(unsigned long)_end, "Kernel Image End"},
-       [VMEMMAP_NR]      = {0, "vmemmap Area"},
-       [VMALLOC_NR]      = {0, "vmalloc Area"},
-       [MODULES_NR]      = {0, "Modules Area"},
+       [IDENTITY_NR]           = {0, "Identity Mapping"},
+       [KERNEL_START_NR]       = {(unsigned long)_stext, "Kernel Image Start"},
+       [KERNEL_END_NR]         = {(unsigned long)_end, "Kernel Image End"},
+#ifdef CONFIG_KASAN
+       [KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"},
+       [KASAN_SHADOW_END_NR]   = {KASAN_SHADOW_END, "Kasan Shadow End"},
+#endif
+       [VMEMMAP_NR]            = {0, "vmemmap Area"},
+       [VMALLOC_NR]            = {0, "vmalloc Area"},
+       [MODULES_NR]            = {0, "Modules Area"},
        { -1, NULL }
 };
 
@@ -80,7 +90,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
        } else if (prot != cur || level != st->level ||
                   st->current_address >= st->marker[1].start_address) {
                /* Print the actual finished series */
-               seq_printf(m, "0x%0*lx-0x%0*lx",
+               seq_printf(m, "0x%0*lx-0x%0*lx ",
                           width, st->start_address,
                           width, st->current_address);
                delta = (st->current_address - st->start_address) >> 10;
@@ -90,7 +100,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
                }
                seq_printf(m, "%9lu%c ", delta, *unit);
                print_prot(m, st->current_prot, st->level);
-               if (st->current_address >= st->marker[1].start_address) {
+               while (st->current_address >= st->marker[1].start_address) {
                        st->marker++;
                        seq_printf(m, "---[ %s ]---\n", st->marker->name);
                }
@@ -100,6 +110,17 @@ static void note_page(struct seq_file *m, struct pg_state *st,
        }
 }
 
+#ifdef CONFIG_KASAN
+static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st)
+{
+       unsigned int prot;
+
+       prot = pte_val(*kasan_zero_pte) &
+               (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
+       note_page(m, st, prot, 4);
+}
+#endif
+
 /*
  * The actual page table walker functions. In order to keep the
  * implementation of print_prot() short, we only check and pass
@@ -132,6 +153,13 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
        pmd_t *pmd;
        int i;
 
+#ifdef CONFIG_KASAN
+       if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) {
+               note_kasan_zero_page(m, st);
+               return;
+       }
+#endif
+
        for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
                st->current_address = addr;
                pmd = pmd_offset(pud, addr);
@@ -156,6 +184,13 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
        pud_t *pud;
        int i;
 
+#ifdef CONFIG_KASAN
+       if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) {
+               note_kasan_zero_page(m, st);
+               return;
+       }
+#endif
+
        for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
                st->current_address = addr;
                pud = pud_offset(p4d, addr);
@@ -179,6 +214,13 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
        p4d_t *p4d;
        int i;
 
+#ifdef CONFIG_KASAN
+       if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) {
+               note_kasan_zero_page(m, st);
+               return;
+       }
+#endif
+
        for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
                st->current_address = addr;
                p4d = p4d_offset(pgd, addr);
index 72af23bacbb586ee87dce086fac2e28dd85bc7ec..2b8f32f56e0c20870ef250741562d6d6146d72be 100644 (file)
@@ -636,17 +636,19 @@ struct pfault_refbk {
        u64 reserved;
 } __attribute__ ((packed, aligned(8)));
 
+static struct pfault_refbk pfault_init_refbk = {
+       .refdiagc = 0x258,
+       .reffcode = 0,
+       .refdwlen = 5,
+       .refversn = 2,
+       .refgaddr = __LC_LPP,
+       .refselmk = 1ULL << 48,
+       .refcmpmk = 1ULL << 48,
+       .reserved = __PF_RES_FIELD
+};
+
 int pfault_init(void)
 {
-       struct pfault_refbk refbk = {
-               .refdiagc = 0x258,
-               .reffcode = 0,
-               .refdwlen = 5,
-               .refversn = 2,
-               .refgaddr = __LC_LPP,
-               .refselmk = 1ULL << 48,
-               .refcmpmk = 1ULL << 48,
-               .reserved = __PF_RES_FIELD };
         int rc;
 
        if (pfault_disable)
@@ -658,18 +660,20 @@ int pfault_init(void)
                "1:     la      %0,8\n"
                "2:\n"
                EX_TABLE(0b,1b)
-               : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
+               : "=d" (rc)
+               : "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
         return rc;
 }
 
+static struct pfault_refbk pfault_fini_refbk = {
+       .refdiagc = 0x258,
+       .reffcode = 1,
+       .refdwlen = 5,
+       .refversn = 2,
+};
+
 void pfault_fini(void)
 {
-       struct pfault_refbk refbk = {
-               .refdiagc = 0x258,
-               .reffcode = 1,
-               .refdwlen = 5,
-               .refversn = 2,
-       };
 
        if (pfault_disable)
                return;
@@ -678,7 +682,7 @@ void pfault_fini(void)
                "       diag    %0,0,0x258\n"
                "0:     nopr    %%r7\n"
                EX_TABLE(0b,0b)
-               : : "a" (&refbk), "m" (refbk) : "cc");
+               : : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
 }
 
 static DEFINE_SPINLOCK(pfault_lock);
index 3fa3e532361227ad134f32b46c6c0db58d9d1240..92d7a153e72a0fe8bad784552d7a142a9d03ba69 100644 (file)
@@ -42,6 +42,7 @@
 #include <asm/ctl_reg.h>
 #include <asm/sclp.h>
 #include <asm/set_memory.h>
+#include <asm/kasan.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 
@@ -98,8 +99,9 @@ void __init paging_init(void)
        S390_lowcore.user_asce = S390_lowcore.kernel_asce;
        crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
        vmem_map_init();
+       kasan_copy_shadow(init_mm.pgd);
 
-        /* enable virtual mapping in kernel mode */
+       /* enable virtual mapping in kernel mode */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
@@ -107,6 +109,7 @@ void __init paging_init(void)
        psw_bits(psw).dat = 1;
        psw_bits(psw).as = PSW_BITS_AS_HOME;
        __load_psw_mask(psw.mask);
+       kasan_free_early_identity();
 
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
new file mode 100644 (file)
index 0000000..acb9645
--- /dev/null
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kasan.h>
+#include <linux/sched/task.h>
+#include <linux/memblock.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/kasan.h>
+#include <asm/mem_detect.h>
+#include <asm/processor.h>
+#include <asm/sclp.h>
+#include <asm/facility.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+static unsigned long segment_pos __initdata;
+static unsigned long segment_low __initdata;
+static unsigned long pgalloc_pos __initdata;
+static unsigned long pgalloc_low __initdata;
+static unsigned long pgalloc_freeable __initdata;
+static bool has_edat __initdata;
+static bool has_nx __initdata;
+
+#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
+
+static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+
+static void __init kasan_early_panic(const char *reason)
+{
+       sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
+       sclp_early_printk(reason);
+       disabled_wait(0);
+}
+
+static void * __init kasan_early_alloc_segment(void)
+{
+       segment_pos -= _SEGMENT_SIZE;
+
+       if (segment_pos < segment_low)
+               kasan_early_panic("out of memory during initialisation\n");
+
+       return (void *)segment_pos;
+}
+
+static void * __init kasan_early_alloc_pages(unsigned int order)
+{
+       pgalloc_pos -= (PAGE_SIZE << order);
+
+       if (pgalloc_pos < pgalloc_low)
+               kasan_early_panic("out of memory during initialisation\n");
+
+       return (void *)pgalloc_pos;
+}
+
+static void * __init kasan_early_crst_alloc(unsigned long val)
+{
+       unsigned long *table;
+
+       table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
+       if (table)
+               crst_table_init(table, val);
+       return table;
+}
+
+static pte_t * __init kasan_early_pte_alloc(void)
+{
+       static void *pte_leftover;
+       pte_t *pte;
+
+       BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
+
+       if (!pte_leftover) {
+               pte_leftover = kasan_early_alloc_pages(0);
+               pte = pte_leftover + _PAGE_TABLE_SIZE;
+       } else {
+               pte = pte_leftover;
+               pte_leftover = NULL;
+       }
+       memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
+       return pte;
+}
+
+enum populate_mode {
+       POPULATE_ONE2ONE,
+       POPULATE_MAP,
+       POPULATE_ZERO_SHADOW
+};
+static void __init kasan_early_vmemmap_populate(unsigned long address,
+                                               unsigned long end,
+                                               enum populate_mode mode)
+{
+       unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
+       pgd_t *pg_dir;
+       p4d_t *p4_dir;
+       pud_t *pu_dir;
+       pmd_t *pm_dir;
+       pte_t *pt_dir;
+
+       pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
+       if (!has_nx)
+               pgt_prot_zero &= ~_PAGE_NOEXEC;
+       pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
+       sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);
+
+       while (address < end) {
+               pg_dir = pgd_offset_k(address);
+               if (pgd_none(*pg_dir)) {
+                       if (mode == POPULATE_ZERO_SHADOW &&
+                           IS_ALIGNED(address, PGDIR_SIZE) &&
+                           end - address >= PGDIR_SIZE) {
+                               pgd_populate(&init_mm, pg_dir, kasan_zero_p4d);
+                               address = (address + PGDIR_SIZE) & PGDIR_MASK;
+                               continue;
+                       }
+                       p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
+                       pgd_populate(&init_mm, pg_dir, p4_dir);
+               }
+
+               p4_dir = p4d_offset(pg_dir, address);
+               if (p4d_none(*p4_dir)) {
+                       if (mode == POPULATE_ZERO_SHADOW &&
+                           IS_ALIGNED(address, P4D_SIZE) &&
+                           end - address >= P4D_SIZE) {
+                               p4d_populate(&init_mm, p4_dir, kasan_zero_pud);
+                               address = (address + P4D_SIZE) & P4D_MASK;
+                               continue;
+                       }
+                       pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
+                       p4d_populate(&init_mm, p4_dir, pu_dir);
+               }
+
+               pu_dir = pud_offset(p4_dir, address);
+               if (pud_none(*pu_dir)) {
+                       if (mode == POPULATE_ZERO_SHADOW &&
+                           IS_ALIGNED(address, PUD_SIZE) &&
+                           end - address >= PUD_SIZE) {
+                               pud_populate(&init_mm, pu_dir, kasan_zero_pmd);
+                               address = (address + PUD_SIZE) & PUD_MASK;
+                               continue;
+                       }
+                       pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
+                       pud_populate(&init_mm, pu_dir, pm_dir);
+               }
+
+               pm_dir = pmd_offset(pu_dir, address);
+               if (pmd_none(*pm_dir)) {
+                       if (mode == POPULATE_ZERO_SHADOW &&
+                           IS_ALIGNED(address, PMD_SIZE) &&
+                           end - address >= PMD_SIZE) {
+                               pmd_populate(&init_mm, pm_dir, kasan_zero_pte);
+                               address = (address + PMD_SIZE) & PMD_MASK;
+                               continue;
+                       }
+                       /* the first megabyte of 1:1 is mapped with 4k pages */
+                       if (has_edat && address && end - address >= PMD_SIZE &&
+                           mode != POPULATE_ZERO_SHADOW) {
+                               void *page;
+
+                               if (mode == POPULATE_ONE2ONE) {
+                                       page = (void *)address;
+                               } else {
+                                       page = kasan_early_alloc_segment();
+                                       memset(page, 0, _SEGMENT_SIZE);
+                               }
+                               pmd_val(*pm_dir) = __pa(page) | sgt_prot;
+                               address = (address + PMD_SIZE) & PMD_MASK;
+                               continue;
+                       }
+
+                       pt_dir = kasan_early_pte_alloc();
+                       pmd_populate(&init_mm, pm_dir, pt_dir);
+               } else if (pmd_large(*pm_dir)) {
+                       address = (address + PMD_SIZE) & PMD_MASK;
+                       continue;
+               }
+
+               pt_dir = pte_offset_kernel(pm_dir, address);
+               if (pte_none(*pt_dir)) {
+                       void *page;
+
+                       switch (mode) {
+                       case POPULATE_ONE2ONE:
+                               page = (void *)address;
+                               pte_val(*pt_dir) = __pa(page) | pgt_prot;
+                               break;
+                       case POPULATE_MAP:
+                               page = kasan_early_alloc_pages(0);
+                               memset(page, 0, PAGE_SIZE);
+                               pte_val(*pt_dir) = __pa(page) | pgt_prot;
+                               break;
+                       case POPULATE_ZERO_SHADOW:
+                               page = kasan_zero_page;
+                               pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
+                               break;
+                       }
+               }
+               address += PAGE_SIZE;
+       }
+}
+
+static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
+{
+       unsigned long asce_bits;
+
+       asce_bits = asce_type | _ASCE_TABLE_LENGTH;
+       S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
+       S390_lowcore.user_asce = S390_lowcore.kernel_asce;
+
+       __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+       __ctl_load(S390_lowcore.kernel_asce, 7, 7);
+       __ctl_load(S390_lowcore.kernel_asce, 13, 13);
+}
+
+static void __init kasan_enable_dat(void)
+{
+       psw_t psw;
+
+       psw.mask = __extract_psw();
+       psw_bits(psw).dat = 1;
+       psw_bits(psw).as = PSW_BITS_AS_HOME;
+       __load_psw_mask(psw.mask);
+}
+
+static void __init kasan_early_detect_facilities(void)
+{
+       __stfle(S390_lowcore.stfle_fac_list,
+               ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+       if (test_facility(8)) {
+               has_edat = true;
+               __ctl_set_bit(0, 23);
+       }
+       if (!noexec_disabled && test_facility(130)) {
+               has_nx = true;
+               __ctl_set_bit(0, 20);
+       }
+}
+
+static unsigned long __init get_mem_detect_end(void)
+{
+       unsigned long start;
+       unsigned long end;
+
+       if (mem_detect.count) {
+               __get_mem_detect_block(mem_detect.count - 1, &start, &end);
+               return end;
+       }
+       return 0;
+}
+
+void __init kasan_early_init(void)
+{
+       unsigned long untracked_mem_end;
+       unsigned long shadow_alloc_size;
+       unsigned long initrd_end;
+       unsigned long asce_type;
+       unsigned long memsize;
+       unsigned long vmax;
+       unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
+       pte_t pte_z;
+       pmd_t pmd_z = __pmd(__pa(kasan_zero_pte) | _SEGMENT_ENTRY);
+       pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY);
+       p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY);
+
+       kasan_early_detect_facilities();
+       if (!has_nx)
+               pgt_prot &= ~_PAGE_NOEXEC;
+       pte_z = __pte(__pa(kasan_zero_page) | pgt_prot);
+
+       memsize = get_mem_detect_end();
+       if (!memsize)
+               kasan_early_panic("cannot detect physical memory size\n");
+       /* respect mem= cmdline parameter */
+       if (memory_end_set && memsize > memory_end)
+               memsize = memory_end;
+       memsize = min(memsize, KASAN_SHADOW_START);
+
+       if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
+               /* 4 level paging */
+               BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
+               BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
+               crst_table_init((unsigned long *)early_pg_dir,
+                               _REGION2_ENTRY_EMPTY);
+               untracked_mem_end = vmax = _REGION1_SIZE;
+               asce_type = _ASCE_TYPE_REGION2;
+       } else {
+               /* 3 level paging */
+               BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
+               BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
+               crst_table_init((unsigned long *)early_pg_dir,
+                               _REGION3_ENTRY_EMPTY);
+               untracked_mem_end = vmax = _REGION2_SIZE;
+               asce_type = _ASCE_TYPE_REGION3;
+       }
+
+       /* init kasan zero shadow */
+       crst_table_init((unsigned long *)kasan_zero_p4d, p4d_val(p4d_z));
+       crst_table_init((unsigned long *)kasan_zero_pud, pud_val(pud_z));
+       crst_table_init((unsigned long *)kasan_zero_pmd, pmd_val(pmd_z));
+       memset64((u64 *)kasan_zero_pte, pte_val(pte_z), PTRS_PER_PTE);
+
+       shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
+       pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
+               initrd_end =
+                   round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
+               pgalloc_low = max(pgalloc_low, initrd_end);
+       }
+
+       if (pgalloc_low + shadow_alloc_size > memsize)
+               kasan_early_panic("out of memory during initialisation\n");
+
+       if (has_edat) {
+               segment_pos = round_down(memsize, _SEGMENT_SIZE);
+               segment_low = segment_pos - shadow_alloc_size;
+               pgalloc_pos = segment_low;
+       } else {
+               pgalloc_pos = memsize;
+       }
+       init_mm.pgd = early_pg_dir;
+       /*
+        * Current memory layout:
+        * +- 0 -------------+   +- shadow start -+
+        * | 1:1 ram mapping |  /| 1/8 ram        |
+        * +- end of ram ----+ / +----------------+
+        * | ... gap ...     |/  |      kasan     |
+        * +- shadow start --+   |      zero      |
+        * | 1/8 addr space  |   |      page      |
+        * +- shadow end    -+   |      mapping   |
+        * | ... gap ...     |\  |    (untracked) |
+        * +- modules vaddr -+ \ +----------------+
+        * | 2Gb             |  \|      unmapped  | allocated per module
+        * +-----------------+   +- shadow end ---+
+        */
+       /* populate kasan shadow (for identity mapping and zero page mapping) */
+       kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
+       if (IS_ENABLED(CONFIG_MODULES))
+               untracked_mem_end = vmax - MODULES_LEN;
+       kasan_early_vmemmap_populate(__sha(max_physmem_end),
+                                    __sha(untracked_mem_end),
+                                    POPULATE_ZERO_SHADOW);
+       /* memory allocated for identity mapping structs will be freed later */
+       pgalloc_freeable = pgalloc_pos;
+       /* populate identity mapping */
+       kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
+       kasan_set_pgd(early_pg_dir, asce_type);
+       kasan_enable_dat();
+       /* enable kasan */
+       init_task.kasan_depth = 0;
+       memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
+       sclp_early_printk("KernelAddressSanitizer initialized\n");
+}
+
+void __init kasan_copy_shadow(pgd_t *pg_dir)
+{
+       /*
+        * At this point we are still running on early pages setup early_pg_dir,
+        * while swapper_pg_dir has just been initialized with identity mapping.
+        * Carry over shadow memory region from early_pg_dir to swapper_pg_dir.
+        */
+
+       pgd_t *pg_dir_src;
+       pgd_t *pg_dir_dst;
+       p4d_t *p4_dir_src;
+       p4d_t *p4_dir_dst;
+       pud_t *pu_dir_src;
+       pud_t *pu_dir_dst;
+
+       pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
+       pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
+       p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
+       p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
+       if (!p4d_folded(*p4_dir_src)) {
+               /* 4 level paging */
+               memcpy(p4_dir_dst, p4_dir_src,
+                      (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
+               return;
+       }
+       /* 3 level paging */
+       pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
+       pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
+       memcpy(pu_dir_dst, pu_dir_src,
+              (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
+}
+
+void __init kasan_free_early_identity(void)
+{
+       memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
+}
index 7be06475809b875fd513173c617fe6683fb1f37d..97b3ee53852b36b34fb1653373c57c2ff3cedb24 100644 (file)
@@ -89,10 +89,8 @@ static int __memcpy_real(void *dest, void *src, size_t count)
        return rc;
 }
 
-/*
- * Copy memory in real mode (kernel to kernel)
- */
-int memcpy_real(void *dest, void *src, size_t count)
+static unsigned long _memcpy_real(unsigned long dest, unsigned long src,
+                                 unsigned long count)
 {
        int irqs_disabled, rc;
        unsigned long flags;
@@ -103,13 +101,30 @@ int memcpy_real(void *dest, void *src, size_t count)
        irqs_disabled = arch_irqs_disabled_flags(flags);
        if (!irqs_disabled)
                trace_hardirqs_off();
-       rc = __memcpy_real(dest, src, count);
+       rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
        if (!irqs_disabled)
                trace_hardirqs_on();
        __arch_local_irq_ssm(flags);
        return rc;
 }
 
+/*
+ * Copy memory in real mode (kernel to kernel)
+ */
+int memcpy_real(void *dest, void *src, size_t count)
+{
+       if (S390_lowcore.nodat_stack != 0)
+               return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack,
+                                    3, dest, src, count);
+       /*
+        * This is a really early memcpy_real call, the stacks are
+        * not set up yet. Just call _memcpy_real on the early boot
+        * stack
+        */
+       return _memcpy_real((unsigned long) dest,(unsigned long) src,
+                           (unsigned long) count);
+}
+
 /*
  * Copy memory in absolute mode (kernel to kernel)
  */
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
deleted file mode 100644 (file)
index 21f6c82..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright IBM Corp. 2008, 2009
- *
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/memblock.h>
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <asm/ipl.h>
-#include <asm/sclp.h>
-#include <asm/setup.h>
-
-#define CHUNK_READ_WRITE 0
-#define CHUNK_READ_ONLY  1
-
-static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
-{
-       memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
-                    start, start + size - 1);
-       memblock_add_range(&memblock.memory, start, size, 0, 0);
-       memblock_add_range(&memblock.physmem, start, size, 0, 0);
-}
-
-void __init detect_memory_memblock(void)
-{
-       unsigned long memsize, rnmax, rzm, addr, size;
-       int type;
-
-       rzm = sclp.rzm;
-       rnmax = sclp.rnmax;
-       memsize = rzm * rnmax;
-       if (!rzm)
-               rzm = 1UL << 17;
-       max_physmem_end = memsize;
-       addr = 0;
-       /* keep memblock lists close to the kernel */
-       memblock_set_bottom_up(true);
-       do {
-               size = 0;
-               /* assume lowcore is writable */
-               type = addr ? tprot(addr) : CHUNK_READ_WRITE;
-               do {
-                       size += rzm;
-                       if (max_physmem_end && addr + size >= max_physmem_end)
-                               break;
-               } while (type == tprot(addr + size));
-               if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
-                       if (max_physmem_end && (addr + size > max_physmem_end))
-                               size = max_physmem_end - addr;
-                       memblock_physmem_add(addr, size);
-               }
-               addr += size;
-       } while (addr < max_physmem_end);
-       memblock_set_bottom_up(false);
-       if (!max_physmem_end)
-               max_physmem_end = memblock_end_of_DRAM();
-       memblock_dump_all();
-}
index 2e3707b12eddbb92f02a27632ced337c7a889200..5a10ce34b95d10c9f1d7762761dd5507c9d7b2e0 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
 #include <asm/sigp.h>
+#include <asm/ptrace.h>
 
 /* The purgatory is the code running between two kernels. It's main purpose
  * is to verify that the next kernel was not corrupted after load and to
@@ -88,8 +89,7 @@ ENTRY(purgatory_start)
 .base_crash:
 
        /* Setup stack */
-       larl    %r15,purgatory_end
-       aghi    %r15,-160
+       larl    %r15,purgatory_end-STACK_FRAME_OVERHEAD
 
        /* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called
         * directly with a flag passed in %r2 whether the purgatory shall do
index a8c4ce07fc9d635661652d1b96675d3cabbf2343..caa98a7fe3923bb752cec40f5459633627cc42fa 100644 (file)
@@ -73,6 +73,17 @@ config ZCRYPT
          + Crypto Express 2,3,4 or 5 Accelerator (CEXxA)
          + Crypto Express 4 or 5 EP11 Coprocessor (CEXxP)
 
+config ZCRYPT_MULTIDEVNODES
+       bool "Support for multiple zcrypt device nodes"
+       default y
+       depends on S390
+       depends on ZCRYPT
+       help
+         With this option enabled the zcrypt device driver can
+         provide multiple devices nodes in /dev. Each device
+         node can get customized to limit access and narrow
+         down the use of the available crypto hardware.
+
 config PKEY
        tristate "Kernel API for protected key handling"
        depends on S390
index a23e7d394a0ad1f1a74a241f1676accb9a57901a..5e9ebdb0594c537e75fb07b965b7940b6fbc5aab 100644 (file)
@@ -3309,10 +3309,8 @@ dasd_exit(void)
        dasd_proc_exit();
 #endif
        dasd_eer_exit();
-        if (dasd_page_cache != NULL) {
-               kmem_cache_destroy(dasd_page_cache);
-               dasd_page_cache = NULL;
-       }
+       kmem_cache_destroy(dasd_page_cache);
+       dasd_page_cache = NULL;
        dasd_gendisk_exit();
        dasd_devmap_exit();
        if (dasd_debug_area != NULL) {
index c6ab34f94b1b54c96d704abf3f19e6aa16eaca78..3072b89785ddf7329165d6f2c8e678821f79e1a4 100644 (file)
@@ -11,6 +11,7 @@ endif
 GCOV_PROFILE_sclp_early_core.o         := n
 KCOV_INSTRUMENT_sclp_early_core.o      := n
 UBSAN_SANITIZE_sclp_early_core.o       := n
+KASAN_SANITIZE_sclp_early_core.o       := n
 
 CFLAGS_sclp_early_core.o               += -D__NO_FORTIFY
 
index 4f1a69c9d81d621e159ee8a103c4ded636d596cb..fdc0c0b7a6f58ec8b62ef131b2530aac162f1fbe 100644 (file)
@@ -58,22 +58,31 @@ struct mon_private {
 
 static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
 {
-       struct appldata_product_id id;
+       struct appldata_parameter_list *parm_list;
+       struct appldata_product_id *id;
        int rc;
 
-       memcpy(id.prod_nr, "LNXAPPL", 7);
-       id.prod_fn = myhdr->applid;
-       id.record_nr = myhdr->record_num;
-       id.version_nr = myhdr->version;
-       id.release_nr = myhdr->release;
-       id.mod_lvl = myhdr->mod_level;
-       rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
+       id = kmalloc(sizeof(*id), GFP_KERNEL);
+       parm_list = kmalloc(sizeof(*parm_list), GFP_KERNEL);
+       rc = -ENOMEM;
+       if (!id || !parm_list)
+               goto out;
+       memcpy(id->prod_nr, "LNXAPPL", 7);
+       id->prod_fn = myhdr->applid;
+       id->record_nr = myhdr->record_num;
+       id->version_nr = myhdr->version;
+       id->release_nr = myhdr->release;
+       id->mod_lvl = myhdr->mod_level;
+       rc = appldata_asm(parm_list, id, fcn,
+                         (void *) buffer, myhdr->datalen);
        if (rc <= 0)
-               return rc;
+               goto out;
        pr_err("Writing monitor data failed with rc=%i\n", rc);
-       if (rc == 5)
-               return -EPERM;
-       return -EINVAL;
+       rc = (rc == 5) ? -EPERM : -EINVAL;
+out:
+       kfree(id);
+       kfree(parm_list);
+       return rc;
 }
 
 static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
index 1fe4918088e71d86e1b2e0dc2547ac4328af3cce..b3fcc24b11826eb15a11f0d0f8e457ad192bcd28 100644 (file)
@@ -63,6 +63,9 @@
 typedef unsigned int sclp_cmdw_t;
 
 #define SCLP_CMDW_READ_CPU_INFO                0x00010001
+#define SCLP_CMDW_READ_SCP_INFO                0x00020001
+#define SCLP_CMDW_READ_STORAGE_INFO    0x00040001
+#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
 #define SCLP_CMDW_READ_EVENT_DATA      0x00770005
 #define SCLP_CMDW_WRITE_EVENT_DATA     0x00760005
 #define SCLP_CMDW_WRITE_EVENT_MASK     0x00780005
@@ -156,6 +159,54 @@ struct read_cpu_info_sccb {
        u8      reserved[4096 - 16];
 } __attribute__((packed, aligned(PAGE_SIZE)));
 
+struct read_info_sccb {
+       struct  sccb_header header;     /* 0-7 */
+       u16     rnmax;                  /* 8-9 */
+       u8      rnsize;                 /* 10 */
+       u8      _pad_11[16 - 11];       /* 11-15 */
+       u16     ncpurl;                 /* 16-17 */
+       u16     cpuoff;                 /* 18-19 */
+       u8      _pad_20[24 - 20];       /* 20-23 */
+       u8      loadparm[8];            /* 24-31 */
+       u8      _pad_32[42 - 32];       /* 32-41 */
+       u8      fac42;                  /* 42 */
+       u8      fac43;                  /* 43 */
+       u8      _pad_44[48 - 44];       /* 44-47 */
+       u64     facilities;             /* 48-55 */
+       u8      _pad_56[66 - 56];       /* 56-65 */
+       u8      fac66;                  /* 66 */
+       u8      _pad_67[76 - 67];       /* 67-83 */
+       u32     ibc;                    /* 76-79 */
+       u8      _pad80[84 - 80];        /* 80-83 */
+       u8      fac84;                  /* 84 */
+       u8      fac85;                  /* 85 */
+       u8      _pad_86[91 - 86];       /* 86-90 */
+       u8      fac91;                  /* 91 */
+       u8      _pad_92[98 - 92];       /* 92-97 */
+       u8      fac98;                  /* 98 */
+       u8      hamaxpow;               /* 99 */
+       u32     rnsize2;                /* 100-103 */
+       u64     rnmax2;                 /* 104-111 */
+       u32     hsa_size;               /* 112-115 */
+       u8      fac116;                 /* 116 */
+       u8      fac117;                 /* 117 */
+       u8      fac118;                 /* 118 */
+       u8      fac119;                 /* 119 */
+       u16     hcpua;                  /* 120-121 */
+       u8      _pad_122[124 - 122];    /* 122-123 */
+       u32     hmfai;                  /* 124-127 */
+       u8      _pad_128[4096 - 128];   /* 128-4095 */
+} __packed __aligned(PAGE_SIZE);
+
+struct read_storage_sccb {
+       struct sccb_header header;
+       u16 max_id;
+       u16 assigned;
+       u16 standby;
+       u16 :16;
+       u32 entries[0];
+} __packed;
+
 static inline void sclp_fill_core_info(struct sclp_core_info *info,
                                       struct read_cpu_info_sccb *sccb)
 {
@@ -275,6 +326,7 @@ unsigned int sclp_early_con_check_vt220(struct init_sccb *sccb);
 int sclp_early_set_event_mask(struct init_sccb *sccb,
                              sccb_mask_t receive_mask,
                              sccb_mask_t send_mask);
+int sclp_early_get_info(struct read_info_sccb *info);
 
 /* useful inlines */
 
index d7686a68c09306b9442de1356fd676a8f966aad3..37d42de0607959e31e905ae486a415d63a5b4c90 100644 (file)
@@ -460,15 +460,6 @@ static int sclp_mem_freeze(struct device *dev)
        return -EPERM;
 }
 
-struct read_storage_sccb {
-       struct sccb_header header;
-       u16 max_id;
-       u16 assigned;
-       u16 standby;
-       u16 :16;
-       u32 entries[0];
-} __packed;
-
 static const struct dev_pm_ops sclp_mem_pm_ops = {
        .freeze         = sclp_mem_freeze,
 };
@@ -498,7 +489,7 @@ static int __init sclp_detect_standby_memory(void)
        for (id = 0; id <= sclp_max_storage_id; id++) {
                memset(sccb, 0, PAGE_SIZE);
                sccb->header.length = PAGE_SIZE;
-               rc = sclp_sync_request(0x00040001 | id << 8, sccb);
+               rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
                if (rc)
                        goto out;
                switch (sccb->header.response_code) {
index 9a74abb9224d02fc47041e98a6600265a3bac881..e792cee3b51c5d24089d39b63a88b67bad204fcc 100644 (file)
 #include "sclp_sdias.h"
 #include "sclp.h"
 
-#define SCLP_CMDW_READ_SCP_INFO                0x00020001
-#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
-
-struct read_info_sccb {
-       struct  sccb_header header;     /* 0-7 */
-       u16     rnmax;                  /* 8-9 */
-       u8      rnsize;                 /* 10 */
-       u8      _pad_11[16 - 11];       /* 11-15 */
-       u16     ncpurl;                 /* 16-17 */
-       u16     cpuoff;                 /* 18-19 */
-       u8      _pad_20[24 - 20];       /* 20-23 */
-       u8      loadparm[8];            /* 24-31 */
-       u8      _pad_32[42 - 32];       /* 32-41 */
-       u8      fac42;                  /* 42 */
-       u8      fac43;                  /* 43 */
-       u8      _pad_44[48 - 44];       /* 44-47 */
-       u64     facilities;             /* 48-55 */
-       u8      _pad_56[66 - 56];       /* 56-65 */
-       u8      fac66;                  /* 66 */
-       u8      _pad_67[76 - 67];       /* 67-83 */
-       u32     ibc;                    /* 76-79 */
-       u8      _pad80[84 - 80];        /* 80-83 */
-       u8      fac84;                  /* 84 */
-       u8      fac85;                  /* 85 */
-       u8      _pad_86[91 - 86];       /* 86-90 */
-       u8      fac91;                  /* 91 */
-       u8      _pad_92[98 - 92];       /* 92-97 */
-       u8      fac98;                  /* 98 */
-       u8      hamaxpow;               /* 99 */
-       u32     rnsize2;                /* 100-103 */
-       u64     rnmax2;                 /* 104-111 */
-       u8      _pad_112[116 - 112];    /* 112-115 */
-       u8      fac116;                 /* 116 */
-       u8      fac117;                 /* 117 */
-       u8      fac118;                 /* 118 */
-       u8      fac119;                 /* 119 */
-       u16     hcpua;                  /* 120-121 */
-       u8      _pad_122[124 - 122];    /* 122-123 */
-       u32     hmfai;                  /* 124-127 */
-       u8      _pad_128[4096 - 128];   /* 128-4095 */
-} __packed __aligned(PAGE_SIZE);
-
 static struct sclp_ipl_info sclp_ipl_info;
 
 struct sclp_info sclp;
 EXPORT_SYMBOL(sclp);
 
-static int __init sclp_early_read_info(struct read_info_sccb *sccb)
-{
-       int i;
-       sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
-                                 SCLP_CMDW_READ_SCP_INFO};
-
-       for (i = 0; i < ARRAY_SIZE(commands); i++) {
-               memset(sccb, 0, sizeof(*sccb));
-               sccb->header.length = sizeof(*sccb);
-               sccb->header.function_code = 0x80;
-               sccb->header.control_mask[2] = 0x80;
-               if (sclp_early_cmd(commands[i], sccb))
-                       break;
-               if (sccb->header.response_code == 0x10)
-                       return 0;
-               if (sccb->header.response_code != 0x1f0)
-                       break;
-       }
-       return -EIO;
-}
-
 static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
 {
        struct sclp_core_entry *cpue;
        u16 boot_cpu_address, cpu;
 
-       if (sclp_early_read_info(sccb))
+       if (sclp_early_get_info(sccb))
                return;
 
        sclp.facilities = sccb->facilities;
@@ -147,6 +84,8 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
                sclp_ipl_info.has_dump = 1;
        memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
 
+       if (sccb->hsa_size)
+               sclp.hsa_size = (sccb->hsa_size - 1) * PAGE_SIZE;
        sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
        sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
        sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
@@ -189,61 +128,6 @@ int __init sclp_early_get_core_info(struct sclp_core_info *info)
        return 0;
 }
 
-static long __init sclp_early_hsa_size_init(struct sdias_sccb *sccb)
-{
-       memset(sccb, 0, sizeof(*sccb));
-       sccb->hdr.length = sizeof(*sccb);
-       sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
-       sccb->evbuf.hdr.type = EVTYP_SDIAS;
-       sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
-       sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
-       sccb->evbuf.event_id = 4712;
-       sccb->evbuf.dbs = 1;
-       if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
-               return -EIO;
-       if (sccb->hdr.response_code != 0x20)
-               return -EIO;
-       if (sccb->evbuf.blk_cnt == 0)
-               return 0;
-       return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
-}
-
-static long __init sclp_early_hsa_copy_wait(struct sdias_sccb *sccb)
-{
-       memset(sccb, 0, PAGE_SIZE);
-       sccb->hdr.length = PAGE_SIZE;
-       if (sclp_early_cmd(SCLP_CMDW_READ_EVENT_DATA, sccb))
-               return -EIO;
-       if ((sccb->hdr.response_code != 0x20) && (sccb->hdr.response_code != 0x220))
-               return -EIO;
-       if (sccb->evbuf.blk_cnt == 0)
-               return 0;
-       return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
-}
-
-static void __init sclp_early_hsa_size_detect(void *sccb)
-{
-       unsigned long flags;
-       long size = -EIO;
-
-       raw_local_irq_save(flags);
-       if (sclp_early_set_event_mask(sccb, EVTYP_SDIAS_MASK, EVTYP_SDIAS_MASK))
-               goto out;
-       size = sclp_early_hsa_size_init(sccb);
-       /* First check for synchronous response (LPAR) */
-       if (size)
-               goto out_mask;
-       if (!(S390_lowcore.ext_params & 1))
-               sclp_early_wait_irq();
-       size = sclp_early_hsa_copy_wait(sccb);
-out_mask:
-       sclp_early_set_event_mask(sccb, 0, 0);
-out:
-       raw_local_irq_restore(flags);
-       if (size > 0)
-               sclp.hsa_size = size;
-}
-
 static void __init sclp_early_console_detect(struct init_sccb *sccb)
 {
        if (sccb->header.response_code != 0x20)
@@ -262,7 +146,6 @@ void __init sclp_early_detect(void)
 
        sclp_early_facilities_detect(sccb);
        sclp_early_init_core_info(sccb);
-       sclp_early_hsa_size_detect(sccb);
 
        /*
         * Turn off SCLP event notifications.  Also save remote masks in the
index 2f61f5579aa54708213f3226c940ef65c462b72b..387c114ded3f776c843674ed1bb651bfe3233e8a 100644 (file)
@@ -9,9 +9,13 @@
 #include <asm/lowcore.h>
 #include <asm/ebcdic.h>
 #include <asm/irq.h>
+#include <asm/sections.h>
+#include <asm/mem_detect.h>
 #include "sclp.h"
 #include "sclp_rw.h"
 
+static struct read_info_sccb __bootdata(sclp_info_sccb);
+static int __bootdata(sclp_info_sccb_valid);
 char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data);
 int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
 /*
@@ -234,3 +238,115 @@ void sclp_early_printk_force(const char *str)
 {
        __sclp_early_printk(str, strlen(str), 1);
 }
+
+int __init sclp_early_read_info(void)
+{
+       int i;
+       struct read_info_sccb *sccb = &sclp_info_sccb;
+       sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
+                                 SCLP_CMDW_READ_SCP_INFO};
+
+       for (i = 0; i < ARRAY_SIZE(commands); i++) {
+               memset(sccb, 0, sizeof(*sccb));
+               sccb->header.length = sizeof(*sccb);
+               sccb->header.function_code = 0x80;
+               sccb->header.control_mask[2] = 0x80;
+               if (sclp_early_cmd(commands[i], sccb))
+                       break;
+               if (sccb->header.response_code == 0x10) {
+                       sclp_info_sccb_valid = 1;
+                       return 0;
+               }
+               if (sccb->header.response_code != 0x1f0)
+                       break;
+       }
+       return -EIO;
+}
+
+int __init sclp_early_get_info(struct read_info_sccb *info)
+{
+       if (!sclp_info_sccb_valid)
+               return -EIO;
+
+       *info = sclp_info_sccb;
+       return 0;
+}
+
+int __init sclp_early_get_memsize(unsigned long *mem)
+{
+       unsigned long rnmax;
+       unsigned long rnsize;
+       struct read_info_sccb *sccb = &sclp_info_sccb;
+
+       if (!sclp_info_sccb_valid)
+               return -EIO;
+
+       rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
+       rnsize = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
+       rnsize <<= 20;
+       *mem = rnsize * rnmax;
+       return 0;
+}
+
+int __init sclp_early_get_hsa_size(unsigned long *hsa_size)
+{
+       if (!sclp_info_sccb_valid)
+               return -EIO;
+
+       *hsa_size = 0;
+       if (sclp_info_sccb.hsa_size)
+               *hsa_size = (sclp_info_sccb.hsa_size - 1) * PAGE_SIZE;
+       return 0;
+}
+
+#define SCLP_STORAGE_INFO_FACILITY     0x0000400000000000UL
+
+void __weak __init add_mem_detect_block(u64 start, u64 end) {}
+int __init sclp_early_read_storage_info(void)
+{
+       struct read_storage_sccb *sccb = (struct read_storage_sccb *)&sclp_early_sccb;
+       int rc, id, max_id = 0;
+       unsigned long rn, rzm;
+       sclp_cmdw_t command;
+       u16 sn;
+
+       if (!sclp_info_sccb_valid)
+               return -EIO;
+
+       if (!(sclp_info_sccb.facilities & SCLP_STORAGE_INFO_FACILITY))
+               return -EOPNOTSUPP;
+
+       rzm = sclp_info_sccb.rnsize ?: sclp_info_sccb.rnsize2;
+       rzm <<= 20;
+
+       for (id = 0; id <= max_id; id++) {
+               memset(sclp_early_sccb, 0, sizeof(sclp_early_sccb));
+               sccb->header.length = sizeof(sclp_early_sccb);
+               command = SCLP_CMDW_READ_STORAGE_INFO | (id << 8);
+               rc = sclp_early_cmd(command, sccb);
+               if (rc)
+                       goto fail;
+
+               max_id = sccb->max_id;
+               switch (sccb->header.response_code) {
+               case 0x0010:
+                       for (sn = 0; sn < sccb->assigned; sn++) {
+                               if (!sccb->entries[sn])
+                                       continue;
+                               rn = sccb->entries[sn] >> 16;
+                               add_mem_detect_block((rn - 1) * rzm, rn * rzm);
+                       }
+                       break;
+               case 0x0310:
+               case 0x0410:
+                       break;
+               default:
+                       goto fail;
+               }
+       }
+
+       return 0;
+fail:
+       mem_detect.count = 0;
+       return -EIO;
+}
index e7c84a4e5eb5833c7a83e9c4dc8721238750f786..995e9196852efd92ee10649dab8d40384ef947f2 100644 (file)
@@ -24,6 +24,7 @@
 
 #define SCLP_ATYPE_PCI                         2
 
+#define SCLP_ERRNOTIFY_AQ_RESET                        0
 #define SCLP_ERRNOTIFY_AQ_REPAIR               1
 #define SCLP_ERRNOTIFY_AQ_INFO_LOG             2
 
@@ -111,9 +112,14 @@ static int sclp_pci_check_report(struct zpci_report_error_header *report)
        if (report->version != 1)
                return -EINVAL;
 
-       if (report->action != SCLP_ERRNOTIFY_AQ_REPAIR &&
-           report->action != SCLP_ERRNOTIFY_AQ_INFO_LOG)
+       switch (report->action) {
+       case SCLP_ERRNOTIFY_AQ_RESET:
+       case SCLP_ERRNOTIFY_AQ_REPAIR:
+       case SCLP_ERRNOTIFY_AQ_INFO_LOG:
+               break;
+       default:
                return -EINVAL;
+       }
 
        if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb)))
                return -EINVAL;
index cdcde18e72203eeeb07e975adb5aaa8ca0258199..4554cdf4d6bdea8a1b5417c96c5a247c11fff14a 100644 (file)
@@ -971,7 +971,7 @@ tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb)
                snprintf(exception, BUFSIZE, "Data degraded");
                break;
        case 0x03:
-               snprintf(exception, BUFSIZE, "Data degraded in partion %i",
+               snprintf(exception, BUFSIZE, "Data degraded in partition %i",
                        sense->fmt.f70.mp);
                break;
        case 0x04:
index 069b9ef08206b1bc7168bdbfd4dd3de2ba026e6c..58333cb4503f4db1e3c76aa188e820997b2d9b6b 100644 (file)
@@ -153,7 +153,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
        }
 };
 
-#define MAXMINOR  (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
+#define MAXMINOR  ARRAY_SIZE(sys_ser)
 
 static char FENCE[] = {"EOR"};
 static int vmlogrdr_major = 0;
index 93b2862bd3faecbc5702759855db207c43c49f17..4ebf6d4fc66cbed6d7493892620e3a78a55040cb 100644 (file)
@@ -608,6 +608,36 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
 }
 EXPORT_SYMBOL(ccwgroup_driver_unregister);
 
+static int __ccwgroupdev_check_busid(struct device *dev, void *id)
+{
+       char *bus_id = id;
+
+       return (strcmp(bus_id, dev_name(dev)) == 0);
+}
+
+/**
+ * get_ccwgroupdev_by_busid() - obtain device from a bus id
+ * @gdrv: driver the device is owned by
+ * @bus_id: bus id of the device to be searched
+ *
+ * This function searches all devices owned by @gdrv for a device with a bus
+ * id matching @bus_id.
+ * Returns:
+ *  If a match is found, its reference count of the found device is increased
+ *  and it is returned; else %NULL is returned.
+ */
+struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
+                                                char *bus_id)
+{
+       struct device *dev;
+
+       dev = driver_find_device(&gdrv->driver, NULL, bus_id,
+                                __ccwgroupdev_check_busid);
+
+       return dev ? to_ccwgroupdev(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(get_ccwgroupdev_by_busid);
+
 /**
  * ccwgroup_probe_ccwdev() - probe function for slave devices
  * @cdev: ccw device to be probed
index 9c7d9da42ba0829692d0d8dadbbd1f42935962f3..9537e656e9278d5b9b9861320e39f086a8d94f02 100644 (file)
@@ -595,19 +595,11 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
                return 0;
 }
 
-static inline int contains_aobs(struct qdio_q *q)
-{
-       return !q->is_input_q && q->u.out.use_cq;
-}
-
 static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
 {
        unsigned char state = 0;
        int j, b = start;
 
-       if (!contains_aobs(q))
-               return;
-
        for (j = 0; j < count; ++j) {
                get_buf_state(q, b, &state, 0);
                if (state == SLSB_P_OUTPUT_PENDING) {
@@ -618,8 +610,6 @@ static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
                        q->u.out.sbal_state[b].flags |=
                                QDIO_OUTBUF_STATE_FLAG_PENDING;
                        q->u.out.aobs[b] = NULL;
-               } else if (state == SLSB_P_OUTPUT_EMPTY) {
-                       q->u.out.sbal_state[b].aob = NULL;
                }
                b = next_buf(b);
        }
@@ -638,7 +628,6 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
                q->aobs[bufnr] = aob;
        }
        if (q->aobs[bufnr]) {
-               q->sbal_state[bufnr].aob = q->aobs[bufnr];
                q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
                phys_aob = virt_to_phys(q->aobs[bufnr]);
                WARN_ON_ONCE(phys_aob & 0xFF);
@@ -666,10 +655,10 @@ static void qdio_kick_handler(struct qdio_q *q)
                qperf_inc(q, outbound_handler);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
                              start, count);
+               if (q->u.out.use_cq)
+                       qdio_handle_aobs(q, start, count);
        }
 
-       qdio_handle_aobs(q, start, count);
-
        q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
                   q->irq_ptr->int_parm);
 
index 78f1be41b05e3fb2cc5c91b31e0ec00877735851..e324d890a4f61840a8b2f6e3ef7a640b93c54579 100644 (file)
@@ -27,7 +27,6 @@ struct qaob *qdio_allocate_aob(void)
 {
        return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
 }
-EXPORT_SYMBOL_GPL(qdio_allocate_aob);
 
 void qdio_release_aob(struct qaob *aob)
 {
index b59af548ed1c57a3ad05c4956170146cb6f20be0..fd5e215c66b7c5672dd13fb6c893c176cc8f04fd 100644 (file)
@@ -10,7 +10,7 @@ zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
 zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
 obj-$(CONFIG_ZCRYPT) += zcrypt.o
 # adapter drivers depend on ap.o and zcrypt.o
-obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
+obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
 
 # pkey kernel module
 pkey-objs := pkey_api.o
index f039266b275dad7c31559e897f094a056a8c3c2c..048665e4f13d4695ed9c85c438e7f6b36bc3f7c9 100644 (file)
@@ -65,12 +65,11 @@ static struct device *ap_root_device;
 DEFINE_SPINLOCK(ap_list_lock);
 LIST_HEAD(ap_card_list);
 
-/* Default permissions (card and domain masking) */
-static struct ap_perms {
-       DECLARE_BITMAP(apm, AP_DEVICES);
-       DECLARE_BITMAP(aqm, AP_DOMAINS);
-} ap_perms;
-static DEFINE_MUTEX(ap_perms_mutex);
+/* Default permissions (ioctl, card and domain masking) */
+struct ap_perms ap_perms;
+EXPORT_SYMBOL(ap_perms);
+DEFINE_MUTEX(ap_perms_mutex);
+EXPORT_SYMBOL(ap_perms_mutex);
 
 static struct ap_config_info *ap_configuration;
 static bool initialised;
@@ -944,21 +943,9 @@ static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
        return 0;
 }
 
-/*
- * process_mask_arg() - parse a bitmap string and clear/set the
- * bits in the bitmap accordingly. The string may be given as
- * absolute value, a hex string like 0x1F2E3D4C5B6A" simple over-
- * writing the current content of the bitmap. Or as relative string
- * like "+1-16,-32,-0x40,+128" where only single bits or ranges of
- * bits are cleared or set. Distinction is done based on the very
- * first character which may be '+' or '-' for the relative string
- * and othewise assume to be an absolute value string. If parsing fails
- * a negative errno value is returned. All arguments and bitmaps are
- * big endian order.
- */
-static int process_mask_arg(const char *str,
-                           unsigned long *bitmap, int bits,
-                           struct mutex *lock)
+int ap_parse_mask_str(const char *str,
+                     unsigned long *bitmap, int bits,
+                     struct mutex *lock)
 {
        unsigned long *newmap, size;
        int rc;
@@ -989,6 +976,7 @@ static int process_mask_arg(const char *str,
        kfree(newmap);
        return rc;
 }
+EXPORT_SYMBOL(ap_parse_mask_str);
 
 /*
  * AP bus attributes.
@@ -1049,6 +1037,21 @@ static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
 
 static BUS_ATTR_RO(ap_usage_domain_mask);
 
+static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
+{
+       if (!ap_configuration)  /* QCI not supported */
+               return snprintf(buf, PAGE_SIZE, "not supported\n");
+
+       return snprintf(buf, PAGE_SIZE,
+                       "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+                       ap_configuration->apm[0], ap_configuration->apm[1],
+                       ap_configuration->apm[2], ap_configuration->apm[3],
+                       ap_configuration->apm[4], ap_configuration->apm[5],
+                       ap_configuration->apm[6], ap_configuration->apm[7]);
+}
+
+static BUS_ATTR_RO(ap_adapter_mask);
+
 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
 {
        return snprintf(buf, PAGE_SIZE, "%d\n",
@@ -1161,7 +1164,7 @@ static ssize_t apmask_store(struct bus_type *bus, const char *buf,
 {
        int rc;
 
-       rc = process_mask_arg(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex);
+       rc = ap_parse_mask_str(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex);
        if (rc)
                return rc;
 
@@ -1192,7 +1195,7 @@ static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
 {
        int rc;
 
-       rc = process_mask_arg(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex);
+       rc = ap_parse_mask_str(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex);
        if (rc)
                return rc;
 
@@ -1207,6 +1210,7 @@ static struct bus_attribute *const ap_bus_attrs[] = {
        &bus_attr_ap_domain,
        &bus_attr_ap_control_domain_mask,
        &bus_attr_ap_usage_domain_mask,
+       &bus_attr_ap_adapter_mask,
        &bus_attr_config_time,
        &bus_attr_poll_thread,
        &bus_attr_ap_interrupts,
@@ -1218,11 +1222,10 @@ static struct bus_attribute *const ap_bus_attrs[] = {
 };
 
 /**
- * ap_select_domain(): Select an AP domain.
- *
- * Pick one of the 16 AP domains.
+ * ap_select_domain(): Select an AP domain if possible and we haven't
+ * already done so before.
  */
-static int ap_select_domain(void)
+static void ap_select_domain(void)
 {
        int count, max_count, best_domain;
        struct ap_queue_status status;
@@ -1237,7 +1240,7 @@ static int ap_select_domain(void)
        if (ap_domain_index >= 0) {
                /* Domain has already been selected. */
                spin_unlock_bh(&ap_domain_lock);
-               return 0;
+               return;
        }
        best_domain = -1;
        max_count = 0;
@@ -1264,11 +1267,8 @@ static int ap_select_domain(void)
        if (best_domain >= 0) {
                ap_domain_index = best_domain;
                AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index);
-               spin_unlock_bh(&ap_domain_lock);
-               return 0;
        }
        spin_unlock_bh(&ap_domain_lock);
-       return -ENODEV;
 }
 
 /*
@@ -1346,8 +1346,7 @@ static void ap_scan_bus(struct work_struct *unused)
        AP_DBF(DBF_DEBUG, "%s running\n", __func__);
 
        ap_query_configuration(ap_configuration);
-       if (ap_select_domain() != 0)
-               goto out;
+       ap_select_domain();
 
        for (id = 0; id < AP_DEVICES; id++) {
                /* check if device is registered */
@@ -1467,12 +1466,11 @@ static void ap_scan_bus(struct work_struct *unused)
                }
        } /* end device loop */
 
-       if (defdomdevs < 1)
+       if (ap_domain_index >= 0 && defdomdevs < 1)
                AP_DBF(DBF_INFO,
                       "no queue device with default domain %d available\n",
                       ap_domain_index);
 
-out:
        mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
 }
 
@@ -1496,21 +1494,22 @@ static int __init ap_debug_init(void)
 static void __init ap_perms_init(void)
 {
        /* all resources useable if no kernel parameter string given */
+       memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm));
        memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
        memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));
 
        /* apm kernel parameter string */
        if (apm_str) {
                memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
-               process_mask_arg(apm_str, ap_perms.apm, AP_DEVICES,
-                                &ap_perms_mutex);
+               ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
+                                 &ap_perms_mutex);
        }
 
        /* aqm kernel parameter string */
        if (aqm_str) {
                memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
-               process_mask_arg(aqm_str, ap_perms.aqm, AP_DOMAINS,
-                                &ap_perms_mutex);
+               ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
+                                 &ap_perms_mutex);
        }
 }
 
@@ -1533,7 +1532,7 @@ static int __init ap_module_init(void)
                return -ENODEV;
        }
 
-       /* set up the AP permissions (ap and aq masks) */
+       /* set up the AP permissions (ioctls, ap and aq masks) */
        ap_perms_init();
 
        /* Get AP configuration data if available */
index 5246cd8c16a605f6748884b47bdc59d963c3b578..3eed1b36c876d1fde221a38e676544f2c1c3679f 100644 (file)
@@ -20,6 +20,7 @@
 
 #define AP_DEVICES 256         /* Number of AP devices. */
 #define AP_DOMAINS 256         /* Number of AP domains. */
+#define AP_IOCTLS  256         /* Number of ioctls. */
 #define AP_RESET_TIMEOUT (HZ*0.7)      /* Time in ticks for reset timeouts. */
 #define AP_CONFIG_TIME 30      /* Time in seconds between AP bus rescans. */
 #define AP_POLL_TIME 1         /* Time in ticks between receive polls. */
@@ -257,6 +258,14 @@ void ap_queue_resume(struct ap_device *ap_dev);
 struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
                               int comp_device_type, unsigned int functions);
 
+struct ap_perms {
+       unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)];
+       unsigned long apm[BITS_TO_LONGS(AP_DEVICES)];
+       unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)];
+};
+extern struct ap_perms ap_perms;
+extern struct mutex ap_perms_mutex;
+
 /*
  * check APQN for owned/reserved by ap bus and default driver(s).
  * Checks if this APQN is or will be in use by the ap bus
@@ -280,4 +289,20 @@ int ap_owned_by_def_drv(int card, int queue);
 int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
                                       unsigned long *aqm);
 
+/*
+ * ap_parse_mask_str() - helper function to parse a bitmap string
+ * and clear/set the bits in the bitmap accordingly. The string may be
+ * given as absolute value, a hex string like "0x1F2E3D4C5B6A" simply
+ * overwriting the current content of the bitmap. Or as relative string
+ * like "+1-16,-32,-0x40,+128" where only single bits or ranges of
+ * bits are cleared or set. Distinction is done based on the very
+ * first character which may be '+' or '-' for the relative string
+ * and otherwise assumed to be an absolute value string. If parsing fails
+ * a negative errno value is returned. All arguments and bitmaps are
+ * big endian order.
+ */
+int ap_parse_mask_str(const char *str,
+                     unsigned long *bitmap, int bits,
+                     struct mutex *lock);
+
 #endif /* _AP_BUS_H_ */
index 1b4001e0285fe0a5979558e2e9e2ce8f60e0808e..2f92bbed4bf6891f199b34e0002bc12dc9a2e5ea 100644 (file)
 #include <linux/slab.h>
 #include <linux/kallsyms.h>
 #include <linux/debugfs.h>
+#include <linux/random.h>
+#include <linux/cpufeature.h>
 #include <asm/zcrypt.h>
 #include <asm/cpacf.h>
 #include <asm/pkey.h>
+#include <crypto/aes.h>
 
 #include "zcrypt_api.h"
 
@@ -32,6 +35,9 @@ MODULE_DESCRIPTION("s390 protected key interface");
 /* Size of vardata block used for some of the cca requests/replies */
 #define VARDATASIZE 4096
 
+/* mask of available pckmo subfunctions, fetched once at module init */
+static cpacf_mask_t pckmo_functions;
+
 /*
  * debug feature data and functions
  */
@@ -55,6 +61,24 @@ static void __exit pkey_debug_exit(void)
        debug_unregister(debug_info);
 }
 
+/* Key token types */
+#define TOKTYPE_NON_CCA                0x00 /* Non-CCA key token */
+#define TOKTYPE_CCA_INTERNAL   0x01 /* CCA internal key token */
+
+/* For TOKTYPE_NON_CCA: */
+#define TOKVER_PROTECTED_KEY   0x01 /* Protected key token */
+
+/* For TOKTYPE_CCA_INTERNAL: */
+#define TOKVER_CCA_AES         0x04 /* CCA AES key token */
+
+/* header part of a key token */
+struct keytoken_header {
+       u8  type;     /* one of the TOKTYPE values */
+       u8  res0[3];
+       u8  version;  /* one of the TOKVER values */
+       u8  res1[3];
+} __packed;
+
 /* inside view of a secure key token (only type 0x01 version 0x04) */
 struct secaeskeytoken {
        u8  type;     /* 0x01 for internal key token */
@@ -71,6 +95,17 @@ struct secaeskeytoken {
        u8  tvv[4];   /* token validation value */
 } __packed;
 
+/* inside view of a protected key token (only type 0x00 version 0x01) */
+struct protaeskeytoken {
+       u8  type;     /* 0x00 for PAES specific key tokens */
+       u8  res0[3];
+       u8  version;  /* should be 0x01 for protected AES key token */
+       u8  res1[3];
+       u32 keytype;  /* key type, one of the PKEY_KEYTYPE values */
+       u32 len;      /* bytes actually stored in protkey[] */
+       u8  protkey[MAXPROTKEYSIZE]; /* the protected key blob */
+} __packed;
+
 /*
  * Simple check if the token is a valid CCA secure AES key
  * token. If keybitsize is given, the bitsize of the key is
@@ -80,16 +115,16 @@ static int check_secaeskeytoken(const u8 *token, int keybitsize)
 {
        struct secaeskeytoken *t = (struct secaeskeytoken *) token;
 
-       if (t->type != 0x01) {
+       if (t->type != TOKTYPE_CCA_INTERNAL) {
                DEBUG_ERR(
-                       "%s secure token check failed, type mismatch 0x%02x != 0x01\n",
-                       __func__, (int) t->type);
+                       "%s secure token check failed, type mismatch 0x%02x != 0x%02x\n",
+                       __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
                return -EINVAL;
        }
-       if (t->version != 0x04) {
+       if (t->version != TOKVER_CCA_AES) {
                DEBUG_ERR(
-                       "%s secure token check failed, version mismatch 0x%02x != 0x04\n",
-                       __func__, (int) t->version);
+                       "%s secure token check failed, version mismatch 0x%02x != 0x%02x\n",
+                       __func__, (int) t->version, TOKVER_CCA_AES);
                return -EINVAL;
        }
        if (keybitsize > 0 && t->bitsize != keybitsize) {
@@ -647,6 +682,16 @@ int pkey_clr2protkey(u32 keytype,
                return -EINVAL;
        }
 
+       /*
+        * Check if the needed pckmo subfunction is available.
+        * These subfunctions can be enabled/disabled by customers
+        * in the LPAR profile or may even change on the fly.
+        */
+       if (!cpacf_test_func(&pckmo_functions, fc)) {
+               DEBUG_ERR("%s pckmo functions not available\n", __func__);
+               return -EOPNOTSUPP;
+       }
+
        /* prepare param block */
        memset(paramblock, 0, sizeof(paramblock));
        memcpy(paramblock, clrkey->clrkey, keysize);
@@ -1051,6 +1096,166 @@ out:
 }
 EXPORT_SYMBOL(pkey_verifykey);
 
+/*
+ * Generate a random protected key
+ */
+int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey)
+{
+       struct pkey_clrkey clrkey;
+       int keysize;
+       int rc;
+
+       switch (keytype) {
+       case PKEY_KEYTYPE_AES_128:
+               keysize = 16;
+               break;
+       case PKEY_KEYTYPE_AES_192:
+               keysize = 24;
+               break;
+       case PKEY_KEYTYPE_AES_256:
+               keysize = 32;
+               break;
+       default:
+               DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
+                         keytype);
+               return -EINVAL;
+       }
+
+       /* generate a dummy random clear key */
+       get_random_bytes(clrkey.clrkey, keysize);
+
+       /* convert it to a dummy protected key */
+       rc = pkey_clr2protkey(keytype, &clrkey, protkey);
+       if (rc)
+               return rc;
+
+       /* replace the key part of the protected key with random bytes */
+       get_random_bytes(protkey->protkey, keysize);
+
+       return 0;
+}
+EXPORT_SYMBOL(pkey_genprotkey);
+
+/*
+ * Verify if a protected key is still valid
+ */
+int pkey_verifyprotkey(const struct pkey_protkey *protkey)
+{
+       unsigned long fc;
+       struct {
+               u8 iv[AES_BLOCK_SIZE];
+               u8 key[MAXPROTKEYSIZE];
+       } param;
+       u8 null_msg[AES_BLOCK_SIZE];
+       u8 dest_buf[AES_BLOCK_SIZE];
+       unsigned int k;
+
+       switch (protkey->type) {
+       case PKEY_KEYTYPE_AES_128:
+               fc = CPACF_KMC_PAES_128;
+               break;
+       case PKEY_KEYTYPE_AES_192:
+               fc = CPACF_KMC_PAES_192;
+               break;
+       case PKEY_KEYTYPE_AES_256:
+               fc = CPACF_KMC_PAES_256;
+               break;
+       default:
+               DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
+                         protkey->type);
+               return -EINVAL;
+       }
+
+       memset(null_msg, 0, sizeof(null_msg));
+
+       memset(param.iv, 0, sizeof(param.iv));
+       memcpy(param.key, protkey->protkey, sizeof(param.key));
+
+       k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
+                     sizeof(null_msg));
+       if (k != sizeof(null_msg)) {
+               DEBUG_ERR("%s protected key is not valid\n", __func__);
+               return -EKEYREJECTED;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(pkey_verifyprotkey);
+
+/*
+ * Transform a non-CCA key token into a protected key
+ */
+static int pkey_nonccatok2pkey(const __u8 *key, __u32 keylen,
+                              struct pkey_protkey *protkey)
+{
+       struct keytoken_header *hdr = (struct keytoken_header *)key;
+       struct protaeskeytoken *t;
+
+       switch (hdr->version) {
+       case TOKVER_PROTECTED_KEY:
+               if (keylen != sizeof(struct protaeskeytoken))
+                       return -EINVAL;
+
+               t = (struct protaeskeytoken *)key;
+               protkey->len = t->len;
+               protkey->type = t->keytype;
+               memcpy(protkey->protkey, t->protkey,
+                      sizeof(protkey->protkey));
+
+               return pkey_verifyprotkey(protkey);
+       default:
+               DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
+                         __func__, hdr->version);
+               return -EINVAL;
+       }
+}
+
+/*
+ * Transform a CCA internal key token into a protected key
+ */
+static int pkey_ccainttok2pkey(const __u8 *key, __u32 keylen,
+                              struct pkey_protkey *protkey)
+{
+       struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+       switch (hdr->version) {
+       case TOKVER_CCA_AES:
+               if (keylen != sizeof(struct secaeskeytoken))
+                       return -EINVAL;
+
+               return pkey_skey2pkey((struct pkey_seckey *)key,
+                                     protkey);
+       default:
+               DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n",
+                         __func__, hdr->version);
+               return -EINVAL;
+       }
+}
+
+/*
+ * Transform a key blob (of any type) into a protected key
+ */
+int pkey_keyblob2pkey(const __u8 *key, __u32 keylen,
+                     struct pkey_protkey *protkey)
+{
+       struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+       if (keylen < sizeof(struct keytoken_header))
+               return -EINVAL;
+
+       switch (hdr->type) {
+       case TOKTYPE_NON_CCA:
+               return pkey_nonccatok2pkey(key, keylen, protkey);
+       case TOKTYPE_CCA_INTERNAL:
+               return pkey_ccainttok2pkey(key, keylen, protkey);
+       default:
+               DEBUG_ERR("%s unknown/unsupported blob type %d\n", __func__,
+                         hdr->type);
+               return -EINVAL;
+       }
+}
+EXPORT_SYMBOL(pkey_keyblob2pkey);
+
 /*
  * File io functions
  */
@@ -1167,6 +1372,58 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
                        return -EFAULT;
                break;
        }
+       case PKEY_GENPROTK: {
+               struct pkey_genprotk __user *ugp = (void __user *) arg;
+               struct pkey_genprotk kgp;
+
+               if (copy_from_user(&kgp, ugp, sizeof(kgp)))
+                       return -EFAULT;
+               rc = pkey_genprotkey(kgp.keytype, &kgp.protkey);
+               DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc);
+               if (rc)
+                       break;
+               if (copy_to_user(ugp, &kgp, sizeof(kgp)))
+                       return -EFAULT;
+               break;
+       }
+       case PKEY_VERIFYPROTK: {
+               struct pkey_verifyprotk __user *uvp = (void __user *) arg;
+               struct pkey_verifyprotk kvp;
+
+               if (copy_from_user(&kvp, uvp, sizeof(kvp)))
+                       return -EFAULT;
+               rc = pkey_verifyprotkey(&kvp.protkey);
+               DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc);
+               break;
+       }
+       case PKEY_KBLOB2PROTK: {
+               struct pkey_kblob2pkey __user *utp = (void __user *) arg;
+               struct pkey_kblob2pkey ktp;
+               __u8 __user *ukey;
+               __u8 *kkey;
+
+               if (copy_from_user(&ktp, utp, sizeof(ktp)))
+                       return -EFAULT;
+               if (ktp.keylen < MINKEYBLOBSIZE ||
+                   ktp.keylen > MAXKEYBLOBSIZE)
+                       return -EINVAL;
+               ukey = ktp.key;
+               kkey = kmalloc(ktp.keylen, GFP_KERNEL);
+               if (kkey == NULL)
+                       return -ENOMEM;
+               if (copy_from_user(kkey, ukey, ktp.keylen)) {
+                       kfree(kkey);
+                       return -EFAULT;
+               }
+               rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
+               DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+               kfree(kkey);
+               if (rc)
+                       break;
+               if (copy_to_user(utp, &ktp, sizeof(ktp)))
+                       return -EFAULT;
+               break;
+       }
        default:
                /* unknown/unsupported ioctl cmd */
                return -ENOTTY;
@@ -1178,6 +1435,236 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
 /*
  * Sysfs and file io operations
  */
+
+/*
+ * Sysfs attribute read function for all protected key binary attributes.
+ * The implementation can not deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
+                                         loff_t off, size_t count)
+{
+       struct protaeskeytoken protkeytoken;
+       struct pkey_protkey protkey;
+       int rc;
+
+       if (off != 0 || count < sizeof(protkeytoken))
+               return -EINVAL;
+       if (is_xts)
+               if (count < 2 * sizeof(protkeytoken))
+                       return -EINVAL;
+
+       memset(&protkeytoken, 0, sizeof(protkeytoken));
+       protkeytoken.type = TOKTYPE_NON_CCA;
+       protkeytoken.version = TOKVER_PROTECTED_KEY;
+       protkeytoken.keytype = keytype;
+
+       rc = pkey_genprotkey(protkeytoken.keytype, &protkey);
+       if (rc)
+               return rc;
+
+       protkeytoken.len = protkey.len;
+       memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
+
+       memcpy(buf, &protkeytoken, sizeof(protkeytoken));
+
+       if (is_xts) {
+               rc = pkey_genprotkey(protkeytoken.keytype, &protkey);
+               if (rc)
+                       return rc;
+
+               protkeytoken.len = protkey.len;
+               memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
+
+               memcpy(buf + sizeof(protkeytoken), &protkeytoken,
+                      sizeof(protkeytoken));
+
+               return 2 * sizeof(protkeytoken);
+       }
+
+       return sizeof(protkeytoken);
+}
+
+static ssize_t protkey_aes_128_read(struct file *filp,
+                                   struct kobject *kobj,
+                                   struct bin_attribute *attr,
+                                   char *buf, loff_t off,
+                                   size_t count)
+{
+       return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
+                                         off, count);
+}
+
+static ssize_t protkey_aes_192_read(struct file *filp,
+                                   struct kobject *kobj,
+                                   struct bin_attribute *attr,
+                                   char *buf, loff_t off,
+                                   size_t count)
+{
+       return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
+                                         off, count);
+}
+
+static ssize_t protkey_aes_256_read(struct file *filp,
+                                   struct kobject *kobj,
+                                   struct bin_attribute *attr,
+                                   char *buf, loff_t off,
+                                   size_t count)
+{
+       return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
+                                         off, count);
+}
+
+static ssize_t protkey_aes_128_xts_read(struct file *filp,
+                                       struct kobject *kobj,
+                                       struct bin_attribute *attr,
+                                       char *buf, loff_t off,
+                                       size_t count)
+{
+       return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
+                                         off, count);
+}
+
+static ssize_t protkey_aes_256_xts_read(struct file *filp,
+                                       struct kobject *kobj,
+                                       struct bin_attribute *attr,
+                                       char *buf, loff_t off,
+                                       size_t count)
+{
+       return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
+                                         off, count);
+}
+
+static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
+
+static struct bin_attribute *protkey_attrs[] = {
+       &bin_attr_protkey_aes_128,
+       &bin_attr_protkey_aes_192,
+       &bin_attr_protkey_aes_256,
+       &bin_attr_protkey_aes_128_xts,
+       &bin_attr_protkey_aes_256_xts,
+       NULL
+};
+
+static struct attribute_group protkey_attr_group = {
+       .name      = "protkey",
+       .bin_attrs = protkey_attrs,
+};
+
+/*
+ * Sysfs attribute read function for all secure key ccadata binary attributes.
+ * The implementation can not deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
+                                         loff_t off, size_t count)
+{
+       int rc;
+
+       if (off != 0 || count < sizeof(struct secaeskeytoken))
+               return -EINVAL;
+       if (is_xts)
+               if (count < 2 * sizeof(struct secaeskeytoken))
+                       return -EINVAL;
+
+       rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf);
+       if (rc)
+               return rc;
+
+       if (is_xts) {
+               buf += sizeof(struct pkey_seckey);
+               rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf);
+               if (rc)
+                       return rc;
+
+               return 2 * sizeof(struct secaeskeytoken);
+       }
+
+       return sizeof(struct secaeskeytoken);
+}
+
+static ssize_t ccadata_aes_128_read(struct file *filp,
+                                   struct kobject *kobj,
+                                   struct bin_attribute *attr,
+                                   char *buf, loff_t off,
+                                   size_t count)
+{
+       return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
+                                         off, count);
+}
+
+static ssize_t ccadata_aes_192_read(struct file *filp,
+                                   struct kobject *kobj,
+                                   struct bin_attribute *attr,
+                                   char *buf, loff_t off,
+                                   size_t count)
+{
+       return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
+                                         off, count);
+}
+
+static ssize_t ccadata_aes_256_read(struct file *filp,
+                                   struct kobject *kobj,
+                                   struct bin_attribute *attr,
+                                   char *buf, loff_t off,
+                                   size_t count)
+{
+       return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
+                                         off, count);
+}
+
+static ssize_t ccadata_aes_128_xts_read(struct file *filp,
+                                       struct kobject *kobj,
+                                       struct bin_attribute *attr,
+                                       char *buf, loff_t off,
+                                       size_t count)
+{
+       return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
+                                         off, count);
+}
+
+static ssize_t ccadata_aes_256_xts_read(struct file *filp,
+                                       struct kobject *kobj,
+                                       struct bin_attribute *attr,
+                                       char *buf, loff_t off,
+                                       size_t count)
+{
+       return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
+                                         off, count);
+}
+
+static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
+
+static struct bin_attribute *ccadata_attrs[] = {
+       &bin_attr_ccadata_aes_128,
+       &bin_attr_ccadata_aes_192,
+       &bin_attr_ccadata_aes_256,
+       &bin_attr_ccadata_aes_128_xts,
+       &bin_attr_ccadata_aes_256_xts,
+       NULL
+};
+
+static struct attribute_group ccadata_attr_group = {
+       .name      = "ccadata",
+       .bin_attrs = ccadata_attrs,
+};
+
+static const struct attribute_group *pkey_attr_groups[] = {
+       &protkey_attr_group,
+       &ccadata_attr_group,
+       NULL,
+};
+
 static const struct file_operations pkey_fops = {
        .owner          = THIS_MODULE,
        .open           = nonseekable_open,
@@ -1190,6 +1677,7 @@ static struct miscdevice pkey_dev = {
        .minor  = MISC_DYNAMIC_MINOR,
        .mode   = 0666,
        .fops   = &pkey_fops,
+       .groups = pkey_attr_groups,
 };
 
 /*
@@ -1197,14 +1685,23 @@ static struct miscdevice pkey_dev = {
  */
 static int __init pkey_init(void)
 {
-       cpacf_mask_t pckmo_functions;
+       cpacf_mask_t kmc_functions;
 
-       /* check for pckmo instructions available */
+       /*
+        * The pckmo instruction should be available - even if we don't
+        * actually invoke it. This instruction comes with MSA 3 which
+        * is also the minimum level for the kmc instructions which
+        * are able to work with protected keys.
+        */
        if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
                return -EOPNOTSUPP;
-       if (!cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_128_KEY) ||
-           !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_192_KEY) ||
-           !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_256_KEY))
+
+       /* check for kmc instructions available */
+       if (!cpacf_query(CPACF_KMC, &kmc_functions))
+               return -EOPNOTSUPP;
+       if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
+           !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
+           !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256))
                return -EOPNOTSUPP;
 
        pkey_debug_init();
@@ -1222,5 +1719,5 @@ static void __exit pkey_exit(void)
        pkey_debug_exit();
 }
 
-module_init(pkey_init);
+module_cpu_feature_match(MSA, pkey_init);
 module_exit(pkey_exit);
index e6854127b4343dc5ee73357472b4c97f7f8a377e..eb93c2d27d0ad142c4d977d74df3e415468336af 100644 (file)
@@ -1,8 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
- *  zcrypt 2.1.0
- *
- *  Copyright IBM Corp. 2001, 2012
+ *  Copyright IBM Corp. 2001, 2018
  *  Author(s): Robert Burroughs
  *            Eric Rossman (edrossma@us.ibm.com)
  *            Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -11,6 +9,7 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
+ *  Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
  */
 
 #include <linux/module.h>
@@ -24,6 +23,8 @@
 #include <linux/uaccess.h>
 #include <linux/hw_random.h>
 #include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/ctype.h>
 #include <asm/debug.h>
 
 #define CREATE_TRACE_POINTS
@@ -108,6 +109,375 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
 }
 EXPORT_SYMBOL(zcrypt_msgtype);
 
+/*
+ * Multi device nodes extension functions.
+ */
+
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+
+struct zcdn_device;
+
+static struct class *zcrypt_class;
+static dev_t zcrypt_devt;
+static struct cdev zcrypt_cdev;
+
+struct zcdn_device {
+       struct device device;
+       struct ap_perms perms;
+};
+
+#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)
+
+#define ZCDN_MAX_NAME 32
+
+static int zcdn_create(const char *name);
+static int zcdn_destroy(const char *name);
+
+/* helper function, matches the name for find_zcdndev_by_name() */
+static int __match_zcdn_name(struct device *dev, const void *data)
+{
+       return strcmp(dev_name(dev), (const char *)data) == 0;
+}
+
+/* helper function, matches the devt value for find_zcdndev_by_devt() */
+static int __match_zcdn_devt(struct device *dev, const void *data)
+{
+       return dev->devt == *((dev_t *) data);
+}
+
+/*
+ * Find zcdn device by name.
+ * Returns reference to the zcdn device which needs to be released
+ * with put_device() after use.
+ */
+static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
+{
+       struct device *dev =
+               class_find_device(zcrypt_class, NULL,
+                                 (void *) name,
+                                 __match_zcdn_name);
+
+       return dev ? to_zcdn_dev(dev) : NULL;
+}
+
+/*
+ * Find zcdn device by devt value.
+ * Returns reference to the zcdn device which needs to be released
+ * with put_device() after use.
+ */
+static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
+{
+       struct device *dev =
+               class_find_device(zcrypt_class, NULL,
+                                 (void *) &devt,
+                                 __match_zcdn_devt);
+
+       return dev ? to_zcdn_dev(dev) : NULL;
+}
+
+static ssize_t ioctlmask_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       int i, rc;
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       if (mutex_lock_interruptible(&ap_perms_mutex))
+               return -ERESTARTSYS;
+
+       buf[0] = '0';
+       buf[1] = 'x';
+       for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
+               snprintf(buf + 2 + 2 * i * sizeof(long),
+                        PAGE_SIZE - 2 - 2 * i * sizeof(long),
+                        "%016lx", zcdndev->perms.ioctlm[i]);
+       buf[2 + 2 * i * sizeof(long)] = '\n';
+       buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+       rc = 2 + 2 * i * sizeof(long) + 1;
+
+       mutex_unlock(&ap_perms_mutex);
+
+       return rc;
+}
+
+static ssize_t ioctlmask_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t count)
+{
+       int rc;
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
+                              AP_IOCTLS, &ap_perms_mutex);
+       if (rc)
+               return rc;
+
+       return count;
+}
+
+static DEVICE_ATTR_RW(ioctlmask);
+
+static ssize_t apmask_show(struct device *dev,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       int i, rc;
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       if (mutex_lock_interruptible(&ap_perms_mutex))
+               return -ERESTARTSYS;
+
+       buf[0] = '0';
+       buf[1] = 'x';
+       for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
+               snprintf(buf + 2 + 2 * i * sizeof(long),
+                        PAGE_SIZE - 2 - 2 * i * sizeof(long),
+                        "%016lx", zcdndev->perms.apm[i]);
+       buf[2 + 2 * i * sizeof(long)] = '\n';
+       buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+       rc = 2 + 2 * i * sizeof(long) + 1;
+
+       mutex_unlock(&ap_perms_mutex);
+
+       return rc;
+}
+
+static ssize_t apmask_store(struct device *dev,
+                           struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       int rc;
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
+                              AP_DEVICES, &ap_perms_mutex);
+       if (rc)
+               return rc;
+
+       return count;
+}
+
+static DEVICE_ATTR_RW(apmask);
+
+static ssize_t aqmask_show(struct device *dev,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       int i, rc;
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       if (mutex_lock_interruptible(&ap_perms_mutex))
+               return -ERESTARTSYS;
+
+       buf[0] = '0';
+       buf[1] = 'x';
+       for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
+               snprintf(buf + 2 + 2 * i * sizeof(long),
+                        PAGE_SIZE - 2 - 2 * i * sizeof(long),
+                        "%016lx", zcdndev->perms.aqm[i]);
+       buf[2 + 2 * i * sizeof(long)] = '\n';
+       buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+       rc = 2 + 2 * i * sizeof(long) + 1;
+
+       mutex_unlock(&ap_perms_mutex);
+
+       return rc;
+}
+
+static ssize_t aqmask_store(struct device *dev,
+                           struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       int rc;
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
+                              AP_DOMAINS, &ap_perms_mutex);
+       if (rc)
+               return rc;
+
+       return count;
+}
+
+static DEVICE_ATTR_RW(aqmask);
+
+static struct attribute *zcdn_dev_attrs[] = {
+       &dev_attr_ioctlmask.attr,
+       &dev_attr_apmask.attr,
+       &dev_attr_aqmask.attr,
+       NULL
+};
+
+static struct attribute_group zcdn_dev_attr_group = {
+       .attrs = zcdn_dev_attrs
+};
+
+static const struct attribute_group *zcdn_dev_attr_groups[] = {
+       &zcdn_dev_attr_group,
+       NULL
+};
+
+static ssize_t zcdn_create_store(struct class *class,
+                                struct class_attribute *attr,
+                                const char *buf, size_t count)
+{
+       int rc;
+       char name[ZCDN_MAX_NAME];
+
+       strncpy(name, skip_spaces(buf), sizeof(name));
+       name[sizeof(name) - 1] = '\0';
+
+       rc = zcdn_create(strim(name));
+
+       return rc ? rc : count;
+}
+
+static const struct class_attribute class_attr_zcdn_create =
+       __ATTR(create, 0600, NULL, zcdn_create_store);
+
+static ssize_t zcdn_destroy_store(struct class *class,
+                                 struct class_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       int rc;
+       char name[ZCDN_MAX_NAME];
+
+       strncpy(name, skip_spaces(buf), sizeof(name));
+       name[sizeof(name) - 1] = '\0';
+
+       rc = zcdn_destroy(strim(name));
+
+       return rc ? rc : count;
+}
+
+static const struct class_attribute class_attr_zcdn_destroy =
+       __ATTR(destroy, 0600, NULL, zcdn_destroy_store);
+
+static void zcdn_device_release(struct device *dev)
+{
+       struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+       ZCRYPT_DBF(DBF_INFO, "releasing zcdn device %d:%d\n",
+                  MAJOR(dev->devt), MINOR(dev->devt));
+
+       kfree(zcdndev);
+}
+
+static int zcdn_create(const char *name)
+{
+       dev_t devt;
+       int i, rc = 0;
+       char nodename[ZCDN_MAX_NAME];
+       struct zcdn_device *zcdndev;
+
+       if (mutex_lock_interruptible(&ap_perms_mutex))
+               return -ERESTARTSYS;
+
+       /* check if device node with this name already exists */
+       if (name[0]) {
+               zcdndev = find_zcdndev_by_name(name);
+               if (zcdndev) {
+                       put_device(&zcdndev->device);
+                       rc = -EEXIST;
+                       goto unlockout;
+               }
+       }
+
+       /* find an unused minor number */
+       for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
+               devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
+               zcdndev = find_zcdndev_by_devt(devt);
+               if (zcdndev)
+                       put_device(&zcdndev->device);
+               else
+                       break;
+       }
+       if (i == ZCRYPT_MAX_MINOR_NODES) {
+               rc = -ENOSPC;
+               goto unlockout;
+       }
+
+       /* alloc and prepare a new zcdn device */
+       zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
+       if (!zcdndev) {
+               rc = -ENOMEM;
+               goto unlockout;
+       }
+       zcdndev->device.release = zcdn_device_release;
+       zcdndev->device.class = zcrypt_class;
+       zcdndev->device.devt = devt;
+       zcdndev->device.groups = zcdn_dev_attr_groups;
+       if (name[0])
+               strncpy(nodename, name, sizeof(nodename));
+       else
+               snprintf(nodename, sizeof(nodename),
+                        ZCRYPT_NAME "_%d", (int) MINOR(devt));
+       nodename[sizeof(nodename)-1] = '\0';
+       if (dev_set_name(&zcdndev->device, nodename)) {
+               rc = -EINVAL;
+               goto unlockout;
+       }
+       rc = device_register(&zcdndev->device);
+       if (rc) {
+               put_device(&zcdndev->device);
+               goto unlockout;
+       }
+
+       ZCRYPT_DBF(DBF_INFO, "created zcdn device %d:%d\n",
+                  MAJOR(devt), MINOR(devt));
+
+unlockout:
+       mutex_unlock(&ap_perms_mutex);
+       return rc;
+}
+
+static int zcdn_destroy(const char *name)
+{
+       int rc = 0;
+       struct zcdn_device *zcdndev;
+
+       if (mutex_lock_interruptible(&ap_perms_mutex))
+               return -ERESTARTSYS;
+
+       /* try to find this zcdn device */
+       zcdndev = find_zcdndev_by_name(name);
+       if (!zcdndev) {
+               rc = -ENOENT;
+               goto unlockout;
+       }
+
+       /*
+        * The zcdn device is not hard destroyed. It is subject to
+        * reference counting and thus just needs to be unregistered.
+        */
+       put_device(&zcdndev->device);
+       device_unregister(&zcdndev->device);
+
+unlockout:
+       mutex_unlock(&ap_perms_mutex);
+       return rc;
+}
+
+static void zcdn_destroy_all(void)
+{
+       int i;
+       dev_t devt;
+       struct zcdn_device *zcdndev;
+
+       mutex_lock(&ap_perms_mutex);
+       for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
+               devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
+               zcdndev = find_zcdndev_by_devt(devt);
+               if (zcdndev) {
+                       put_device(&zcdndev->device);
+                       device_unregister(&zcdndev->device);
+               }
+       }
+       mutex_unlock(&ap_perms_mutex);
+}
+
+#endif
+
 /**
  * zcrypt_read (): Not supported beyond zcrypt 1.3.1.
  *
@@ -137,6 +507,23 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
  */
 static int zcrypt_open(struct inode *inode, struct file *filp)
 {
+       struct ap_perms *perms = &ap_perms;
+
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+       if (filp->f_inode->i_cdev == &zcrypt_cdev) {
+               struct zcdn_device *zcdndev;
+
+               if (mutex_lock_interruptible(&ap_perms_mutex))
+                       return -ERESTARTSYS;
+               zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
+               /* find returns a reference, no get_device() needed */
+               mutex_unlock(&ap_perms_mutex);
+               if (zcdndev)
+                       perms = &zcdndev->perms;
+       }
+#endif
+       filp->private_data = (void *) perms;
+
        atomic_inc(&zcrypt_open_count);
        return nonseekable_open(inode, filp);
 }
@@ -148,10 +535,55 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
  */
 static int zcrypt_release(struct inode *inode, struct file *filp)
 {
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+       if (filp->f_inode->i_cdev == &zcrypt_cdev) {
+               struct zcdn_device *zcdndev;
+
+               if (mutex_lock_interruptible(&ap_perms_mutex))
+                       return -ERESTARTSYS;
+               zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
+               mutex_unlock(&ap_perms_mutex);
+               if (zcdndev) {
+                       /* 2 puts here: one for find, one for open */
+                       put_device(&zcdndev->device);
+                       put_device(&zcdndev->device);
+               }
+       }
+#endif
+
        atomic_dec(&zcrypt_open_count);
        return 0;
 }
 
+static inline int zcrypt_check_ioctl(struct ap_perms *perms,
+                                    unsigned int cmd)
+{
+       int rc = -EPERM;
+       int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;
+
+       if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
+               if (test_bit_inv(ioctlnr, perms->ioctlm))
+                       rc = 0;
+       }
+
+       if (rc)
+               ZCRYPT_DBF(DBF_WARN,
+                          "ioctl check failed: ioctlnr=0x%04x rc=%d\n",
+                          ioctlnr, rc);
+
+       return rc;
+}
+
+static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
+{
+       return test_bit_inv(card, perms->apm) ? true : false;
+}
+
+static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
+{
+       return test_bit_inv(queue, perms->aqm) ? true : false;
+}
+
 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
                                                     struct zcrypt_queue *zq,
                                                     unsigned int weight)
@@ -213,7 +645,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
 /*
  * zcrypt ioctls.
  */
-static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
+static long zcrypt_rsa_modexpo(struct ap_perms *perms,
+                              struct ica_rsa_modexpo *mex)
 {
        struct zcrypt_card *zc, *pref_zc;
        struct zcrypt_queue *zq, *pref_zq;
@@ -250,6 +683,9 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
                if (zc->min_mod_size > mex->inputdatalength ||
                    zc->max_mod_size < mex->inputdatalength)
                        continue;
+               /* check if device node has admission for this card */
+               if (!zcrypt_check_card(perms, zc->card->id))
+                       continue;
                /* get weight index of the card device  */
                weight = zc->speed_rating[func_code];
                if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
@@ -258,6 +694,10 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
                        /* check if device is online and eligible */
                        if (!zq->online || !zq->ops->rsa_modexpo)
                                continue;
+                       /* check if device node has admission for this queue */
+                       if (!zcrypt_check_queue(perms,
+                                               AP_QID_QUEUE(zq->queue->qid)))
+                               continue;
                        if (zcrypt_queue_compare(zq, pref_zq,
                                                 weight, pref_weight))
                                continue;
@@ -287,7 +727,8 @@ out:
        return rc;
 }
 
-static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
+static long zcrypt_rsa_crt(struct ap_perms *perms,
+                          struct ica_rsa_modexpo_crt *crt)
 {
        struct zcrypt_card *zc, *pref_zc;
        struct zcrypt_queue *zq, *pref_zq;
@@ -324,6 +765,9 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
                if (zc->min_mod_size > crt->inputdatalength ||
                    zc->max_mod_size < crt->inputdatalength)
                        continue;
+               /* check if device node has admission for this card */
+               if (!zcrypt_check_card(perms, zc->card->id))
+                       continue;
                /* get weight index of the card device  */
                weight = zc->speed_rating[func_code];
                if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
@@ -332,6 +776,10 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
                        /* check if device is online and eligible */
                        if (!zq->online || !zq->ops->rsa_modexpo_crt)
                                continue;
+                       /* check if device node has admission for this queue */
+                       if (!zcrypt_check_queue(perms,
+                                               AP_QID_QUEUE(zq->queue->qid)))
+                               continue;
                        if (zcrypt_queue_compare(zq, pref_zq,
                                                 weight, pref_weight))
                                continue;
@@ -361,7 +809,8 @@ out:
        return rc;
 }
 
-long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+static long _zcrypt_send_cprb(struct ap_perms *perms,
+                             struct ica_xcRB *xcRB)
 {
        struct zcrypt_card *zc, *pref_zc;
        struct zcrypt_queue *zq, *pref_zq;
@@ -373,6 +822,7 @@ long zcrypt_send_cprb(struct ica_xcRB *xcRB)
 
        trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
 
+       xcRB->status = 0;
        ap_init_message(&ap_msg);
        rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
        if (rc)
@@ -389,6 +839,9 @@ long zcrypt_send_cprb(struct ica_xcRB *xcRB)
                if (xcRB->user_defined != AUTOSELECT &&
                    xcRB->user_defined != zc->card->id)
                        continue;
+               /* check if device node has admission for this card */
+               if (!zcrypt_check_card(perms, zc->card->id))
+                       continue;
                /* get weight index of the card device  */
                weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
                if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
@@ -400,6 +853,10 @@ long zcrypt_send_cprb(struct ica_xcRB *xcRB)
                            ((*domain != (unsigned short) AUTOSELECT) &&
                             (*domain != AP_QID_QUEUE(zq->queue->qid))))
                                continue;
+                       /* check if device node has admission for this queue */
+                       if (!zcrypt_check_queue(perms,
+                                               AP_QID_QUEUE(zq->queue->qid)))
+                               continue;
                        if (zcrypt_queue_compare(zq, pref_zq,
                                                 weight, pref_weight))
                                continue;
@@ -433,6 +890,11 @@ out:
                              AP_QID_CARD(qid), AP_QID_QUEUE(qid));
        return rc;
 }
+
+long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+{
+       return _zcrypt_send_cprb(&ap_perms, xcRB);
+}
 EXPORT_SYMBOL(zcrypt_send_cprb);
 
 static bool is_desired_ep11_card(unsigned int dev_id,
@@ -459,7 +921,8 @@ static bool is_desired_ep11_queue(unsigned int dev_qid,
        return false;
 }
 
-static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
+                                 struct ep11_urb *xcrb)
 {
        struct zcrypt_card *zc, *pref_zc;
        struct zcrypt_queue *zq, *pref_zq;
@@ -510,6 +973,9 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
                if (targets &&
                    !is_desired_ep11_card(zc->card->id, target_num, targets))
                        continue;
+               /* check if device node has admission for this card */
+               if (!zcrypt_check_card(perms, zc->card->id))
+                       continue;
                /* get weight index of the card device  */
                weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
                if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
@@ -522,6 +988,10 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
                             !is_desired_ep11_queue(zq->queue->qid,
                                                    target_num, targets)))
                                continue;
+                       /* check if device node has admission for this queue */
+                       if (!zcrypt_check_queue(perms,