Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author    Linus Torvalds <torvalds@linux-foundation.org>
Wed, 22 Aug 2018 20:52:44 +0000 (13:52 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 22 Aug 2018 20:52:44 +0000 (13:52 -0700)
Pull second set of KVM updates from Paolo Bonzini:
 "ARM:
   - Support for Group0 interrupts in guests
   - Cache management optimizations for ARMv8.4 systems
   - Userspace interface for RAS
   - Fault path optimization
   - Emulated physical timer fixes
   - Random cleanups

  x86:
   - fixes for L1TF
   - a new test case
   - non-support for SGX (inject the right exception in the guest)
   - fix lockdep false positive"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (49 commits)
  KVM: VMX: fixes for vmentry_l1d_flush module parameter
  kvm: selftest: add dirty logging test
  kvm: selftest: pass in extra memory when create vm
  kvm: selftest: include the tools headers
  kvm: selftest: unify the guest port macros
  tools: introduce test_and_clear_bit
  KVM: x86: SVM: Call x86_spec_ctrl_set_guest/host() with interrupts disabled
  KVM: vmx: Inject #UD for SGX ENCLS instruction in guest
  KVM: vmx: Add defines for SGX ENCLS exiting
  x86/kvm/vmx: Fix coding style in vmx_setup_l1d_flush()
  x86: kvm: avoid unused variable warning
  KVM: Documentation: rename the capability of KVM_CAP_ARM_SET_SERROR_ESR
  KVM: arm/arm64: Skip updating PTE entry if no change
  KVM: arm/arm64: Skip updating PMD entry if no change
  KVM: arm: Use true and false for boolean values
  KVM: arm/arm64: vgic: Do not use spin_lock_irqsave/restore with irq disabled
  KVM: arm/arm64: vgic: Move DEBUG_SPINLOCK_BUG_ON to vgic.h
  KVM: arm: vgic-v3: Add support for ICC_SGI0R and ICC_ASGI1R accesses
  KVM: arm64: vgic-v3: Add support for ICC_SGI0R_EL1 and ICC_ASGI1R_EL1 accesses
  KVM: arm/arm64: vgic-v3: Add core support for Group0 SGIs
  ...
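
The SGX item above amounts to: with ENCLS exiting enabled in the VMCS
execution controls, any ENCLS leaf executed by the guest traps to the
host, which injects #UD so the guest sees SGX as unsupported. A minimal
sketch of such an exit handler, modelled on KVM's style (the exact wiring
in the merged commits may differ):

    /* Reflect a guest ENCLS attempt back as an invalid-opcode fault. */
    static int handle_encls(struct kvm_vcpu *vcpu)
    {
            kvm_queue_exception(vcpu, UD_VECTOR);   /* inject #UD */
            return 1;                               /* re-enter the guest */
    }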

290 files changed:
Documentation/ABI/testing/sysfs-fs-f2fs
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/filesystems/f2fs.txt
Documentation/filesystems/proc.txt
Documentation/sysctl/kernel.txt
Documentation/sysctl/vm.txt
arch/Kconfig
arch/arm/kernel/process.c
arch/arm64/Kconfig
arch/h8300/Kconfig
arch/h8300/Makefile
arch/h8300/boot/dts/h8300h_sim.dts
arch/h8300/include/asm/bitops.h
arch/h8300/include/asm/ptrace.h
arch/h8300/kernel/kgdb.c
arch/h8300/kernel/setup.c
arch/h8300/kernel/sim-console.c
arch/powerpc/Kconfig
arch/sparc/Kconfig
arch/sparc/Makefile
arch/sparc/include/asm/dma-mapping.h
arch/sparc/kernel/ioport.c
arch/sparc/mm/init_32.c
arch/x86/Kconfig
arch/x86/boot/compressed/kaslr.c
arch/x86/include/asm/Kbuild
arch/x86/include/asm/export.h [deleted file]
arch/x86/kernel/acpi/cstate.c
arch/x86/kvm/x86.c
block/bfq-cgroup.c
block/bfq-iosched.c
block/bfq-wf2q.c
block/blk-core.c
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq-tag.c
block/blk-mq.c
block/blk-wbt.c
block/blk.h
block/elevator.c
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/dbinput.c
drivers/acpi/acpica/dbmethod.c
drivers/acpi/acpica/dbxface.c
drivers/acpi/acpica/dsfield.c
drivers/acpi/acpica/hwregs.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/nsaccess.c
drivers/acpi/acpica/psloop.c
drivers/acpi/acpica/tbdata.c
drivers/acpi/acpica/utdelete.c
drivers/acpi/acpica/utstrsuppt.c
drivers/acpi/acpica/utstrtoul64.c
drivers/acpi/pmic/intel_pmic_crc.c
drivers/block/DAC960.c
drivers/block/pktcdvd.c
drivers/block/zram/zram_drv.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpuidle/governors/menu.c
drivers/firmware/efi/libstub/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/radeon/radeon_mn.c
drivers/ide/hpt366.c
drivers/ide/ide-floppy.c
drivers/ide/ide-io.c
drivers/ide/ide-probe.c
drivers/ide/ide-tape.c
drivers/ide/ide-taskfile.c
drivers/ide/sis5513.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/hfi1/mmu_rb.c
drivers/infiniband/hw/mlx5/odp.c
drivers/irqchip/irq-renesas-h8s.c
drivers/md/bcache/Kconfig
drivers/md/bcache/alloc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/bset.c
drivers/md/bcache/bset.h
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/closure.c
drivers/md/bcache/closure.h
drivers/md/bcache/debug.c
drivers/md/bcache/debug.h
drivers/md/bcache/extents.c
drivers/md/bcache/extents.h
drivers/md/bcache/io.c
drivers/md/bcache/journal.c
drivers/md/bcache/journal.h
drivers/md/bcache/movinggc.c
drivers/md/bcache/request.c
drivers/md/bcache/request.h
drivers/md/bcache/stats.c
drivers/md/bcache/stats.h
drivers/md/bcache/super.c
drivers/md/bcache/sysfs.c
drivers/md/bcache/sysfs.h
drivers/md/bcache/util.c
drivers/md/bcache/util.h
drivers/md/bcache/writeback.c
drivers/md/bcache/writeback.h
drivers/misc/mic/scif/scif_dma.c
drivers/misc/sgi-gru/grutlbpurge.c
drivers/pci/pci-acpi.c
drivers/pci/quirks.c
drivers/rapidio/devices/rio_mport_cdev.c
drivers/xen/gntdev.c
fs/adfs/inode.c
fs/autofs/autofs_i.h
fs/autofs/expire.c
fs/autofs/inode.c
fs/autofs/root.c
fs/eventpoll.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inline.c
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/node.h
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/sysfs.c
fs/f2fs/xattr.c
fs/fat/cache.c
fs/fat/dir.c
fs/fat/fat.h
fs/fat/fatent.c
fs/fat/file.c
fs/fat/inode.c
fs/fat/misc.c
fs/fat/namei_msdos.c
fs/fat/namei_vfat.c
fs/hfsplus/Kconfig
fs/hfsplus/Makefile
fs/hfsplus/acl.h [deleted file]
fs/hfsplus/dir.c
fs/hfsplus/extents.c
fs/hfsplus/hfsplus_fs.h
fs/hfsplus/inode.c
fs/hfsplus/posix_acl.c [deleted file]
fs/hfsplus/super.c
fs/hfsplus/unicode.c
fs/hfsplus/xattr.c
fs/hfsplus/xattr.h
fs/hfsplus/xattr_security.c
fs/hugetlbfs/inode.c
fs/nilfs2/file.c
fs/nilfs2/super.c
fs/overlayfs/dir.c
fs/proc/Kconfig
fs/proc/base.c
fs/proc/generic.c
fs/proc/inode.c
fs/proc/internal.h
fs/proc/kcore.c
fs/proc/meminfo.c
fs/proc/stat.c
fs/proc/task_mmu.c
fs/proc/task_nommu.c
fs/proc/uptime.c
fs/proc/vmcore.c
fs/reiserfs/item_ops.c
fs/reiserfs/journal.c
fs/reiserfs/procfs.c
fs/reiserfs/reiserfs.h
fs/reiserfs/xattr.c
fs/sysv/inode.c
fs/userfaultfd.c
include/acpi/acconfig.h
include/acpi/acexcep.h
include/acpi/acpixf.h
include/asm-generic/bug.h
include/asm-generic/export.h
include/linux/backing-dev-defs.h
include/linux/backing-dev.h
include/linux/bitops.h
include/linux/compiler.h
include/linux/crash_core.h
include/linux/crc64.h [new file with mode: 0644]
include/linux/export.h
include/linux/f2fs_fs.h
include/linux/init.h
include/linux/ipc_namespace.h
include/linux/kcore.h
include/linux/kernel.h
include/linux/kvm_host.h
include/linux/memcontrol.h
include/linux/memory_hotplug.h
include/linux/mm.h
include/linux/mmu_notifier.h
include/linux/mmzone.h
include/linux/net_dim.h
include/linux/nodemask.h
include/linux/oom.h
include/linux/pci.h
include/linux/percpu.h
include/linux/proc_fs.h
include/linux/sched.h
include/linux/sched/signal.h
include/linux/sched/sysctl.h
include/linux/sched/user.h
include/linux/shrinker.h
include/linux/signal.h
include/linux/swap.h
include/linux/tracepoint.h
include/rdma/ib_umem_odp.h
include/uapi/linux/auto_fs.h
include/uapi/linux/bcache.h
init/Kconfig
init/do_mounts.c
init/do_mounts_initrd.c
init/do_mounts_md.c
init/do_mounts_rd.c
init/initramfs.c
init/main.c
ipc/msg.c
ipc/namespace.c
ipc/sem.c
ipc/shm.c
ipc/util.c
ipc/util.h
kernel/crash_core.c
kernel/fork.c
kernel/hung_task.c
kernel/module.c
kernel/power/Kconfig
kernel/printk/printk.c
kernel/sched/idle.c
kernel/sched/wait.c
kernel/signal.c
kernel/sysctl.c
kernel/trace/blktrace.c
kernel/tracepoint.c
kernel/user.c
lib/.gitignore
lib/Kconfig
lib/Kconfig.debug
lib/Makefile
lib/bitmap.c
lib/crc64.c [new file with mode: 0644]
lib/gen_crc64table.c [new file with mode: 0644]
lib/rhashtable.c
lib/test_debug_virtual.c
lib/test_hexdump.c
mm/Kconfig.debug
mm/backing-dev.c
mm/hmm.c
mm/internal.h
mm/ksm.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/mempool.c
mm/mm_init.c
mm/mmap.c
mm/mmu_notifier.c
mm/oom_kill.c
mm/page_alloc.c
mm/percpu.c
mm/shmem.c
mm/swap_slots.c
mm/swapfile.c
mm/vmscan.c
scripts/checkpatch.pl
scripts/get_maintainer.pl
scripts/spelling.txt
security/security.c
tools/testing/selftests/proc/.gitignore
tools/testing/selftests/proc/Makefile
tools/testing/selftests/proc/proc.h
tools/testing/selftests/proc/self.c [new file with mode: 0644]
tools/testing/selftests/proc/thread-self.c [new file with mode: 0644]
tools/testing/selftests/vm/.gitignore
tools/testing/selftests/vm/Makefile
tools/testing/selftests/vm/map_populate.c [new file with mode: 0644]
tools/testing/selftests/vm/run_vmtests
virt/kvm/kvm_main.c

index 9b0123388f182dfe5829205a73bdf5a63bd32e10..94a24aedcdb237cfe07fb9f4fe0c33e953a49bd5 100644 (file)
@@ -51,6 +51,14 @@ Description:
                 Controls the dirty page count condition for the in-place-update
                 policies.
 
+What:          /sys/fs/f2fs/<disk>/min_seq_blocks
+Date:          August 2018
+Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+                Controls the dirty page count condition for batched sequential
+                writes in ->writepages.
+
+
 What:          /sys/fs/f2fs/<disk>/min_hot_blocks
 Date:          March 2017
 Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
index 1746131bc9cb315dca4d4b03b23678cece518727..184193bcb262ac908f1f5a7a7c2c662dec0ea4b8 100644 (file)
@@ -1072,6 +1072,24 @@ PAGE_SIZE multiple when read back.
        high limit is used and monitored properly, this limit's
        utility is limited to providing the final safety net.
 
+  memory.oom.group
+       A read-write single value file which exists on non-root
+       cgroups.  The default value is "0".
+
+       Determines whether the cgroup should be treated as
+       an indivisible workload by the OOM killer. If set,
+       all tasks belonging to the cgroup or to its descendants
+       (if the memory cgroup is not a leaf cgroup) are killed
+       together or not at all. This can be used to avoid
+       partial kills to guarantee workload integrity.
+
+       Tasks with the OOM protection (oom_score_adj set to -1000)
+       are treated as an exception and are never killed.
+
+       If the OOM killer is invoked in a cgroup, it's not going
+       to kill any tasks outside of this cgroup, regardless of
+       the memory.oom.group values of ancestor cgroups.
+
   memory.events
        A read-only flat-keyed file which exists on non-root cgroups.
        The following entries are defined.  Unless specified
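
As a usage illustration (not part of the patch), a supervisor opts a
cgroup into group kills by writing "1" to the new file. A minimal C
sketch, assuming a cgroup-v2 hierarchy mounted at /sys/fs/cgroup and a
cgroup named "myjob":

    /* Sketch: enable group OOM kills for one cgroup (path assumed). */
    #include <fcntl.h>
    #include <unistd.h>

    int enable_oom_group(void)
    {
            int fd = open("/sys/fs/cgroup/myjob/memory.oom.group", O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, "1", 1) != 1) {   /* "0" restores per-task kills */
                    close(fd);
                    return -1;
            }
            return close(fd);
    }
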
index bffb0caa369305e221108ae7557605acf7809b89..970d837bd57fcaf73479e457279a0d300835619b 100644 (file)
                        on: enable the feature
 
        page_poison=    [KNL] Boot-time parameter changing the state of
-                       poisoning on the buddy allocator.
-                       off: turn off poisoning
+                       poisoning on the buddy allocator, available with
+                       CONFIG_PAGE_POISONING=y.
+                       off: turn off poisoning (default)
                        on: turn on poisoning
 
        panic=          [KNL] Kernel behaviour on panic: delay <timeout>
index 69f8de99573974749263a33d75904ad5d37971de..e5edd29687b50a74f8946ca215f639bb71b434fa 100644 (file)
@@ -157,6 +157,24 @@ data_flush             Enable data flushing before checkpoint in order to
                        persist data of regular and symlink.
 fault_injection=%d     Enable fault injection in all supported types with
                        specified injection rate.
+fault_type=%d          Support configuring the fault injection type; it
+                       should be enabled together with the fault_injection
+                       option. The fault type values are listed below and
+                       may be used singly or OR'ed together.
+                       Type_Name               Type_Value
+                       FAULT_KMALLOC           0x000000001
+                       FAULT_KVMALLOC          0x000000002
+                       FAULT_PAGE_ALLOC        0x000000004
+                       FAULT_PAGE_GET          0x000000008
+                       FAULT_ALLOC_BIO         0x000000010
+                       FAULT_ALLOC_NID         0x000000020
+                       FAULT_ORPHAN            0x000000040
+                       FAULT_BLOCK             0x000000080
+                       FAULT_DIR_DEPTH         0x000000100
+                       FAULT_EVICT_INODE       0x000000200
+                       FAULT_TRUNCATE          0x000000400
+                       FAULT_IO                0x000000800
+                       FAULT_CHECKPOINT        0x000001000
+                       FAULT_DISCARD           0x000002000
 mode=%s                Control block allocation mode which supports "adaptive"
                        and "lfs". In "lfs" mode, there should be no random
                        writes towards main area.
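
Because fault_type is a bitmask, types combine by OR-ing the values in the
table above, e.g. FAULT_KMALLOC (0x1) | FAULT_PAGE_ALLOC (0x4) = 0x5. A
sketch of passing such a combined mask through mount(2); the device, the
mountpoint and the rate value are placeholders:

    /* Sketch: mount f2fs with kmalloc + page-allocation fault injection. */
    #include <sys/mount.h>

    int mount_with_faults(void)
    {
            return mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0,
                         "fault_injection=1,fault_type=0x5");
    }
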
index 1605acbb23b6631f11d4979017e361360c4208a2..22b4b00dee31290bde37b3abc63f4a0807f973cc 100644 (file)
@@ -870,6 +870,7 @@ Committed_AS:   100056 kB
 VmallocTotal:   112216 kB
 VmallocUsed:       428 kB
 VmallocChunk:   111088 kB
+Percpu:          62080 kB
 HardwareCorrupted:   0 kB
 AnonHugePages:   49152 kB
 ShmemHugePages:      0 kB
@@ -962,6 +963,8 @@ Committed_AS: The amount of memory presently allocated on the system.
 VmallocTotal: total size of vmalloc memory area
  VmallocUsed: amount of vmalloc area which is used
 VmallocChunk: largest contiguous block of vmalloc area which is free
+      Percpu: Memory allocated to the percpu allocator used to back percpu
+              allocations. This stat excludes the cost of metadata.
 
 ..............................................................................
 
index 59585030cbafa4e6a489980caf54234b5c91eeef..37a679501ddc68bc0ab26c58444794c0d30c8f40 100644 (file)
@@ -38,6 +38,7 @@ show up in /proc/sys/kernel:
 - hung_task_panic
 - hung_task_check_count
 - hung_task_timeout_secs
+- hung_task_check_interval_secs
 - hung_task_warnings
 - hyperv_record_panic_msg
 - kexec_load_disabled
@@ -355,7 +356,7 @@ This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
 
 hung_task_timeout_secs:
 
-Check interval. When a task in D state did not get scheduled
+When a task in D state did not get scheduled
for more than this value report a warning.
This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
 This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
 
@@ -364,6 +365,18 @@ Possible values to set are in range {0..LONG_MAX/HZ}.
 
 ==============================================================
 
+hung_task_check_interval_secs:
+
+Hung task check interval. If hung task checking is enabled
+(see hung_task_timeout_secs), the check is done every
+hung_task_check_interval_secs seconds.
+This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
+
+0 (default): use hung_task_timeout_secs as the checking interval.
+Possible values to set are in range {0..LONG_MAX/HZ}.
+
+==============================================================
+
 hung_task_warnings:
 
 The maximum number of warnings to report. During a check interval
@@ -451,7 +464,8 @@ Notes:
 1) kernel doesn't guarantee, that new object will have desired id. So,
 it's up to userspace, how to handle an object with "wrong" id.
 2) Toggle with non-default value will be set back to -1 by kernel after
-successful IPC object allocation.
+successful IPC object allocation. If an IPC object allocation syscall
+fails, it is undefined whether the value remains unmodified or is reset to -1.
 
 ==============================================================
 
index e72853b6d7257b30c93eceff5eee014cfa146ae6..7d73882e2c273c57d1277c701b71a1774b22763c 100644 (file)
@@ -691,7 +691,7 @@ and don't use much of it.
 The default value is 0.
 
 See Documentation/vm/overcommit-accounting.rst and
-mm/mmap.c::__vm_enough_memory() for more information.
+mm/util.c::__vm_enough_memory() for more information.
 
 ==============================================================
 
index c6148166a7b4b0df1873ab55f299f148d9a8d6a9..4426e9687d89832e405c448eef7e41cb7a26d9ce 100644 (file)
@@ -841,6 +841,16 @@ config REFCOUNT_FULL
          against various use-after-free conditions that can be used in
          security flaw exploits.
 
+config HAVE_ARCH_PREL32_RELOCATIONS
+       bool
+       help
+         May be selected by an architecture if it supports place-relative
+         32-bit relocations, both in the toolchain and in the module loader,
+         in which case relative references can be used in special sections
+         for PCI fixups, initcalls, etc.  Such references are only half the
+         size on 64-bit architectures and don't require runtime relocation
+         on relocatable kernels.
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
index d9c2991331111617cc3fce04f38fe17fd1a78f6d..82ab015bf42b7bf263cd13d61274e6f4e899d9db 100644 (file)
@@ -330,16 +330,15 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
  * atomic helpers. Insert it into the gate_vma so that it is visible
  * through ptrace and /proc/<pid>/mem.
  */
-static struct vm_area_struct gate_vma = {
-       .vm_start       = 0xffff0000,
-       .vm_end         = 0xffff0000 + PAGE_SIZE,
-       .vm_flags       = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
-};
+static struct vm_area_struct gate_vma;
 
 static int __init gate_vma_init(void)
 {
        vma_init(&gate_vma, NULL);
        gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
+       gate_vma.vm_start = 0xffff0000;
+       gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
+       gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
        return 0;
 }
 arch_initcall(gate_vma_init);
index d0a53cc6293a3d3c288ffc1f9809d90fe439cea2..29e75b47becd5a94c9e4cf7944e77a67cdca05d7 100644 (file)
@@ -108,6 +108,7 @@ config ARM64
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_MMAP_RND_BITS
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+       select HAVE_ARCH_PREL32_RELOCATIONS
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_STACKLEAK
        select HAVE_ARCH_THREAD_STRUCT_WHITELIST
index 5e89d40be8cd9764bb3c8e80dc7cbcfa3f2ce959..0b334b671e90c95f817a8638a89655f7f717c4d8 100644 (file)
@@ -16,6 +16,7 @@ config H8300
        select OF_IRQ
        select OF_EARLY_FLATTREE
        select HAVE_MEMBLOCK
+       select NO_BOOTMEM
        select TIMER_OF
        select H8300_TMR8
        select HAVE_KERNEL_GZIP
index e1c02ca230cb0a9dbd985b59b0646eb86f0d9891..cc12b162c22286097918d120ac1dd5cafbe834cf 100644 (file)
@@ -8,6 +8,8 @@
 # (C) Copyright 2002-2015 Yoshinori Sato <ysato@users.sourceforge.jp>
 #
 
+KBUILD_DEFCONFIG := edosk2674_defconfig
+
 cflags-$(CONFIG_CPU_H8300H)    := -mh
 aflags-$(CONFIG_CPU_H8300H)    := -mh -Wa,--mach=h8300h
 ldflags-$(CONFIG_CPU_H8300H)   := -mh8300helf_linux
@@ -22,6 +24,8 @@ KBUILD_CFLAGS += -DUTS_SYSNAME=\"uClinux\"
 KBUILD_AFLAGS += $(aflags-y)
 LDFLAGS += $(ldflags-y)
 
+CHECKFLAGS += -msize-long
+
 ifeq ($(CROSS_COMPILE),)
 CROSS_COMPILE := h8300-unknown-linux-
 endif
index f1c31cecdec8a2e1be00c741d5bf40bf81f8c4e9..595398b9d0180a805c0c70fc5a4d0501cdf76504 100644 (file)
@@ -73,7 +73,7 @@
        timer16: timer@ffff68 {
                compatible = "renesas,16bit-timer";
                reg = <0xffff68 8>, <0xffff60 8>;
-               interrupts = <24 0>;
+               interrupts = <26 0>;
                renesas,channel = <0>;
                clocks = <&fclk>;
                clock-names = "fck";
index ea0cb0cf6a8bb2ed519a6176dec51ef92b77d1b1..647a83bd40b7093ca9ac37f7e14593ca51e55c54 100644 (file)
@@ -29,11 +29,11 @@ static inline unsigned long ffz(unsigned long word)
 
        result = -1;
        __asm__("1:\n\t"
-               "shlr.l %2\n\t"
+               "shlr.l %1\n\t"
                "adds #1,%0\n\t"
                "bcs 1b"
-               : "=r"(result)
-               : "0"(result), "r"(word));
+               : "=r"(result),"=r"(word)
+               : "0"(result), "1"(word));
        return result;
 }
 
@@ -66,7 +66,7 @@ H8300_GEN_BITOP(change_bit, "bnot")
 
 #undef H8300_GEN_BITOP
 
-static inline int test_bit(int nr, const unsigned long *addr)
+static inline int test_bit(int nr, const volatile unsigned long *addr)
 {
        int ret = 0;
        unsigned char *b_addr;
@@ -162,11 +162,11 @@ static inline unsigned long __ffs(unsigned long word)
 
        result = -1;
        __asm__("1:\n\t"
-               "shlr.l %2\n\t"
+               "shlr.l %1\n\t"
                "adds #1,%0\n\t"
                "bcc 1b"
-               : "=r" (result)
-               : "0"(result), "r"(word));
+               : "=r" (result),"=r"(word)
+               : "0"(result), "1"(word));
        return result;
 }
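
The ffz()/__ffs() change above fixes a classic extended-asm bug: the shift
loop destroys `word`, but the old constraints declared it input-only, so
the compiler was free to assume the register still held `word` afterwards.
The fix lists it as a written output tied to its input. The same pattern
rendered for x86-64 as a self-contained illustration (not code from this
merge):

    /* Sketch: find the lowest set bit by shifting it out.  The asm
     * clobbers 'word', so it is listed as an output ("=r") tied to
     * its input ("1"), exactly as in the h8300 fix.  Undefined for
     * word == 0, like __ffs().
     */
    static inline unsigned long my_ffs(unsigned long word)
    {
            unsigned long result = -1UL;

            asm("1:\n\t"
                "shr %1\n\t"
                "inc %0\n\t"
                "jnc 1b"
                : "=r" (result), "=r" (word)
                : "0" (result), "1" (word)
                : "cc");
            return result;
    }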
 
index 313cafa853807866927bbf7f0a792e521f4b3fd6..66d383848ff1f2a7a27095b4764e0be075cb95c8 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <uapi/asm/ptrace.h>
 
+struct task_struct;
+
 #ifndef __ASSEMBLY__
 #ifndef PS_S
 #define PS_S  (0x10)
index 602e478afbd5800eab9ba03277bf7d4db36336f0..1a1d30cb0609c60cc3a92528060a10f1f4d286e4 100644 (file)
@@ -129,7 +129,7 @@ void kgdb_arch_exit(void)
        /* Nothing to do */
 }
 
-const struct kgdb_arch arch_kgdb_ops = {
+struct kgdb_arch arch_kgdb_ops = {
        /* Breakpoint instruction: trapa #2 */
        .gdb_bpt_instr = { 0x57, 0x20 },
 };
index a4d0470c10a9ff98356ca33ff68152e072865c74..34e2df5c0d6d8724509cdbacab72c9914397fa2d 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
-#include <linux/of_platform.h>
 #include <linux/of_address.h>
 #include <linux/clk-provider.h>
 #include <linux/memblock.h>
@@ -71,10 +70,6 @@ void __init h8300_fdt_init(void *fdt, char *bootargs)
 
 static void __init bootmem_init(void)
 {
-       int bootmap_size;
-       unsigned long ram_start_pfn;
-       unsigned long free_ram_start_pfn;
-       unsigned long ram_end_pfn;
        struct memblock_region *region;
 
        memory_end = memory_start = 0;
@@ -88,33 +83,17 @@ static void __init bootmem_init(void)
        if (!memory_end)
                panic("No memory!");
 
-       ram_start_pfn = PFN_UP(memory_start);
-       /* free_ram_start_pfn is first page after kernel */
-       free_ram_start_pfn = PFN_UP(__pa(_end));
-       ram_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
+       /* setup bootmem globals (we use no_bootmem, but mm still depends on this) */
+       min_low_pfn = PFN_UP(memory_start);
+       max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+       max_pfn = max_low_pfn;
 
-       max_pfn = ram_end_pfn;
+       memblock_reserve(__pa(_stext), _end - _stext);
 
-       /*
-        * give all the memory to the bootmap allocator,  tell it to put the
-        * boot mem_map at the start of memory
-        */
-       bootmap_size = init_bootmem_node(NODE_DATA(0),
-                                        free_ram_start_pfn,
-                                        0,
-                                        ram_end_pfn);
-       /*
-        * free the usable memory,  we have to make sure we do not free
-        * the bootmem bitmap so we then reserve it after freeing it :-)
-        */
-       free_bootmem(PFN_PHYS(free_ram_start_pfn),
-                    (ram_end_pfn - free_ram_start_pfn) << PAGE_SHIFT);
-       reserve_bootmem(PFN_PHYS(free_ram_start_pfn), bootmap_size,
-                       BOOTMEM_DEFAULT);
+       early_init_fdt_reserve_self();
+       early_init_fdt_scan_reserved_mem();
 
-       for_each_memblock(reserved, region) {
-               reserve_bootmem(region->base, region->size, BOOTMEM_DEFAULT);
-       }
+       memblock_dump_all();
 }
 
 void __init setup_arch(char **cmdline_p)
@@ -188,15 +167,6 @@ const struct seq_operations cpuinfo_op = {
        .show   = show_cpuinfo,
 };
 
-static int __init device_probe(void)
-{
-       of_platform_populate(NULL, NULL, NULL, NULL);
-
-       return 0;
-}
-
-device_initcall(device_probe);
-
 #if defined(CONFIG_CPU_H8300H)
 #define get_wait(base, addr) ({                \
        int baddr;                      \
index 46138f55a9ea7eedb86eeb60e074655d863bcc69..03aa35b1a08c2701765da7a61bf5017f97099ce3 100644 (file)
 
 static void sim_write(struct console *con, const char *s, unsigned n)
 {
-       register const int fd __asm__("er0") = 1; /* stdout */
        register const char *_ptr __asm__("er1") = s;
        register const unsigned _len __asm__("er2") = n;
 
-       __asm__(".byte 0x5e,0x00,0x00,0xc7\n\t" /* jsr @0xc7 (sys_write) */
-               : : "g"(fd), "g"(_ptr), "g"(_len));
+       __asm__("sub.l er0,er0\n\t"             /* er0 = 1 (stdout) */
+               "inc.l #1,er0\n\t"
+               ".byte 0x5e,0x00,0x00,0xc7\n\t" /* jsr @0xc7 (sys_write) */
+               : : "g"(_ptr), "g"(_len):"er0");
 }
 
 static int __init sim_setup(struct earlycon_device *device, const char *opt)
index a80669209155383343ba8adbb90b3e8427e2afdb..db0b6eebbfa5b55a6f7c4f55b1d489bcf158d974 100644 (file)
@@ -177,6 +177,7 @@ config PPC
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_MMAP_RND_BITS
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS   if COMPAT
+       select HAVE_ARCH_PREL32_RELOCATIONS
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_CBPF_JIT                    if !PPC64
index 2d58c26bff9a72f9c2d98ca3757eef5faa5356c9..e6f2a38d2e61ece051d30350ad332b7b2acab229 100644 (file)
@@ -45,9 +45,13 @@ config SPARC
        select LOCKDEP_SMALL if LOCKDEP
        select NEED_DMA_MAP_STATE
        select NEED_SG_DMA_LENGTH
+       select HAVE_MEMBLOCK
+       select NO_BOOTMEM
 
 config SPARC32
        def_bool !64BIT
+       select ARCH_HAS_SYNC_DMA_FOR_CPU
+       select DMA_NONCOHERENT_OPS
        select GENERIC_ATOMIC64
        select CLZ_TAB
        select HAVE_UID16
@@ -60,7 +64,6 @@ config SPARC64
        select HAVE_KRETPROBES
        select HAVE_KPROBES
        select HAVE_RCU_TABLE_FREE if SMP
-       select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select HAVE_DYNAMIC_FTRACE
@@ -79,7 +82,6 @@ config SPARC64
        select IRQ_PREFLOW_FASTEOI
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select HAVE_C_RECORDMCOUNT
-       select NO_BOOTMEM
        select HAVE_ARCH_AUDITSYSCALL
        select ARCH_SUPPORTS_ATOMIC_RMW
        select HAVE_NMI
index 966a13d2b127e7c4933aaf561d90f47a36a8a67d..e32ef20de567702368bcfbfd7de371150aeedd5a 100644 (file)
@@ -9,10 +9,10 @@
 # Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 
 # We are not yet configured - so test on arch
-ifeq ($(ARCH),sparc)
-        KBUILD_DEFCONFIG := sparc32_defconfig
-else
+ifeq ($(ARCH),sparc64)
         KBUILD_DEFCONFIG := sparc64_defconfig
+else
+        KBUILD_DEFCONFIG := sparc32_defconfig
 endif
 
 ifeq ($(CONFIG_SPARC32),y)
index 12ae33daf52f2a9308918331ac992d0a4cb482ab..e17566376934f5b5dc796083b4c11fcb36aa140f 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/dma-debug.h>
 
 extern const struct dma_map_ops *dma_ops;
-extern const struct dma_map_ops pci32_dma_ops;
 
 extern struct bus_type pci_bus_type;
 
@@ -15,11 +14,11 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 #ifdef CONFIG_SPARC_LEON
        if (sparc_cpu_model == sparc_leon)
-               return &pci32_dma_ops;
+               return &dma_noncoherent_ops;
 #endif
 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
        if (bus == &pci_bus_type)
-               return &pci32_dma_ops;
+               return &dma_noncoherent_ops;
 #endif
        return dma_ops;
 }
index cca9134cfa7d2984cbc7b7b8c47073dd51b9de21..6799c93c9f274ff0c9a60a68131ae61f74fba937 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/of_device.h>
 
 #include <asm/io.h>
@@ -434,42 +435,41 @@ arch_initcall(sparc_register_ioport);
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
  */
-static void *pci32_alloc_coherent(struct device *dev, size_t len,
-                                 dma_addr_t *pba, gfp_t gfp,
-                                 unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
 {
-       unsigned long len_total = PAGE_ALIGN(len);
+       unsigned long len_total = PAGE_ALIGN(size);
        void *va;
        struct resource *res;
        int order;
 
-       if (len == 0) {
+       if (size == 0) {
                return NULL;
        }
-       if (len > 256*1024) {                   /* __get_free_pages() limit */
+       if (size > 256*1024) {                  /* __get_free_pages() limit */
                return NULL;
        }
 
        order = get_order(len_total);
        va = (void *) __get_free_pages(gfp, order);
        if (va == NULL) {
-               printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
+               printk("%s: no %ld pages\n", __func__, len_total>>PAGE_SHIFT);
                goto err_nopages;
        }
 
        if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
-               printk("pci_alloc_consistent: no core\n");
+               printk("%s: no core\n", __func__);
                goto err_nomem;
        }
 
        if (allocate_resource(&_sparc_dvma, res, len_total,
            _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
-               printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
+               printk("%s: cannot occupy 0x%lx", __func__, len_total);
                goto err_nova;
        }
        srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
 
-       *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
+       *dma_handle = virt_to_phys(va);
        return (void *) res->start;
 
 err_nova:
@@ -481,184 +481,53 @@ err_nopages:
 }
 
 /* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
+ * cpu_addr is what was returned from arch_dma_alloc, size must be the same
+ * as what was passed into arch_dma_alloc, and likewise dma_addr must be the
+ * same as what *dma_handle was set to.
  *
  * References to the memory and mappings associated with cpu_addr/dma_addr
  * past this call are illegal.
  */
-static void pci32_free_coherent(struct device *dev, size_t n, void *p,
-                               dma_addr_t ba, unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+               dma_addr_t dma_addr, unsigned long attrs)
 {
        struct resource *res;
 
        if ((res = lookup_resource(&_sparc_dvma,
-           (unsigned long)p)) == NULL) {
-               printk("pci_free_consistent: cannot free %p\n", p);
+           (unsigned long)cpu_addr)) == NULL) {
+               printk("%s: cannot free %p\n", __func__, cpu_addr);
                return;
        }
 
-       if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
-               printk("pci_free_consistent: unaligned va %p\n", p);
+       if (((unsigned long)cpu_addr & (PAGE_SIZE-1)) != 0) {
+               printk("%s: unaligned va %p\n", __func__, cpu_addr);
                return;
        }
 
-       n = PAGE_ALIGN(n);
-       if (resource_size(res) != n) {
-               printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
-                   (long)resource_size(res), (long)n);
+       size = PAGE_ALIGN(size);
+       if (resource_size(res) != size) {
+               printk("%s: region 0x%lx asked 0x%zx\n", __func__,
+                   (long)resource_size(res), size);
                return;
        }
 
-       dma_make_coherent(ba, n);
-       srmmu_unmapiorange((unsigned long)p, n);
+       dma_make_coherent(dma_addr, size);
+       srmmu_unmapiorange((unsigned long)cpu_addr, size);
 
        release_resource(res);
        kfree(res);
-       free_pages((unsigned long)phys_to_virt(ba), get_order(n));
-}
-
-/*
- * Same as pci_map_single, but with pages.
- */
-static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
-                                unsigned long offset, size_t size,
-                                enum dma_data_direction dir,
-                                unsigned long attrs)
-{
-       /* IIep is write-through, not flushing. */
-       return page_to_phys(page) + offset;
-}
-
-static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
-                            enum dma_data_direction dir, unsigned long attrs)
-{
-       if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_make_coherent(ba, PAGE_ALIGN(size));
-}
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scatter-gather version of the
- * above pci_map_single interface.  Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
-                       int nents, enum dma_data_direction dir,
-                       unsigned long attrs)
-{
-       struct scatterlist *sg;
-       int n;
-
-       /* IIep is write-through, not flushing. */
-       for_each_sg(sgl, sg, nents, n) {
-               sg->dma_address = sg_phys(sg);
-               sg->dma_length = sg->length;
-       }
-       return nents;
-}
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
-                          int nents, enum dma_data_direction dir,
-                          unsigned long attrs)
-{
-       struct scatterlist *sg;
-       int n;
-
-       if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-               for_each_sg(sgl, sg, nents, n) {
-                       dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
-               }
-       }
-}
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation before or after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, you
- * must first perform a pci_dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
-                                     size_t size, enum dma_data_direction dir)
-{
-       if (dir != PCI_DMA_TODEVICE) {
-               dma_make_coherent(ba, PAGE_ALIGN(size));
-       }
-}
-
-static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
-                                        size_t size, enum dma_data_direction dir)
-{
-       if (dir != PCI_DMA_TODEVICE) {
-               dma_make_coherent(ba, PAGE_ALIGN(size));
-       }
+       free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size));
 }
 
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single_* but for a scatter-gather list,
- * same rules and usage.
- */
-static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
-                                 int nents, enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int n;
-
-       if (dir != PCI_DMA_TODEVICE) {
-               for_each_sg(sgl, sg, nents, n) {
-                       dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
-               }
-       }
-}
+/* IIep is write-through, not flushing on cpu to device transfer. */
 
-static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
-                                    int nents, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+               size_t size, enum dma_data_direction dir)
 {
-       struct scatterlist *sg;
-       int n;
-
-       if (dir != PCI_DMA_TODEVICE) {
-               for_each_sg(sgl, sg, nents, n) {
-                       dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
-               }
-       }
+       if (dir != PCI_DMA_TODEVICE)
+               dma_make_coherent(paddr, PAGE_ALIGN(size));
 }
 
-/* note: leon re-uses pci32_dma_ops */
-const struct dma_map_ops pci32_dma_ops = {
-       .alloc                  = pci32_alloc_coherent,
-       .free                   = pci32_free_coherent,
-       .map_page               = pci32_map_page,
-       .unmap_page             = pci32_unmap_page,
-       .map_sg                 = pci32_map_sg,
-       .unmap_sg               = pci32_unmap_sg,
-       .sync_single_for_cpu    = pci32_sync_single_for_cpu,
-       .sync_single_for_device = pci32_sync_single_for_device,
-       .sync_sg_for_cpu        = pci32_sync_sg_for_cpu,
-       .sync_sg_for_device     = pci32_sync_sg_for_device,
-};
-EXPORT_SYMBOL(pci32_dma_ops);
-
 const struct dma_map_ops *dma_ops = &sbus_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
index 95fe4f081ba3b152c59fa001f279b976c8152e41..92634d4e440c26be54dddd96c22160259654dd1e 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/pagemap.h>
 #include <linux/poison.h>
 #include <linux/gfp.h>
@@ -101,13 +102,46 @@ static unsigned long calc_max_low_pfn(void)
        return tmp;
 }
 
+static void __init find_ramdisk(unsigned long end_of_phys_memory)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+       unsigned long size;
+
+       /* Now have to check initial ramdisk, so that it won't pass
+        * the end of memory
+        */
+       if (sparc_ramdisk_image) {
+               if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
+                       sparc_ramdisk_image -= KERNBASE;
+               initrd_start = sparc_ramdisk_image + phys_base;
+               initrd_end = initrd_start + sparc_ramdisk_size;
+               if (initrd_end > end_of_phys_memory) {
+                       printk(KERN_CRIT "initrd extends beyond end of memory "
+                              "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
+                              initrd_end, end_of_phys_memory);
+                       initrd_start = 0;
+               } else {
+                       /* Reserve the initrd image area. */
+                       size = initrd_end - initrd_start;
+                       memblock_reserve(initrd_start, size);
+
+                       initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
+                       initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
+               }
+       }
+#endif
+}
+
 unsigned long __init bootmem_init(unsigned long *pages_avail)
 {
-       unsigned long bootmap_size, start_pfn;
-       unsigned long end_of_phys_memory = 0UL;
-       unsigned long bootmap_pfn, bytes_avail, size;
+       unsigned long start_pfn, bytes_avail, size;
+       unsigned long end_of_phys_memory = 0;
+       unsigned long high_pages = 0;
        int i;
 
+       memblock_set_bottom_up(true);
+       memblock_allow_resize();
+
        bytes_avail = 0UL;
        for (i = 0; sp_banks[i].num_bytes != 0; i++) {
                end_of_phys_memory = sp_banks[i].base_addr +
@@ -124,24 +158,25 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
                                if (sp_banks[i].num_bytes == 0) {
                                        sp_banks[i].base_addr = 0xdeadbeef;
                                } else {
+                                       memblock_add(sp_banks[i].base_addr,
+                                                    sp_banks[i].num_bytes);
                                        sp_banks[i+1].num_bytes = 0;
                                        sp_banks[i+1].base_addr = 0xdeadbeef;
                                }
                                break;
                        }
                }
+               memblock_add(sp_banks[i].base_addr, sp_banks[i].num_bytes);
        }
 
        /* Start with page aligned address of last symbol in kernel
-        * image.  
+        * image.
         */
        start_pfn  = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));
 
        /* Now shift down to get the real physical page frame number. */
        start_pfn >>= PAGE_SHIFT;
 
-       bootmap_pfn = start_pfn;
-
        max_pfn = end_of_phys_memory >> PAGE_SHIFT;
 
        max_low_pfn = max_pfn;
@@ -150,85 +185,19 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
        if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {
                highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
                max_low_pfn = calc_max_low_pfn();
+               high_pages = calc_highpages();
                printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
-                   calc_highpages() >> (20 - PAGE_SHIFT));
+                   high_pages >> (20 - PAGE_SHIFT));
        }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-       /* Now have to check initial ramdisk, so that bootmap does not overwrite it */
-       if (sparc_ramdisk_image) {
-               if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
-                       sparc_ramdisk_image -= KERNBASE;
-               initrd_start = sparc_ramdisk_image + phys_base;
-               initrd_end = initrd_start + sparc_ramdisk_size;
-               if (initrd_end > end_of_phys_memory) {
-                       printk(KERN_CRIT "initrd extends beyond end of memory "
-                                        "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
-                              initrd_end, end_of_phys_memory);
-                       initrd_start = 0;
-               }
-               if (initrd_start) {
-                       if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
-                           initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
-                               bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
-               }
-       }
-#endif 
-       /* Initialize the boot-time allocator. */
-       bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base,
-                                        max_low_pfn);
-
-       /* Now register the available physical memory with the
-        * allocator.
-        */
-       *pages_avail = 0;
-       for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-               unsigned long curr_pfn, last_pfn;
+       find_ramdisk(end_of_phys_memory);
 
-               curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
-               if (curr_pfn >= max_low_pfn)
-                       break;
-
-               last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
-               if (last_pfn > max_low_pfn)
-                       last_pfn = max_low_pfn;
-
-               /*
-                * .. finally, did all the rounding and playing
-                * around just make the area go away?
-                */
-               if (last_pfn <= curr_pfn)
-                       continue;
-
-               size = (last_pfn - curr_pfn) << PAGE_SHIFT;
-               *pages_avail += last_pfn - curr_pfn;
-
-               free_bootmem(sp_banks[i].base_addr, size);
-       }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-       if (initrd_start) {
-               /* Reserve the initrd image area. */
-               size = initrd_end - initrd_start;
-               reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT);
-               *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-               initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
-               initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;            
-       }
-#endif
        /* Reserve the kernel text/data/bss. */
        size = (start_pfn << PAGE_SHIFT) - phys_base;
-       reserve_bootmem(phys_base, size, BOOTMEM_DEFAULT);
-       *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+       memblock_reserve(phys_base, size);
 
-       /* Reserve the bootmem map.   We do not account for it
-        * in pages_avail because we will release that memory
-        * in free_all_bootmem.
-        */
-       size = bootmap_size;
-       reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);
-       *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+       size = memblock_phys_mem_size() - memblock_reserved_size();
+       *pages_avail = (size >> PAGE_SHIFT) - high_pages;
 
        return max_pfn;
 }
@@ -322,7 +291,7 @@ void __init mem_init(void)
 
                map_high_region(start_pfn, end_pfn);
        }
-       
+
        mem_init_print_info(NULL);
 }
 
index b0312f8947cee149cc1cea4d5b4bb851499a7b86..512003f168895c231150f943d18db545e82e9f61 100644 (file)
@@ -124,6 +124,7 @@ config X86
        select HAVE_ARCH_MMAP_RND_BITS          if MMU
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS   if MMU && COMPAT
        select HAVE_ARCH_COMPAT_MMAP_BASES      if MMU && COMPAT
+       select HAVE_ARCH_PREL32_RELOCATIONS
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_THREAD_STRUCT_WHITELIST
        select HAVE_ARCH_TRACEHOOK
index 302517929932bb52405d5396c57f084691090e5e..d1e19f358b6ec5fbc5c682e02d2b36149b0aa43a 100644 (file)
  * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
  * While both lib/ctype.c and lib/cmdline.c will bring EXPORT_SYMBOL
  * which is meaningless and will cause compiling error in some cases.
- * So do not include linux/export.h and define EXPORT_SYMBOL(sym)
- * as empty.
  */
-#define _LINUX_EXPORT_H
-#define EXPORT_SYMBOL(sym)
+#define __DISABLE_EXPORTS
 
 #include "misc.h"
 #include "error.h"
index de690c2d2e33aafc529e30d0b8066534c1789669..a0ab9ab61c754c6a413744bd54eb9599bfbf5dc1 100644 (file)
@@ -8,5 +8,6 @@ generated-y += xen-hypercalls.h
 
 generic-y += dma-contiguous.h
 generic-y += early_ioremap.h
+generic-y += export.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
diff --git a/arch/x86/include/asm/export.h b/arch/x86/include/asm/export.h
deleted file mode 100644 (file)
index 2a51d66..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_64BIT
-#define KSYM_ALIGN 16
-#endif
-#include <asm-generic/export.h>
index dde437f5d14ff828dccff19275033ff2330acc60..158ad1483c4352b2f93c7cbb931dfdb8dd40c3bf 100644 (file)
@@ -108,7 +108,7 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
                        cx->type);
        }
        snprintf(cx->desc,
-                       ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
+                       ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
                        cx->address);
 out:
        return retval;
index 14ee9a814888e54ea44e8da5c35fc43415689bb5..506bd2b4b8bb76e21a959310d33f6de6180719f8 100644 (file)
@@ -7303,8 +7303,9 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
        kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
 }
 
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
-               unsigned long start, unsigned long end)
+int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+               unsigned long start, unsigned long end,
+               bool blockable)
 {
        unsigned long apic_address;
 
@@ -7315,6 +7316,8 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
        apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
        if (start <= apic_address && apic_address < end)
                kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+
+       return 0;
 }
 
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
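
The new int return type and "blockable" flag come from the mmu-notifier
rework this merge builds on: a hook may now refuse work it could only do
by sleeping, and x86 never sleeps here, so it returns 0 unconditionally.
A hypothetical non-x86 hook using the flag might look like this (kernel
context; the lock is an assumption for illustration, not real KVM code):

    /* Sketch: bail out with -EAGAIN when sleeping is not allowed. */
    static DEFINE_MUTEX(arch_inv_lock);

    int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                    unsigned long start, unsigned long end,
                    bool blockable)
    {
            if (blockable)
                    mutex_lock(&arch_inv_lock);
            else if (!mutex_trylock(&arch_inv_lock))
                    return -EAGAIN; /* caller retries in blockable context */

            /* ... invalidate mappings covering [start, end) ... */

            mutex_unlock(&arch_inv_lock);
            return 0;
    }
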
index a9e8633388f40985a0a49246a1e95c37def3cc8f..58c6efa9f9a99970e7599b673df3f65f5f4a9810 100644 (file)
@@ -913,7 +913,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
        if (ret)
                return ret;
 
-       return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
+       ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
+       return ret ?: nbytes;
 }
 
 #ifdef CONFIG_DEBUG_BLK_CGROUP
index 41d9036b182249e8aff1e5e270e0d599baaf9445..653100fb719eb80e1bb11e9ef7761f14f960623f 100644 (file)
@@ -187,11 +187,25 @@ static const int bfq_stats_min_budgets = 194;
 static const int bfq_default_max_budget = 16 * 1024;
 
 /*
- * Async to sync throughput distribution is controlled as follows:
- * when an async request is served, the entity is charged the number
- * of sectors of the request, multiplied by the factor below
+ * When a sync request is dispatched, the queue that contains that
+ * request, and all the ancestor entities of that queue, are charged
+ * with the number of sectors of the request. In contrast, if the
+ * request is async, then the queue and its ancestor entities are
+ * charged with the number of sectors of the request, multiplied by
+ * the factor below. This throttles the bandwidth for async I/O
+ * w.r.t. sync I/O, and it is done to counter the tendency of async
+ * writes to steal I/O throughput from reads.
+ *
+ * The current value of this parameter is the result of a tuning with
+ * several hardware and software configurations. We tried to find the
+ * lowest value for which writes do not cause noticeable problems to
+ * reads. In fact, the lower this parameter, the more stable the I/O
+ * control, in the following respect.  The lower this parameter is,
+ * the less the bandwidth enjoyed by a group decreases
+ * - when the group does writes, w.r.t. when it does reads;
+ * - when other groups do reads, w.r.t. when they do writes.
  */
-static const int bfq_async_charge_factor = 10;
+static const int bfq_async_charge_factor = 3;
 
 /* Default timeout values, in jiffies, approximating CFQ defaults. */
 const int bfq_timeout = HZ / 8;
@@ -853,16 +867,7 @@ static unsigned long bfq_serv_to_charge(struct request *rq,
        if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
                return blk_rq_sectors(rq);
 
-       /*
-        * If there are no weight-raised queues, then amplify service
-        * by just the async charge factor; otherwise amplify service
-        * by twice the async charge factor, to further reduce latency
-        * for weight-raised queues.
-        */
-       if (bfqq->bfqd->wr_busy_queues == 0)
-               return blk_rq_sectors(rq) * bfq_async_charge_factor;
-
-       return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
+       return blk_rq_sectors(rq) * bfq_async_charge_factor;
 }
 
 /**
@@ -3298,6 +3303,27 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
                 */
        } else
                entity->service = 0;
+
+       /*
+        * Reset the received-service counter for every parent entity.
+        * Differently from what happens with bfqq->entity.service,
+        * the resetting of this counter never needs to be postponed
+        * for parent entities. In fact, in case bfqq may have a
+        * chance to go on being served using the last, partially
+        * consumed budget, bfqq->entity.service needs to be kept,
+        * because if bfqq then actually goes on being served using
+        * the same budget, the last value of bfqq->entity.service is
+        * needed to properly decrement bfqq->entity.budget by the
+        * portion already consumed. In contrast, it is not necessary
+        * to keep entity->service for parent entities too, because
+        * the bubble up of the new value of bfqq->entity.budget will
+        * make sure that the budgets of parent entities are correct,
+        * even in case bfqq and thus parent entities go on receiving
+        * service with the same budget.
+        */
+       entity = entity->parent;
+       for_each_entity(entity)
+               entity->service = 0;
 }
 
 /*
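
A quick worked example of the new charging rule in bfq_serv_to_charge()
above: a sync 8-sector request is still charged 8 sectors of service,
while an async one is now charged 8 * 3 = 24 sectors, independent of
whether weight-raised queues exist; the removed code charged 80 or 160
sectors (factor 10, doubled when any queue was weight-raised).

    /* Worked example: service charged for an 8-sector async request. */
    #include <stdio.h>

    int main(void)
    {
            const int sectors = 8;
            const int new_factor = 3;   /* bfq_async_charge_factor now */
            const int old_factor = 10;  /* doubled if wr_busy_queues > 0 */

            printf("old: %d or %d, new: %d\n",
                   sectors * old_factor, sectors * old_factor * 2,
                   sectors * new_factor);   /* old: 80 or 160, new: 24 */
            return 0;
    }
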
index dbc07b4560598e3b3aceb5b4d53ebe6c69cafe87..ae52bff43ce4ff1697fdfdbc0c9c10fb4148c69e 100644 (file)
@@ -130,10 +130,14 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
        if (!change_without_lookup) /* lookup needed */
                next_in_service = bfq_lookup_next_entity(sd, expiration);
 
-       if (next_in_service)
-               parent_sched_may_change = !sd->next_in_service ||
+       if (next_in_service) {
+               bool new_budget_triggers_change =
                        bfq_update_parent_budget(next_in_service);
 
+               parent_sched_may_change = !sd->next_in_service ||
+                       new_budget_triggers_change;
+       }
+
        sd->next_in_service = next_in_service;
 
        if (!next_in_service)
@@ -877,15 +881,11 @@ void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                          unsigned long time_ms)
 {
        struct bfq_entity *entity = &bfqq->entity;
-       int tot_serv_to_charge = entity->service;
-       unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout);
-
-       if (time_ms > 0 && time_ms < timeout_ms)
-               tot_serv_to_charge =
-                       (bfqd->bfq_max_budget * time_ms) / timeout_ms;
-
-       if (tot_serv_to_charge < entity->service)
-               tot_serv_to_charge = entity->service;
+       unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout);
+       unsigned long bounded_time_ms = min(time_ms, timeout_ms);
+       int serv_to_charge_for_time =
+               (bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms;
+       int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service);
 
        /* Increase budget to avoid inconsistencies */
        if (tot_serv_to_charge > entity->budget)
index 12550340418d950dccc3e27b615a6dace19b8b48..dee56c282efb092196cd9be8400b6bc68c7f7f27 100644 (file)
@@ -1036,7 +1036,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
                    laptop_mode_timer_fn, 0);
        timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
        INIT_WORK(&q->timeout_work, NULL);
-       INIT_LIST_HEAD(&q->queue_head);
        INIT_LIST_HEAD(&q->timeout_list);
        INIT_LIST_HEAD(&q->icq_list);
 #ifdef CONFIG_BLK_CGROUP
@@ -2162,7 +2161,9 @@ static inline bool should_fail_request(struct hd_struct *part,
 
 static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
 {
-       if (part->policy && op_is_write(bio_op(bio))) {
+       const int op = bio_op(bio);
+
+       if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
                char b[BDEVNAME_SIZE];
 
                WARN_ONCE(1,
index cf9c66c6d35a866c63a8a1785eeab769ec3298d4..29bfe8017a2d8e6cbadeab6b9d1d63d293f656a5 100644 (file)
@@ -462,50 +462,6 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q)
                blk_mq_sched_free_tags(set, hctx, i);
 }
 
-int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
-                          unsigned int hctx_idx)
-{
-       struct elevator_queue *e = q->elevator;
-       int ret;
-
-       if (!e)
-               return 0;
-
-       ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
-       if (ret)
-               return ret;
-
-       if (e->type->ops.mq.init_hctx) {
-               ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
-               if (ret) {
-                       blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
-                       return ret;
-               }
-       }
-
-       blk_mq_debugfs_register_sched_hctx(q, hctx);
-
-       return 0;
-}
-
-void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
-                           unsigned int hctx_idx)
-{
-       struct elevator_queue *e = q->elevator;
-
-       if (!e)
-               return;
-
-       blk_mq_debugfs_unregister_sched_hctx(hctx);
-
-       if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
-               e->type->ops.mq.exit_hctx(hctx, hctx_idx);
-               hctx->sched_data = NULL;
-       }
-
-       blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
-}
-
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 {
        struct blk_mq_hw_ctx *hctx;
index 0cb8f938dff9d6a9b2fd3f6b519790c6f9f7b805..4e028ee4243014ff8eed44627f8e0cc5068217ca 100644 (file)
@@ -28,11 +28,6 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
 
-int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
-                          unsigned int hctx_idx);
-void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
-                           unsigned int hctx_idx);
-
 static inline bool
 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
index 816923bf874d7c8eb55ed640735caab5131022a4..94e1ed667b6ea383a99f1cd76d6917af0d2a1ba6 100644 (file)
@@ -320,6 +320,18 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
        struct blk_mq_hw_ctx *hctx;
        int i;
 
+       /*
+        * __blk_mq_update_nr_hw_queues will update nr_hw_queues and
+        * queue_hw_ctx after freezing the queue, so we can use
+        * q_usage_counter to avoid racing with it. It uses synchronize_rcu
+        * to ensure that all users leave the critical section below and
+        * see the zeroed q_usage_counter.
+        */
+       rcu_read_lock();
+       if (percpu_ref_is_zero(&q->q_usage_counter)) {
+               rcu_read_unlock();
+               return;
+       }
 
        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;
@@ -335,7 +347,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
-
+       rcu_read_unlock();
 }
 
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
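
The guard added above pairs with the synchronize_rcu() call that __blk_mq_update_nr_hw_queues() gains further down. A condensed sketch of the two sides of the handshake (not verbatim kernel code; error paths trimmed):

    /* Reader side, as in blk_mq_queue_tag_busy_iter(): */
    rcu_read_lock();
    if (percpu_ref_is_zero(&q->q_usage_counter)) {
            rcu_read_unlock();      /* queue frozen; hctx map may change */
            return;
    }
    /* ... walk queue_hw_ctx safely ... */
    rcu_read_unlock();

    /* Updater side, as in __blk_mq_update_nr_hw_queues(): */
    blk_mq_freeze_queue(q);         /* drives q_usage_counter to zero */
    synchronize_rcu();              /* waits out readers that saw it non-zero */
    /* ... now safe to modify nr_hw_queues and queue_hw_ctx ... */
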
index 72a0033ccee92af029ccc1219fbeb4fc7c127455..85a1c1a59c72716ce2e31c280d7fd43d5c6e61e9 100644 (file)
@@ -2145,8 +2145,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
        if (set->ops->exit_request)
                set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
 
-       blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
-
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
 
@@ -2214,12 +2212,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
            set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
                goto free_bitmap;
 
-       if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
-               goto exit_hctx;
-
        hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
        if (!hctx->fq)
-               goto sched_exit_hctx;
+               goto exit_hctx;
 
        if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
                goto free_fq;
@@ -2233,8 +2228,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
 
  free_fq:
        kfree(hctx->fq);
- sched_exit_hctx:
-       blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
  exit_hctx:
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
@@ -2896,10 +2889,81 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
        return ret;
 }
 
+/*
+ * request_queue and elevator_type pair.
+ * It is just used by __blk_mq_update_nr_hw_queues to cache
+ * the elevator_type associated with a request_queue.
+ */
+struct blk_mq_qe_pair {
+       struct list_head node;
+       struct request_queue *q;
+       struct elevator_type *type;
+};
+
+/*
+ * Cache the elevator_type in qe pair list and switch the
+ * io scheduler to 'none'
+ */
+static bool blk_mq_elv_switch_none(struct list_head *head,
+               struct request_queue *q)
+{
+       struct blk_mq_qe_pair *qe;
+
+       if (!q->elevator)
+               return true;
+
+       qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
+       if (!qe)
+               return false;
+
+       INIT_LIST_HEAD(&qe->node);
+       qe->q = q;
+       qe->type = q->elevator->type;
+       list_add(&qe->node, head);
+
+       mutex_lock(&q->sysfs_lock);
+       /*
+        * After elevator_switch_mq, the previous elevator_queue will be
+        * released by elevator_release. The reference on the io scheduler
+        * module taken by elevator_get will also be put, so take another
+        * reference on the module here to prevent it from being removed
+        * while we still need it.
+        */
+       __module_get(qe->type->elevator_owner);
+       elevator_switch_mq(q, NULL);
+       mutex_unlock(&q->sysfs_lock);
+
+       return true;
+}
+
+static void blk_mq_elv_switch_back(struct list_head *head,
+               struct request_queue *q)
+{
+       struct blk_mq_qe_pair *qe;
+       struct elevator_type *t = NULL;
+
+       list_for_each_entry(qe, head, node)
+               if (qe->q == q) {
+                       t = qe->type;
+                       break;
+               }
+
+       if (!t)
+               return;
+
+       list_del(&qe->node);
+       kfree(qe);
+
+       mutex_lock(&q->sysfs_lock);
+       elevator_switch_mq(q, t);
+       mutex_unlock(&q->sysfs_lock);
+}
+
 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
                                                        int nr_hw_queues)
 {
        struct request_queue *q;
+       LIST_HEAD(head);
 
        lockdep_assert_held(&set->tag_list_lock);
 
@@ -2910,6 +2974,18 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_freeze_queue(q);
+       /*
+        * Sync with blk_mq_queue_tag_busy_iter.
+        */
+       synchronize_rcu();
+       /*
+        * Switch IO scheduler to 'none', cleaning up the data associated
+        * with the previous scheduler. We will switch back once we are done
+        * updating the sw-to-hw queue mappings.
+        */
+       list_for_each_entry(q, &set->tag_list, tag_set_list)
+               if (!blk_mq_elv_switch_none(&head, q))
+                       goto switch_back;
 
        set->nr_hw_queues = nr_hw_queues;
        blk_mq_update_queue_map(set);
@@ -2918,6 +2994,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
                blk_mq_queue_reinit(q);
        }
 
+switch_back:
+       list_for_each_entry(q, &set->tag_list, tag_set_list)
+               blk_mq_elv_switch_back(&head, q);
+
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_unfreeze_queue(q);
 }
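
Taken together, the remap is now bracketed by a scheduler detach/reattach. A condensed sketch of the resulting control flow (error handling and the switch_back label omitted):

    LIST_HEAD(head);

    list_for_each_entry(q, &set->tag_list, tag_set_list)
            blk_mq_freeze_queue(q);           /* q_usage_counter -> 0 */
    synchronize_rcu();                        /* flush busy_iter readers */

    list_for_each_entry(q, &set->tag_list, tag_set_list)
            blk_mq_elv_switch_none(&head, q); /* cache type, go to 'none' */

    set->nr_hw_queues = nr_hw_queues;
    blk_mq_update_queue_map(set);
    list_for_each_entry(q, &set->tag_list, tag_set_list)
            blk_mq_queue_reinit(q);

    list_for_each_entry(q, &set->tag_list, tag_set_list)
            blk_mq_elv_switch_back(&head, q); /* reattach cached scheduler */
    list_for_each_entry(q, &set->tag_list, tag_set_list)
            blk_mq_unfreeze_queue(q);
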
index 1d94a20374fcbf2108edd4d54927ad4b0d5c4647..bb93c7c2b182bc8efee456bb64c4da5f05939e3c 100644 (file)
@@ -576,12 +576,8 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags;
 
-       if (!rwb_enabled(rwb))
-               return;
-
        flags = bio_to_wbt_flags(rwb, bio);
-
-       if (!wbt_should_throttle(rwb, bio)) {
+       if (!(flags & WBT_TRACKED)) {
                if (flags & WBT_READ)
                        wb_timestamp(rwb, &rwb->last_issue);
                return;
index d4d67e94892042d4faefa12484438020af1edd9c..9db4e389582c8da7848d458a9d4f12d64a1aecae 100644 (file)
@@ -234,6 +234,8 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
 
 int elevator_init(struct request_queue *);
 int elevator_init_mq(struct request_queue *q);
+int elevator_switch_mq(struct request_queue *q,
+                             struct elevator_type *new_e);
 void elevator_exit(struct request_queue *, struct elevator_queue *);
 int elv_register_queue(struct request_queue *q);
 void elv_unregister_queue(struct request_queue *q);
@@ -297,7 +299,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int);
  *     b) the queue had IO stats enabled when this request was started, and
  *     c) it's a file system request
  */
-static inline int blk_do_io_stat(struct request *rq)
+static inline bool blk_do_io_stat(struct request *rq)
 {
        return rq->rq_disk &&
               (rq->rq_flags & RQF_IO_STAT) &&
index fa828b5bfd4b14c977b2d3cdea41ff12f1573344..5ea6e7d600e46edcd1eec808324640ab8b7099ed 100644 (file)
@@ -933,16 +933,13 @@ void elv_unregister(struct elevator_type *e)
 }
 EXPORT_SYMBOL_GPL(elv_unregister);
 
-static int elevator_switch_mq(struct request_queue *q,
+int elevator_switch_mq(struct request_queue *q,
                              struct elevator_type *new_e)
 {
        int ret;
 
        lockdep_assert_held(&q->sysfs_lock);
 
-       blk_mq_freeze_queue(q);
-       blk_mq_quiesce_queue(q);
-
        if (q->elevator) {
                if (q->elevator->registered)
                        elv_unregister_queue(q);
@@ -968,8 +965,6 @@ static int elevator_switch_mq(struct request_queue *q,
                blk_add_trace_msg(q, "elv switch: none");
 
 out:
-       blk_mq_unquiesce_queue(q);
-       blk_mq_unfreeze_queue(q);
        return ret;
 }
 
@@ -1021,8 +1016,17 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
        lockdep_assert_held(&q->sysfs_lock);
 
-       if (q->mq_ops)
-               return elevator_switch_mq(q, new_e);
+       if (q->mq_ops) {
+               blk_mq_freeze_queue(q);
+               blk_mq_quiesce_queue(q);
+
+               err = elevator_switch_mq(q, new_e);
+
+               blk_mq_unquiesce_queue(q);
+               blk_mq_unfreeze_queue(q);
+
+               return err;
+       }
 
        /*
         * Turn on BYPASS and drain all requests w/ elevator private data.
index c5367bf5487fcf3a1ac466e990675871a7af16ae..0f28a38a43ea1dadaeafbbefa478637deb1de90d 100644 (file)
@@ -164,6 +164,7 @@ struct acpi_namespace_node {
 #define ANOBJ_SUBTREE_HAS_INI           0x10   /* Used to optimize device initialization */
 #define ANOBJ_EVALUATED                 0x20   /* Set on first evaluation of node */
 #define ANOBJ_ALLOCATED_BUFFER          0x40   /* Method AML buffer is dynamic (install_method) */
+#define ANOBJ_NODE_EARLY_INIT           0x80   /* acpi_exec only: Node was created via init file (-fi) */
 
 #define ANOBJ_IS_EXTERNAL               0x08   /* iASL only: This object created via External() */
 #define ANOBJ_METHOD_NO_RETVAL          0x10   /* iASL only: Method has no return value */
index 3825df9234803a84cd5bc9ddd6892452eb87160f..bbb3b4d1e796cef48aad9362ca7d3022d3acab17 100644 (file)
 /* Flags for acpi_ns_lookup, acpi_ns_search_and_enter */
 
 #define ACPI_NS_NO_UPSEARCH         0
-#define ACPI_NS_SEARCH_PARENT       0x01
-#define ACPI_NS_DONT_OPEN_SCOPE     0x02
-#define ACPI_NS_NO_PEER_SEARCH      0x04
-#define ACPI_NS_ERROR_IF_FOUND      0x08
-#define ACPI_NS_PREFIX_IS_SCOPE     0x10
-#define ACPI_NS_EXTERNAL            0x20
-#define ACPI_NS_TEMPORARY           0x40
-#define ACPI_NS_OVERRIDE_IF_FOUND   0x80
+#define ACPI_NS_SEARCH_PARENT       0x0001
+#define ACPI_NS_DONT_OPEN_SCOPE     0x0002
+#define ACPI_NS_NO_PEER_SEARCH      0x0004
+#define ACPI_NS_ERROR_IF_FOUND      0x0008
+#define ACPI_NS_PREFIX_IS_SCOPE     0x0010
+#define ACPI_NS_EXTERNAL            0x0020
+#define ACPI_NS_TEMPORARY           0x0040
+#define ACPI_NS_OVERRIDE_IF_FOUND   0x0080
+#define ACPI_NS_EARLY_INIT          0x0100
 
 /* Flags for acpi_ns_walk_namespace */
 
index 2733cd4e418c49beb051f895a1b17037e65b37d0..3374d41582b53aaafec4334484f4085de2c4db76 100644 (file)
@@ -180,6 +180,8 @@ char acpi_ut_remove_leading_zeros(char **string);
 
 u8 acpi_ut_detect_hex_prefix(char **string);
 
+void acpi_ut_remove_hex_prefix(char **string);
+
 u8 acpi_ut_detect_octal_prefix(char **string);
 
 /*
index 556ff59bbbfcce71fa5c37b530c0dcb931d2c442..3e5f95390f0dd8a179c7e23751cf30a1d960e958 100644 (file)
@@ -763,7 +763,12 @@ acpi_db_command_dispatch(char *input_buffer,
        case CMD_DISASSEMBLE:
        case CMD_DISASM:
 
+#ifdef ACPI_DISASSEMBLER
                (void)acpi_db_disassemble_method(acpi_gbl_db_args[1]);
+#else
+               acpi_os_printf
+                   ("The AML Disassembler is not configured/present\n");
+#endif
                break;
 
        case CMD_DUMP:
@@ -872,7 +877,12 @@ acpi_db_command_dispatch(char *input_buffer,
 
        case CMD_LIST:
 
+#ifdef ACPI_DISASSEMBLER
                acpi_db_disassemble_aml(acpi_gbl_db_args[1], op);
+#else
+               acpi_os_printf
+                   ("The AML Disassembler is not configured/present\n");
+#endif
                break;
 
        case CMD_LOCKS:
index 9fcecf104ba001106de75f0dbc24d18c7163e305..d8b7a0fe92ecd07bca1547ac26f58ea2a5e5e28e 100644 (file)
@@ -216,6 +216,7 @@ cleanup:
        acpi_ut_remove_reference(obj_desc);
 }
 
+#ifdef ACPI_DISASSEMBLER
 /*******************************************************************************
  *
  * FUNCTION:    acpi_db_disassemble_aml
@@ -242,9 +243,8 @@ void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op)
        if (statements) {
                num_statements = strtoul(statements, NULL, 0);
        }
-#ifdef ACPI_DISASSEMBLER
+
        acpi_dm_disassemble(NULL, op, num_statements);
-#endif
 }
 
 /*******************************************************************************
@@ -317,8 +317,6 @@ acpi_status acpi_db_disassemble_method(char *name)
        walk_state->parse_flags |= ACPI_PARSE_DISASSEMBLE;
 
        status = acpi_ps_parse_aml(walk_state);
-
-#ifdef ACPI_DISASSEMBLER
        (void)acpi_dm_parse_deferred_ops(op);
 
        /* Now we can disassemble the method */
@@ -326,7 +324,6 @@ acpi_status acpi_db_disassemble_method(char *name)
        acpi_gbl_dm_opt_verbose = FALSE;
        acpi_dm_disassemble(NULL, op, 0);
        acpi_gbl_dm_opt_verbose = TRUE;
-#endif
 
        acpi_ps_delete_parse_tree(op);
 
@@ -337,6 +334,7 @@ acpi_status acpi_db_disassemble_method(char *name)
        acpi_ut_release_owner_id(&obj_desc->method.owner_id);
        return (AE_OK);
 }
+#endif
 
 /*******************************************************************************
  *
index 4647aa8efecbb79772102ace42ecc1aed73bd198..f2526726daf6cbf7c2c72e7697aaab398ba0beca 100644 (file)
@@ -10,6 +10,7 @@
 #include "amlcode.h"
 #include "acdebug.h"
 #include "acinterp.h"
+#include "acparser.h"
 
 #define _COMPONENT          ACPI_CA_DEBUGGER
 ACPI_MODULE_NAME("dbxface")
@@ -262,10 +263,17 @@ acpi_db_single_step(struct acpi_walk_state *walk_state,
                        }
                }
 
-               /* Now we can display it */
+               /* Now we can disassemble and display it */
 
 #ifdef ACPI_DISASSEMBLER
                acpi_dm_disassemble(walk_state, display_op, ACPI_UINT32_MAX);
+#else
+               /*
+                * The AML Disassembler is not configured - at least we can
+                * display the opcode value and name
+                */
+               acpi_os_printf("AML Opcode: %4.4X %s\n", op->common.aml_opcode,
+                              acpi_ps_get_opcode_name(op->common.aml_opcode));
 #endif
 
                if ((op->common.aml_opcode == AML_IF_OP) ||
index 7c937595dfcbd5d594a13eb9233851bdda0527b4..30fe89545d6ab614a32d8d563e2d335dc25613b2 100644 (file)
 #include "acnamesp.h"
 #include "acparser.h"
 
+#ifdef ACPI_EXEC_APP
+#include "aecommon.h"
+#endif
+
 #define _COMPONENT          ACPI_DISPATCHER
 ACPI_MODULE_NAME("dsfield")
 
@@ -259,6 +263,13 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
        u64 position;
        union acpi_parse_object *child;
 
+#ifdef ACPI_EXEC_APP
+       u64 value = 0;
+       union acpi_operand_object *result_desc;
+       union acpi_operand_object *obj_desc;
+       char *name_path;
+#endif
+
        ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
 
        /* First field starts at bit zero */
@@ -391,6 +402,25 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
                                        if (ACPI_FAILURE(status)) {
                                                return_ACPI_STATUS(status);
                                        }
+#ifdef ACPI_EXEC_APP
+                                       name_path =
+                                           acpi_ns_get_external_pathname(info->
+                                                                         field_node);
+                                       obj_desc =
+                                           acpi_ut_create_integer_object
+                                           (value);
+                                       if (ACPI_SUCCESS
+                                           (ae_lookup_init_file_entry
+                                            (name_path, &value))) {
+                                               acpi_ex_write_data_to_field
+                                                   (obj_desc,
+                                                    acpi_ns_get_attached_object
+                                                    (info->field_node),
+                                                    &result_desc);
+                                       }
+                                       acpi_ut_remove_reference(obj_desc);
+                                       ACPI_FREE(name_path);
+#endif
                                }
                        }
 
@@ -573,7 +603,9 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
            !(walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
                flags |= ACPI_NS_TEMPORARY;
        }
-
+#ifdef ACPI_EXEC_APP
+       flags |= ACPI_NS_OVERRIDE_IF_FOUND;
+#endif
        /*
         * Walk the list of entries in the field_list
         * Note: field_list can be of zero length. In this case, Arg will be NULL.
index 3de794bcf8fa8c1bf906ca5ac6f4d422ec5e0faa..69603ba52a3accc41d71b35ac863d6d4c7498f0d 100644 (file)
@@ -528,13 +528,18 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
 
                status =
                    acpi_hw_read(&value64, &acpi_gbl_FADT.xpm2_control_block);
-               value = (u32)value64;
+               if (ACPI_SUCCESS(status)) {
+                       value = (u32)value64;
+               }
                break;
 
        case ACPI_REGISTER_PM_TIMER:    /* 32-bit access */
 
                status = acpi_hw_read(&value64, &acpi_gbl_FADT.xpm_timer_block);
-               value = (u32)value64;
+               if (ACPI_SUCCESS(status)) {
+                       value = (u32)value64;
+               }
+
                break;
 
        case ACPI_REGISTER_SMI_COMMAND_BLOCK:   /* 8-bit access */
index fe9d46d81750792350270c4f6c1920a77e236a05..d8b8fc2ff5633e7406c256fc6fff3e0ee6b16f2a 100644 (file)
@@ -56,14 +56,9 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }
-       /*
-        * If the target sleep state is S5, clear all GPEs and fixed events too
-        */
-       if (sleep_state == ACPI_STATE_S5) {
-               status = acpi_hw_clear_acpi_status();
-               if (ACPI_FAILURE(status)) {
-                       return_ACPI_STATUS(status);
-               }
+       status = acpi_hw_clear_acpi_status();
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
        }
        acpi_gbl_system_awake_and_running = FALSE;
 
index 83a593e2155d7a71db823ab341acf903fb30cbea..e3f10afde5ffae47deb166544cf8f5ded393f726 100644 (file)
@@ -558,6 +558,14 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
                                                  (char *)&current_node->name,
                                                  current_node));
                        }
+#ifdef ACPI_EXEC_APP
+                       if ((status == AE_ALREADY_EXISTS) &&
+                           (this_node->flags & ANOBJ_NODE_EARLY_INIT)) {
+                               this_node->flags &= ~ANOBJ_NODE_EARLY_INIT;
+                               status = AE_OK;
+                       }
+#endif
+
 #ifdef ACPI_ASL_COMPILER
                        /*
                         * If this ACPI name already exists within the namespace as an
@@ -676,6 +684,11 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
                        }
                }
        }
+#ifdef ACPI_EXEC_APP
+       if (flags & ACPI_NS_EARLY_INIT) {
+               this_node->flags |= ANOBJ_NODE_EARLY_INIT;
+       }
+#endif
 
        *return_node = this_node;
        return_ACPI_STATUS(AE_OK);
index 44f35ab3347d1a57aded2402e6e386c4d792d759..34fc2f7476eddadf678a85000a0c0023c9f3bb64 100644 (file)
@@ -22,6 +22,7 @@
 #include "acdispat.h"
 #include "amlcode.h"
 #include "acconvert.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_PARSER
 ACPI_MODULE_NAME("psloop")
@@ -527,12 +528,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
                                if (ACPI_FAILURE(status)) {
                                        return_ACPI_STATUS(status);
                                }
-                               if (walk_state->opcode == AML_SCOPE_OP) {
+                               if (acpi_ns_opens_scope
+                                   (acpi_ps_get_opcode_info
+                                    (walk_state->opcode)->object_type)) {
                                        /*
-                                        * If the scope op fails to parse, skip the body of the
-                                        * scope op because the parse failure indicates that the
-                                        * device may not exist.
+                                        * If the scope/device op fails to parse, skip the body of
+                                        * the scope op because the parse failure indicates that
+                                        * the device may not exist.
                                         */
+                                       ACPI_ERROR((AE_INFO,
+                                                   "Skip parsing opcode %s",
+                                                   acpi_ps_get_opcode_name
+                                                   (walk_state->opcode)));
                                        walk_state->parser_state.aml =
                                            walk_state->aml + 1;
                                        walk_state->parser_state.aml =
@@ -540,8 +547,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
                                            (&walk_state->parser_state);
                                        walk_state->aml =
                                            walk_state->parser_state.aml;
-                                       ACPI_ERROR((AE_INFO,
-                                                   "Skipping Scope block"));
                                }
 
                                continue;
@@ -709,20 +714,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
                        } else
                            if ((walk_state->
                                 parse_flags & ACPI_PARSE_MODULE_LEVEL)
-                               && status != AE_CTRL_TRANSFER
-                               && ACPI_FAILURE(status)) {
+                               && (ACPI_AML_EXCEPTION(status)
+                                   || status == AE_ALREADY_EXISTS
+                                   || status == AE_NOT_FOUND)) {
                                /*
-                                * ACPI_PARSE_MODULE_LEVEL flag means that we are currently
-                                * loading a table by executing it as a control method.
-                                * However, if we encounter an error while loading the table,
-                                * we need to keep trying to load the table rather than
-                                * aborting the table load (setting the status to AE_OK
-                                * continues the table load). If we get a failure at this
-                                * point, it means that the dispatcher got an error while
-                                * processing Op (most likely an AML operand error) or a
-                                * control method was called from module level and the
-                                * dispatcher returned AE_CTRL_TRANSFER. In the latter case,
-                                * leave the status alone, there's nothing wrong with it.
+                                * ACPI_PARSE_MODULE_LEVEL flag means that we
+                                * are currently loading a table by executing
+                                * it as a control method. However, if we
+                                * encounter an error while loading the table,
+                                * we need to keep trying to load the table
+                                * rather than aborting the table load (setting
+                                * the status to AE_OK continues the table
+                                * load). If we get a failure at this point, it
+                                * means that the dispatcher got an error while
+                                * trying to execute the Op.
                                 */
                                status = AE_OK;
                        }
index 51891f9fb05709ca9084c2d95afd7117b6575450..862149c8a208e6fdc3be390bdacfa5aefd44b1c0 100644 (file)
@@ -516,9 +516,9 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
                            acpi_tb_check_duplication(table_desc, table_index);
                        if (ACPI_FAILURE(status)) {
                                if (status != AE_CTRL_TERMINATE) {
-                                       ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
+                                       ACPI_EXCEPTION((AE_INFO, status,
                                                        "%4.4s 0x%8.8X%8.8X"
-                                                       " Table is duplicated",
+                                                       " Table is already loaded",
                                                        acpi_ut_valid_nameseg
                                                        (table_desc->signature.
                                                         ascii) ? table_desc->
index 118f3ff1fbb55bd9a1f98ac19c455f299fd38cde..8cc4392c61f33aea636251426e263b7017613498 100644 (file)
@@ -355,6 +355,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
        u16 original_count;
        u16 new_count = 0;
        acpi_cpu_flags lock_flags;
+       char *message;
 
        ACPI_FUNCTION_NAME(ut_update_ref_count);
 
@@ -391,6 +392,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
                                  object, object->common.type,
                                  acpi_ut_get_object_type_name(object),
                                  new_count));
+               message = "Increment";
                break;
 
        case REF_DECREMENT:
@@ -420,6 +422,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
                if (new_count == 0) {
                        acpi_ut_delete_internal_obj(object);
                }
+               message = "Decrement";
                break;
 
        default:
@@ -436,8 +439,8 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
         */
        if (new_count > ACPI_MAX_REFERENCE_COUNT) {
                ACPI_WARNING((AE_INFO,
-                             "Large Reference Count (0x%X) in object %p, Type=0x%.2X",
-                             new_count, object, object->common.type));
+                             "Large Reference Count (0x%X) in object %p, Type=0x%.2X Operation=%s",
+                             new_count, object, object->common.type, message));
        }
 }
 
index 954f8e3e35cda87e2f5aa95b247555b1f9f92354..05ff20049b875f2c0a504a7dd64bb7decc0a9919 100644 (file)
@@ -231,14 +231,34 @@ char acpi_ut_remove_whitespace(char **string)
 
 u8 acpi_ut_detect_hex_prefix(char **string)
 {
+       char *initial_position = *string;
 
+       acpi_ut_remove_hex_prefix(string);
+       if (*string != initial_position) {
+               return (TRUE);  /* String is past leading 0x */
+       }
+
+       return (FALSE);         /* Not a hex string */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_remove_hex_prefix
+ *
+ * PARAMETERS:  string                  - Pointer to input ASCII string
+ *
+ * RETURN:      none
+ *
+ * DESCRIPTION: Remove a hex "0x" prefix
+ *
+ ******************************************************************************/
+
+void acpi_ut_remove_hex_prefix(char **string)
+{
        if ((**string == ACPI_ASCII_ZERO) &&
            (tolower((int)*(*string + 1)) == 'x')) {
                *string += 2;   /* Go past the leading 0x */
-               return (TRUE);
        }
-
-       return (FALSE);         /* Not a hex string */
 }
 
 /*******************************************************************************
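
Splitting removal out of detection lets callers that only want the side effect (such as acpi_ut_implicit_strtoul64() in the next hunk) skip the boolean. A small usage sketch:

    char buf[] = "0x1A";
    char *p = buf;

    if (acpi_ut_detect_hex_prefix(&p)) {
            /* p now points at "1A"; TRUE reports a prefix was consumed */
    }

    p = buf;
    acpi_ut_remove_hex_prefix(&p);  /* same advance past "0x", no return value */
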
index 8fadad242db6dc54587383f51fd82e578ddcfa5a..5fde619a8bbdde9a8f0bb3f97c70d77461559463 100644 (file)
@@ -218,7 +218,7 @@ u64 acpi_ut_implicit_strtoul64(char *string)
         * implicit conversions, and the "0x" prefix is "not allowed".
         * However, allow a "0x" prefix as an ACPI extension.
         */
-       acpi_ut_detect_hex_prefix(&string);
+       acpi_ut_remove_hex_prefix(&string);
 
        if (!acpi_ut_remove_leading_zeros(&string)) {
                return_VALUE(0);
index 7ffa74048107ae3c69bd1b79bb056e7aebca2c55..22c9e374c9233fda78d9cd4b067f96a43bdd2039 100644 (file)
 #define PMIC_A0LOCK_REG                0xc5
 
 static struct pmic_table power_table[] = {
+/*     {
+               .address = 0x00,
+               .reg = ??,
+               .bit = ??,
+       }, ** VSYS */
+       {
+               .address = 0x04,
+               .reg = 0x63,
+               .bit = 0x00,
+       }, /* SYSX -> VSYS_SX */
+       {
+               .address = 0x08,
+               .reg = 0x62,
+               .bit = 0x00,
+       }, /* SYSU -> VSYS_U */
+       {
+               .address = 0x0c,
+               .reg = 0x64,
+               .bit = 0x00,
+       }, /* SYSS -> VSYS_S */
+       {
+               .address = 0x10,
+               .reg = 0x6a,
+               .bit = 0x00,
+       }, /* V50S -> V5P0S */
+       {
+               .address = 0x14,
+               .reg = 0x6b,
+               .bit = 0x00,
+       }, /* HOST -> VHOST, USB2/3 host */
+       {
+               .address = 0x18,
+               .reg = 0x6c,
+               .bit = 0x00,
+       }, /* VBUS -> VBUS, USB2/3 OTG */
+       {
+               .address = 0x1c,
+               .reg = 0x6d,
+               .bit = 0x00,
+       }, /* HDMI -> VHDMI */
+/*     {
+               .address = 0x20,
+               .reg = ??,
+               .bit = ??,
+       }, ** S285 */
        {
                .address = 0x24,
                .reg = 0x66,
                .bit = 0x00,
-       },
+       }, /* X285 -> V2P85SX, camera */
+/*     {
+               .address = 0x28,
+               .reg = ??,
+               .bit = ??,
+       }, ** V33A */
+       {
+               .address = 0x2c,
+               .reg = 0x69,
+               .bit = 0x00,
+       }, /* V33S -> V3P3S, display/ssd/audio */
+       {
+               .address = 0x30,
+               .reg = 0x68,
+               .bit = 0x00,
+       }, /* V33U -> V3P3U, SDIO wifi&bt */
+/*     {
+               .address = 0x34 .. 0x40,
+               .reg = ??,
+               .bit = ??,
+       }, ** V33I, V18A, REFQ, V12A */
+       {
+               .address = 0x44,
+               .reg = 0x5c,
+               .bit = 0x00,
+       }, /* V18S -> V1P8S, SOC/USB PHY/SIM */
        {
                .address = 0x48,
                .reg = 0x5d,
                .bit = 0x00,
-       },
+       }, /* V18X -> V1P8SX, eMMC/camera/audio */
+       {
+               .address = 0x4c,
+               .reg = 0x5b,
+               .bit = 0x00,
+       }, /* V18U -> V1P8U, LPDDR */
+       {
+               .address = 0x50,
+               .reg = 0x61,
+               .bit = 0x00,
+       }, /* V12X -> V1P2SX, SOC SFR */
+       {
+               .address = 0x54,
+               .reg = 0x60,
+               .bit = 0x00,
+       }, /* V12S -> V1P2S, MIPI */
+/*     {
+               .address = 0x58,
+               .reg = ??,
+               .bit = ??,
+       }, ** V10A */
+       {
+               .address = 0x5c,
+               .reg = 0x56,
+               .bit = 0x00,
+       }, /* V10S -> V1P0S, SOC GFX */
+       {
+               .address = 0x60,
+               .reg = 0x57,
+               .bit = 0x00,
+       }, /* V10X -> V1P0SX, SOC display/DDR IO/PCIe */
+       {
+               .address = 0x64,
+               .reg = 0x59,
+               .bit = 0x00,
+       }, /* V105 -> V1P05S, L2 SRAM */
 };
 
 static struct pmic_table thermal_table[] = {
index f99e5c883368a7eeca339c4bf47e0a10b2842aee..581312ac375ff46516dd4504a515da43c9e283ae 100644 (file)
@@ -2428,16 +2428,20 @@ static bool DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T
     {
       DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
        Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
-      unsigned char *ReadCacheStatus[] = { "Read Cache Disabled",
-                                          "Read Cache Enabled",
-                                          "Read Ahead Enabled",
-                                          "Intelligent Read Ahead Enabled",
-                                          "-", "-", "-", "-" };
-      unsigned char *WriteCacheStatus[] = { "Write Cache Disabled",
-                                           "Logical Device Read Only",
-                                           "Write Cache Enabled",
-                                           "Intelligent Write Cache Enabled",
-                                           "-", "-", "-", "-" };
+      static const unsigned char *ReadCacheStatus[] = {
+        "Read Cache Disabled",
+        "Read Cache Enabled",
+        "Read Ahead Enabled",
+        "Intelligent Read Ahead Enabled",
+        "-", "-", "-", "-"
+      };
+      static const unsigned char *WriteCacheStatus[] = {
+        "Write Cache Disabled",
+        "Logical Device Read Only",
+        "Write Cache Enabled",
+        "Intelligent Write Cache Enabled",
+        "-", "-", "-", "-"
+      };
       unsigned char *GeometryTranslation;
       if (LogicalDeviceInfo == NULL) continue;
       switch (LogicalDeviceInfo->DriveGeometry)
@@ -4339,14 +4343,16 @@ static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command)
 static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command)
 {
   DAC960_Controller_T *Controller = Command->Controller;
-  unsigned char *SenseErrors[] = { "NO SENSE", "RECOVERED ERROR",
-                                  "NOT READY", "MEDIUM ERROR",
-                                  "HARDWARE ERROR", "ILLEGAL REQUEST",
-                                  "UNIT ATTENTION", "DATA PROTECT",
-                                  "BLANK CHECK", "VENDOR-SPECIFIC",
-                                  "COPY ABORTED", "ABORTED COMMAND",
-                                  "EQUAL", "VOLUME OVERFLOW",
-                                  "MISCOMPARE", "RESERVED" };
+  static const unsigned char *SenseErrors[] = {
+    "NO SENSE", "RECOVERED ERROR",
+    "NOT READY", "MEDIUM ERROR",
+    "HARDWARE ERROR", "ILLEGAL REQUEST",
+    "UNIT ATTENTION", "DATA PROTECT",
+    "BLANK CHECK", "VENDOR-SPECIFIC",
+    "COPY ABORTED", "ABORTED COMMAND",
+    "EQUAL", "VOLUME OVERFLOW",
+    "MISCOMPARE", "RESERVED"
+  };
   unsigned char *CommandName = "UNKNOWN";
   switch (Command->CommandType)
     {
index e285413d4a7510f63ad670921628cab8e904a6a6..6f1d25c1eb640b8a0cffe94c3bd7e0cfa0ea691b 100644 (file)
@@ -2740,6 +2740,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
        pd->write_congestion_on  = write_congestion_on;
        pd->write_congestion_off = write_congestion_off;
 
+       ret = -ENOMEM;
        disk = alloc_disk(1);
        if (!disk)
                goto out_mem;
index c7acf74253a12e22fb9720b2c75966a68ea3eded..a1d6b5597c17bac113c062f7b3200e2926b9f0e5 100644 (file)
@@ -337,6 +337,7 @@ static ssize_t backing_dev_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
 {
        char *file_name;
+       size_t sz;
        struct file *backing_dev = NULL;
        struct inode *inode;
        struct address_space *mapping;
@@ -357,7 +358,11 @@ static ssize_t backing_dev_store(struct device *dev,
                goto out;
        }
 
-       strlcpy(file_name, buf, len);
+       strlcpy(file_name, buf, PATH_MAX);
+       /* ignore trailing newline */
+       sz = strlen(file_name);
+       if (sz > 0 && file_name[sz - 1] == '\n')
+               file_name[sz - 1] = 0x00;
 
        backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
        if (IS_ERR(backing_dev)) {
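
Two separate bugs are addressed here: the copy was bounded by the input length rather than the PATH_MAX-sized destination, and a sysfs write such as "echo /dev/sdX > backing_dev" arrives with a trailing newline that must not become part of the path. A userspace sketch of the trim:

    #include <string.h>

    static void trim_trailing_newline(char *file_name)
    {
            size_t sz = strlen(file_name);

            if (sz > 0 && file_name[sz - 1] == '\n')
                    file_name[sz - 1] = '\0';  /* drop the shell-added newline */
    }
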
index 1d50e97d49f192cd8bf1c5eb0ce569e9641cf43e..6d53f7d9fc7a92d415b507f9b8facdb3f4143b17 100644 (file)
@@ -555,12 +555,20 @@ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
 
 void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
 {
-       struct policy_dbs_info *policy_dbs = policy->governor_data;
+       struct policy_dbs_info *policy_dbs;
+
+       /* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
+       mutex_lock(&gov_dbs_data_mutex);
+       policy_dbs = policy->governor_data;
+       if (!policy_dbs)
+               goto out;
 
        mutex_lock(&policy_dbs->update_mutex);
        cpufreq_policy_apply_limits(policy);
        gov_update_sample_delay(policy_dbs, 0);
-
        mutex_unlock(&policy_dbs->update_mutex);
+
+out:
+       mutex_unlock(&gov_dbs_data_mutex);
 }
 EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
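
The fix is an instance of a common teardown-race pattern: re-read the shared pointer under the same lock the exit path holds while clearing it, and bail out if it is already gone. A generic sketch (outer_lock, shared->ptr and inner_lock are illustrative names, not cpufreq symbols):

    mutex_lock(&outer_lock);
    obj = shared->ptr;              /* the exit path clears this under outer_lock */
    if (!obj)
            goto out;               /* raced with teardown; nothing to do */

    mutex_lock(&obj->inner_lock);
    /* ... operate on obj; it cannot be freed while outer_lock is held ... */
    mutex_unlock(&obj->inner_lock);
    out:
    mutex_unlock(&outer_lock);
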
index 1aef60d160eb0c6236bee855a9a2cdd244679418..110483f0e3fbad97c1304b633b5d55c940e1fe70 100644 (file)
@@ -328,9 +328,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                unsigned int polling_threshold;
 
                /*
-                * We want to default to C1 (hlt), not to busy polling
-                * unless the timer is happening really really soon, or
-                * C1's exit latency exceeds the user configured limit.
+                * Default to a physical idle state, not to busy polling, unless
+                * a timer is going to trigger really really soon.
                 */
                polling_threshold = max_t(unsigned int, 20, s->target_residency);
                if (data->next_timer_us > polling_threshold &&
@@ -349,14 +348,12 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                 * If the tick is already stopped, the cost of possible short
                 * idle duration misprediction is much higher, because the CPU
                 * may be stuck in a shallow idle state for a long time as a
-                * result of it.  In that case say we might mispredict and try
-                * to force the CPU into a state for which we would have stopped
-                * the tick, unless a timer is going to expire really soon
-                * anyway.
+                * result of it.  In that case say we might mispredict and use
+                * the known time till the closest timer event for the idle
+                * state selection.
                 */
                if (data->predicted_us < TICK_USEC)
-                       data->predicted_us = min_t(unsigned int, TICK_USEC,
-                                                  ktime_to_us(delta_next));
+                       data->predicted_us = ktime_to_us(delta_next);
        } else {
                /*
                 * Use the performance multiplier and the user-configurable
@@ -381,8 +378,22 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                        continue;
                if (idx == -1)
                        idx = i; /* first enabled state */
-               if (s->target_residency > data->predicted_us)
-                       break;
+               if (s->target_residency > data->predicted_us) {
+                       if (!tick_nohz_tick_stopped())
+                               break;
+
+                       /*
+                        * If the state selected so far is shallow and this
+                        * state's target residency matches the time till the
+                        * closest timer event, select this one to avoid getting
+                        * stuck in the shallow one for too long.
+                        */
+                       if (drv->states[idx].target_residency < TICK_USEC &&
+                           s->target_residency <= ktime_to_us(delta_next))
+                               idx = i;
+
+                       goto out;
+               }
                if (s->exit_latency > latency_req) {
                        /*
                         * If we break out of the loop for latency reasons, use
@@ -403,14 +414,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
         * Don't stop the tick if the selected state is a polling one or if the
         * expected idle duration is shorter than the tick period length.
         */
-       if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
-           expected_interval < TICK_USEC) {
+       if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
+            expected_interval < TICK_USEC) && !tick_nohz_tick_stopped()) {
                unsigned int delta_next_us = ktime_to_us(delta_next);
 
                *stop_tick = false;
 
-               if (!tick_nohz_tick_stopped() && idx > 0 &&
-                   drv->states[idx].target_residency > delta_next_us) {
+               if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
                        /*
                         * The tick is not going to be stopped and the target
                         * residency of the state to be returned is not within
@@ -418,8 +428,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                         * tick, so try to correct that.
                         */
                        for (i = idx - 1; i >= 0; i--) {
-                           if (drv->states[i].disabled ||
-                               dev->states_usage[i].disable)
+                               if (drv->states[i].disabled ||
+                                   dev->states_usage[i].disable)
                                        continue;
 
                                idx = i;
@@ -429,6 +439,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                }
        }
 
+out:
        data->last_state_idx = idx;
 
        return data->last_state_idx;
index 88c322d7c71edf306591be82d717ac4426eb09e1..14c40a7750d1d4017edbc386d4e02df7949149fe 100644 (file)
@@ -24,6 +24,7 @@ KBUILD_CFLAGS                 := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
                                   -D__NO_FORTIFY \
                                   $(call cc-option,-ffreestanding) \
                                   $(call cc-option,-fno-stack-protector) \
+                                  -D__DISABLE_EXPORTS
 
 GCOV_PROFILE                   := n
 KASAN_SANITIZE                 := n
index a365ea2383d18c137df14d1fa116fad954d9ca45..e55508b394962d8d77e6fe285cee1a9544214714 100644 (file)
@@ -178,12 +178,18 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
  *
  * @amn: our notifier
  */
-static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
+static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
 {
-       mutex_lock(&amn->read_lock);
+       if (blockable)
+               mutex_lock(&amn->read_lock);
+       else if (!mutex_trylock(&amn->read_lock))
+               return -EAGAIN;
+
        if (atomic_inc_return(&amn->recursion) == 1)
                down_read_non_owner(&amn->lock);
        mutex_unlock(&amn->read_lock);
+
+       return 0;
 }
 
 /**
@@ -239,10 +245,11 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
  * Block for operations on BOs to finish and mark pages as accessed and
  * potentially dirty.
  */
-static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
                                                 struct mm_struct *mm,
                                                 unsigned long start,
-                                                unsigned long end)
+                                                unsigned long end,
+                                                bool blockable)
 {
        struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
        struct interval_tree_node *it;
@@ -250,17 +257,28 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
        /* notification is exclusive, but interval is inclusive */
        end -= 1;
 
-       amdgpu_mn_read_lock(amn);
+       /* TODO we should be able to split locking for interval tree and
+        * amdgpu_mn_invalidate_node
+        */
+       if (amdgpu_mn_read_lock(amn, blockable))
+               return -EAGAIN;
 
        it = interval_tree_iter_first(&amn->objects, start, end);
        while (it) {
                struct amdgpu_mn_node *node;
 
+               if (!blockable) {
+                       amdgpu_mn_read_unlock(amn);
+                       return -EAGAIN;
+               }
+
                node = container_of(it, struct amdgpu_mn_node, it);
                it = interval_tree_iter_next(it, start, end);
 
                amdgpu_mn_invalidate_node(node, start, end);
        }
+
+       return 0;
 }
 
 /**
@@ -275,10 +293,11 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
  * necessitates evicting all user-mode queues of the process. The BOs
  * are restored in amdgpu_mn_invalidate_range_end_hsa.
  */
-static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
                                                 struct mm_struct *mm,
                                                 unsigned long start,
-                                                unsigned long end)
+                                                unsigned long end,
+                                                bool blockable)
 {
        struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
        struct interval_tree_node *it;
@@ -286,13 +305,19 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
        /* notification is exclusive, but interval is inclusive */
        end -= 1;
 
-       amdgpu_mn_read_lock(amn);
+       if (amdgpu_mn_read_lock(amn, blockable))
+               return -EAGAIN;
 
        it = interval_tree_iter_first(&amn->objects, start, end);
        while (it) {
                struct amdgpu_mn_node *node;
                struct amdgpu_bo *bo;
 
+               if (!blockable) {
+                       amdgpu_mn_read_unlock(amn);
+                       return -EAGAIN;
+               }
+
                node = container_of(it, struct amdgpu_mn_node, it);
                it = interval_tree_iter_next(it, start, end);
 
@@ -304,6 +329,8 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
                                amdgpu_amdkfd_evict_userptr(mem, mm);
                }
        }
+
+       return 0;
 }
 
 /**
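
Each mmu_notifier conversion in this series follows the same shape: when the core passes blockable == false, a sleeping lock acquisition becomes a trylock and the callback reports -EAGAIN rather than waiting. A minimal sketch of the helper (the lock name is illustrative):

    static int notifier_lock(struct mutex *lock, bool blockable)
    {
            if (blockable)
                    mutex_lock(lock);       /* may sleep, as before */
            else if (!mutex_trylock(lock))
                    return -EAGAIN;         /* tell the caller to back off */

            return 0;
    }
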
index dcd6e230d16aa7905c1ae075fab469e333f7ef8b..2c9b284036d10217a013e146eba8704123006d65 100644 (file)
@@ -112,10 +112,11 @@ static void del_object(struct i915_mmu_object *mo)
        mo->attached = false;
 }
 
-static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                                                       struct mm_struct *mm,
                                                       unsigned long start,
-                                                      unsigned long end)
+                                                      unsigned long end,
+                                                      bool blockable)
 {
        struct i915_mmu_notifier *mn =
                container_of(_mn, struct i915_mmu_notifier, mn);
@@ -124,7 +125,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
        LIST_HEAD(cancelled);
 
        if (RB_EMPTY_ROOT(&mn->objects.rb_root))
-               return;
+               return 0;
 
        /* interval ranges are inclusive, but invalidate range is exclusive */
        end--;
@@ -132,6 +133,10 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
        spin_lock(&mn->lock);
        it = interval_tree_iter_first(&mn->objects, start, end);
        while (it) {
+               if (!blockable) {
+                       spin_unlock(&mn->lock);
+                       return -EAGAIN;
+               }
                /* The mmu_object is released late when destroying the
                 * GEM object so it is entirely possible to gain a
                 * reference on an object in the process of being freed
@@ -154,6 +159,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 
        if (!list_empty(&cancelled))
                flush_workqueue(mn->wq);
+
+       return 0;
 }
 
 static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
index abd24975c9b1d946cec7c85ba7ebe3b820d1c8e4..f8b35df44c60eba57e7b2ec76c068190dd941c79 100644 (file)
@@ -118,19 +118,27 @@ static void radeon_mn_release(struct mmu_notifier *mn,
  * We block for all BOs between start and end to be idle and
  * unmap them by move them into system domain again.
  */
-static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
+static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long start,
-                                            unsigned long end)
+                                            unsigned long end,
+                                            bool blockable)
 {
        struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
        struct ttm_operation_ctx ctx = { false, false };
        struct interval_tree_node *it;
+       int ret = 0;
 
        /* notification is exclusive, but interval is inclusive */
        end -= 1;
 
-       mutex_lock(&rmn->lock);
+       /* TODO we should be able to split locking for interval tree and
+        * the tear down.
+        */
+       if (blockable)
+               mutex_lock(&rmn->lock);
+       else if (!mutex_trylock(&rmn->lock))
+               return -EAGAIN;
 
        it = interval_tree_iter_first(&rmn->objects, start, end);
        while (it) {
@@ -138,6 +146,11 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
                struct radeon_bo *bo;
                long r;
 
+               if (!blockable) {
+                       ret = -EAGAIN;
+                       goto out_unlock;
+               }
+
                node = container_of(it, struct radeon_mn_node, it);
                it = interval_tree_iter_next(it, start, end);
 
@@ -166,7 +179,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
                }
        }
        
+out_unlock:
        mutex_unlock(&rmn->lock);
+
+       return ret;
 }
 
 static const struct mmu_notifier_ops radeon_mn_ops = {
index e52c58c29d9a71fc43818bdef99de45c194e8764..4d565b0c5a6e6a9cf7a280e7b3ccf28f7bd542a3 100644 (file)
@@ -574,13 +574,14 @@ static u8 hpt3xx_udma_filter(ide_drive_t *drive)
                if (!HPT370_ALLOW_ATA100_5 ||
                    check_in_drive_list(drive, bad_ata100_5))
                        return ATA_UDMA4;
+               /* else: fall through */
        case HPT372 :
        case HPT372A:
        case HPT372N:
        case HPT374 :
                if (ata_id_is_sata(drive->id))
                        mask &= ~0x0e;
-               /* Fall thru */
+               /* fall through */
        default:
                return mask;
        }
@@ -600,7 +601,7 @@ static u8 hpt3xx_mdma_filter(ide_drive_t *drive)
        case HPT374 :
                if (ata_id_is_sata(drive->id))
                        return 0x00;
-               /* Fall thru */
+               /* else: fall through */
        default:
                return 0x07;
        }
index 5bd2aafc375346fdae583545b7779d6a8bbe5923..a8df300f949c2859b9c708d5848795e5cd6d3415 100644 (file)
@@ -427,6 +427,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
                                 * (maintains previous driver behaviour)
                                 */
                                break;
+                       /* else: fall through */
                case CAPACITY_CURRENT:
                        /* Normal Zip/LS-120 disks */
                        if (memcmp(cap_desc, &floppy->cap_desc, 8))
index a444bad7a2aa50fdd9d8248ace46c64935b84015..0d93e0cfbeaf91ad0592589dcdf2141abdbc951e 100644 (file)
@@ -460,7 +460,6 @@ void do_ide_request(struct request_queue *q)
        struct ide_host *host = hwif->host;
        struct request  *rq = NULL;
        ide_startstop_t startstop;
-       unsigned long queue_run_ms = 3; /* old plug delay */
 
        spin_unlock_irq(q->queue_lock);
 
@@ -480,9 +479,6 @@ repeat:
                prev_port = hwif->host->cur_port;
                if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
                    time_after(drive->sleep, jiffies)) {
-                       unsigned long left = jiffies - drive->sleep;
-
-                       queue_run_ms = jiffies_to_msecs(left + 1);
                        ide_unlock_port(hwif);
                        goto plug_device;
                }
index 416a2f3530711aa556acec93eba46df0ead97a2b..3b75a7b7a28408a84545d610c5bbdaafd223b59d 100644 (file)
@@ -142,6 +142,7 @@ static void ide_classify_atapi_dev(ide_drive_t *drive)
                }
                /* Early cdrom models used zero */
                type = ide_cdrom;
+               /* fall through */
        case ide_cdrom:
                drive->dev_flags |= IDE_DFLAG_REMOVABLE;
 #ifdef CONFIG_PPC
index aee7b46d2330c8e7fa670545d8815ae484ac53f9..34c1165226a4c050fd33444136875801a8be0b86 100644 (file)
@@ -1746,7 +1746,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
 {
        unsigned long t;
        int speed;
-       int buffer_size;
        u16 *ctl = (u16 *)&tape->caps[12];
 
        ide_debug_log(IDE_DBG_FUNC, "minor: %d", minor);
@@ -1781,7 +1780,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
                *ctl /= 2;
                tape->buffer_size = *ctl * tape->blk_size;
        }
-       buffer_size = tape->buffer_size;
 
        /* select the "best" DSC read/write polling freq */
        speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
index 89b29028d315b17b6602c5cb8ad9b1a6731c76dc..c21d5c50ae3a1678f19c89a3496cc223dcfc7cc2 100644 (file)
@@ -128,7 +128,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
                        return pre_task_out_intr(drive, cmd);
                }
                handler = task_pio_intr;
-               /* fall-through */
+               /* fall through */
        case ATA_PROT_NODATA:
                if (handler == NULL)
                        handler = task_no_data_intr;
@@ -140,6 +140,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
                hwif->expiry = dma_ops->dma_timer_expiry;
                ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD);
                dma_ops->dma_start(drive);
+               /* fall through */
        default:
                return ide_started;
        }
index c3062b53056f2b70e63230e53a4c6465e8349af2..024bc7ba49ee5e8198b49a2f510ff05d0be70840 100644 (file)
@@ -494,6 +494,7 @@ static int init_chipset_sis5513(struct pci_dev *dev)
                pci_read_config_byte(dev, 0x09, &reg);
                if ((reg & 0x0f) != 0x00)
                        pci_write_config_byte(dev, 0x09, reg&0xf0);
+               /* fall through */
        case ATA_16:
                /* force per drive recovery and active timings
                   needed on ATA_33 and below chips */
index 182436b92ba93a95ca6e119108c30d50706b7212..6ec748eccff7e87d7691b92a6d50534d89d4625e 100644 (file)
@@ -186,6 +186,7 @@ static void ib_umem_notifier_release(struct mmu_notifier *mn,
        rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
                                      ULLONG_MAX,
                                      ib_umem_notifier_release_trampoline,
+                                     true,
                                      NULL);
        up_read(&context->umem_rwsem);
 }
@@ -207,22 +208,31 @@ static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
        return 0;
 }
 
-static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
+static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
-                                                   unsigned long end)
+                                                   unsigned long end,
+                                                   bool blockable)
 {
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+       int ret;
 
        if (!context->invalidate_range)
-               return;
+               return 0;
+
+       if (blockable)
+               down_read(&context->umem_rwsem);
+       else if (!down_read_trylock(&context->umem_rwsem))
+               return -EAGAIN;
 
        ib_ucontext_notifier_start_account(context);
-       down_read(&context->umem_rwsem);
-       rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+       ret = rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
                                      end,
-                                     invalidate_range_start_trampoline, NULL);
+                                     invalidate_range_start_trampoline,
+                                     blockable, NULL);
        up_read(&context->umem_rwsem);
+
+       return ret;
 }
 
 static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
@@ -242,10 +252,15 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
        if (!context->invalidate_range)
                return;
 
+       /*
+        * TODO: we currently bail out if there is any sleepable work to be done
+        * in ib_umem_notifier_invalidate_range_start, so we shouldn't really block
+        * here. But this is ugly and fragile.
+        */
        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
                                      end,
-                                     invalidate_range_end_trampoline, NULL);
+                                     invalidate_range_end_trampoline, true, NULL);
        up_read(&context->umem_rwsem);
        ib_ucontext_notifier_end_account(context);
 }
@@ -798,6 +813,7 @@ EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
 int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
                                  u64 start, u64 last,
                                  umem_call_back cb,
+                                 bool blockable,
                                  void *cookie)
 {
        int ret_val = 0;
@@ -809,6 +825,9 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
 
        for (node = rbt_ib_umem_iter_first(root, start, last - 1);
                        node; node = next) {
+               /* TODO move the blockable decision up to the callback */
+               if (!blockable)
+                       return -EAGAIN;
                next = rbt_ib_umem_iter_next(node, start, last - 1);
                umem = container_of(node, struct ib_umem_odp, interval_tree);
                ret_val = cb(umem->umem, start, last, cookie) || ret_val;
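The shape of this change is a contract, not just signature churn: when the mmu notifier may block, take umem_rwsem as before; when it may not (the OOM-reaper path), try-lock and return -EAGAIN so the caller can back off. A minimal userspace sketch of the same pattern, using a pthread rwlock as a stand-in for the rw_semaphore (names illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t umem_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Returns 0 on success, -EAGAIN if it would have had to sleep. */
int invalidate_range_start(unsigned long start, unsigned long end,
			   bool blockable)
{
	if (blockable)
		pthread_rwlock_rdlock(&umem_lock);	/* sleeping is fine here */
	else if (pthread_rwlock_tryrdlock(&umem_lock))	/* must not sleep */
		return -EAGAIN;				/* non-blocking caller retries */

	/* ... walk and invalidate ranges overlapping [start, end) ... */

	pthread_rwlock_unlock(&umem_lock);
	return 0;
}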
index 70aceefe14d5fa306a3ce78408b9866518030104..e1c7996c018efe688dbe081cc0928f05c5f0bbdd 100644 (file)
@@ -67,9 +67,9 @@ struct mmu_rb_handler {
 
 static unsigned long mmu_node_start(struct mmu_rb_node *);
 static unsigned long mmu_node_last(struct mmu_rb_node *);
-static void mmu_notifier_range_start(struct mmu_notifier *,
+static int mmu_notifier_range_start(struct mmu_notifier *,
                                     struct mm_struct *,
-                                    unsigned long, unsigned long);
+                                    unsigned long, unsigned long, bool);
 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
                                           unsigned long, unsigned long);
 static void do_remove(struct mmu_rb_handler *handler,
@@ -284,10 +284,11 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
        handler->ops->remove(handler->ops_arg, node);
 }
 
-static void mmu_notifier_range_start(struct mmu_notifier *mn,
+static int mmu_notifier_range_start(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long start,
-                                    unsigned long end)
+                                    unsigned long end,
+                                    bool blockable)
 {
        struct mmu_rb_handler *handler =
                container_of(mn, struct mmu_rb_handler, mn);
@@ -313,6 +314,8 @@ static void mmu_notifier_range_start(struct mmu_notifier *mn,
 
        if (added)
                queue_work(handler->wq, &handler->del_work);
+
+       return 0;
 }
 
 /*
index f1a87a690a4cd0555a1bb0ccbd585bb114a498b9..d216e0d2921dafc28b59e9a56dfbd82c660cc0d8 100644 (file)
@@ -488,7 +488,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 
        down_read(&ctx->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&ctx->umem_tree, 0, ULLONG_MAX,
-                                     mr_leaf_free, imr);
+                                     mr_leaf_free, true, imr);
        up_read(&ctx->umem_rwsem);
 
        wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
index aed31afb02169ca165271ea742a036ca71ac9879..85234d4566386c67677c1f06be3ff77bde24b1b8 100644 (file)
@@ -12,7 +12,7 @@
 #include <asm/io.h>
 
 static void *intc_baseaddr;
-#define IPRA ((unsigned long)intc_baseaddr)
+#define IPRA (intc_baseaddr)
 
 static const unsigned char ipr_table[] = {
        0x03, 0x02, 0x01, 0x00, 0x13, 0x12, 0x11, 0x10, /* 16 - 23 */
@@ -34,7 +34,7 @@ static const unsigned char ipr_table[] = {
 static void h8s_disable_irq(struct irq_data *data)
 {
        int pos;
-       unsigned int addr;
+       void __iomem *addr;
        unsigned short pri;
        int irq = data->irq;
 
@@ -48,7 +48,7 @@ static void h8s_disable_irq(struct irq_data *data)
 static void h8s_enable_irq(struct irq_data *data)
 {
        int pos;
-       unsigned int addr;
+       void __iomem *addr;
        unsigned short pri;
        int irq = data->irq;
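Retyping addr from unsigned int to void __iomem * is more than warning hygiene: __iomem is the sparse address-space annotation, so a plain dereference that bypasses the MMIO accessors can now be flagged, and the address arithmetic stays byte-granular pointer math. A rough standalone sketch of the idiom with the kernel pieces stubbed out; the register layout and offset math below are invented, not the H8S's:

#include <stdint.h>

#define __iomem		/* sparse 'address space' marker; expands to nothing here */

/* Stubs for the kernel's MMIO accessors, modeled as plain volatile access. */
static uint16_t readw(const volatile void __iomem *addr)
{
	return *(const volatile uint16_t *)addr;
}

static void writew(uint16_t val, volatile void __iomem *addr)
{
	*(volatile uint16_t *)addr = val;
}

static void __iomem *intc_baseaddr;

static void set_irq_priority(unsigned int pos, uint16_t pri)
{
	/* byte-granular pointer math: one 16-bit register per 8 sources */
	void __iomem *addr = (char __iomem *)intc_baseaddr + (pos / 8) * 2;

	writew(readw(addr) | pri, addr);
}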
 
index 17bf109c58e9e4206dd055e81a571bfb79236fd4..f6e0a8b3a61ed34b7b36e02c639c5e7507c23857 100644 (file)
@@ -1,7 +1,8 @@
 
 config BCACHE
        tristate "Block device as cache"
-       ---help---
+       select CRC64
+       help
        Allows a block device to be used as cache for other devices; uses
        a btree for indexing and the layout is optimized for SSDs.
 
@@ -10,7 +11,7 @@ config BCACHE
 config BCACHE_DEBUG
        bool "Bcache debugging"
        depends on BCACHE
-       ---help---
+       help
        Don't select this option unless you're a developer
 
        Enables extra debugging tools, allows expensive runtime checks to be
@@ -20,7 +21,7 @@ config BCACHE_CLOSURES_DEBUG
        bool "Debug closures"
        depends on BCACHE
        select DEBUG_FS
-       ---help---
+       help
        Keeps all active closures in a linked list and provides a debugfs
        interface to list them, which makes it possible to see asynchronous
        operations that get stuck.
index 7fa2631b422c89a160a6e49165bcf639d7d235e2..7a28232d868bd1b2c20081149d1ef6f3afec786a 100644 (file)
@@ -87,8 +87,8 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
 {
        struct cache *ca;
        struct bucket *b;
-       unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
-       unsigned i;
+       unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
+       unsigned int i;
        int r;
 
        atomic_sub(sectors, &c->rescale);
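Nearly all of the bcache churn from here on is one mechanical substitution: checkpatch prefers 'unsigned int' over bare 'unsigned'. The two spellings name the same type, so none of these hunks can change behavior, which a one-line self-check confirms:

/* compiles iff bare 'unsigned' and 'unsigned int' are the same type */
_Static_assert(_Generic((unsigned)0, unsigned int: 1, default: 0) == 1,
	       "bare 'unsigned' is spelled 'unsigned int'");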
@@ -169,7 +169,7 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
 
 #define bucket_prio(b)                                                 \
 ({                                                                     \
-       unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;     \
+       unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
                                                                        \
        (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);  \
 })
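bucket_prio() just above relies on the GCC statement-expression extension: a ({ ... }) block may declare locals (min_prio here) and the block's final expression becomes the value of the whole macro. A tiny detached illustration; the macro and values are invented:

#include <stdio.h>

/* Statement expression: 'v' is local to the macro, last expression is the result. */
#define clamp_byte(x)					\
({							\
	int v = (x);					\
	v < 0 ? 0 : (v > 255 ? 255 : v);		\
})

int main(void)
{
	/* prints: 0 42 255 */
	printf("%d %d %d\n", clamp_byte(-7), clamp_byte(42), clamp_byte(300));
	return 0;
}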
@@ -244,6 +244,7 @@ static void invalidate_buckets_random(struct cache *ca)
 
        while (!fifo_full(&ca->free_inc)) {
                size_t n;
+
                get_random_bytes(&n, sizeof(n));
 
                n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
@@ -301,7 +302,7 @@ do {                                                                        \
 
 static int bch_allocator_push(struct cache *ca, long bucket)
 {
-       unsigned i;
+       unsigned int i;
 
        /* Prios/gens are actually the most important reserve */
        if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
@@ -385,7 +386,7 @@ out:
 
 /* Allocation */
 
-long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
+long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
 {
        DEFINE_WAIT(w);
        struct bucket *b;
@@ -421,7 +422,7 @@ out:
        if (expensive_debug_checks(ca->set)) {
                size_t iter;
                long i;
-               unsigned j;
+               unsigned int j;
 
                for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
                        BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
@@ -470,14 +471,14 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
 
 void bch_bucket_free(struct cache_set *c, struct bkey *k)
 {
-       unsigned i;
+       unsigned int i;
 
        for (i = 0; i < KEY_PTRS(k); i++)
                __bch_bucket_free(PTR_CACHE(c, k, i),
                                  PTR_BUCKET(c, k, i));
 }
 
-int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
                           struct bkey *k, int n, bool wait)
 {
        int i;
@@ -510,10 +511,11 @@ err:
        return -1;
 }
 
-int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
                         struct bkey *k, int n, bool wait)
 {
        int ret;
+
        mutex_lock(&c->bucket_lock);
        ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
        mutex_unlock(&c->bucket_lock);
@@ -524,8 +526,8 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
 
 struct open_bucket {
        struct list_head        list;
-       unsigned                last_write_point;
-       unsigned                sectors_free;
+       unsigned int            last_write_point;
+       unsigned int            sectors_free;
        BKEY_PADDED(key);
 };
 
@@ -556,7 +558,7 @@ struct open_bucket {
  */
 static struct open_bucket *pick_data_bucket(struct cache_set *c,
                                            const struct bkey *search,
-                                           unsigned write_point,
+                                           unsigned int write_point,
                                            struct bkey *alloc)
 {
        struct open_bucket *ret, *ret_task = NULL;
@@ -595,12 +597,16 @@ found:
  *
  * If s->writeback is true, will not fail.
  */
-bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
-                      unsigned write_point, unsigned write_prio, bool wait)
+bool bch_alloc_sectors(struct cache_set *c,
+                      struct bkey *k,
+                      unsigned int sectors,
+                      unsigned int write_point,
+                      unsigned int write_prio,
+                      bool wait)
 {
        struct open_bucket *b;
        BKEY_PADDED(key) alloc;
-       unsigned i;
+       unsigned int i;
 
        /*
         * We might have to allocate a new bucket, which we can't do with a
@@ -613,7 +619,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
        spin_lock(&c->data_bucket_lock);
 
        while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
-               unsigned watermark = write_prio
+               unsigned int watermark = write_prio
                        ? RESERVE_MOVINGGC
                        : RESERVE_NONE;
 
@@ -702,6 +708,7 @@ int bch_open_buckets_alloc(struct cache_set *c)
 
        for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
                struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
+
                if (!b)
                        return -ENOMEM;
 
index 05f82ff6f016ebc348381cefda2258b23101b4af..83504dd8100ab2a80d7f0e737e50266de41add32 100644 (file)
@@ -252,7 +252,7 @@ struct bcache_device {
        struct kobject          kobj;
 
        struct cache_set        *c;
-       unsigned                id;
+       unsigned int            id;
 #define BCACHEDEVNAME_SIZE     12
        char                    name[BCACHEDEVNAME_SIZE];
 
@@ -264,18 +264,19 @@ struct bcache_device {
 #define BCACHE_DEV_UNLINK_DONE         2
 #define BCACHE_DEV_WB_RUNNING          3
 #define BCACHE_DEV_RATE_DW_RUNNING     4
-       unsigned                nr_stripes;
-       unsigned                stripe_size;
+       unsigned int            nr_stripes;
+       unsigned int            stripe_size;
        atomic_t                *stripe_sectors_dirty;
        unsigned long           *full_dirty_stripes;
 
        struct bio_set          bio_split;
 
-       unsigned                data_csum:1;
+       unsigned int            data_csum:1;
 
-       int (*cache_miss)(struct btree *, struct search *,
-                         struct bio *, unsigned);
-       int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
+       int (*cache_miss)(struct btree *b, struct search *s,
+                         struct bio *bio, unsigned int sectors);
+       int (*ioctl)(struct bcache_device *d, fmode_t mode,
+                    unsigned int cmd, unsigned long arg);
 };
 
 struct io {
@@ -284,7 +285,7 @@ struct io {
        struct list_head        lru;
 
        unsigned long           jiffies;
-       unsigned                sequential;
+       unsigned int            sequential;
        sector_t                last;
 };
 
@@ -358,18 +359,18 @@ struct cached_dev {
        struct cache_accounting accounting;
 
        /* The rest of this all shows up in sysfs */
-       unsigned                sequential_cutoff;
-       unsigned                readahead;
+       unsigned int            sequential_cutoff;
+       unsigned int            readahead;
 
-       unsigned                io_disable:1;
-       unsigned                verify:1;
-       unsigned                bypass_torture_test:1;
+       unsigned int            io_disable:1;
+       unsigned int            verify:1;
+       unsigned int            bypass_torture_test:1;
 
-       unsigned                partial_stripes_expensive:1;
-       unsigned                writeback_metadata:1;
-       unsigned                writeback_running:1;
+       unsigned int            partial_stripes_expensive:1;
+       unsigned int            writeback_metadata:1;
+       unsigned int            writeback_running:1;
        unsigned char           writeback_percent;
-       unsigned                writeback_delay;
+       unsigned int            writeback_delay;
 
        uint64_t                writeback_rate_target;
        int64_t                 writeback_rate_proportional;
@@ -377,16 +378,16 @@ struct cached_dev {
        int64_t                 writeback_rate_integral_scaled;
        int32_t                 writeback_rate_change;
 
-       unsigned                writeback_rate_update_seconds;
-       unsigned                writeback_rate_i_term_inverse;
-       unsigned                writeback_rate_p_term_inverse;
-       unsigned                writeback_rate_minimum;
+       unsigned int            writeback_rate_update_seconds;
+       unsigned int            writeback_rate_i_term_inverse;
+       unsigned int            writeback_rate_p_term_inverse;
+       unsigned int            writeback_rate_minimum;
 
        enum stop_on_failure    stop_when_cache_set_failed;
 #define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
        atomic_t                io_errors;
-       unsigned                error_limit;
-       unsigned                offline_seconds;
+       unsigned int            error_limit;
+       unsigned int            offline_seconds;
 
        char                    backing_dev_name[BDEVNAME_SIZE];
 };
@@ -447,7 +448,7 @@ struct cache {
         * until a gc finishes - otherwise we could pointlessly burn a ton of
         * cpu
         */
-       unsigned                invalidate_needs_gc;
+       unsigned int            invalidate_needs_gc;
 
        bool                    discard; /* Get rid of? */
 
@@ -472,7 +473,7 @@ struct gc_stat {
 
        size_t                  nkeys;
        uint64_t                data;   /* sectors */
-       unsigned                in_use; /* percent */
+       unsigned int            in_use; /* percent */
 };
 
 /*
@@ -518,7 +519,7 @@ struct cache_set {
        int                     caches_loaded;
 
        struct bcache_device    **devices;
-       unsigned                devices_max_used;
+       unsigned int            devices_max_used;
        atomic_t                attached_dev_nr;
        struct list_head        cached_devs;
        uint64_t                cached_dev_sectors;
@@ -548,7 +549,7 @@ struct cache_set {
         * Default number of pages for a new btree node - may be less than a
         * full bucket
         */
-       unsigned                btree_pages;
+       unsigned int            btree_pages;
 
        /*
         * Lists of struct btrees; lru is the list for structs that have memory
@@ -571,7 +572,7 @@ struct cache_set {
        struct list_head        btree_cache_freed;
 
        /* Number of elements in btree_cache + btree_cache_freeable lists */
-       unsigned                btree_cache_used;
+       unsigned int            btree_cache_used;
 
        /*
         * If we need to allocate memory for a new btree node and that
@@ -613,8 +614,8 @@ struct cache_set {
        uint16_t                min_prio;
 
        /*
-        * max(gen - last_gc) for all buckets. When it gets too big we have to gc
-        * to keep gens from wrapping around.
+        * max(gen - last_gc) for all buckets. When it gets too big we have to
+        * gc to keep gens from wrapping around.
         */
        uint8_t                 need_gc;
        struct gc_stat          gc_stats;
@@ -649,7 +650,7 @@ struct cache_set {
        struct mutex            verify_lock;
 #endif
 
-       unsigned                nr_uuids;
+       unsigned int            nr_uuids;
        struct uuid_entry       *uuids;
        BKEY_PADDED(uuid_bucket);
        struct closure          uuid_write;
@@ -670,12 +671,12 @@ struct cache_set {
        struct journal          journal;
 
 #define CONGESTED_MAX          1024
-       unsigned                congested_last_us;
+       unsigned int            congested_last_us;
        atomic_t                congested;
 
        /* The rest of this all shows up in sysfs */
-       unsigned                congested_read_threshold_us;
-       unsigned                congested_write_threshold_us;
+       unsigned int            congested_read_threshold_us;
+       unsigned int            congested_write_threshold_us;
 
        struct time_stats       btree_gc_time;
        struct time_stats       btree_split_time;
@@ -694,16 +695,16 @@ struct cache_set {
                ON_ERROR_PANIC,
        }                       on_error;
 #define DEFAULT_IO_ERROR_LIMIT 8
-       unsigned                error_limit;
-       unsigned                error_decay;
+       unsigned int            error_limit;
+       unsigned int            error_decay;
 
        unsigned short          journal_delay_ms;
        bool                    expensive_debug_checks;
-       unsigned                verify:1;
-       unsigned                key_merging_disabled:1;
-       unsigned                gc_always_rewrite:1;
-       unsigned                shrinker_disabled:1;
-       unsigned                copy_gc_enabled:1;
+       unsigned int            verify:1;
+       unsigned int            key_merging_disabled:1;
+       unsigned int            gc_always_rewrite:1;
+       unsigned int            shrinker_disabled:1;
+       unsigned int            copy_gc_enabled:1;
 
 #define BUCKET_HASH_BITS       12
        struct hlist_head       bucket_hash[1 << BUCKET_HASH_BITS];
@@ -712,7 +713,7 @@ struct cache_set {
 };
 
 struct bbio {
-       unsigned                submit_time_us;
+       unsigned int            submit_time_us;
        union {
                struct bkey     key;
                uint64_t        _pad[3];
@@ -729,10 +730,10 @@ struct bbio {
 
 #define btree_bytes(c)         ((c)->btree_pages * PAGE_SIZE)
 #define btree_blocks(b)                                                        \
-       ((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
+       ((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
 
 #define btree_default_blocks(c)                                                \
-       ((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
+       ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
 
 #define bucket_pages(c)                ((c)->sb.bucket_size / PAGE_SECTORS)
 #define bucket_bytes(c)                ((c)->sb.bucket_size << 9)
@@ -761,21 +762,21 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
 
 static inline struct cache *PTR_CACHE(struct cache_set *c,
                                      const struct bkey *k,
-                                     unsigned ptr)
+                                     unsigned int ptr)
 {
        return c->cache[PTR_DEV(k, ptr)];
 }
 
 static inline size_t PTR_BUCKET_NR(struct cache_set *c,
                                   const struct bkey *k,
-                                  unsigned ptr)
+                                  unsigned int ptr)
 {
        return sector_to_bucket(c, PTR_OFFSET(k, ptr));
 }
 
 static inline struct bucket *PTR_BUCKET(struct cache_set *c,
                                        const struct bkey *k,
-                                       unsigned ptr)
+                                       unsigned int ptr)
 {
        return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
 }
@@ -783,17 +784,18 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
 static inline uint8_t gen_after(uint8_t a, uint8_t b)
 {
        uint8_t r = a - b;
+
        return r > 128U ? 0 : r;
 }
 
 static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
-                               unsigned i)
+                               unsigned int i)
 {
        return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
 }
 
 static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
-                                unsigned i)
+                                unsigned int i)
 {
        return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
 }
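gen_after() in this hunk leans on well-defined unsigned 8-bit wraparound: a - b is computed mod 256, so a generation counter that has wrapped past b still compares as newer, provided the two values are within 128 steps. A standalone worked check (not driver code):

#include <assert.h>
#include <stdint.h>

static uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;	/* mod-256 distance from b forward to a */

	return r > 128U ? 0 : r;
}

int main(void)
{
	assert(gen_after(5, 250) == 11);  /* 'a' wrapped past 'b': 11 steps newer */
	assert(gen_after(250, 5) == 0);   /* distance 245 > 128: 'a' is not newer */
	assert(gen_after(7, 7) == 0);     /* equal generations: not stale */
	return 0;
}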
@@ -879,16 +881,16 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
 #define BUCKET_GC_GEN_MAX      96U
 
 #define kobj_attribute_write(n, fn)                                    \
-       static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
+       static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)
 
 #define kobj_attribute_rw(n, show, store)                              \
        static struct kobj_attribute ksysfs_##n =                       \
-               __ATTR(n, S_IWUSR|S_IRUSR, show, store)
+               __ATTR(n, 0600, show, store)
 
 static inline void wake_up_allocators(struct cache_set *c)
 {
        struct cache *ca;
-       unsigned i;
+       unsigned int i;
 
        for_each_cache(ca, c, i)
                wake_up_process(ca->alloc_thread);
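The kobj_attribute_write/rw change is another checkpatch substitution, octal literals in place of the S_I* permission macros. The literals equal the macro combinations they replace, which is easy to assert in userspace where the same values are defined:

#include <sys/stat.h>

_Static_assert(S_IWUSR == 0200, "owner write-only, as in kobj_attribute_write");
_Static_assert((S_IWUSR | S_IRUSR) == 0600, "owner read/write, as in kobj_attribute_rw");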
@@ -924,40 +926,43 @@ static inline void wait_for_kthread_stop(void)
 /* Forward declarations */
 
 void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
-void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
-void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
-                             blk_status_t, const char *);
-void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
-               const char *);
-void bch_bbio_free(struct bio *, struct cache_set *);
-struct bio *bch_bbio_alloc(struct cache_set *);
-
-void __bch_submit_bbio(struct bio *, struct cache_set *);
-void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
-
-uint8_t bch_inc_gen(struct cache *, struct bucket *);
-void bch_rescale_priorities(struct cache_set *, int);
-
-bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
-void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
-
-void __bch_bucket_free(struct cache *, struct bucket *);
-void bch_bucket_free(struct cache_set *, struct bkey *);
-
-long bch_bucket_alloc(struct cache *, unsigned, bool);
-int __bch_bucket_alloc_set(struct cache_set *, unsigned,
-                          struct bkey *, int, bool);
-int bch_bucket_alloc_set(struct cache_set *, unsigned,
-                        struct bkey *, int, bool);
-bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
-                      unsigned, unsigned, bool);
+void bch_count_io_errors(struct cache *ca, blk_status_t error,
+                        int is_read, const char *m);
+void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
+                             blk_status_t error, const char *m);
+void bch_bbio_endio(struct cache_set *c, struct bio *bio,
+                   blk_status_t error, const char *m);
+void bch_bbio_free(struct bio *bio, struct cache_set *c);
+struct bio *bch_bbio_alloc(struct cache_set *c);
+
+void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
+void bch_submit_bbio(struct bio *bio, struct cache_set *c,
+                    struct bkey *k, unsigned int ptr);
+
+uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
+void bch_rescale_priorities(struct cache_set *c, int sectors);
+
+bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
+void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
+
+void __bch_bucket_free(struct cache *ca, struct bucket *b);
+void bch_bucket_free(struct cache_set *c, struct bkey *k);
+
+long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+                          struct bkey *k, int n, bool wait);
+int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+                        struct bkey *k, int n, bool wait);
+bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
+                      unsigned int sectors, unsigned int write_point,
+                      unsigned int write_prio, bool wait);
 bool bch_cached_dev_error(struct cached_dev *dc);
 
 __printf(2, 3)
-bool bch_cache_set_error(struct cache_set *, const char *, ...);
+bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
 
-void bch_prio_write(struct cache *);
-void bch_write_bdev_super(struct cached_dev *, struct closure *);
+void bch_prio_write(struct cache *ca);
+void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
 
 extern struct workqueue_struct *bcache_wq;
 extern struct mutex bch_register_lock;
@@ -969,30 +974,31 @@ extern struct kobj_type bch_cache_set_ktype;
 extern struct kobj_type bch_cache_set_internal_ktype;
 extern struct kobj_type bch_cache_ktype;
 
-void bch_cached_dev_release(struct kobject *);
-void bch_flash_dev_release(struct kobject *);
-void bch_cache_set_release(struct kobject *);
-void bch_cache_release(struct kobject *);
+void bch_cached_dev_release(struct kobject *kobj);
+void bch_flash_dev_release(struct kobject *kobj);
+void bch_cache_set_release(struct kobject *kobj);
+void bch_cache_release(struct kobject *kobj);
 
-int bch_uuid_write(struct cache_set *);
-void bcache_write_super(struct cache_set *);
+int bch_uuid_write(struct cache_set *c);
+void bcache_write_super(struct cache_set *c);
 
 int bch_flash_dev_create(struct cache_set *c, uint64_t size);
 
-int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
-void bch_cached_dev_detach(struct cached_dev *);
-void bch_cached_dev_run(struct cached_dev *);
-void bcache_device_stop(struct bcache_device *);
-
-void bch_cache_set_unregister(struct cache_set *);
-void bch_cache_set_stop(struct cache_set *);
-
-struct cache_set *bch_cache_set_alloc(struct cache_sb *);
-void bch_btree_cache_free(struct cache_set *);
-int bch_btree_cache_alloc(struct cache_set *);
-void bch_moving_init_cache_set(struct cache_set *);
-int bch_open_buckets_alloc(struct cache_set *);
-void bch_open_buckets_free(struct cache_set *);
+int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+                         uint8_t *set_uuid);
+void bch_cached_dev_detach(struct cached_dev *dc);
+void bch_cached_dev_run(struct cached_dev *dc);
+void bcache_device_stop(struct bcache_device *d);
+
+void bch_cache_set_unregister(struct cache_set *c);
+void bch_cache_set_stop(struct cache_set *c);
+
+struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
+void bch_btree_cache_free(struct cache_set *c);
+int bch_btree_cache_alloc(struct cache_set *c);
+void bch_moving_init_cache_set(struct cache_set *c);
+int bch_open_buckets_alloc(struct cache_set *c);
+void bch_open_buckets_free(struct cache_set *c);
 
 int bch_cache_allocator_start(struct cache *ca);
 
index 596c93b44e9b37ba9d7b1d96acc5aa20cb53d76a..8f07fa6e17394b5c0c54f7250f960f7361fd2111 100644 (file)
 
 #ifdef CONFIG_BCACHE_DEBUG
 
-void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
 {
        struct bkey *k, *next;
 
        for (k = i->start; k < bset_bkey_last(i); k = next) {
                next = bkey_next(k);
 
-               printk(KERN_ERR "block %u key %u/%u: ", set,
-                      (unsigned) ((u64 *) k - i->d), i->keys);
+               pr_err("block %u key %u/%u: ", set,
+                      (unsigned int) ((u64 *) k - i->d), i->keys);
 
                if (b->ops->key_dump)
                        b->ops->key_dump(b, k);
                else
-                       printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+                       pr_err("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
 
                if (next < bset_bkey_last(i) &&
                    bkey_cmp(k, b->ops->is_extents ?
                             &START_KEY(next) : next) > 0)
-                       printk(KERN_ERR "Key skipped backwards\n");
+                       pr_err("Key skipped backwards\n");
        }
 }
 
 void bch_dump_bucket(struct btree_keys *b)
 {
-       unsigned i;
+       unsigned int i;
 
        console_lock();
        for (i = 0; i <= b->nsets; i++)
@@ -53,7 +53,7 @@ void bch_dump_bucket(struct btree_keys *b)
 
 int __bch_count_data(struct btree_keys *b)
 {
-       unsigned ret = 0;
+       unsigned int ret = 0;
        struct btree_iter iter;
        struct bkey *k;
 
@@ -128,7 +128,7 @@ static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
 
 /* Keylists */
 
-int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
+int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
 {
        size_t oldsize = bch_keylist_nkeys(l);
        size_t newsize = oldsize + u64s;
@@ -180,7 +180,7 @@ void bch_keylist_pop_front(struct keylist *l)
 /* Key/pointer manipulation */
 
 void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
-                             unsigned i)
+                             unsigned int i)
 {
        BUG_ON(i > KEY_PTRS(src));
 
@@ -194,7 +194,7 @@ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
 
 bool __bch_cut_front(const struct bkey *where, struct bkey *k)
 {
-       unsigned i, len = 0;
+       unsigned int i, len = 0;
 
        if (bkey_cmp(where, &START_KEY(k)) <= 0)
                return false;
@@ -214,7 +214,7 @@ bool __bch_cut_front(const struct bkey *where, struct bkey *k)
 
 bool __bch_cut_back(const struct bkey *where, struct bkey *k)
 {
-       unsigned len = 0;
+       unsigned int len = 0;
 
        if (bkey_cmp(where, k) >= 0)
                return false;
@@ -240,9 +240,9 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
 #define BKEY_MANTISSA_MASK     ((1 << BKEY_MANTISSA_BITS) - 1)
 
 struct bkey_float {
-       unsigned        exponent:BKEY_EXPONENT_BITS;
-       unsigned        m:BKEY_MID_BITS;
-       unsigned        mantissa:BKEY_MANTISSA_BITS;
+       unsigned int    exponent:BKEY_EXPONENT_BITS;
+       unsigned int    m:BKEY_MID_BITS;
+       unsigned int    mantissa:BKEY_MANTISSA_BITS;
 } __packed;
 
 /*
@@ -311,7 +311,9 @@ void bch_btree_keys_free(struct btree_keys *b)
 }
 EXPORT_SYMBOL(bch_btree_keys_free);
 
-int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
+int bch_btree_keys_alloc(struct btree_keys *b,
+                        unsigned int page_order,
+                        gfp_t gfp)
 {
        struct bset_tree *t = b->set;
 
@@ -345,7 +347,7 @@ EXPORT_SYMBOL(bch_btree_keys_alloc);
 void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
                         bool *expensive_debug_checks)
 {
-       unsigned i;
+       unsigned int i;
 
        b->ops = ops;
        b->expensive_debug_checks = expensive_debug_checks;
@@ -370,7 +372,7 @@ EXPORT_SYMBOL(bch_btree_keys_init);
  * Return the array index next to j in an in-order traversal
  * of a binary tree stored in a linear array
  */
-static unsigned inorder_next(unsigned j, unsigned size)
+static unsigned int inorder_next(unsigned int j, unsigned int size)
 {
        if (j * 2 + 1 < size) {
                j = j * 2 + 1;
@@ -387,7 +389,7 @@ static unsigned inorder_next(unsigned j, unsigned size)
  * Return the array index previous to j in an in-order traversal
  * of a binary tree stored in a linear array
  */
-static unsigned inorder_prev(unsigned j, unsigned size)
+static unsigned int inorder_prev(unsigned int j, unsigned int size)
 {
        if (j * 2 < size) {
                j = j * 2;
@@ -400,7 +402,8 @@ static unsigned inorder_prev(unsigned j, unsigned size)
        return j;
 }
 
-/* I have no idea why this code works... and I'm the one who wrote it
+/*
+ * I have no idea why this code works... and I'm the one who wrote it
  *
  * However, I do know what it does:
  * Given a binary tree constructed in an array (i.e. how you normally implement
  * a heap), it converts a node in the tree - referenced by array index - to the
  * index it would have if you did an inorder traversal.
  *
  * The binary tree starts at array index 1, not 0
  * extra is a function of size:
  *   extra = (size - rounddown_pow_of_two(size - 1)) << 1;
  */
-static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
+static unsigned int __to_inorder(unsigned int j,
+                                 unsigned int size,
+                                 unsigned int extra)
 {
-       unsigned b = fls(j);
-       unsigned shift = fls(size - 1) - b;
+       unsigned int b = fls(j);
+       unsigned int shift = fls(size - 1) - b;
 
        j  ^= 1U << (b - 1);
        j <<= 1;
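The bit-twiddling in __to_inorder() is easiest to trust by checking it against a tree small enough to enumerate by hand, which is what the inorder_test() harness further down does at scale. A standalone sketch along those lines: fls() is stubbed, and the tail of the function, which this hunk's context cuts off, is filled in from the surrounding file, so treat the body as a reconstruction:

#include <assert.h>

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static unsigned int fls_(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

static unsigned int to_inorder_(unsigned int j, unsigned int size,
				unsigned int extra)
{
	unsigned int b = fls_(j);
	unsigned int shift = fls_(size - 1) - b;

	j ^= 1U << (b - 1);	/* drop the top bit: offset within j's level */
	j <<= 1;
	j |= 1;
	j <<= shift;		/* project down to the leaf level */

	if (j > extra)		/* fold indices past the ragged last row back up */
		j -= (j - extra) >> 1;

	return j;
}

int main(void)
{
	/* size = 6: nodes 1..5, in-order visit order is 4, 2, 5, 1, 3 */
	unsigned int size = 6;
	unsigned int extra = (size - 4) << 1;	/* rounddown_pow_of_two(5) == 4 */

	assert(to_inorder_(4, size, extra) == 1);
	assert(to_inorder_(2, size, extra) == 2);
	assert(to_inorder_(5, size, extra) == 3);
	assert(to_inorder_(1, size, extra) == 4);
	assert(to_inorder_(3, size, extra) == 5);	/* the one index 'extra' folds */
	return 0;
}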
@@ -433,14 +438,16 @@ static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
  * Return the cacheline index in bset_tree->data, where j is the index
  * into the linear array which stores the auxiliary binary tree
  */
-static unsigned to_inorder(unsigned j, struct bset_tree *t)
+static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
 {
        return __to_inorder(j, t->size, t->extra);
 }
 
-static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
+static unsigned int __inorder_to_tree(unsigned int j,
+                                     unsigned int size,
+                                     unsigned int extra)
 {
-       unsigned shift;
+       unsigned int shift;
 
        if (j > extra)
                j += j - extra;
@@ -457,7 +464,7 @@ static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
  * Return an index into the linear array which stores the auxiliary binary
  * tree; j is the cacheline index of t->data.
  */
-static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
+static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
 {
        return __inorder_to_tree(j, t->size, t->extra);
 }
@@ -468,14 +475,15 @@ void inorder_test(void)
        unsigned long done = 0;
        ktime_t start = ktime_get();
 
-       for (unsigned size = 2;
+       for (unsigned int size = 2;
             size < 65536000;
             size++) {
-               unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
-               unsigned i = 1, j = rounddown_pow_of_two(size - 1);
+               unsigned int extra =
+                       (size - rounddown_pow_of_two(size - 1)) << 1;
+               unsigned int i = 1, j = rounddown_pow_of_two(size - 1);
 
                if (!(size % 4096))
-                       printk(KERN_NOTICE "loop %u, %llu per us\n", size,
+                       pr_notice("loop %u, %llu per us\n", size,
                               done / ktime_us_delta(ktime_get(), start));
 
                while (1) {
@@ -518,30 +526,31 @@ void inorder_test(void)
  * of the previous key so we can walk backwards to it from t->tree[j]'s key.
  */
 
-static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
-                                     unsigned offset)
+static struct bkey *cacheline_to_bkey(struct bset_tree *t,
+                                     unsigned int cacheline,
+                                     unsigned int offset)
 {
        return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
 }
 
-static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
+static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
 {
        return ((void *) k - (void *) t->data) / BSET_CACHELINE;
 }
 
-static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
-                                        unsigned cacheline,
+static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
+                                        unsigned int cacheline,
                                         struct bkey *k)
 {
        return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
 }
 
-static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
+static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
 {
        return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
 }
 
-static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
+static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
 {
        return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
 }
@@ -550,7 +559,7 @@ static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
  * For the write set - the one we're currently inserting keys into - we don't
  * maintain a full search tree, we just keep a simple lookup table in t->prev.
  */
-static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
+static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
 {
        return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
 }
@@ -576,14 +585,15 @@ static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
  * See make_bfloat() to check when most significant bit of f->exponent
  * is set or not.
  */
-static inline unsigned bfloat_mantissa(const struct bkey *k,
+static inline unsigned int bfloat_mantissa(const struct bkey *k,
                                       struct bkey_float *f)
 {
        const uint64_t *p = &k->low - (f->exponent >> 6);
+
        return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
 }
 
-static void make_bfloat(struct bset_tree *t, unsigned j)
+static void make_bfloat(struct bset_tree *t, unsigned int j)
 {
        struct bkey_float *f = &t->tree[j];
        struct bkey *m = tree_to_bkey(t, j);
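bfloat_mantissa() extracts BKEY_MANTISSA_BITS starting at an arbitrary bit offset, which can straddle two adjacent u64 words; that is why p[-1] and p[0] are fed to shrd128(), a 128-bit funnel shift right that this hunk does not show. A guarded standalone sketch of such a helper (the kernel's version differs in detail, e.g. it has an x86 asm fast path):

#include <assert.h>
#include <stdint.h>

/* Shift the 128-bit value high:low right by 'shift' and keep the low 64 bits. */
static uint64_t shrd128_(uint64_t high, uint64_t low, unsigned int shift)
{
	uint64_t r = low >> shift;

	if (shift)			/* high << 64 would be undefined */
		r |= high << (64 - shift);
	return r;
}

int main(void)
{
	/* an 8-bit field at bit offset 60 straddles the word boundary */
	uint64_t low = 0xf000000000000000ull, high = 0x5ull;

	assert(shrd128_(high, low, 60) == 0x5f);	/* 0x5 joins 0xf */
	return 0;
}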
@@ -631,7 +641,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
 static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
 {
        if (t != b->set) {
-               unsigned j = roundup(t[-1].size,
+               unsigned int j = roundup(t[-1].size,
                                     64 / sizeof(struct bkey_float));
 
                t->tree = t[-1].tree + j;
@@ -686,13 +696,13 @@ void bch_bset_build_written_tree(struct btree_keys *b)
 {
        struct bset_tree *t = bset_tree_last(b);
        struct bkey *prev = NULL, *k = t->data->start;
-       unsigned j, cacheline = 1;
+       unsigned int j, cacheline = 1;
 
        b->last_set_unwritten = 0;
 
        bset_alloc_tree(b, t);
 
-       t->size = min_t(unsigned,
+       t->size = min_t(unsigned int,
                        bkey_to_cacheline(t, bset_bkey_last(t->data)),
                        b->set->tree + btree_keys_cachelines(b) - t->tree);
 
@@ -732,7 +742,7 @@ EXPORT_SYMBOL(bch_bset_build_written_tree);
 void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
 {
        struct bset_tree *t;
-       unsigned inorder, j = 1;
+       unsigned int inorder, j = 1;
 
        for (t = b->set; t <= bset_tree_last(b); t++)
                if (k < bset_bkey_last(t->data))
@@ -779,14 +789,15 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
                                      struct bset_tree *t,
                                      struct bkey *k)
 {
-       unsigned shift = bkey_u64s(k);
-       unsigned j = bkey_to_cacheline(t, k);
+       unsigned int shift = bkey_u64s(k);
+       unsigned int j = bkey_to_cacheline(t, k);
 
        /* We're getting called from btree_split() or btree_gc, just bail out */
        if (!t->size)
                return;
 
-       /* k is the key we just inserted; we need to find the entry in the
+       /*
+        * k is the key we just inserted; we need to find the entry in the
         * lookup table for the first key that is strictly greater than k:
         * it's either k's cacheline or the next one
         */
@@ -794,7 +805,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
               table_to_bkey(t, j) <= k)
                j++;
 
-       /* Adjust all the lookup table entries, and find a new key for any that
+       /*
+        * Adjust all the lookup table entries, and find a new key for any that
         * have gotten too big
         */
        for (; j < t->size; j++) {
@@ -819,7 +831,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
             k != bset_bkey_last(t->data);
             k = bkey_next(k))
                if (t->size == bkey_to_cacheline(t, k)) {
-                       t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
+                       t->prev[t->size] =
+                               bkey_to_cacheline_offset(t, t->size, k);
                        t->size++;
                }
 }
@@ -867,10 +880,10 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where,
 }
 EXPORT_SYMBOL(bch_bset_insert);
 
-unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
                              struct bkey *replace_key)
 {
-       unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
+       unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
        struct bset *i = bset_tree_last(b)->data;
        struct bkey *m, *prev = NULL;
        struct btree_iter iter;
@@ -922,10 +935,10 @@ struct bset_search_iter {
 static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
                                                     const struct bkey *search)
 {
-       unsigned li = 0, ri = t->size;
+       unsigned int li = 0, ri = t->size;
 
        while (li + 1 != ri) {
-               unsigned m = (li + ri) >> 1;
+               unsigned int m = (li + ri) >> 1;
 
                if (bkey_cmp(table_to_bkey(t, m), search) > 0)
                        ri = m;
@@ -944,7 +957,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
 {
        struct bkey *l, *r;
        struct bkey_float *f;
-       unsigned inorder, j, n = 1;
+       unsigned int inorder, j, n = 1;
 
        do {
                /*
@@ -958,7 +971,8 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
                 *              p = 0;
                 * but a branch instruction is avoided.
                 */
-               unsigned p = n << 4;
+               unsigned int p = n << 4;
+
                p &= ((int) (p - t->size)) >> 31;
 
                prefetch(&t->tree[p]);
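The 'p &= ((int) (p - t->size)) >> 31' line is the branchless clamp the comment describes: when p < t->size the subtraction is negative and the arithmetic right shift smears the sign bit into an all-ones mask, leaving p intact; when p >= t->size the mask is zero, so the prefetch harmlessly targets element 0. A standalone check of the identity, assuming arithmetic shift of negative signed values as the kernel does:

#include <assert.h>

static unsigned int clamp_or_zero(unsigned int p, unsigned int size)
{
	/* mask is all ones when p < size, zero otherwise */
	p &= ((int)(p - size)) >> 31;
	return p;
}

int main(void)
{
	assert(clamp_or_zero(5, 100) == 5);	/* in range: unchanged */
	assert(clamp_or_zero(100, 100) == 0);	/* boundary: clamped */
	assert(clamp_or_zero(4096, 100) == 0);	/* out of range: clamped */
	return 0;
}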
@@ -978,7 +992,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
                 * to work  - that's done in make_bfloat()
                 */
                if (likely(f->exponent != 127))
-                       n = j * 2 + (((unsigned)
+                       n = j * 2 + (((unsigned int)
                                      (f->mantissa -
                                       bfloat_mantissa(search, f))) >> 31);
                else
@@ -1109,6 +1123,7 @@ static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
                                          struct bset_tree *start)
 {
        struct bkey *ret = NULL;
+
        iter->size = ARRAY_SIZE(iter->data);
        iter->used = 0;
 
@@ -1184,7 +1199,8 @@ void bch_bset_sort_state_free(struct bset_sort_state *state)
        mempool_exit(&state->pool);
 }
 
-int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
+int bch_bset_sort_state_init(struct bset_sort_state *state,
+                            unsigned int page_order)
 {
        spin_lock_init(&state->time.lock);
 
@@ -1237,7 +1253,7 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
 }
 
 static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
-                        unsigned start, unsigned order, bool fixup,
+                        unsigned int start, unsigned int order, bool fixup,
                         struct bset_sort_state *state)
 {
        uint64_t start_time;
@@ -1288,7 +1304,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
                bch_time_stats_update(&state->time, start_time);
 }
 
-void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
+void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
                            struct bset_sort_state *state)
 {
        size_t order = b->page_order, keys = 0;
@@ -1298,7 +1314,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
        __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
 
        if (start) {
-               unsigned i;
+               unsigned int i;
 
                for (i = start; i <= b->nsets; i++)
                        keys += b->set[i].data->keys;
@@ -1323,8 +1339,8 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
                         struct bset_sort_state *state)
 {
        uint64_t start_time = local_clock();
-
        struct btree_iter iter;
+
        bch_btree_iter_init(b, &iter, NULL);
 
        btree_mergesort(b, new->set->data, &iter, false, true);
@@ -1338,7 +1354,7 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
 
 void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
 {
-       unsigned crit = SORT_CRIT;
+       unsigned int crit = SORT_CRIT;
        int i;
 
        /* Don't sort if nothing to do */
@@ -1367,7 +1383,7 @@ EXPORT_SYMBOL(bch_btree_sort_lazy);
 
 void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
 {
-       unsigned i;
+       unsigned int i;
 
        for (i = 0; i <= b->nsets; i++) {
                struct bset_tree *t = &b->set[i];
index b867f2200495b6edf47231597d978b508572f694..bac76aabca6d7977abfecfaee4d884719f3699f4 100644 (file)
@@ -163,10 +163,10 @@ struct bset_tree {
         */
 
        /* size of the binary tree and prev array */
-       unsigned                size;
+       unsigned int            size;
 
        /* function of size - precalculated for to_inorder() */
-       unsigned                extra;
+       unsigned int            extra;
 
        /* copy of the last key in the set */
        struct bkey             end;
@@ -187,18 +187,25 @@ struct bset_tree {
 };
 
 struct btree_keys_ops {
-       bool            (*sort_cmp)(struct btree_iter_set,
-                                   struct btree_iter_set);
-       struct bkey     *(*sort_fixup)(struct btree_iter *, struct bkey *);
-       bool            (*insert_fixup)(struct btree_keys *, struct bkey *,
-                                       struct btree_iter *, struct bkey *);
-       bool            (*key_invalid)(struct btree_keys *,
-                                      const struct bkey *);
-       bool            (*key_bad)(struct btree_keys *, const struct bkey *);
-       bool            (*key_merge)(struct btree_keys *,
-                                    struct bkey *, struct bkey *);
-       void            (*key_to_text)(char *, size_t, const struct bkey *);
-       void            (*key_dump)(struct btree_keys *, const struct bkey *);
+       bool            (*sort_cmp)(struct btree_iter_set l,
+                                   struct btree_iter_set r);
+       struct bkey     *(*sort_fixup)(struct btree_iter *iter,
+                                      struct bkey *tmp);
+       bool            (*insert_fixup)(struct btree_keys *b,
+                                       struct bkey *insert,
+                                       struct btree_iter *iter,
+                                       struct bkey *replace_key);
+       bool            (*key_invalid)(struct btree_keys *bk,
+                                      const struct bkey *k);
+       bool            (*key_bad)(struct btree_keys *bk,
+                                  const struct bkey *k);
+       bool            (*key_merge)(struct btree_keys *bk,
+                                    struct bkey *l, struct bkey *r);
+       void            (*key_to_text)(char *buf,
+                                      size_t size,
+                                      const struct bkey *k);
+       void            (*key_dump)(struct btree_keys *keys,
+                                   const struct bkey *k);
 
        /*
         * Only used for deciding whether to use START_KEY(k) or just the key
@@ -211,7 +218,7 @@ struct btree_keys {
        const struct btree_keys_ops     *ops;
        uint8_t                 page_order;
        uint8_t                 nsets;
-       unsigned                last_set_unwritten:1;
+       unsigned int            last_set_unwritten:1;
        bool                    *expensive_debug_checks;
 
        /*
@@ -239,12 +246,14 @@ static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
        return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
 }
 
-static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
+static inline unsigned int bset_byte_offset(struct btree_keys *b,
+                                           struct bset *i)
 {
        return ((size_t) i) - ((size_t) b->set->data);
 }
 
-static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
+static inline unsigned int bset_sector_offset(struct btree_keys *b,
+                                             struct bset *i)
 {
        return bset_byte_offset(b, i) >> 9;
 }
@@ -273,25 +282,27 @@ static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
 }
 
 static inline struct bset *bset_next_set(struct btree_keys *b,
-                                        unsigned block_bytes)
+                                        unsigned int block_bytes)
 {
        struct bset *i = bset_tree_last(b)->data;
 
        return ((void *) i) + roundup(set_bytes(i), block_bytes);
 }
 
-void bch_btree_keys_free(struct btree_keys *);
-int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
-void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
-                        bool *);
-
-void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
-void bch_bset_build_written_tree(struct btree_keys *);
-void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
-bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
-void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
-unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
-                             struct bkey *);
+void bch_btree_keys_free(struct btree_keys *b);
+int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order,
+                        gfp_t gfp);
+void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+                        bool *expensive_debug_checks);
+
+void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic);
+void bch_bset_build_written_tree(struct btree_keys *b);
+void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k);
+bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r);
+void bch_bset_insert(struct btree_keys *b, struct bkey *where,
+                    struct bkey *insert);
+unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+                             struct bkey *replace_key);
 
 enum {
        BTREE_INSERT_STATUS_NO_INSERT = 0,
@@ -313,18 +324,21 @@ struct btree_iter {
        } data[MAX_BSETS];
 };
 
-typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *);
+typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
 
-struct bkey *bch_btree_iter_next(struct btree_iter *);
-struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
-                                       struct btree_keys *, ptr_filter_fn);
+struct bkey *bch_btree_iter_next(struct btree_iter *iter);
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
+                                       struct btree_keys *b,
+                                       ptr_filter_fn fn);
 
-void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
-struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *,
-                                struct bkey *);
+void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+                        struct bkey *end);
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+                                struct btree_iter *iter,
+                                struct bkey *search);
 
-struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *,
-                              const struct bkey *);
+struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
+                              const struct bkey *search);
 
 /*
  * Returns the first key that is strictly greater than search
@@ -349,21 +363,23 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
 struct bset_sort_state {
        mempool_t               pool;
 
-       unsigned                page_order;
-       unsigned                crit_factor;
+       unsigned int            page_order;
+       unsigned int            crit_factor;
 
        struct time_stats       time;
 };
 
-void bch_bset_sort_state_free(struct bset_sort_state *);
-int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
-void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
-void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
-                        struct bset_sort_state *);
-void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
-                                   struct bset_sort_state *);
-void bch_btree_sort_partial(struct btree_keys *, unsigned,
-                           struct bset_sort_state *);
+void bch_bset_sort_state_free(struct bset_sort_state *state);
+int bch_bset_sort_state_init(struct bset_sort_state *state,
+                            unsigned int page_order);
+void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state);
+void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+                        struct bset_sort_state *state);
+void bch_btree_sort_and_fix_extents(struct btree_keys *b,
+                                   struct btree_iter *iter,
+                                   struct bset_sort_state *state);
+void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+                           struct bset_sort_state *state);
 
 static inline void bch_btree_sort(struct btree_keys *b,
                                  struct bset_sort_state *state)
@@ -377,13 +393,13 @@ struct bset_stats {
        size_t floats, failed;
 };
 
-void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
+void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
 
 /* Bkey utility code */
 
 #define bset_bkey_last(i)      bkey_idx((struct bkey *) (i)->d, (i)->keys)
 
-static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
 {
        return bkey_idx(i->start, idx);
 }
@@ -401,10 +417,10 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
                : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
 }
 
-void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
-                             unsigned);
-bool __bch_cut_front(const struct bkey *, struct bkey *);
-bool __bch_cut_back(const struct bkey *, struct bkey *);
+void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
+                             unsigned int i);
+bool __bch_cut_front(const struct bkey *where, struct bkey *k);
+bool __bch_cut_back(const struct bkey *where, struct bkey *k);
 
 static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
 {
@@ -522,18 +538,20 @@ static inline size_t bch_keylist_bytes(struct keylist *l)
        return bch_keylist_nkeys(l) * sizeof(uint64_t);
 }
 
-struct bkey *bch_keylist_pop(struct keylist *);
-void bch_keylist_pop_front(struct keylist *);
-int __bch_keylist_realloc(struct keylist *, unsigned);
+struct bkey *bch_keylist_pop(struct keylist *l);
+void bch_keylist_pop_front(struct keylist *l);
+int __bch_keylist_realloc(struct keylist *l, unsigned int u64s);
 
 /* Debug stuff */
 
 #ifdef CONFIG_BCACHE_DEBUG
 
-int __bch_count_data(struct btree_keys *);
-void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...);
-void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
-void bch_dump_bucket(struct btree_keys *);
+int __bch_count_data(struct btree_keys *b);
+void __printf(2, 3) __bch_check_keys(struct btree_keys *b,
+                                    const char *fmt,
+                                    ...);
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
+void bch_dump_bucket(struct btree_keys *b);
 
 #else
 
@@ -541,7 +559,7 @@ static inline int __bch_count_data(struct btree_keys *b) { return -1; }
 static inline void __printf(2, 3)
        __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
 static inline void bch_dump_bucket(struct btree_keys *b) {}
-void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
 
 #endif
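The declarations above now spell out the iterator API's parameter names, which makes its shape easier to read. For orientation, a minimal sketch of how the pieces compose, assuming (as bcache's for_each_key macros do later in this patch) that a NULL search key starts iteration from the first key; this is illustrative, not code from the patch:

static void walk_keys_in_order(struct btree_keys *b)
{
	struct btree_iter iter;
	struct bkey *k;

	/* NULL search: position at the first key (assumption noted above) */
	bch_btree_iter_init(b, &iter, NULL);

	/* keys come back in sorted order, merged across all bsets */
	while ((k = bch_btree_iter_next(&iter)) != NULL)
		; /* inspect k here */
}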
 
index c19f7716df8800b95184f25f629349109fcfbc20..e7d4817681f223f456330f9c63eb6e958f2b9a76 100644
@@ -183,7 +183,7 @@ static void bch_btree_init_next(struct btree *b)
 
 void bkey_put(struct cache_set *c, struct bkey *k)
 {
-       unsigned i;
+       unsigned int i;
 
        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i))
@@ -287,6 +287,7 @@ err:
 static void btree_node_read_endio(struct bio *bio)
 {
        struct closure *cl = bio->bi_private;
+
        closure_put(cl);
 }
 
@@ -435,7 +436,10 @@ static void do_btree_node_write(struct btree *b)
 
                continue_at(cl, btree_node_write_done, NULL);
        } else {
-               /* No problem for multipage bvec since the bio is just allocated */
+               /*
+                * No problem for multipage bvec since the bio is
+                * just allocated
+                */
                b->bio->bi_vcnt = 0;
                bch_bio_map(b->bio, i);
 
@@ -479,7 +483,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
 
 void bch_btree_node_write(struct btree *b, struct closure *parent)
 {
-       unsigned nsets = b->keys.nsets;
+       unsigned int nsets = b->keys.nsets;
 
        lockdep_assert_held(&b->lock);
 
@@ -581,7 +585,7 @@ static void mca_bucket_free(struct btree *b)
        list_move(&b->list, &b->c->btree_cache_freeable);
 }
 
-static unsigned btree_order(struct bkey *k)
+static unsigned int btree_order(struct bkey *k)
 {
        return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
 }
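btree_order() above relies on GNU C's binary "?:" (Elvis) operator: "a ?: b" evaluates a once and yields it if nonzero, else b. Here it keeps ilog2() from seeing a zero operand when KEY_SIZE(k) is smaller than PAGE_SECTORS. A standalone demonstration (a gcc/clang extension, not ISO C):

#include <stdio.h>

int main(void)
{
	unsigned int key_sectors = 3, page_sectors = 8;

	/* 3 / 8 == 0, so the ?: falls back to 1 */
	printf("%u\n", key_sectors / page_sectors ?: 1);
	return 0;
}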
@@ -589,7 +593,7 @@ static unsigned btree_order(struct bkey *k)
 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
 {
        if (!bch_btree_keys_alloc(&b->keys,
-                                 max_t(unsigned,
+                                 max_t(unsigned int,
                                        ilog2(b->c->btree_pages),
                                        btree_order(k)),
                                  gfp)) {
@@ -604,6 +608,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
                                      struct bkey *k, gfp_t gfp)
 {
        struct btree *b = kzalloc(sizeof(struct btree), gfp);
+
        if (!b)
                return NULL;
 
@@ -620,7 +625,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
        return b;
 }
 
-static int mca_reap(struct btree *b, unsigned min_order, bool flush)
+static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
 {
        struct closure cl;
 
@@ -746,6 +751,7 @@ void bch_btree_cache_free(struct cache_set *c)
 {
        struct btree *b;
        struct closure cl;
+
        closure_init_stack(&cl);
 
        if (c->shrink.list.next)
@@ -786,7 +792,7 @@ void bch_btree_cache_free(struct cache_set *c)
 
 int bch_btree_cache_alloc(struct cache_set *c)
 {
-       unsigned i;
+       unsigned int i;
 
        for (i = 0; i < mca_reserve(c); i++)
                if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
@@ -1124,6 +1130,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
                                                  struct btree_op *op)
 {
        struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
+
        if (!IS_ERR_OR_NULL(n)) {
                mutex_lock(&n->write_lock);
                bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
@@ -1136,7 +1143,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
 
 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
 {
-       unsigned i;
+       unsigned int i;
 
        mutex_lock(&b->c->bucket_lock);
 
@@ -1157,7 +1164,7 @@ static int btree_check_reserve(struct btree *b, struct btree_op *op)
 {
        struct cache_set *c = b->c;
        struct cache *ca;
-       unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
+       unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
 
        mutex_lock(&c->bucket_lock);
 
@@ -1181,7 +1188,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
                                    struct bkey *k)
 {
        uint8_t stale = 0;
-       unsigned i;
+       unsigned int i;
        struct bucket *g;
 
        /*
@@ -1219,7 +1226,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
                        SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
 
                /* guard against overflow */
-               SET_GC_SECTORS_USED(g, min_t(unsigned,
+               SET_GC_SECTORS_USED(g, min_t(unsigned int,
                                             GC_SECTORS_USED(g) + KEY_SIZE(k),
                                             MAX_GC_SECTORS_USED));
 
@@ -1233,7 +1240,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
 
 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
 {
-       unsigned i;
+       unsigned int i;
 
        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i) &&
@@ -1259,7 +1266,7 @@ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
 {
        uint8_t stale = 0;
-       unsigned keys = 0, good_keys = 0;
+       unsigned int keys = 0, good_keys = 0;
        struct bkey *k;
        struct btree_iter iter;
        struct bset_tree *t;
@@ -1302,16 +1309,18 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
 
 struct gc_merge_info {
        struct btree    *b;
-       unsigned        keys;
+       unsigned int    keys;
 };
 
-static int bch_btree_insert_node(struct btree *, struct btree_op *,
-                                struct keylist *, atomic_t *, struct bkey *);
+static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
+                                struct keylist *insert_keys,
+                                atomic_t *journal_ref,
+                                struct bkey *replace_key);
 
 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
                             struct gc_stat *gc, struct gc_merge_info *r)
 {
-       unsigned i, nodes = 0, keys = 0, blocks;
+       unsigned int i, nodes = 0, keys = 0, blocks;
        struct btree *new_nodes[GC_MERGE_NODES];
        struct keylist keylist;
        struct closure cl;
@@ -1511,11 +1520,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
        return -EINTR;
 }
 
-static unsigned btree_gc_count_keys(struct btree *b)
+static unsigned int btree_gc_count_keys(struct btree *b)
 {
        struct bkey *k;
        struct btree_iter iter;
-       unsigned ret = 0;
+       unsigned int ret = 0;
 
        for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                ret += bkey_u64s(k);
@@ -1678,7 +1687,7 @@ static void btree_gc_start(struct cache_set *c)
 {
        struct cache *ca;
        struct bucket *b;
-       unsigned i;
+       unsigned int i;
 
        if (!c->gc_mark_valid)
                return;
@@ -1704,7 +1713,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
 {
        struct bucket *b;
        struct cache *ca;
-       unsigned i;
+       unsigned int i;
 
        mutex_lock(&c->bucket_lock);
 
@@ -1722,7 +1731,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
                struct bcache_device *d = c->devices[i];
                struct cached_dev *dc;
                struct keybuf_key *w, *n;
-               unsigned j;
+               unsigned int j;
 
                if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
                        continue;
@@ -1814,7 +1823,7 @@ static void bch_btree_gc(struct cache_set *c)
 static bool gc_should_run(struct cache_set *c)
 {
        struct cache *ca;
-       unsigned i;
+       unsigned int i;
 
        for_each_cache(ca, c, i)
                if (ca->invalidate_needs_gc)
@@ -1905,7 +1914,7 @@ void bch_initial_gc_finish(struct cache_set *c)
 {
        struct cache *ca;
        struct bucket *b;
-       unsigned i;
+       unsigned int i;
 
        bch_btree_gc_finish(c);
 
@@ -1945,7 +1954,7 @@ void bch_initial_gc_finish(struct cache_set *c)
 static bool btree_insert_key(struct btree *b, struct bkey *k,
                             struct bkey *replace_key)
 {
-       unsigned status;
+       unsigned int status;
 
        BUG_ON(bkey_cmp(k, &b->key) > 0);
 
@@ -2044,7 +2053,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
                           block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
 
        if (split) {
-               unsigned keys = 0;
+               unsigned int keys = 0;
 
                trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
 
@@ -2222,10 +2231,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
                rw_lock(true, b, b->level);
 
                if (b->key.ptr[0] != btree_ptr ||
-                   b->seq != seq + 1) {
+                   b->seq != seq + 1) {
                        op->lock = b->level;
                        goto out;
-               }
+               }
        }
 
        SET_KEY_PTRS(check_key, 1);
@@ -2300,7 +2309,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys,
 
 void bch_btree_set_root(struct btree *b)
 {
-       unsigned i;
+       unsigned int i;
        struct closure cl;
 
        closure_init_stack(&cl);
@@ -2412,7 +2421,7 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
 
 struct refill {
        struct btree_op op;
-       unsigned        nr_found;
+       unsigned int    nr_found;
        struct keybuf   *buf;
        struct bkey     *end;
        keybuf_pred_fn  *pred;
@@ -2488,6 +2497,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
 
        if (!RB_EMPTY_ROOT(&buf->keys)) {
                struct keybuf_key *w;
+
                w = RB_FIRST(&buf->keys, struct keybuf_key, node);
                buf->start      = START_KEY(&w->key);
 
@@ -2519,6 +2529,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
 {
        bool ret = false;
        struct keybuf_key *p, *w, s;
+
        s.key = *start;
 
        if (bkey_cmp(end, &buf->start) <= 0 ||
@@ -2545,6 +2556,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
 {
        struct keybuf_key *w;
+
        spin_lock(&buf->lock);
 
        w = RB_FIRST(&buf->keys, struct keybuf_key, node);
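The bulk of the hunks above are the mechanical unsigned -> unsigned int conversion that checkpatch.pl requests ("Prefer 'unsigned int' to bare use of 'unsigned'"). The change is purely stylistic: both spellings name the same C type, so layout and behavior are untouched. A one-line C11 compile-time check makes that concrete:

#include <assert.h>

/* would fail to compile if the two spellings named different types */
static_assert(_Generic((unsigned)0, unsigned int: 1, default: 0),
	      "bare 'unsigned' is the same type as 'unsigned int'");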
index 68e9d926134da9391558236a18f90cdeef474a88..a68d6c55783bd97eaf49d5744c9422f8af24be07 100644
@@ -184,7 +184,7 @@ static inline struct bset *btree_bset_last(struct btree *b)
        return bset_tree_last(&b->keys)->data;
 }
 
-static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
+static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
 {
        return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
 }
@@ -213,7 +213,7 @@ struct btree_op {
        /* Btree level at which we start taking write locks */
        short                   lock;
 
-       unsigned                insert_collision:1;
+       unsigned int            insert_collision:1;
 };
 
 static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
@@ -238,26 +238,28 @@ static inline void rw_unlock(bool w, struct btree *b)
        (w ? up_write : up_read)(&b->lock);
 }
 
-void bch_btree_node_read_done(struct btree *);
-void __bch_btree_node_write(struct btree *, struct closure *);
-void bch_btree_node_write(struct btree *, struct closure *);
-
-void bch_btree_set_root(struct btree *);
-struct btree *__bch_btree_node_alloc(struct cache_set *, struct btree_op *,
-                                    int, bool, struct btree *);
-struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *,
-                                struct bkey *, int, bool, struct btree *);
-
-int bch_btree_insert_check_key(struct btree *, struct btree_op *,
-                              struct bkey *);
-int bch_btree_insert(struct cache_set *, struct keylist *,
-                    atomic_t *, struct bkey *);
-
-int bch_gc_thread_start(struct cache_set *);
-void bch_initial_gc_finish(struct cache_set *);
-void bch_moving_gc(struct cache_set *);
-int bch_btree_check(struct cache_set *);
-void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
+void bch_btree_node_read_done(struct btree *b);
+void __bch_btree_node_write(struct btree *b, struct closure *parent);
+void bch_btree_node_write(struct btree *b, struct closure *parent);
+
+void bch_btree_set_root(struct btree *b);
+struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+                                    int level, bool wait,
+                                    struct btree *parent);
+struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
+                                struct bkey *k, int level, bool write,
+                                struct btree *parent);
+
+int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+                              struct bkey *check_key);
+int bch_btree_insert(struct cache_set *c, struct keylist *keys,
+                    atomic_t *journal_ref, struct bkey *replace_key);
+
+int bch_gc_thread_start(struct cache_set *c);
+void bch_initial_gc_finish(struct cache_set *c);
+void bch_moving_gc(struct cache_set *c);
+int bch_btree_check(struct cache_set *c);
+void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);
 
 static inline void wake_up_gc(struct cache_set *c)
 {
@@ -272,9 +274,9 @@ static inline void wake_up_gc(struct cache_set *c)
 
 #define MAP_END_KEY    1
 
-typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
-int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
-                         struct bkey *, btree_map_nodes_fn *, int);
+typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b);
+int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+                         struct bkey *from, btree_map_nodes_fn *fn, int flags);
 
 static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
                                      struct bkey *from, btree_map_nodes_fn *fn)
@@ -290,21 +292,23 @@ static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
        return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
 }
 
-typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
-                               struct bkey *);
-int bch_btree_map_keys(struct btree_op *, struct cache_set *,
-                      struct bkey *, btree_map_keys_fn *, int);
-
-typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
-
-void bch_keybuf_init(struct keybuf *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *,
-                      struct bkey *, keybuf_pred_fn *);
-bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
-                                 struct bkey *);
-void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
-struct keybuf_key *bch_keybuf_next(struct keybuf *);
-struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
-                                         struct bkey *, keybuf_pred_fn *);
+typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
+                               struct bkey *k);
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+                      struct bkey *from, btree_map_keys_fn *fn, int flags);
+
+typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);
+
+void bch_keybuf_init(struct keybuf *buf);
+void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
+                      struct bkey *end, keybuf_pred_fn *pred);
+bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
+                                 struct bkey *end);
+void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w);
+struct keybuf_key *bch_keybuf_next(struct keybuf *buf);
+struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
+                                         struct keybuf *buf,
+                                         struct bkey *end,
+                                         keybuf_pred_fn *pred);
 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
 #endif
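With parameter names in place, the map API's contract reads more clearly: bch_btree_map_keys() walks keys starting at from and hands each one to fn. A hedged usage sketch; the callback name, the -1 "read locks only" lock level, and the MAP_CONTINUE return value are illustrative assumptions, not part of this patch:

static int note_key(struct btree_op *op, struct btree *b, struct bkey *k)
{
	/* inspect k; returning MAP_CONTINUE visits the next key */
	return MAP_CONTINUE;
}

static void walk_everything(struct cache_set *c)
{
	struct btree_op op;

	bch_btree_op_init(&op, -1);	/* never take write locks */
	bch_btree_map_keys(&op, c, &ZERO_KEY, note_key, 0);
}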
index 618253683d409e0a29db0832ec853b5af4930d29..73f5319295bc9c1c8a3caf47ac7b7ab4ee5cfcdd 100644
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Asynchronous refcounty things
  *
@@ -162,12 +163,13 @@ static struct dentry *closure_debug;
 static int debug_seq_show(struct seq_file *f, void *data)
 {
        struct closure *cl;
+
        spin_lock_irq(&closure_list_lock);
 
        list_for_each_entry(cl, &closure_list, all) {
                int r = atomic_read(&cl->remaining);
 
-               seq_printf(f, "%p: %pF -> %pf p %p r %i ",
+               seq_printf(f, "%p: %pS -> %pS p %p r %i ",
                           cl, (void *) cl->ip, cl->fn, cl->parent,
                           r & CLOSURE_REMAINING_MASK);
 
@@ -177,7 +179,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
                           r & CLOSURE_RUNNING  ? "R" : "");
 
                if (r & CLOSURE_WAITING)
-                       seq_printf(f, " W %pF\n",
+                       seq_printf(f, " W %pS\n",
                                   (void *) cl->waiting_on);
 
                seq_printf(f, "\n");
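The %pF -> %pS switch in this hunk is more than churn: %pF expected a function descriptor on architectures that use them (ppc64 ELFv1, ia64) and was later removed from printk entirely, while %pS symbolizes a plain text address, which is exactly what a closure stores in cl->fn and cl->waiting_on. Illustrative output (the symbol name is hypothetical):

	seq_printf(f, "%pS\n", cl->fn);
	/* -> "btree_node_write_done+0x0/0x90" (example symbol) */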
index 7c2c5bc7c88b12f1ca7067f0f77c086dc54ef594..eca0d496b6869e78c76c3774ad819612f27661a9 100644
@@ -159,7 +159,7 @@ struct closure {
 #define CLOSURE_MAGIC_DEAD     0xc054dead
 #define CLOSURE_MAGIC_ALIVE    0xc054a11e
 
-       unsigned                magic;
+       unsigned int            magic;
        struct list_head        all;
        unsigned long           ip;
        unsigned long           waiting_on;
@@ -289,10 +289,12 @@ static inline void closure_init_stack(struct closure *cl)
 }
 
 /**
- * closure_wake_up - wake up all closures on a wait list.
+ * closure_wake_up - wake up all closures on a wait list,
+ *                  with memory barrier
  */
 static inline void closure_wake_up(struct closure_waitlist *list)
 {
+       /* Memory barrier for the wait list */
        smp_mb();
        __closure_wake_up(list);
 }
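The new comment flags why a full barrier sits ahead of __closure_wake_up(): stores the waker made before signalling must be visible to any closure that runs as a result. A userspace analogue of the same publish-then-signal shape, using C11 atomics; an illustration of the ordering argument, not kernel code:

#include <stdatomic.h>
#include <stdbool.h>

static int payload;
static _Atomic bool ready;

void producer(void)
{
	payload = 42;					/* publish data */
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
	atomic_store_explicit(&ready, true, memory_order_relaxed);
}

int consumer(void)
{
	while (!atomic_load_explicit(&ready, memory_order_relaxed))
		;					/* spin until signalled */
	atomic_thread_fence(memory_order_seq_cst);	/* pairing barrier */
	return payload;					/* guaranteed to see 42 */
}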
index 12034c07257b87ab82800ed3b2d60c72d7c49e33..06da66b2488ae8f5371fcfeb92c7b336d24e78ca 100644
@@ -67,34 +67,35 @@ void bch_btree_verify(struct btree *b)
        if (inmemory->keys != sorted->keys ||
            memcmp(inmemory->start,
                   sorted->start,
-                  (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
+                  (void *) bset_bkey_last(inmemory) -
+                  (void *) inmemory->start)) {
                struct bset *i;
-               unsigned j;
+               unsigned int j;
 
                console_lock();
 
-               printk(KERN_ERR "*** in memory:\n");
+               pr_err("*** in memory:\n");
                bch_dump_bset(&b->keys, inmemory, 0);
 
-               printk(KERN_ERR "*** read back in:\n");
+               pr_err("*** read back in:\n");
                bch_dump_bset(&v->keys, sorted, 0);
 
                for_each_written_bset(b, ondisk, i) {
-                       unsigned block = ((void *) i - (void *) ondisk) /
+                       unsigned int block = ((void *) i - (void *) ondisk) /
                                block_bytes(b->c);
 
-                       printk(KERN_ERR "*** on disk block %u:\n", block);
+                       pr_err("*** on disk block %u:\n", block);
                        bch_dump_bset(&b->keys, i, block);
                }
 
-               printk(KERN_ERR "*** block %zu not written\n",
+               pr_err("*** block %zu not written\n",
                       ((void *) i - (void *) ondisk) / block_bytes(b->c));
 
                for (j = 0; j < inmemory->keys; j++)
                        if (inmemory->d[j] != sorted->d[j])
                                break;
 
-               printk(KERN_ERR "b->written %u\n", b->written);
+               pr_err("b->written %u\n", b->written);
 
                console_unlock();
                panic("verify failed at %u\n", j);
@@ -176,9 +177,9 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
 
        while (size) {
                struct keybuf_key *w;
-               unsigned bytes = min(i->bytes, size);
-
+               unsigned int bytes = min(i->bytes, size);
                int err = copy_to_user(buf, i->buf, bytes);
+
                if (err)
                        return err;
 
@@ -237,8 +238,8 @@ void bch_debug_init_cache_set(struct cache_set *c)
 {
        if (!IS_ERR_OR_NULL(bcache_debug)) {
                char name[50];
-               snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
 
+               snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
                c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
                                               &cache_set_debug_ops);
        }
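The printk(KERN_ERR ...) -> pr_err() conversions in this file are behavior-preserving: in include/linux/printk.h of this era, pr_err() is essentially

#define pr_err(fmt, ...) \
	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

so the log level is unchanged, and the only visible difference is that a file-local pr_fmt() prefix, if one is defined, gets prepended.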
index acc48d3fa274d9d031f13b3609ec41cd0e307478..fb3d4dff4b26ea7ad5e088d1e67e65ebe1992903 100644
@@ -8,8 +8,8 @@ struct cache_set;
 
 #ifdef CONFIG_BCACHE_DEBUG
 
-void bch_btree_verify(struct btree *);
-void bch_data_verify(struct cached_dev *, struct bio *);
+void bch_btree_verify(struct btree *b);
+void bch_data_verify(struct cached_dev *dc, struct bio *bio);
 
 #define expensive_debug_checks(c)      ((c)->expensive_debug_checks)
 #define key_merging_disabled(c)                ((c)->key_merging_disabled)
@@ -27,7 +27,7 @@ static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
 #endif
 
 #ifdef CONFIG_DEBUG_FS
-void bch_debug_init_cache_set(struct cache_set *);
+void bch_debug_init_cache_set(struct cache_set *c);
 #else
 static inline void bch_debug_init_cache_set(struct cache_set *c) {}
 #endif
index 1d096742eb41a01444e7c6412951af5440a6fd28..c809724e6571e4be1d61ed0198a471ec8889c044 100644
@@ -46,7 +46,7 @@ static bool bch_key_sort_cmp(struct btree_iter_set l,
 
 static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
 {
-       unsigned i;
+       unsigned int i;
 
        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i)) {
@@ -67,7 +67,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
 
 static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
 {
-       unsigned i;
+       unsigned int i;
 
        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i)) {
@@ -96,7 +96,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
 
 void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
 {
-       unsigned i = 0;
+       unsigned int i = 0;
        char *out = buf, *end = buf + size;
 
 #define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
@@ -126,22 +126,22 @@ void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
 static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
 {
        struct btree *b = container_of(keys, struct btree, keys);
-       unsigned j;
+       unsigned int j;
        char buf[80];
 
        bch_extent_to_text(buf, sizeof(buf), k);
-       printk(" %s", buf);
+       pr_err(" %s", buf);
 
        for (j = 0; j < KEY_PTRS(k); j++) {
                size_t n = PTR_BUCKET_NR(b->c, k, j);
-               printk(" bucket %zu", n);
 
+               pr_err(" bucket %zu", n);
                if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
-                       printk(" prio %i",
+                       pr_err(" prio %i",
                               PTR_BUCKET(b->c, k, j)->prio);
        }
 
-       printk(" %s\n", bch_ptr_status(b->c, k));
+       pr_err(" %s\n", bch_ptr_status(b->c, k));
 }
 
 /* Btree ptrs */
@@ -166,12 +166,13 @@ bad:
 static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
 {
        struct btree *b = container_of(bk, struct btree, keys);
+
        return __bch_btree_ptr_invalid(b->c, k);
 }
 
 static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
 {
-       unsigned i;
+       unsigned int i;
        char buf[80];
        struct bucket *g;
 
@@ -204,7 +205,7 @@ err:
 static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
 {
        struct btree *b = container_of(bk, struct btree, keys);
-       unsigned i;
+       unsigned int i;
 
        if (!bkey_cmp(k, &ZERO_KEY) ||
            !KEY_PTRS(k) ||
@@ -327,13 +328,14 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
        struct cache_set *c = container_of(b, struct btree, keys)->c;
 
        uint64_t old_offset;
-       unsigned old_size, sectors_found = 0;
+       unsigned int old_size, sectors_found = 0;
 
        BUG_ON(!KEY_OFFSET(insert));
        BUG_ON(!KEY_SIZE(insert));
 
        while (1) {
                struct bkey *k = bch_btree_iter_next(iter);
+
                if (!k)
                        break;
 
@@ -363,7 +365,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
                         * k might have been split since we inserted/found the
                         * key we're replacing
                         */
-                       unsigned i;
+                       unsigned int i;
                        uint64_t offset = KEY_START(k) -
                                KEY_START(replace_key);
 
@@ -498,11 +500,12 @@ bad:
 static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
 {
        struct btree *b = container_of(bk, struct btree, keys);
+
        return __bch_extent_invalid(b->c, k);
 }
 
 static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
-                                    unsigned ptr)
+                                    unsigned int ptr)
 {
        struct bucket *g = PTR_BUCKET(b->c, k, ptr);
        char buf[80];
@@ -534,7 +537,7 @@ err:
 static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
 {
        struct btree *b = container_of(bk, struct btree, keys);
-       unsigned i, stale;
+       unsigned int i, stale;
 
        if (!KEY_PTRS(k) ||
            bch_extent_invalid(bk, k))
@@ -574,10 +577,12 @@ static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
                ~((uint64_t)1 << 63);
 }
 
-static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
+static bool bch_extent_merge(struct btree_keys *bk,
+                            struct bkey *l,
+                            struct bkey *r)
 {
        struct btree *b = container_of(bk, struct btree, keys);
-       unsigned i;
+       unsigned int i;
 
        if (key_merging_disabled(b->c))
                return false;
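A recurring move in the extent hunks above: the ops callbacks receive a struct btree_keys * and recover the enclosing struct btree with container_of(), which subtracts the member's offset from the member's address. A self-contained userspace rendition (struct and field names are stand-ins, not bcache's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct keys { int nsets; };
struct node { int level; struct keys keys; };

int main(void)
{
	struct node n = { .level = 1 };
	struct keys *k = &n.keys;

	/* walk back from the embedded member to its container */
	struct node *back = container_of(k, struct node, keys);

	printf("level=%d\n", back->level);	/* prints level=1 */
	return 0;
}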
index 0cd3575afa1d0b68f6845dd7bebd4e041889b4f1..4d667e05bb73e8cdd389e67fc5489554a583bbaf 100644
@@ -8,8 +8,8 @@ extern const struct btree_keys_ops bch_extent_keys_ops;
 struct bkey;
 struct cache_set;
 
-void bch_extent_to_text(char *, size_t, const struct bkey *);
-bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
-bool __bch_extent_invalid(struct cache_set *, const struct bkey *);
+void bch_extent_to_text(char *buf, size_t size, const struct bkey *k);
+bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k);
+bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k);
 
 #endif /* _BCACHE_EXTENTS_H */
index 9612873afee2c3df0b61b4034f0d0232cf2259dd..c250979683194c69f8ea1b9585e13907c877eb3a 100644
@@ -17,6 +17,7 @@
 void bch_bbio_free(struct bio *bio, struct cache_set *c)
 {
        struct bbio *b = container_of(bio, struct bbio, bio);
+
        mempool_free(b, &c->bio_meta);
 }
 
@@ -42,9 +43,10 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
 }
 
 void bch_submit_bbio(struct bio *bio, struct cache_set *c,
-                    struct bkey *k, unsigned ptr)
+                    struct bkey *k, unsigned int ptr)
 {
        struct bbio *b = container_of(bio, struct bbio, bio);
+
        bch_bkey_copy_single_ptr(&b->key, k, ptr);
        __bch_submit_bbio(bio, c);
 }
@@ -52,7 +54,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 /* IO errors */
 void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
 {
-       unsigned errors;
+       unsigned int errors;
 
        WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
 
@@ -75,16 +77,16 @@ void bch_count_io_errors(struct cache *ca,
         */
 
        if (ca->set->error_decay) {
-               unsigned count = atomic_inc_return(&ca->io_count);
+               unsigned int count = atomic_inc_return(&ca->io_count);
 
                while (count > ca->set->error_decay) {
-                       unsigned errors;
-                       unsigned old = count;
-                       unsigned new = count - ca->set->error_decay;
+                       unsigned int errors;
+                       unsigned int old = count;