Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 15 Sep 2018 18:02:46 +0000 (08:02 -1000)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 15 Sep 2018 18:02:46 +0000 (08:02 -1000)
Pull x86 fixes from Ingo Molnar:
 "Misc fixes:

   - EFI crash fix

   - Xen PV fixes

   - do not allow PTI on 2-level 32-bit kernels for now

   - documentation fix"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/APM: Fix build warning when PROC_FS is not enabled
  Revert "x86/mm/legacy: Populate the user page-table with user pgd's"
  x86/efi: Load fixmap GDT in efi_call_phys_epilog() before setting %cr3
  x86/xen: Disable CPU0 hotplug for Xen PV
  x86/EISA: Don't probe EISA bus for Xen PV guests
  x86/doc: Fix Documentation/x86/earlyprintk.txt

290 files changed:
Documentation/ABI/stable/sysfs-devices-system-xen_memory
Documentation/admin-guide/kernel-parameters.txt
Documentation/device-mapper/dm-raid.txt
Documentation/filesystems/vfs.txt
MAINTAINERS
Makefile
arch/arm64/include/asm/jump_label.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/crash_core.c [new file with mode: 0644]
arch/arm64/kernel/machine_kexec.c
arch/hexagon/include/asm/bitops.h
arch/hexagon/kernel/dma.c
arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
arch/mips/lantiq/xway/dma.c
arch/riscv/kernel/setup.c
arch/s390/crypto/paes_s390.c
arch/x86/events/intel/lbr.c
arch/xtensa/Kconfig
arch/xtensa/Makefile
arch/xtensa/platforms/iss/setup.c
block/blk-cgroup.c
drivers/android/binder_alloc.c
drivers/base/firmware_loader/main.c
drivers/block/null_blk.h
drivers/block/null_blk_main.c
drivers/block/null_blk_zoned.c
drivers/char/ipmi/ipmi_bt_sm.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_ssif.c
drivers/char/ipmi/kcs_bmc.c
drivers/dma/mic_x100_dma.c
drivers/fpga/dfl-fme-pr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_display.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fbcon.h
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-saitek.c
drivers/hid/hid-sensor-hub.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hv/vmbus_drv.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
drivers/iio/temperature/maxim_thermocouple.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/rdma_core.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-raid.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin.c
drivers/md/dm-verity-target.c
drivers/misc/hmc6352.c
drivers/misc/ibmvmc.c
drivers/misc/mei/bus.c
drivers/misc/mei/client.c
drivers/misc/mei/hbm.c
drivers/mmc/host/meson-mx-sdio.c
drivers/mmc/host/omap_hsmmc.c
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/amazon/ena/ena_eth_com.c
drivers/net/ethernet/amazon/ena/ena_eth_com.h
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/mellanox/mlx5/core/wq.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/qualcomm/qca_7k.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/qualcomm/qca_spi.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/renesas/Makefile
drivers/net/ethernet/renesas/ravb_ptp.c
drivers/net/usb/qmi_wwan.c
drivers/net/xen-netfront.c
drivers/nvme/target/rdma.c
drivers/of/base.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/switch/switchtec.c
drivers/pinctrl/cirrus/pinctrl-madera-core.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/s390/crypto/ap_bus.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/qedi/qedi.h
drivers/scsi/qedi/qedi_main.c
drivers/staging/erofs/Kconfig
drivers/staging/erofs/super.c
drivers/staging/fbtft/TODO
drivers/staging/gasket/TODO
drivers/staging/vboxvideo/vbox_drv.c
drivers/staging/vboxvideo/vbox_mode.c
drivers/staging/wilc1000/Makefile
drivers/staging/wilc1000/linux_wlan.c
drivers/staging/wilc1000/wilc_debugfs.c
drivers/staging/wilc1000/wilc_wlan.c
drivers/staging/wilc1000/wilc_wlan_if.h
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_login.h
drivers/tty/hvc/hvc_console.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/class/cdc-wdm.c
drivers/usb/common/common.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/message.c
drivers/usb/core/of.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/udc/fotg210-udc.c
drivers/usb/gadget/udc/net2280.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/u132-hcd.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci.c
drivers/usb/misc/uss720.c
drivers/usb/misc/yurex.c
drivers/usb/mtu3/mtu3_core.c
drivers/usb/mtu3/mtu3_hw_regs.h
drivers/usb/serial/io_ti.h
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/storage/scsiglue.c
drivers/usb/storage/uas.c
drivers/usb/storage/unusual_devs.h
drivers/usb/typec/bus.c
drivers/usb/typec/class.c
drivers/xen/Kconfig
drivers/xen/cpu_hotplug.c
drivers/xen/events/events_base.c
drivers/xen/gntdev.c
drivers/xen/manage.c
drivers/xen/mem-reservation.c
drivers/xen/xen-balloon.c
fs/cifs/Kconfig
fs/cifs/cifssmb.c
fs/cifs/misc.c
fs/cifs/readdir.c
fs/cifs/smb2pdu.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4trace.h
fs/nfs/pnfs.c
fs/overlayfs/file.c
fs/overlayfs/inode.c
fs/overlayfs/super.c
fs/pstore/ram_core.c
include/asm-generic/io.h
include/linux/blkdev.h
include/linux/fs.h
include/linux/hid.h
include/linux/mlx5/driver.h
include/linux/mm_types.h
include/linux/mm_types_task.h
include/linux/mod_devicetable.h
include/linux/pci.h
include/linux/vm_event_item.h
include/linux/vmacache.h
include/net/netfilter/nf_conntrack_timeout.h
include/uapi/linux/perf_event.h
include/xen/mem-reservation.h
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/jump_label.c
kernel/locking/lockdep.c
kernel/locking/mutex.c
kernel/locking/test-ww_mutex.c
kernel/printk/printk.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/topology.c
mm/Makefile
mm/debug.c
mm/fadvise.c
mm/readahead.c
mm/vmacache.c
net/core/skbuff.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/netfilter/Kconfig
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/iucv/af_iucv.c
net/iucv/iucv.c
net/netfilter/Kconfig
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_generic.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_icmp.c
net/netfilter/nf_conntrack_proto_icmpv6.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_ct.c
net/netfilter/xt_CHECKSUM.c
net/netfilter/xt_cluster.c
net/netfilter/xt_hashlimit.c
net/rds/bind.c
net/sched/act_tunnel_key.c
net/tipc/netlink_compat.c
net/tipc/socket.c
net/tipc/socket.h
net/tls/tls_sw.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/hv/hv_kvp_daemon.c
tools/include/linux/lockdep.h
tools/include/linux/nmi.h [new file with mode: 0644]
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/drm/drm.h
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/vhost.h
tools/perf/Makefile.perf
tools/perf/arch/arm64/Makefile
tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
tools/perf/arch/powerpc/util/sym-handling.c
tools/perf/arch/x86/include/arch-tests.h
tools/perf/arch/x86/tests/Build
tools/perf/arch/x86/tests/arch-tests.c
tools/perf/arch/x86/tests/bp-modify.c [new file with mode: 0644]
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/evsel.c
tools/perf/util/map.c
tools/perf/util/trace-event-info.c
tools/perf/util/trace-event-parse.c

index caa311d59ac1d24c92643f37b396407a1ab654f0..6d83f95a8a8e131c9a2f0454cd39128ffd3d7723 100644 (file)
@@ -75,3 +75,12 @@ Contact:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 Description:
                Amount (in KiB) of low (or normal) memory in the
                balloon.
+
+What:          /sys/devices/system/xen_memory/xen_memory0/scrub_pages
+Date:          September 2018
+KernelVersion: 4.20
+Contact:       xen-devel@lists.xenproject.org
+Description:
+               Control scrubbing of pages before returning them to Xen for
+               other domains' use. Can be set with the xen_scrub_pages cmdline
+               parameter. Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
index 64a3bf54b97492e3b2b25508a7785d1f1e9e4186..92eb1f42240d7168354dc7129898e2500ef95c1a 100644 (file)
                        Disables the PV optimizations forcing the HVM guest to
                        run as generic HVM guest with no PV drivers.
 
+       xen_scrub_pages=        [XEN]
+                       Boolean option to control scrubbing of pages before giving them
+                       back to Xen, for use by other domains. Can also be changed at
+                       runtime via /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+                       Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
+
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
                        <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
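For illustration only — a minimal user-space sketch (not part of this series)
that flips the runtime knob documented above; the sysfs path and boolean
semantics come from the ABI entry, everything else is assumption:

	#include <stdio.h>

	int main(void)
	{
		/* Path as documented in sysfs-devices-system-xen_memory. */
		const char *knob =
			"/sys/devices/system/xen_memory/xen_memory0/scrub_pages";
		FILE *f = fopen(knob, "w");

		if (!f)
			return 1;	/* knob absent or not writable */
		fputs("1\n", f);	/* "1" enables scrubbing, "0" disables */
		return fclose(f) ? 1 : 0;
	}

The boot-time equivalent is passing xen_scrub_pages=1 (or =0) on the kernel
command line, as the kernel-parameters.txt hunk above describes.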
index 390c145f01d717fc9bc9499fe4f0fc5d8dbc1257..52a719b49afd4fdbbb1dc5adfb86972b458ed79b 100644 (file)
@@ -348,3 +348,7 @@ Version History
 1.13.1  Fix deadlock caused by early md_stop_writes().  Also fix size and
        state races.
 1.13.2  Fix raid redundancy validation and avoid keeping raid set frozen
+1.14.0  Fix reshape race on small devices.  Fix stripe adding reshape
+       deadlock/potential data corruption.  Update superblock when
+       specific devices are requested via rebuild.  Fix RAID leg
+       rebuild errors.
index 4b2084d0f1fb26c9b86bd7e2e587590bc1c1b31a..a6c6a8af48a296cf9b7197c8f065370814efd90d 100644 (file)
@@ -848,7 +848,7 @@ struct file_operations
 ----------------------
 
 This describes how the VFS can manipulate an open file. As of kernel
-4.1, the following members are defined:
+4.18, the following members are defined:
 
 struct file_operations {
        struct module *owner;
@@ -858,11 +858,11 @@ struct file_operations {
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iterate) (struct file *, struct dir_context *);
+       int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
-       int (*mremap)(struct file *, struct vm_area_struct *);
        int (*open) (struct inode *, struct file *);
        int (*flush) (struct file *, fl_owner_t id);
        int (*release) (struct inode *, struct file *);
@@ -882,6 +882,10 @@ struct file_operations {
 #ifndef CONFIG_MMU
        unsigned (*mmap_capabilities)(struct file *);
 #endif
+       ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
+       int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+       int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+       int (*fadvise)(struct file *, loff_t, loff_t, int);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -899,6 +903,9 @@ otherwise noted.
 
   iterate: called when the VFS needs to read the directory contents
 
+  iterate_shared: called when the VFS needs to read the directory contents
+       when the filesystem supports concurrent dir iterators
+
   poll: called by the VFS when a process wants to check if there is
        activity on this file and (optionally) go to sleep until there
        is activity. Called by the select(2) and poll(2) system calls
@@ -951,6 +958,16 @@ otherwise noted.
 
   fallocate: called by the VFS to preallocate blocks or punch a hole.
 
+  copy_file_range: called by the copy_file_range(2) system call.
+
+  clone_file_range: called by the ioctl(2) system call for FICLONERANGE and
+       FICLONE commands.
+
+  dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE
+       command.
+
+  fadvise: possibly called by the fadvise64() system call.
+
 Note that the file operations are implemented by the specific
 filesystem in which the inode resides. When opening a device node
 (character or block special) most filesystems will call special
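As a hedged sketch of how these hooks are wired up (the examplefs_* names are
invented for illustration and not part of this patch; the generic_file_*
helpers are the stock VFS ones), a filesystem's file_operations might look
roughly like:

	static const struct file_operations examplefs_file_ops = {
		.owner           = THIS_MODULE,
		.read_iter       = generic_file_read_iter,
		.write_iter      = generic_file_write_iter,
		.mmap            = generic_file_mmap,
		.open            = generic_file_open,
		/* hypothetical helpers for the newly documented members */
		.copy_file_range = examplefs_copy_file_range,
		.fadvise         = examplefs_fadvise,
	};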
index d870cb57c887a1c5fa773a8fd9d614c54cb3cce0..4ece30f15777cecaf93bc2c362427ca4f46ad96e 100644 (file)
@@ -5625,6 +5625,8 @@ F:        lib/fault-inject.c
 
 FBTFT Framebuffer drivers
 M:     Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+L:     dri-devel@lists.freedesktop.org
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/staging/fbtft/
 
@@ -6060,7 +6062,7 @@ F:        Documentation/gcc-plugins.txt
 
 GASKET DRIVER FRAMEWORK
 M:     Rob Springer <rspringer@google.com>
-M:     John Joseph <jnjoseph@google.com>
+M:     Todd Poynor <toddpoynor@google.com>
 M:     Ben Chan <benchan@chromium.org>
 S:     Maintained
 F:     drivers/staging/gasket/
@@ -7016,6 +7018,20 @@ F:       drivers/crypto/vmx/aes*
 F:     drivers/crypto/vmx/ghash*
 F:     drivers/crypto/vmx/ppc-xlate.pl
 
+IBM Power PCI Hotplug Driver for RPA-compliant PPC64 platform
+M:     Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L:     linux-pci@vger.kernel.org
+L:     linuxppc-dev@lists.ozlabs.org
+S:     Supported
+F:     drivers/pci/hotplug/rpaphp*
+
+IBM Power IO DLPAR Driver for RPA-compliant PPC64 platform
+M:     Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L:     linux-pci@vger.kernel.org
+L:     linuxppc-dev@lists.ozlabs.org
+S:     Supported
+F:     drivers/pci/hotplug/rpadlpar*
+
 IBM ServeRAID RAID DRIVER
 S:     Orphan
 F:     drivers/scsi/ips.*
@@ -8300,7 +8316,7 @@ F:        include/linux/libata.h
 F:     Documentation/devicetree/bindings/ata/
 
 LIBLOCKDEP
-M:     Sasha Levin <alexander.levin@verizon.com>
+M:     Sasha Levin <alexander.levin@microsoft.com>
 S:     Maintained
 F:     tools/lib/lockdep/
 
@@ -11154,7 +11170,7 @@ F:      drivers/pci/controller/dwc/pci-exynos.c
 
 PCI DRIVER FOR SYNOPSYS DESIGNWARE
 M:     Jingoo Han <jingoohan1@gmail.com>
-M:     Joao Pinto <Joao.Pinto@synopsys.com>
+M:     Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -11346,10 +11362,10 @@ S:    Maintained
 F:     drivers/platform/x86/peaq-wmi.c
 
 PER-CPU MEMORY ALLOCATOR
+M:     Dennis Zhou <dennis@kernel.org>
 M:     Tejun Heo <tj@kernel.org>
 M:     Christoph Lameter <cl@linux.com>
-M:     Dennis Zhou <dennisszhou@gmail.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
 S:     Maintained
 F:     include/linux/percpu*.h
 F:     mm/percpu*.c
index 4d5c883a98e5a018b4ac301cd420a5745886d500..a5ef6818157a0bbab3dd3bc9c2a775bfb5d43a28 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -616,6 +616,11 @@ CFLAGS_GCOV        := -fprofile-arcs -ftest-coverage \
        $(call cc-disable-warning,maybe-uninitialized,)
 export CFLAGS_GCOV
 
+# The arch Makefiles can override CC_FLAGS_FTRACE. We may also append it later.
+ifdef CONFIG_FUNCTION_TRACER
+  CC_FLAGS_FTRACE := -pg
+endif
+
 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
 # values of the respective KBUILD_* variables
 ARCH_CPPFLAGS :=
@@ -755,9 +760,6 @@ KBUILD_CFLAGS       += $(call cc-option, -femit-struct-debug-baseonly) \
 endif
 
 ifdef CONFIG_FUNCTION_TRACER
-ifndef CC_FLAGS_FTRACE
-CC_FLAGS_FTRACE := -pg
-endif
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
   # gcc 5 supports generating the mcount tables directly
   ifeq ($(call cc-option-yn,-mrecord-mcount),y)
index 1b5e0e843c3af8e8035c8f48ef518a3505adb881..7e2b3e360086311427a0bc77ec609b77b43b740d 100644 (file)
@@ -28,7 +28,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm goto("1: nop\n\t"
+       asm_volatile_goto("1: nop\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".align 3\n\t"
                 ".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm goto("1: b %l[l_yes]\n\t"
+       asm_volatile_goto("1: b %l[l_yes]\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".align 3\n\t"
                 ".quad 1b, %l[l_yes], %c0\n\t"
index 95ac7374d723e7fa48338caba5a32d6e9818377a..4c8b13bede80f98195ba1090fea11601a6d3df22 100644 (file)
@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_KEXEC)             += machine_kexec.o relocate_kernel.o    \
 arm64-obj-$(CONFIG_ARM64_RELOC_TEST)   += arm64-reloc-test.o
 arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 arm64-obj-$(CONFIG_CRASH_DUMP)         += crash_dump.o
+arm64-obj-$(CONFIG_CRASH_CORE)         += crash_core.o
 arm64-obj-$(CONFIG_ARM_SDE_INTERFACE)  += sdei.o
 arm64-obj-$(CONFIG_ARM64_SSBD)         += ssbd.o
 
diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/crash_core.c
new file mode 100644 (file)
index 0000000..ca4c3e1
--- /dev/null
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ */
+
+#include <linux/crash_core.h>
+#include <asm/memory.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+       VMCOREINFO_NUMBER(VA_BITS);
+       /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
+       vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
+                                               kimage_voffset);
+       vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
+                                               PHYS_OFFSET);
+       vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+}
index f6a5c6bc14348592cefc9967685af7de9afbe4d7..922add8adb7498ff4609725782d09a56b46781d6 100644 (file)
@@ -358,14 +358,3 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
        }
 }
 #endif /* CONFIG_HIBERNATION */
-
-void arch_crash_save_vmcoreinfo(void)
-{
-       VMCOREINFO_NUMBER(VA_BITS);
-       /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
-       vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
-                                               kimage_voffset);
-       vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
-                                               PHYS_OFFSET);
-       vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
-}
index 5e4a59b3ec1bb042f25de860a8489340a7add291..2691a1857d203db2522ae178fe744225c71d2da9 100644 (file)
@@ -211,7 +211,7 @@ static inline long ffz(int x)
  * This is defined the same way as ffs.
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static inline long fls(int x)
+static inline int fls(int x)
 {
        int r;
 
@@ -232,7 +232,7 @@ static inline long fls(int x)
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static inline long ffs(int x)
+static inline int ffs(int x)
 {
        int r;
 
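The documented semantics can be sanity-checked with a small hedged user-space
model (a reference implementation built on GCC builtins, which use the same
1-based convention; this is an assumption for illustration, not kernel code):

	#include <assert.h>

	static int fls_ref(unsigned int x)
	{
		/* fls: index of the most significant set bit, 1-based; 0 for 0 */
		return x ? 32 - __builtin_clz(x) : 0;
	}

	int main(void)
	{
		assert(fls_ref(0) == 0);
		assert(fls_ref(1) == 1);
		assert(fls_ref(0x80000000u) == 32);
		assert(__builtin_ffs(0x10) == 5);	/* lowest set bit, 1-based */
		return 0;
	}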
index 77459df34e2e7e18b29eb62e097641d7f3c1a6d9..7ebe7ad19d155803dfa50087471e3bcd000f22c6 100644 (file)
@@ -60,7 +60,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
                        panic("Can't create %s() memory pool!", __func__);
                else
                        gen_pool_add(coherent_pool,
-                               pfn_to_virt(max_low_pfn),
+                               (unsigned long)pfn_to_virt(max_low_pfn),
                                hexagon_coherent_pool_size, -1);
        }
 
index 4901833498f7e3878b458d23afb876e7812a95f1..8441b2698e6472e9454aec3bb730e94b029fdd21 100644 (file)
@@ -40,6 +40,7 @@ struct ltq_dma_channel {
        int desc;                       /* the current descriptor */
        struct ltq_dma_desc *desc_base; /* the descriptor base */
        int phys;                       /* physical addr */
+       struct device *dev;
 };
 
 enum {
index 4b9fbb6744adecfe17fdba69649e8f95f875dfcb..664f2f7f55c1c06f34e58a551fa917a27f1f9dff 100644 (file)
@@ -130,7 +130,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
        unsigned long flags;
 
        ch->desc = 0;
-       ch->desc_base = dma_zalloc_coherent(NULL,
+       ch->desc_base = dma_zalloc_coherent(ch->dev,
                                LTQ_DESC_NUM * LTQ_DESC_SIZE,
                                &ch->phys, GFP_ATOMIC);
 
@@ -182,7 +182,7 @@ ltq_dma_free(struct ltq_dma_channel *ch)
        if (!ch->desc_base)
                return;
        ltq_dma_close(ch);
-       dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE,
+       dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
                ch->desc_base, ch->phys);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_free);
index db20dc630e7efbadeb8ebeb497d5685a3cbad682..aee6031230306a934747c64edb4b61f6928e9e8b 100644 (file)
@@ -85,15 +85,8 @@ atomic_t hart_lottery;
 #ifdef CONFIG_BLK_DEV_INITRD
 static void __init setup_initrd(void)
 {
-       extern char __initramfs_start[];
-       extern unsigned long __initramfs_size;
        unsigned long size;
 
-       if (__initramfs_size > 0) {
-               initrd_start = (unsigned long)(&__initramfs_start);
-               initrd_end = initrd_start + __initramfs_size;
-       }
-
        if (initrd_start >= initrd_end) {
                printk(KERN_INFO "initrd not found or empty");
                goto disable;
index 80b27294c1de0844f07d01c5aecbb5410a38602e..ab9a0ebecc199b52507246b47db7b79dd0420058 100644 (file)
@@ -208,7 +208,7 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                              walk->dst.virt.addr, walk->src.virt.addr, n);
                if (k)
                        ret = blkcipher_walk_done(desc, walk, nbytes - k);
-               if (n < k) {
+               if (k < n) {
                        if (__cbc_paes_set_key(ctx) != 0)
                                return blkcipher_walk_done(desc, walk, -EIO);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
index f3e006bed9a75f94aa8bb9aa388d3037a2d71cbe..c88ed39582a10095b41b364bf6532b0f5298c6e2 100644 (file)
@@ -1272,4 +1272,8 @@ void intel_pmu_lbr_init_knl(void)
 
        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
+
+       /* Knights Landing does have MISPREDICT bit */
+       if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
+               x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
 }
index 04d038f3b6fab86fed6e458b136f8fc46c05feeb..b9ad83a0ee5dbf1604acc3c4edc90698e7eea1a6 100644 (file)
@@ -4,6 +4,7 @@ config ZONE_DMA
 
 config XTENSA
        def_bool y
+       select ARCH_HAS_SG_CHAIN
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_NO_COHERENT_DMA_MMAP if !MMU
index 295c120ed099d26f198b32342ba1bfb59f59075e..d67e30faff9c83b519702e15e1ea4ad70eed52d8 100644 (file)
@@ -64,11 +64,7 @@ endif
 vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
 plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
 
-ifeq ($(KBUILD_SRC),)
-KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(vardirs) $(plfdirs))
-else
 KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs))
-endif
 
 KBUILD_DEFCONFIG := iss_defconfig
 
index f4bbb28026f8be3006890fe236989db791c79e1d..58709e89a8ed1f41ed7acaad204d4d91e973386a 100644 (file)
@@ -78,23 +78,28 @@ static struct notifier_block iss_panic_block = {
 
 void __init platform_setup(char **p_cmdline)
 {
+       static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata;
+       static char cmdline[COMMAND_LINE_SIZE] __initdata;
        int argc = simc_argc();
        int argv_size = simc_argv_size();
 
        if (argc > 1) {
-               void **argv = alloc_bootmem(argv_size);
-               char *cmdline = alloc_bootmem(argv_size);
-               int i;
+               if (argv_size > sizeof(argv)) {
+                       pr_err("%s: command line too long: argv_size = %d\n",
+                              __func__, argv_size);
+               } else {
+                       int i;
 
-               cmdline[0] = 0;
-               simc_argv((void *)argv);
+                       cmdline[0] = 0;
+                       simc_argv((void *)argv);
 
-               for (i = 1; i < argc; ++i) {
-                       if (i > 1)
-                               strcat(cmdline, " ");
-                       strcat(cmdline, argv[i]);
+                       for (i = 1; i < argc; ++i) {
+                               if (i > 1)
+                                       strcat(cmdline, " ");
+                               strcat(cmdline, argv[i]);
+                       }
+                       *p_cmdline = cmdline;
                }
-               *p_cmdline = cmdline;
        }
 
        atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
index c19f9078da1ed9b17295eb4fcce108b1d5658c93..c630e02836a80d7d406778208c659aebda8fcf06 100644 (file)
@@ -1510,8 +1510,10 @@ int blkcg_policy_register(struct blkcg_policy *pol)
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
-       if (i >= BLKCG_MAX_POLS)
+       if (i >= BLKCG_MAX_POLS) {
+               pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
                goto err_unlock;
+       }
 
        /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
        if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
index 3f3b7b253445a1fb87dfefdfaae9ad744b82d50d..64fd96eada31f42e5677a72de837fafa2987165b 100644 (file)
@@ -332,6 +332,35 @@ err_no_vma:
        return vma ? -ENOMEM : -ESRCH;
 }
 
+
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+               struct vm_area_struct *vma)
+{
+       if (vma)
+               alloc->vma_vm_mm = vma->vm_mm;
+       /*
+        * If we see that alloc->vma is not NULL, the buffer data structures
+        * are set up completely; see the paired smp_rmb() in
+        * binder_alloc_get_vma(). We also want to guarantee that the new
+        * alloc->vma_vm_mm is always visible if alloc->vma is set.
+        */
+       smp_wmb();
+       alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+               struct binder_alloc *alloc)
+{
+       struct vm_area_struct *vma = NULL;
+
+       if (alloc->vma) {
+               /* See the description in binder_alloc_set_vma() */
+               smp_rmb();
+               vma = alloc->vma;
+       }
+       return vma;
+}
+
 static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
        size_t size, data_offsets_size;
        int ret;
 
-       if (alloc->vma == NULL) {
+       if (!binder_alloc_get_vma(alloc)) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
-       barrier();
-       alloc->vma = vma;
-       alloc->vma_vm_mm = vma->vm_mm;
+       binder_alloc_set_vma(alloc, vma);
        mmgrab(alloc->vma_vm_mm);
 
        return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
        int buffers, page_count;
        struct binder_buffer *buffer;
 
-       BUG_ON(alloc->vma);
-
        buffers = 0;
        mutex_lock(&alloc->mutex);
+       BUG_ON(alloc->vma);
+
        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
 
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-       WRITE_ONCE(alloc->vma, NULL);
+       binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-       vma = alloc->vma;
+       vma = binder_alloc_get_vma(alloc);
        if (vma) {
                if (!mmget_not_zero(alloc->vma_vm_mm))
                        goto err_mmget;
index 0943e7065e0eadbc1348b3bfdcf7aed15b495c62..b3c0498ee4331f4c2a47a72fb74bce9328b4486e 100644 (file)
@@ -209,21 +209,24 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
 static int alloc_lookup_fw_priv(const char *fw_name,
                                struct firmware_cache *fwc,
                                struct fw_priv **fw_priv, void *dbuf,
-                               size_t size)
+                               size_t size, enum fw_opt opt_flags)
 {
        struct fw_priv *tmp;
 
        spin_lock(&fwc->lock);
-       tmp = __lookup_fw_priv(fw_name);
-       if (tmp) {
-               kref_get(&tmp->ref);
-               spin_unlock(&fwc->lock);
-               *fw_priv = tmp;
-               pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
-               return 1;
+       if (!(opt_flags & FW_OPT_NOCACHE)) {
+               tmp = __lookup_fw_priv(fw_name);
+               if (tmp) {
+                       kref_get(&tmp->ref);
+                       spin_unlock(&fwc->lock);
+                       *fw_priv = tmp;
+                       pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
+                       return 1;
+               }
        }
+
        tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
-       if (tmp)
+       if (tmp && !(opt_flags & FW_OPT_NOCACHE))
                list_add(&tmp->list, &fwc->head);
        spin_unlock(&fwc->lock);
 
@@ -493,7 +496,8 @@ int assign_fw(struct firmware *fw, struct device *device,
  */
 static int
 _request_firmware_prepare(struct firmware **firmware_p, const char *name,
-                         struct device *device, void *dbuf, size_t size)
+                         struct device *device, void *dbuf, size_t size,
+                         enum fw_opt opt_flags)
 {
        struct firmware *firmware;
        struct fw_priv *fw_priv;
@@ -511,7 +515,8 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
                return 0; /* assigned */
        }
 
-       ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size);
+       ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
+                                 opt_flags);
 
        /*
         * bind with 'priv' now to avoid warning in failure path
@@ -571,7 +576,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
                goto out;
        }
 
-       ret = _request_firmware_prepare(&fw, name, device, buf, size);
+       ret = _request_firmware_prepare(&fw, name, device, buf, size,
+                                       opt_flags);
        if (ret <= 0) /* error or already assigned */
                goto out;
 
index d81781f22dba0930d3e7bbf10b2a02e46704a982..34e0030f059281148043ccb70a2b9b2e64b4216b 100644 (file)
@@ -87,10 +87,10 @@ struct nullb {
 #ifdef CONFIG_BLK_DEV_ZONED
 int null_zone_init(struct nullb_device *dev);
 void null_zone_exit(struct nullb_device *dev);
-blk_status_t null_zone_report(struct nullb *nullb,
-                                           struct nullb_cmd *cmd);
-void null_zone_write(struct nullb_cmd *cmd);
-void null_zone_reset(struct nullb_cmd *cmd);
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+                       unsigned int nr_sectors);
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
 #else
 static inline int null_zone_init(struct nullb_device *dev)
 {
@@ -98,11 +98,14 @@ static inline int null_zone_init(struct nullb_device *dev)
 }
 static inline void null_zone_exit(struct nullb_device *dev) {}
 static inline blk_status_t null_zone_report(struct nullb *nullb,
-                                           struct nullb_cmd *cmd)
+                                           struct bio *bio)
 {
        return BLK_STS_NOTSUPP;
 }
-static inline void null_zone_write(struct nullb_cmd *cmd) {}
-static inline void null_zone_reset(struct nullb_cmd *cmd) {}
+static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+                                  unsigned int nr_sectors)
+{
+}
+static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {}
 #endif /* CONFIG_BLK_DEV_ZONED */
 #endif /* __NULL_BLK_H */
index 6127e3ff7b4b343816baca729606511cb653d404..093b614d652445a337db00ea8beaded767073415 100644 (file)
@@ -1157,16 +1157,33 @@ static void null_restart_queue_async(struct nullb *nullb)
        }
 }
 
+static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
+{
+       struct nullb_device *dev = cmd->nq->dev;
+
+       if (dev->queue_mode == NULL_Q_BIO) {
+               if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
+                       cmd->error = null_zone_report(nullb, cmd->bio);
+                       return true;
+               }
+       } else {
+               if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
+                       cmd->error = null_zone_report(nullb, cmd->rq->bio);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
 {
        struct nullb_device *dev = cmd->nq->dev;
        struct nullb *nullb = dev->nullb;
        int err = 0;
 
-       if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
-               cmd->error = null_zone_report(nullb, cmd);
+       if (cmd_report_zone(nullb, cmd))
                goto out;
-       }
 
        if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
                struct request *rq = cmd->rq;
@@ -1234,10 +1251,24 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
        cmd->error = errno_to_blk_status(err);
 
        if (!cmd->error && dev->zoned) {
-               if (req_op(cmd->rq) == REQ_OP_WRITE)
-                       null_zone_write(cmd);
-               else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET)
-                       null_zone_reset(cmd);
+               sector_t sector;
+               unsigned int nr_sectors;
+               int op;
+
+               if (dev->queue_mode == NULL_Q_BIO) {
+                       op = bio_op(cmd->bio);
+                       sector = cmd->bio->bi_iter.bi_sector;
+                       nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
+               } else {
+                       op = req_op(cmd->rq);
+                       sector = blk_rq_pos(cmd->rq);
+                       nr_sectors = blk_rq_sectors(cmd->rq);
+               }
+
+               if (op == REQ_OP_WRITE)
+                       null_zone_write(cmd, sector, nr_sectors);
+               else if (op == REQ_OP_ZONE_RESET)
+                       null_zone_reset(cmd, sector);
        }
 out:
        /* Complete IO by inline, softirq or timer */
index a979ca00d7be4327f636168ef044c5a82d849744..7c6b86d987002b73251a9725d71af5b281987c9f 100644 (file)
@@ -48,8 +48,8 @@ void null_zone_exit(struct nullb_device *dev)
        kvfree(dev->zones);
 }
 
-static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
-                             unsigned int zno, unsigned int nr_zones)
+static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
+                              unsigned int zno, unsigned int nr_zones)
 {
        struct blk_zone_report_hdr *hdr = NULL;
        struct bio_vec bvec;
@@ -57,7 +57,7 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
        void *addr;
        unsigned int zones_to_cpy;
 
-       bio_for_each_segment(bvec, rq->bio, iter) {
+       bio_for_each_segment(bvec, bio, iter) {
                addr = kmap_atomic(bvec.bv_page);
 
                zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
@@ -84,29 +84,24 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
        }
 }
 
-blk_status_t null_zone_report(struct nullb *nullb,
-                                    struct nullb_cmd *cmd)
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
 {
        struct nullb_device *dev = nullb->dev;
-       struct request *rq = cmd->rq;
-       unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+       unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
        unsigned int nr_zones = dev->nr_zones - zno;
-       unsigned int max_zones = (blk_rq_bytes(rq) /
-                                       sizeof(struct blk_zone)) - 1;
+       unsigned int max_zones;
 
+       max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
        nr_zones = min_t(unsigned int, nr_zones, max_zones);
-
-       null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
+       null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
 
        return BLK_STS_OK;
 }
 
-void null_zone_write(struct nullb_cmd *cmd)
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+                    unsigned int nr_sectors)
 {
        struct nullb_device *dev = cmd->nq->dev;
-       struct request *rq = cmd->rq;
-       sector_t sector = blk_rq_pos(rq);
-       unsigned int rq_sectors = blk_rq_sectors(rq);
        unsigned int zno = null_zone_no(dev, sector);
        struct blk_zone *zone = &dev->zones[zno];
 
@@ -118,7 +113,7 @@ void null_zone_write(struct nullb_cmd *cmd)
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_IMP_OPEN:
                /* Writes must be at the write pointer position */
-               if (blk_rq_pos(rq) != zone->wp) {
+               if (sector != zone->wp) {
                        cmd->error = BLK_STS_IOERR;
                        break;
                }
@@ -126,7 +121,7 @@ void null_zone_write(struct nullb_cmd *cmd)
                if (zone->cond == BLK_ZONE_COND_EMPTY)
                        zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
-               zone->wp += rq_sectors;
+               zone->wp += nr_sectors;
                if (zone->wp == zone->start + zone->len)
                        zone->cond = BLK_ZONE_COND_FULL;
                break;
@@ -137,11 +132,10 @@ void null_zone_write(struct nullb_cmd *cmd)
        }
 }
 
-void null_zone_reset(struct nullb_cmd *cmd)
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
 {
        struct nullb_device *dev = cmd->nq->dev;
-       struct request *rq = cmd->rq;
-       unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+       unsigned int zno = null_zone_no(dev, sector);
        struct blk_zone *zone = &dev->zones[zno];
 
        zone->cond = BLK_ZONE_COND_EMPTY;
index a3397664f80014b881387e1faa855a8102a0caa5..97d6856c9c0f98b3b7adf4ecd1d83805034c1e4b 100644 (file)
@@ -59,8 +59,6 @@ enum bt_states {
        BT_STATE_RESET3,
        BT_STATE_RESTART,
        BT_STATE_PRINTME,
-       BT_STATE_CAPABILITIES_BEGIN,
-       BT_STATE_CAPABILITIES_END,
        BT_STATE_LONG_BUSY      /* BT doesn't get hosed :-) */
 };
 
@@ -86,7 +84,6 @@ struct si_sm_data {
        int             error_retries;  /* end of "common" fields */
        int             nonzero_status; /* hung BMCs stay all 0 */
        enum bt_states  complete;       /* to divert the state machine */
-       int             BT_CAP_outreqs;
        long            BT_CAP_req2rsp;
        int             BT_CAP_retries; /* Recommended retries */
 };
@@ -137,8 +134,6 @@ static char *state2txt(unsigned char state)
        case BT_STATE_RESET3:           return("RESET3");
        case BT_STATE_RESTART:          return("RESTART");
        case BT_STATE_LONG_BUSY:        return("LONG_BUSY");
-       case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
-       case BT_STATE_CAPABILITIES_END: return("CAP_END");
        }
        return("BAD STATE");
 }
@@ -185,7 +180,6 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
        bt->complete = BT_STATE_IDLE;   /* end here */
        bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
        bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
-       /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
        return 3; /* We claim 3 bytes of space; ought to check SPMI table */
 }
 
@@ -451,7 +445,7 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
 
 static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 {
-       unsigned char status, BT_CAP[8];
+       unsigned char status;
        static enum bt_states last_printed = BT_STATE_PRINTME;
        int i;
 
@@ -504,12 +498,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                if (status & BT_H_BUSY)         /* clear a leftover H_BUSY */
                        BT_CONTROL(BT_H_BUSY);
 
-               bt->timeout = bt->BT_CAP_req2rsp;
-
-               /* Read BT capabilities if it hasn't been done yet */
-               if (!bt->BT_CAP_outreqs)
-                       BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
-                                       SI_SM_CALL_WITHOUT_DELAY);
                BT_SI_SM_RETURN(SI_SM_IDLE);
 
        case BT_STATE_XACTION_START:
@@ -614,37 +602,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                BT_STATE_CHANGE(BT_STATE_XACTION_START,
                                SI_SM_CALL_WITH_DELAY);
 
-       /*
-        * Get BT Capabilities, using timing of upper level state machine.
-        * Set outreqs to prevent infinite loop on timeout.
-        */
-       case BT_STATE_CAPABILITIES_BEGIN:
-               bt->BT_CAP_outreqs = 1;
-               {
-                       unsigned char GetBT_CAP[] = { 0x18, 0x36 };
-                       bt->state = BT_STATE_IDLE;
-                       bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
-               }
-               bt->complete = BT_STATE_CAPABILITIES_END;
-               BT_STATE_CHANGE(BT_STATE_XACTION_START,
-                               SI_SM_CALL_WITH_DELAY);
-
-       case BT_STATE_CAPABILITIES_END:
-               i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
-               bt_init_data(bt, bt->io);
-               if ((i == 8) && !BT_CAP[2]) {
-                       bt->BT_CAP_outreqs = BT_CAP[3];
-                       bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
-                       bt->BT_CAP_retries = BT_CAP[7];
-               } else
-                       printk(KERN_WARNING "IPMI BT: using default values\n");
-               if (!bt->BT_CAP_outreqs)
-                       bt->BT_CAP_outreqs = 1;
-               printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
-                       bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
-               bt->timeout = bt->BT_CAP_req2rsp;
-               return SI_SM_CALL_WITHOUT_DELAY;
-
        default:        /* should never occur */
                return error_recovery(bt,
                                      status,
@@ -655,6 +612,11 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 
 static int bt_detect(struct si_sm_data *bt)
 {
+       unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+       unsigned char BT_CAP[8];
+       enum si_sm_result smi_result;
+       int rv;
+
        /*
         * It's impossible for the BT status and interrupt registers to be
         * all 1's, (assuming a properly functioning, self-initialized BMC)
@@ -665,6 +627,48 @@ static int bt_detect(struct si_sm_data *bt)
        if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
                return 1;
        reset_flags(bt);
+
+       /*
+        * Try getting the BT capabilities here.
+        */
+       rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+       if (rv) {
+               dev_warn(bt->io->dev,
+                        "Can't start capabilities transaction: %d\n", rv);
+               goto out_no_bt_cap;
+       }
+
+       smi_result = SI_SM_CALL_WITHOUT_DELAY;
+       for (;;) {
+               if (smi_result == SI_SM_CALL_WITH_DELAY ||
+                   smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+                       schedule_timeout_uninterruptible(1);
+                       smi_result = bt_event(bt, jiffies_to_usecs(1));
+               } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+                       smi_result = bt_event(bt, 0);
+               } else
+                       break;
+       }
+
+       rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+       bt_init_data(bt, bt->io);
+       if (rv < 8) {
+               dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv);
+               goto out_no_bt_cap;
+       }
+
+       if (BT_CAP[2]) {
+               dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]);
+out_no_bt_cap:
+               dev_warn(bt->io->dev, "using default values\n");
+       } else {
+               bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
+               bt->BT_CAP_retries = BT_CAP[7];
+       }
+
+       dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n",
+                bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
+
        return 0;
 }
 
index 51832b8a2c6283f9b1ccfd07a60997fdade8d4cf..7fc9612070a1f1abe43b489f226c84a9c4c50632 100644 (file)
@@ -3381,39 +3381,45 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
 
        rv = handlers->start_processing(send_info, intf);
        if (rv)
-               goto out;
+               goto out_err;
 
        rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
        if (rv) {
                dev_err(si_dev, "Unable to get the device id: %d\n", rv);
-               goto out;
+               goto out_err_started;
        }
 
        mutex_lock(&intf->bmc_reg_mutex);
        rv = __scan_channels(intf, &id);
        mutex_unlock(&intf->bmc_reg_mutex);
+       if (rv)
+               goto out_err_bmc_reg;
 
- out:
-       if (rv) {
-               ipmi_bmc_unregister(intf);
-               list_del_rcu(&intf->link);
-               mutex_unlock(&ipmi_interfaces_mutex);
-               synchronize_srcu(&ipmi_interfaces_srcu);
-               cleanup_srcu_struct(&intf->users_srcu);
-               kref_put(&intf->refcount, intf_free);
-       } else {
-               /*
-                * Keep memory order straight for RCU readers.  Make
-                * sure everything else is committed to memory before
-                * setting intf_num to mark the interface valid.
-                */
-               smp_wmb();
-               intf->intf_num = i;
-               mutex_unlock(&ipmi_interfaces_mutex);
+       /*
+        * Keep memory order straight for RCU readers.  Make
+        * sure everything else is committed to memory before
+        * setting intf_num to mark the interface valid.
+        */
+       smp_wmb();
+       intf->intf_num = i;
+       mutex_unlock(&ipmi_interfaces_mutex);
 
-               /* After this point the interface is legal to use. */
-               call_smi_watchers(i, intf->si_dev);
-       }
+       /* After this point the interface is legal to use. */
+       call_smi_watchers(i, intf->si_dev);
+
+       return 0;
+
+ out_err_bmc_reg:
+       ipmi_bmc_unregister(intf);
+ out_err_started:
+       if (intf->handlers->shutdown)
+               intf->handlers->shutdown(intf->send_info);
+ out_err:
+       list_del_rcu(&intf->link);
+       mutex_unlock(&ipmi_interfaces_mutex);
+       synchronize_srcu(&ipmi_interfaces_srcu);
+       cleanup_srcu_struct(&intf->users_srcu);
+       kref_put(&intf->refcount, intf_free);
 
        return rv;
 }
@@ -3504,7 +3510,8 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
        }
        srcu_read_unlock(&intf->users_srcu, index);
 
-       intf->handlers->shutdown(intf->send_info);
+       if (intf->handlers->shutdown)
+               intf->handlers->shutdown(intf->send_info);
 
        cleanup_smi_msgs(intf);
 
index 90ec010bffbd9776c012586b4e01b24cdd0bd2d6..5faa917df1b629647cb7fecd291a1d1b8d80eae9 100644 (file)
@@ -2083,18 +2083,9 @@ static int try_smi_init(struct smi_info *new_smi)
                 si_to_str[new_smi->io.si_type]);
 
        WARN_ON(new_smi->io.dev->init_name != NULL);
-       kfree(init_name);
-
-       return 0;
-
-out_err:
-       if (new_smi->intf) {
-               ipmi_unregister_smi(new_smi->intf);
-               new_smi->intf = NULL;
-       }
 
+ out_err:
        kfree(init_name);
-
        return rv;
 }
 
@@ -2227,6 +2218,8 @@ static void shutdown_smi(void *send_info)
 
        kfree(smi_info->si_sm);
        smi_info->si_sm = NULL;
+
+       smi_info->intf = NULL;
 }
 
 /*
@@ -2240,10 +2233,8 @@ static void cleanup_one_si(struct smi_info *smi_info)
 
        list_del(&smi_info->link);
 
-       if (smi_info->intf) {
+       if (smi_info->intf)
                ipmi_unregister_smi(smi_info->intf);
-               smi_info->intf = NULL;
-       }
 
        if (smi_info->pdev) {
                if (smi_info->pdev_registered)
index 18e4650c233b1de514ee836dfc28021f35953a5b..29e67a80fb208f804e4ed1fb0a560142002c7ffc 100644 (file)
@@ -181,6 +181,8 @@ struct ssif_addr_info {
        struct device *dev;
        struct i2c_client *client;
 
+       struct i2c_client *added_client;
+
        struct mutex clients_mutex;
        struct list_head clients;
 
@@ -1214,18 +1216,11 @@ static void shutdown_ssif(void *send_info)
                complete(&ssif_info->wake_thread);
                kthread_stop(ssif_info->thread);
        }
-
-       /*
-        * No message can be outstanding now, we have removed the
-        * upper layer and it permitted us to do so.
-        */
-       kfree(ssif_info);
 }
 
 static int ssif_remove(struct i2c_client *client)
 {
        struct ssif_info *ssif_info = i2c_get_clientdata(client);
-       struct ipmi_smi *intf;
        struct ssif_addr_info *addr_info;
 
        if (!ssif_info)
@@ -1235,9 +1230,7 @@ static int ssif_remove(struct i2c_client *client)
         * After this point, we won't deliver anything asynchronously
         * to the message handler.  We can unregister ourselves.
         */
-       intf = ssif_info->intf;
-       ssif_info->intf = NULL;
-       ipmi_unregister_smi(intf);
+       ipmi_unregister_smi(ssif_info->intf);
 
        list_for_each_entry(addr_info, &ssif_infos, link) {
                if (addr_info->client == client) {
@@ -1246,6 +1239,8 @@ static int ssif_remove(struct i2c_client *client)
                }
        }
 
+       kfree(ssif_info);
+
        return 0;
 }
 
@@ -1648,15 +1643,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
  out:
        if (rv) {
-               /*
-                * Note that if addr_info->client is assigned, we
-                * leave it.  The i2c client hangs around even if we
-                * return a failure here, and the failure here is not
-                * propagated back to the i2c code.  This seems to be
-                * design intent, strange as it may be.  But if we
-                * don't leave it, ssif_platform_remove will not remove
-                * the client like it should.
-                */
+               if (addr_info)
+                       addr_info->client = NULL;
+
                dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv);
                kfree(ssif_info);
        }
@@ -1676,7 +1665,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque)
        if (adev->type != &i2c_adapter_type)
                return 0;
 
-       i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo);
+       addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
+                                                &addr_info->binfo);
 
        if (!addr_info->adapter_name)
                return 1; /* Only try the first I2C adapter by default. */
@@ -1849,7 +1839,7 @@ static int ssif_platform_remove(struct platform_device *dev)
                return 0;
 
        mutex_lock(&ssif_infos_mutex);
-       i2c_unregister_device(addr_info->client);
+       i2c_unregister_device(addr_info->added_client);
 
        list_del(&addr_info->link);
        kfree(addr_info);
index bb882ab161fe1bbb4b678cc9bf105b77273296e3..e6124bd548df211317e7a69fa42da99849382e69 100644 (file)
@@ -16,6 +16,8 @@
 
 #include "kcs_bmc.h"
 
+#define DEVICE_NAME "ipmi-kcs"
+
 #define KCS_MSG_BUFSIZ    1000
 
 #define KCS_ZERO_DATA     0
@@ -429,8 +431,6 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
        if (!kcs_bmc)
                return NULL;
 
-       dev_set_name(dev, "ipmi-kcs%u", channel);
-
        spin_lock_init(&kcs_bmc->lock);
        kcs_bmc->channel = channel;
 
@@ -444,7 +444,8 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
                return NULL;
 
        kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
-       kcs_bmc->miscdev.name = dev_name(dev);
+       kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
+                                              DEVICE_NAME, channel);
        kcs_bmc->miscdev.fops = &kcs_bmc_fops;
 
        return kcs_bmc;
index b76cb17d879c635efbfec026da59a5c608ff0a6e..adfd316db1a892a5f0bbae831ae6569c8c2ba270 100644 (file)
@@ -639,7 +639,7 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
        int ret;
        struct device *dev = &mbdev->dev;
 
-       mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL);
+       mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL);
        if (!mic_dma_dev) {
                ret = -ENOMEM;
                goto alloc_error;
@@ -664,7 +664,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
 reg_error:
        mic_dma_uninit(mic_dma_dev);
 init_error:
-       kfree(mic_dma_dev);
        mic_dma_dev = NULL;
 alloc_error:
        dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
@@ -674,7 +673,6 @@ alloc_error:
 static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
 {
        mic_dma_uninit(mic_dma_dev);
-       kfree(mic_dma_dev);
 }
 
 /* DEBUGFS CODE */
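
[Editor's note] Switching mic_dma_dev to devm_kzalloc() ties the allocation's lifetime to the mbus device, which is why both the init_error path and mic_dma_dev_unreg() drop their kfree() calls. A generic sketch of the trade (hypothetical names):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Sketch: device-managed memory is released automatically on unbind,
 * so neither the probe error path nor remove() needs a kfree().
 */
static void *sketch_alloc_state(struct device *dev, size_t size)
{
	void *state = devm_kzalloc(dev, size, GFP_KERNEL);

	return state ? state : ERR_PTR(-ENOMEM);
}
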
index fc9fd2d0482f4d8356f4ae8e1acfacdfdbfe3b65..0b840531ef33a2e1f387d15f550fb808df5ae8bf 100644 (file)
@@ -420,7 +420,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
                /* Create region for each port */
                fme_region = dfl_fme_create_region(pdata, mgr,
                                                   fme_br->br, i);
-               if (!fme_region) {
+               if (IS_ERR(fme_region)) {
                        ret = PTR_ERR(fme_region);
                        goto destroy_region;
                }
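
[Editor's note] dfl_fme_create_region() reports failure through ERR_PTR(), never NULL, so the old `if (!fme_region)` test let every failure fall through, and PTR_ERR() was never reached with a meaningful value. A sketch of the convention being restored (sketch_* names are invented):

#include <linux/err.h>

struct sketch_obj { int id; };
static struct sketch_obj sketch_instance;

static struct sketch_obj *sketch_create(int fail)
{
	/* failure is reported via ERR_PTR(); NULL is never returned */
	return fail ? ERR_PTR(-ENODEV) : &sketch_instance;
}

static int sketch_caller(int fail)
{
	struct sketch_obj *obj = sketch_create(fail);

	if (IS_ERR(obj))		/* not: if (!obj) */
		return PTR_ERR(obj);
	return 0;
}
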
index b6e9df11115d358734d686af7b26a9eec0a6de64..b31d121a876bf32e64c51ad23ead9b08713f74bb 100644 (file)
@@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 {
        struct drm_gem_object *gobj;
        unsigned long size;
+       int r;
 
        gobj = drm_gem_object_lookup(p->filp, data->handle);
        if (gobj == NULL)
@@ -50,20 +51,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
        p->uf_entry.tv.shared = true;
        p->uf_entry.user_pages = NULL;
 
-       size = amdgpu_bo_size(p->uf_entry.robj);
-       if (size != PAGE_SIZE || (data->offset + 8) > size)
-               return -EINVAL;
-
-       *offset = data->offset;
-
        drm_gem_object_put_unlocked(gobj);
 
+       size = amdgpu_bo_size(p->uf_entry.robj);
+       if (size != PAGE_SIZE || (data->offset + 8) > size) {
+               r = -EINVAL;
+               goto error_unref;
+       }
+
        if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
-               amdgpu_bo_unref(&p->uf_entry.robj);
-               return -EINVAL;
+               r = -EINVAL;
+               goto error_unref;
        }
 
+       *offset = data->offset;
+
        return 0;
+
+error_unref:
+       amdgpu_bo_unref(&p->uf_entry.robj);
+       return r;
 }
 
 static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
@@ -1262,10 +1269,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 error_abort:
        dma_fence_put(&job->base.s_fence->finished);
        job->base.s_fence = NULL;
+       amdgpu_mn_unlock(p->mn);
 
 error_unlock:
        amdgpu_job_free(job);
-       amdgpu_mn_unlock(p->mn);
        return r;
 }
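
[Editor's note] Both amdgpu_cs hunks apply the same unwind rule: an error label must release exactly what is held at every point that can jump to it. In the user-fence chunk, the GEM reference is now dropped on every error path through one label; in the submit path, amdgpu_mn_unlock() moves into error_abort so that error_unlock, reachable before the lock is taken, no longer unlocks it. A generic sketch of the rule (all sketch_* helpers are stand-ins):

#include <linux/mutex.h>

static void *sketch_take_ref(void)	{ return (void *)1; /* stub */ }
static void sketch_drop_ref(void *obj)	{ (void)obj; }
static int sketch_commit(void *obj)	{ (void)obj; return 0; }

/* Each label releases only what is held when it can be reached. */
static int sketch_submit(struct mutex *lock)
{
	void *obj;
	int r;

	obj = sketch_take_ref();
	if (!obj)
		return -ENOMEM;		/* nothing held yet */

	mutex_lock(lock);
	r = sketch_commit(obj);
	if (r)
		goto err_unlock;	/* ref and lock both held */

	mutex_unlock(lock);
	return 0;

err_unlock:
	mutex_unlock(lock);
	sketch_drop_ref(obj);
	return r;
}
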
 
index 8ab5ccbc14ace34f6618452b01432bafd55cb5a3..39bf2ce548c61e2cabb2ad65aa9036d07f963093 100644 (file)
@@ -2063,6 +2063,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
        static enum amd_ip_block_type ip_order[] = {
                AMD_IP_BLOCK_TYPE_GMC,
                AMD_IP_BLOCK_TYPE_COMMON,
+               AMD_IP_BLOCK_TYPE_PSP,
                AMD_IP_BLOCK_TYPE_IH,
        };
 
@@ -2093,7 +2094,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 
        static enum amd_ip_block_type ip_order[] = {
                AMD_IP_BLOCK_TYPE_SMC,
-               AMD_IP_BLOCK_TYPE_PSP,
                AMD_IP_BLOCK_TYPE_DCE,
                AMD_IP_BLOCK_TYPE_GFX,
                AMD_IP_BLOCK_TYPE_SDMA,
index e7ca4623cfb946f41b0e3e1ffb107a3f295daedb..7c3b634d8d5f4117698ca46542eba887b85c4692 100644 (file)
@@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
@@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
-       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
@@ -109,7 +111,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
-       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
index a45f46d8537f15bd187fd4195d7de2c6b6dffd2a..c7afee37b2b8cbe5c85786fbf1171ed86c0731d4 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/device.h>
 #include <linux/mm.h>
 #include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
@@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
        info = (struct kvmgt_guest_info *)handle;
        kvm = info->kvm;
 
-       if (kthread)
+       if (kthread) {
+               if (!mmget_not_zero(kvm->mm))
+                       return -EFAULT;
                use_mm(kvm->mm);
+       }
 
        idx = srcu_read_lock(&kvm->srcu);
        ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
                      kvm_read_guest(kvm, gpa, buf, len);
        srcu_read_unlock(&kvm->srcu, idx);
 
-       if (kthread)
+       if (kthread) {
                unuse_mm(kvm->mm);
+               mmput(kvm->mm);
+       }
 
        return ret;
 }
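
[Editor's note] kvmgt_rw_gpa() may run from a kernel thread that has no address space of its own; use_mm() borrows the guest process' mm, but nothing previously guaranteed that mm stayed alive for the duration. The fix brackets the borrow with mmget_not_zero()/mmput(). A sketch of the pattern, assuming the ~4.19-era use_mm()/unuse_mm() API (later renamed kthread_use_mm()/kthread_unuse_mm()):

#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>

/* Sketch: borrow another task's mm from a kthread, safely. */
static int sketch_copy_from_mm(struct mm_struct *mm, void *dst,
			       const void __user *src, size_t len)
{
	int ret;

	if (!mmget_not_zero(mm))
		return -EFAULT;		/* mm already being torn down */

	use_mm(mm);			/* adopt @mm for this kthread */
	ret = copy_from_user(dst, src, len) ? -EFAULT : 0;
	unuse_mm(mm);

	mmput(mm);			/* drop our pin */
	return ret;
}
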
index fa75a2eead9070fbfd1894b659b37f84eed9e11a..b0d3a43ccd033761dfefdf077e09526a98aff3c0 100644 (file)
@@ -42,8 +42,6 @@
 #define DEVICE_TYPE_EFP3   0x20
 #define DEVICE_TYPE_EFP4   0x10
 
-#define DEV_SIZE       38
-
 struct opregion_header {
        u8 signature[16];
        u32 size;
@@ -63,6 +61,10 @@ struct bdb_data_header {
        u16 size; /* data size */
 } __packed;
 
+/* To support Windows guests with an opregion, hardcode the emulated
+ * BDB header version as '186'; the corresponding child_device_config
+ * length should then be '33', not '38'.
+ */
 struct efp_child_device_config {
        u16 handle;
        u16 device_type;
@@ -109,12 +111,6 @@ struct efp_child_device_config {
        u8 mipi_bridge_type; /* 171 */
        u16 device_class_ext;
        u8 dvo_function;
-       u8 dp_usb_type_c:1; /* 195 */
-       u8 skip6:7;
-       u8 dp_usb_type_c_2x_gpio_index; /* 195 */
-       u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
-       u8 iboost_dp:4; /* 196 */
-       u8 iboost_hdmi:4; /* 196 */
 } __packed;
 
 struct vbt {
@@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v)
        v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 
        strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
-       v->bdb_header.version = 186; /* child_dev_size = 38 */
+       v->bdb_header.version = 186; /* child_dev_size = 33 */
        v->bdb_header.header_size = sizeof(v->bdb_header);
 
        v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
@@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v)
 
        /* child device */
        num_child = 4; /* each port has one child */
+       v->general_definitions.child_dev_size =
+               sizeof(struct efp_child_device_config);
        v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
        /* size will include child devices */
        v->general_definitions_header.size =
-               sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
-       v->general_definitions.child_dev_size = DEV_SIZE;
+               sizeof(struct bdb_general_definitions) +
+                       num_child * v->general_definitions.child_dev_size;
 
        /* portA */
        v->child0.handle = DEVICE_TYPE_EFP1;
index 4a3c8ee9a9732cccc4e1ce1cc42197b0c662cc73..d2951096bca0d48caaf10abf1827ef81152abdc8 100644 (file)
@@ -5079,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->pcu_lock);
-               /* wait for pcode to finish disabling IPS, which may take up to 42ms */
+               /*
+                * Wait for PCODE to finish disabling IPS. The BSpec specified
+                * 42ms timeout value leads to occasional timeouts so use 100ms
+                * instead.
+                */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, 0,
-                                           42))
+                                           100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
index c2f10d8993296471833e4a5d8a0cd69463a992b4..443dfaefd7a6b413c4664b132fe5db59a9297f24 100644 (file)
@@ -181,8 +181,9 @@ struct intel_overlay {
        u32 brightness, contrast, saturation;
        u32 old_xscale, old_yscale;
        /* register access */
-       u32 flip_addr;
        struct drm_i915_gem_object *reg_bo;
+       struct overlay_registers __iomem *regs;
+       u32 flip_addr;
        /* flip handling */
        struct i915_gem_active last_flip;
 };
@@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
                                  PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
 }
 
-static struct overlay_registers __iomem *
-intel_overlay_map_regs(struct intel_overlay *overlay)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       struct overlay_registers __iomem *regs;
-
-       if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
-       else
-               regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
-                                        overlay->flip_addr,
-                                        PAGE_SIZE);
-
-       return regs;
-}
-
-static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
-                                    struct overlay_registers __iomem *regs)
-{
-       if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
-               io_mapping_unmap(regs);
-}
-
 static void intel_overlay_submit_request(struct intel_overlay *overlay,
                                         struct i915_request *rq,
                                         i915_gem_retire_fn retire)
@@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                                      struct drm_i915_gem_object *new_bo,
                                      struct put_image_params *params)
 {
-       int ret, tmp_width;
-       struct overlay_registers __iomem *regs;
-       bool scale_changed = false;
+       struct overlay_registers __iomem *regs = overlay->regs;
        struct drm_i915_private *dev_priv = overlay->i915;
        u32 swidth, swidthsw, sheight, ostride;
        enum pipe pipe = overlay->crtc->pipe;
+       bool scale_changed = false;
        struct i915_vma *vma;
+       int ret, tmp_width;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
        WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
        if (!overlay->active) {
                u32 oconfig;
-               regs = intel_overlay_map_regs(overlay);
-               if (!regs) {
-                       ret = -ENOMEM;
-                       goto out_unpin;
-               }
+
                oconfig = OCONF_CC_OUT_8BIT;
                if (IS_GEN4(dev_priv))
                        oconfig |= OCONF_CSC_MODE_BT709;
                oconfig |= pipe == 0 ?
                        OCONF_PIPE_A : OCONF_PIPE_B;
                iowrite32(oconfig, &regs->OCONFIG);
-               intel_overlay_unmap_regs(overlay, regs);
 
                ret = intel_overlay_on(overlay);
                if (ret != 0)
                        goto out_unpin;
        }
 
-       regs = intel_overlay_map_regs(overlay);
-       if (!regs) {
-               ret = -ENOMEM;
-               goto out_unpin;
-       }
-
        iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
        iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
 
@@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
        iowrite32(overlay_cmd_reg(params), &regs->OCMD);
 
-       intel_overlay_unmap_regs(overlay, regs);
-
        ret = intel_overlay_continue(overlay, vma, scale_changed);
        if (ret)
                goto out_unpin;
@@ -901,7 +866,6 @@ out_pin_section:
 int intel_overlay_switch_off(struct intel_overlay *overlay)
 {
        struct drm_i915_private *dev_priv = overlay->i915;
-       struct overlay_registers __iomem *regs;
        int ret;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
        if (ret != 0)
                return ret;
 
-       regs = intel_overlay_map_regs(overlay);
-       iowrite32(0, &regs->OCMD);
-       intel_overlay_unmap_regs(overlay, regs);
+       iowrite32(0, &overlay->regs->OCMD);
 
        return intel_overlay_off(overlay);
 }
@@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
        struct drm_intel_overlay_attrs *attrs = data;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_overlay *overlay;
-       struct overlay_registers __iomem *regs;
        int ret;
 
        overlay = dev_priv->overlay;
@@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
                overlay->contrast   = attrs->contrast;
                overlay->saturation = attrs->saturation;
 
-               regs = intel_overlay_map_regs(overlay);
-               if (!regs) {
-                       ret = -ENOMEM;
-                       goto out_unlock;
-               }
-
-               update_reg_attrs(overlay, regs);
-
-               intel_overlay_unmap_regs(overlay, regs);
+               update_reg_attrs(overlay, overlay->regs);
 
                if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
                        if (IS_GEN2(dev_priv))
@@ -1386,12 +1339,47 @@ out_unlock:
        return ret;
 }
 
+static int get_registers(struct intel_overlay *overlay, bool use_phys)
+{
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int err;
+
+       obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
+       if (obj == NULL)
+               obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto err_put_bo;
+       }
+
+       if (use_phys)
+               overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+       else
+               overlay->flip_addr = i915_ggtt_offset(vma);
+       overlay->regs = i915_vma_pin_iomap(vma);
+       i915_vma_unpin(vma);
+
+       if (IS_ERR(overlay->regs)) {
+               err = PTR_ERR(overlay->regs);
+               goto err_put_bo;
+       }
+
+       overlay->reg_bo = obj;
+       return 0;
+
+err_put_bo:
+       i915_gem_object_put(obj);
+       return err;
+}
+
 void intel_setup_overlay(struct drm_i915_private *dev_priv)
 {
        struct intel_overlay *overlay;
-       struct drm_i915_gem_object *reg_bo;
-       struct overlay_registers __iomem *regs;
-       struct i915_vma *vma = NULL;
        int ret;
 
        if (!HAS_OVERLAY(dev_priv))
@@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
        if (!overlay)
                return;
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       if (WARN_ON(dev_priv->overlay))
-               goto out_free;
-
        overlay->i915 = dev_priv;
 
-       reg_bo = NULL;
-       if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
-       if (reg_bo == NULL)
-               reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
-       if (IS_ERR(reg_bo))
-               goto out_free;
-       overlay->reg_bo = reg_bo;
-
-       if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
-               ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
-               if (ret) {
-                       DRM_ERROR("failed to attach phys overlay regs\n");
-                       goto out_free_bo;
-               }
-               overlay->flip_addr = reg_bo->phys_handle->busaddr;
-       } else {
-               vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
-                                              0, PAGE_SIZE, PIN_MAPPABLE);
-               if (IS_ERR(vma)) {
-                       DRM_ERROR("failed to pin overlay register bo\n");
-                       ret = PTR_ERR(vma);
-                       goto out_free_bo;
-               }
-               overlay->flip_addr = i915_ggtt_offset(vma);
-
-               ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
-               if (ret) {
-                       DRM_ERROR("failed to move overlay register bo into the GTT\n");
-                       goto out_unpin_bo;
-               }
-       }
-
-       /* init all values */
        overlay->color_key = 0x0101fe;
        overlay->color_key_enabled = true;
        overlay->brightness = -19;
@@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 
        init_request_active(&overlay->last_flip, NULL);
 
-       regs = intel_overlay_map_regs(overlay);
-       if (!regs)
-               goto out_unpin_bo;
+       mutex_lock(&dev_priv->drm.struct_mutex);
+
+       ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+       if (ret)
+               goto out_free;
+
+       ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
+       if (ret)
+               goto out_reg_bo;
 
-       memset_io(regs, 0, sizeof(struct overlay_registers));
-       update_polyphase_filter(regs);
-       update_reg_attrs(overlay, regs);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       intel_overlay_unmap_regs(overlay, regs);
+       memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
+       update_polyphase_filter(overlay->regs);
+       update_reg_attrs(overlay, overlay->regs);
 
        dev_priv->overlay = overlay;
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-       DRM_INFO("initialized overlay support\n");
+       DRM_INFO("Initialized overlay support.\n");
        return;
 
-out_unpin_bo:
-       if (vma)
-               i915_vma_unpin(vma);
-out_free_bo:
-       i915_gem_object_put(reg_bo);
+out_reg_bo:
+       i915_gem_object_put(overlay->reg_bo);
 out_free:
        mutex_unlock(&dev_priv->drm.struct_mutex);
        kfree(overlay);
-       return;
 }
 
 void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
 {
-       if (!dev_priv->overlay)
+       struct intel_overlay *overlay;
+
+       overlay = fetch_and_zero(&dev_priv->overlay);
+       if (!overlay)
                return;
 
-       /* The bo's should be free'd by the generic code already.
+       /*
+        * The BOs should be freed by the generic code already.
         * Furthermore modesetting teardown happens beforehand so the
-        * hardware should be off already */
-       WARN_ON(dev_priv->overlay->active);
+        * hardware should be off already.
+        */
+       WARN_ON(overlay->active);
+
+       i915_gem_object_put(overlay->reg_bo);
 
-       i915_gem_object_put(dev_priv->overlay->reg_bo);
-       kfree(dev_priv->overlay);
+       kfree(overlay);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -1498,37 +1455,11 @@ struct intel_overlay_error_state {
        u32 isr;
 };
 
-static struct overlay_registers __iomem *
-intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       struct overlay_registers __iomem *regs;
-
-       if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               /* Cast to make sparse happy, but it's wc memory anyway, so
-                * equivalent to the wc io mapping on X86. */
-               regs = (struct overlay_registers __iomem *)
-                       overlay->reg_bo->phys_handle->vaddr;
-       else
-               regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
-                                               overlay->flip_addr);
-
-       return regs;
-}
-
-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
-                                       struct overlay_registers __iomem *regs)
-{
-       if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
-               io_mapping_unmap_atomic(regs);
-}
-
 struct intel_overlay_error_state *
 intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
 {
        struct intel_overlay *overlay = dev_priv->overlay;
        struct intel_overlay_error_state *error;
-       struct overlay_registers __iomem *regs;
 
        if (!overlay || !overlay->active)
                return NULL;
@@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
        error->isr = I915_READ(ISR);
        error->base = overlay->flip_addr;
 
-       regs = intel_overlay_map_regs_atomic(overlay);
-       if (!regs)
-               goto err;
-
-       memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
-       intel_overlay_unmap_regs_atomic(overlay, regs);
+       memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
 
        return error;
-
-err:
-       kfree(error);
-       return NULL;
 }
 
 void
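
[Editor's note] The overlay rework trades a map/use/unmap cycle, where every call site had to handle a mapping failure, for a single register mapping pinned at setup, kept in overlay->regs, and torn down with the object; the atomic-context map helpers for error capture become unnecessary for the same reason. A reduced sketch of the shape of that change (struct and helper names invented):

#include <linux/io.h>
#include <linux/io-mapping.h>
#include <linux/mm.h>

struct sketch_overlay {
	void __iomem *regs;	/* valid from init to fini */
};

static int sketch_init(struct sketch_overlay *ov, struct io_mapping *iomap,
		       unsigned long offset)
{
	/* map once; every later access is a plain dereference */
	ov->regs = io_mapping_map_wc(iomap, offset, PAGE_SIZE);
	return ov->regs ? 0 : -ENOMEM;
}

static void sketch_write(struct sketch_overlay *ov, u32 val,
			 unsigned int reg)
{
	iowrite32(val, ov->regs + reg);
}

static void sketch_fini(struct sketch_overlay *ov)
{
	io_mapping_unmap(ov->regs);
}
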
index 8412119bd94058b7197c531c49fc7ad88a9e6fab..5691dfa1db6fe388bcf50b2db79c3aa5a05a7696 100644 (file)
@@ -1123,17 +1123,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
        int ret;
 
        if (dpcd >= 0x12) {
-               ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
+               /* Even if we're enabling MST, start with disabling the
+                * branching unit to clear any sink-side MST topology state
+                * that wasn't set by us.
+                */
+               ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
                if (ret < 0)
                        return ret;
 
-               dpcd &= ~DP_MST_EN;
-               if (state)
-                       dpcd |= DP_MST_EN;
-
-               ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
-               if (ret < 0)
-                       return ret;
+               if (state) {
+                       /* Now, start initializing */
+                       ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
+                                                DP_MST_EN);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
        return nvif_mthd(disp, 0, &args, sizeof(args));
@@ -1142,31 +1146,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
 int
 nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
 {
-       int ret, state = 0;
+       struct drm_dp_aux *aux;
+       int ret;
+       bool old_state, new_state;
+       u8 mstm_ctrl;
 
        if (!mstm)
                return 0;
 
-       if (dpcd[0] >= 0x12) {
-               ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
+       mutex_lock(&mstm->mgr.lock);
+
+       old_state = mstm->mgr.mst_state;
+       new_state = old_state;
+       aux = mstm->mgr.aux;
+
+       if (old_state) {
+               /* Just check that the MST hub is still as we expect it */
+               ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
+               if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
+                       DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
+                       new_state = false;
+               }
+       } else if (dpcd[0] >= 0x12) {
+               ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
                if (ret < 0)
-                       return ret;
+                       goto probe_error;
 
                if (!(dpcd[1] & DP_MST_CAP))
                        dpcd[0] = 0x11;
                else
-                       state = allow;
+                       new_state = allow;
+       }
+
+       if (new_state == old_state) {
+               mutex_unlock(&mstm->mgr.lock);
+               return new_state;
        }
 
-       ret = nv50_mstm_enable(mstm, dpcd[0], state);
+       ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
        if (ret)
-               return ret;
+               goto probe_error;
+
+       mutex_unlock(&mstm->mgr.lock);
 
-       ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
+       ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
        if (ret)
                return nv50_mstm_enable(mstm, dpcd[0], 0);
 
-       return mstm->mgr.mst_state;
+       return new_state;
+
+probe_error:
+       mutex_unlock(&mstm->mgr.lock);
+       return ret;
 }
 
 static void
@@ -2074,7 +2105,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
 static const struct drm_mode_config_funcs
 nv50_disp_func = {
        .fb_create = nouveau_user_framebuffer_create,
-       .output_poll_changed = drm_fb_helper_output_poll_changed,
+       .output_poll_changed = nouveau_fbcon_output_poll_changed,
        .atomic_check = nv50_disp_atomic_check,
        .atomic_commit = nv50_disp_atomic_commit,
        .atomic_state_alloc = nv50_disp_atomic_state_alloc,
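
[Editor's note] The nv50_mstm_enable() change replaces a read-modify-write of DP_MSTM_CTRL with an unconditional clear followed, only when enabling, by a write of DP_MST_EN, so any sink-side topology state left behind by firmware or a previous driver is discarded first. A minimal sketch of that sequence (helper name invented):

#include <drm/drm_dp_helper.h>

/* Sketch: clear DP_MSTM_CTRL before (optionally) enabling MST. */
static int sketch_mst_enable(struct drm_dp_aux *aux, bool enable)
{
	int ret;

	/* drop any stale sink-side MST state we didn't set */
	ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
	if (ret < 0)
		return ret;

	if (!enable)
		return 0;

	ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, DP_MST_EN);
	return ret < 0 ? ret : 0;
}
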
index 51932c72334ef6529abb18bc44762cc01ba2d176..247f72cc4d10a4547309effb6b8904e9c0d60d49 100644 (file)
@@ -409,59 +409,45 @@ static struct nouveau_encoder *
 nouveau_connector_ddc_detect(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct nouveau_connector *nv_connector = nouveau_connector(connector);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
-       struct nouveau_encoder *nv_encoder = NULL;
+       struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
        struct drm_encoder *encoder;
-       int i, panel = -ENODEV;
-
-       /* eDP panels need powering on by us (if the VBIOS doesn't default it
-        * to on) before doing any AUX channel transactions.  LVDS panel power
-        * is handled by the SOR itself, and not required for LVDS DDC.
-        */
-       if (nv_connector->type == DCB_CONNECTOR_eDP) {
-               panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
-               if (panel == 0) {
-                       nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
-                       msleep(300);
-               }
-       }
+       int i, ret;
+       bool switcheroo_ddc = false;
 
        drm_connector_for_each_possible_encoder(connector, encoder, i) {
                nv_encoder = nouveau_encoder(encoder);
 
-               if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-                       int ret = nouveau_dp_detect(nv_encoder);
+               switch (nv_encoder->dcb->type) {
+               case DCB_OUTPUT_DP:
+                       ret = nouveau_dp_detect(nv_encoder);
                        if (ret == NOUVEAU_DP_MST)
                                return NULL;
-                       if (ret == NOUVEAU_DP_SST)
-                               break;
-               } else
-               if ((vga_switcheroo_handler_flags() &
-                    VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
-                   nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
-                   nv_encoder->i2c) {
-                       int ret;
-                       vga_switcheroo_lock_ddc(dev->pdev);
-                       ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50);
-                       vga_switcheroo_unlock_ddc(dev->pdev);
-                       if (ret)
+                       else if (ret == NOUVEAU_DP_SST)
+                               found = nv_encoder;
+
+                       break;
+               case DCB_OUTPUT_LVDS:
+                       switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
+                                           VGA_SWITCHEROO_CAN_SWITCH_DDC);
+               /* fall-through */
+               default:
+                       if (!nv_encoder->i2c)
                                break;
-               } else
-               if (nv_encoder->i2c) {
+
+                       if (switcheroo_ddc)
+                               vga_switcheroo_lock_ddc(dev->pdev);
                        if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
-                               break;
+                               found = nv_encoder;
+                       if (switcheroo_ddc)
+                               vga_switcheroo_unlock_ddc(dev->pdev);
+
+                       break;
                }
+               if (found)
+                       break;
        }
 
-       /* eDP panel not detected, restore panel power GPIO to previous
-        * state to avoid confusing the SOR for other output types.
-        */
-       if (!nv_encoder && panel == 0)
-               nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
-
-       return nv_encoder;
+       return found;
 }
 
 static struct nouveau_encoder *
@@ -555,12 +541,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
                nv_connector->edid = NULL;
        }
 
-       /* Outputs are only polled while runtime active, so acquiring a
-        * runtime PM ref here is unnecessary (and would deadlock upon
-        * runtime suspend because it waits for polling to finish).
+       /* Outputs are only polled while runtime active, so resuming the
+        * device here is unnecessary (and would deadlock upon runtime suspend
+        * because it waits for polling to finish). We do, however, want to
+        * prevent the autosuspend timer from elapsing during this operation
+        * if possible.
         */
-       if (!drm_kms_helper_is_poll_worker()) {
-               ret = pm_runtime_get_sync(connector->dev->dev);
+       if (drm_kms_helper_is_poll_worker()) {
+               pm_runtime_get_noresume(dev->dev);
+       } else {
+               ret = pm_runtime_get_sync(dev->dev);
                if (ret < 0 && ret != -EACCES)
                        return conn_status;
        }
@@ -638,10 +628,8 @@ detect_analog:
 
  out:
 
-       if (!drm_kms_helper_is_poll_worker()) {
-               pm_runtime_mark_last_busy(connector->dev->dev);
-               pm_runtime_put_autosuspend(connector->dev->dev);
-       }
+       pm_runtime_mark_last_busy(dev->dev);
+       pm_runtime_put_autosuspend(dev->dev);
 
        return conn_status;
 }
@@ -1105,6 +1093,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
        const struct nvif_notify_conn_rep_v0 *rep = notify->data;
        const char *name = connector->name;
        struct nouveau_encoder *nv_encoder;
+       int ret;
+
+       ret = pm_runtime_get(drm->dev->dev);
+       if (ret == 0) {
+               /* We can't block here if there's a pending PM request
+                * running, as we'll deadlock nouveau_display_fini() when it
+                * calls nvif_put() on our nvif_notify struct. So, simply
+                * defer the hotplug event until the device finishes resuming.
+                */
+               NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
+                        name);
+               schedule_work(&drm->hpd_work);
+
+               pm_runtime_put_noidle(drm->dev->dev);
+               return NVIF_NOTIFY_KEEP;
+       } else if (ret != 1 && ret != -EACCES) {
+               NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
+                       name, ret);
+               return NVIF_NOTIFY_DROP;
+       }
 
        if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
                NV_DEBUG(drm, "service %s\n", name);
@@ -1122,6 +1130,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
                drm_helper_hpd_irq_event(connector->dev);
        }
 
+       pm_runtime_mark_last_busy(drm->dev->dev);
+       pm_runtime_put_autosuspend(drm->dev->dev);
        return NVIF_NOTIFY_KEEP;
 }
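
[Editor's note] nouveau_connector_hotplug() now takes an asynchronous runtime-PM reference: pm_runtime_get() returns 1 if the device is already active, 0 if a resume was merely queued (blocking here would deadlock against nouveau_display_fini(), so the event is re-queued as hpd_work), and an error otherwise. A sketch of that decision tree (sketch_handle_event is a placeholder):

#include <linux/pm_runtime.h>
#include <linux/workqueue.h>

static void sketch_handle_event(struct device *dev) { (void)dev; }

static int sketch_hotplug(struct device *dev, struct work_struct *deferred)
{
	int ret = pm_runtime_get(dev);	/* asynchronous get */

	if (ret == 0) {
		/* resume in flight: defer, don't block */
		schedule_work(deferred);
		pm_runtime_put_noidle(dev);
		return 0;
	}
	if (ret != 1 && ret != -EACCES)
		return ret;		/* drop the event */

	sketch_handle_event(dev);	/* device is usable */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}
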
 
index 139368b31916b0f5a680916f0bc261ece493ec8f..540c0cbbfcee41bf3fdae2fa339d71373e58a133 100644 (file)
@@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
 
 static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
        .fb_create = nouveau_user_framebuffer_create,
-       .output_poll_changed = drm_fb_helper_output_poll_changed,
+       .output_poll_changed = nouveau_fbcon_output_poll_changed,
 };
 
 
@@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work)
        pm_runtime_get_sync(drm->dev->dev);
 
        drm_helper_hpd_irq_event(drm->dev);
-       /* enable polling for external displays */
-       drm_kms_helper_poll_enable(drm->dev);
 
        pm_runtime_mark_last_busy(drm->dev->dev);
        pm_runtime_put_sync(drm->dev->dev);
@@ -379,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
 {
        struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
        struct acpi_bus_event *info = data;
+       int ret;
 
        if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
                if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
-                       /*
-                        * This may be the only indication we receive of a
-                        * connector hotplug on a runtime suspended GPU,
-                        * schedule hpd_work to check.
-                        */
-                       schedule_work(&drm->hpd_work);
+                       ret = pm_runtime_get(drm->dev->dev);
+                       if (ret == 1 || ret == -EACCES) {
+                               /* If the GPU is already awake, or in a state
+                                * where we can't wake it up, it can handle
+                                * its own hotplug events.
+                                */
+                               pm_runtime_put_autosuspend(drm->dev->dev);
+                       } else if (ret == 0) {
+                               /* This may be the only indication we receive
+                                * of a connector hotplug on a runtime
+                                * suspended GPU, schedule hpd_work to check.
+                                */
+                               NV_DEBUG(drm, "ACPI requested connector reprobe\n");
+                               schedule_work(&drm->hpd_work);
+                               pm_runtime_put_noidle(drm->dev->dev);
+                       } else {
+                               NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
+                                       ret);
+                       }
 
                        /* acpi-video should not generate keypresses for this */
                        return NOTIFY_BAD;
@@ -411,6 +423,11 @@ nouveau_display_init(struct drm_device *dev)
        if (ret)
                return ret;
 
+       /* enable connector detection and polling for connectors without HPD
+        * support
+        */
+       drm_kms_helper_poll_enable(dev);
+
        /* enable hotplug interrupts */
        drm_connector_list_iter_begin(dev, &conn_iter);
        nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
@@ -425,7 +442,7 @@ nouveau_display_init(struct drm_device *dev)
 }
 
 void
-nouveau_display_fini(struct drm_device *dev, bool suspend)
+nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
 {
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
@@ -450,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
        }
        drm_connector_list_iter_end(&conn_iter);
 
+       if (!runtime)
+               cancel_work_sync(&drm->hpd_work);
+
        drm_kms_helper_poll_disable(dev);
        disp->fini(dev);
 }
@@ -618,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
                        }
                }
 
-               nouveau_display_fini(dev, true);
+               nouveau_display_fini(dev, true, runtime);
                return 0;
        }
 
-       nouveau_display_fini(dev, true);
+       nouveau_display_fini(dev, true, runtime);
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_framebuffer *nouveau_fb;
index 54aa7c3fa42dddfa96b684d308def526da9c4d1d..ff92b54ce448467b290a0c61f59d3e259fed19df 100644 (file)
@@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev)
 int  nouveau_display_create(struct drm_device *dev);
 void nouveau_display_destroy(struct drm_device *dev);
 int  nouveau_display_init(struct drm_device *dev);
-void nouveau_display_fini(struct drm_device *dev, bool suspend);
+void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
 int  nouveau_display_suspend(struct drm_device *dev, bool runtime);
 void nouveau_display_resume(struct drm_device *dev, bool runtime);
 int  nouveau_display_vblank_enable(struct drm_device *, unsigned int);
index c7ec86d6c3c910fdb73a7774eb921a414c156363..74d2283f2c28e7bb06f97856f93b610d12522a04 100644 (file)
@@ -230,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
                mutex_unlock(&drm->master.lock);
        }
        if (ret) {
-               NV_ERROR(drm, "Client allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
                goto done;
        }
 
@@ -240,37 +240,37 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
                               }, sizeof(struct nv_device_v0),
                               &cli->device);
        if (ret) {
-               NV_ERROR(drm, "Device allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
                goto done;
        }
 
        ret = nvif_mclass(&cli->device.object, mmus);
        if (ret < 0) {
-               NV_ERROR(drm, "No supported MMU class\n");
+               NV_PRINTK(err, cli, "No supported MMU class\n");
                goto done;
        }
 
        ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
        if (ret) {
-               NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
                goto done;
        }
 
        ret = nvif_mclass(&cli->mmu.object, vmms);
        if (ret < 0) {
-               NV_ERROR(drm, "No supported VMM class\n");
+               NV_PRINTK(err, cli, "No supported VMM class\n");
                goto done;
        }
 
        ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
        if (ret) {
-               NV_ERROR(drm, "VMM allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
                goto done;
        }
 
        ret = nvif_mclass(&cli->mmu.object, mems);
        if (ret < 0) {
-               NV_ERROR(drm, "No supported MEM class\n");
+               NV_PRINTK(err, cli, "No supported MEM class\n");
                goto done;
        }
 
@@ -592,10 +592,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
                pm_runtime_allow(dev->dev);
                pm_runtime_mark_last_busy(dev->dev);
                pm_runtime_put(dev->dev);
-       } else {
-               /* enable polling for external displays */
-               drm_kms_helper_poll_enable(dev);
        }
+
        return 0;
 
 fail_dispinit:
@@ -629,7 +627,7 @@ nouveau_drm_unload(struct drm_device *dev)
        nouveau_debugfs_fini(drm);
 
        if (dev->mode_config.num_crtc)
-               nouveau_display_fini(dev, false);
+               nouveau_display_fini(dev, false, false);
        nouveau_display_destroy(dev);
 
        nouveau_bios_takedown(dev);
@@ -835,7 +833,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
                return -EBUSY;
        }
 
-       drm_kms_helper_poll_disable(drm_dev);
        nouveau_switcheroo_optimus_dsm();
        ret = nouveau_do_suspend(drm_dev, true);
        pci_save_state(pdev);
index 844498c4267cb691ecde39083740ca6d08a86390..0f64c0a1d4b30236243e6229afa19d990592f86c 100644 (file)
@@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
        console_unlock();
 
        if (state == FBINFO_STATE_RUNNING) {
+               nouveau_fbcon_hotplug_resume(drm->fbcon);
                pm_runtime_mark_last_busy(drm->dev->dev);
                pm_runtime_put_sync(drm->dev->dev);
        }
@@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
        schedule_work(&drm->fbcon_work);
 }
 
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nouveau_fbdev *fbcon = drm->fbcon;
+       int ret;
+
+       if (!fbcon)
+               return;
+
+       mutex_lock(&fbcon->hotplug_lock);
+
+       ret = pm_runtime_get(dev->dev);
+       if (ret == 1 || ret == -EACCES) {
+               drm_fb_helper_hotplug_event(&fbcon->helper);
+
+               pm_runtime_mark_last_busy(dev->dev);
+               pm_runtime_put_autosuspend(dev->dev);
+       } else if (ret == 0) {
+               /* If the GPU was already in the process of suspending before
+                * this event happened, then we can't block here as we'll
+                * deadlock the runtime pmops since they wait for us to
+                * finish. So, just defer this event until we runtime
+                * resume again. It will be handled by fbcon_work.
+                */
+               NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
+               fbcon->hotplug_waiting = true;
+               pm_runtime_put_noidle(drm->dev->dev);
+       } else {
+               DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
+                        ret);
+       }
+
+       mutex_unlock(&fbcon->hotplug_lock);
+}
+
+void
+nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
+{
+       struct nouveau_drm *drm;
+
+       if (!fbcon)
+               return;
+       drm = nouveau_drm(fbcon->helper.dev);
+
+       mutex_lock(&fbcon->hotplug_lock);
+       if (fbcon->hotplug_waiting) {
+               fbcon->hotplug_waiting = false;
+
+               NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
+               drm_fb_helper_hotplug_event(&fbcon->helper);
+       }
+       mutex_unlock(&fbcon->hotplug_lock);
+}
+
 int
 nouveau_fbcon_init(struct drm_device *dev)
 {
@@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev)
 
        drm->fbcon = fbcon;
        INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
+       mutex_init(&fbcon->hotplug_lock);
 
        drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
index a6f192ea3fa6758934f2a54054f10708d2c73f14..db9d52047ef8dfbfc77fa96fcdb250c16f18e951 100644 (file)
@@ -41,6 +41,9 @@ struct nouveau_fbdev {
        struct nvif_object gdi;
        struct nvif_object blit;
        struct nvif_object twod;
+
+       struct mutex hotplug_lock;
+       bool hotplug_waiting;
 };
 
 void nouveau_fbcon_restore(void);
@@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
 void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
 void nouveau_fbcon_accel_restore(struct drm_device *dev);
 
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
+void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon);
 extern int nouveau_nofbaccel;
 
 #endif /* __NV50_FBCON_H__ */
index 3da5a4305aa4847a32c0abf398a50e7a0dc90b7e..8f1ce4833230a1e693f0a76387022289fc1317d3 100644 (file)
@@ -46,12 +46,10 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
                pr_err("VGA switcheroo: switched nouveau on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                nouveau_pmops_resume(&pdev->dev);
-               drm_kms_helper_poll_enable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_err("VGA switcheroo: switched nouveau off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               drm_kms_helper_poll_disable(dev);
                nouveau_switcheroo_optimus_dsm();
                nouveau_pmops_suspend(&pdev->dev);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
index 32fa94a9773f558686cf8a698ac3de53305a6007..cbd33e87b799a70b6085707de74bafaefe047e24 100644 (file)
@@ -275,6 +275,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
        struct nvkm_outp *outp, *outt, *pair;
        struct nvkm_conn *conn;
        struct nvkm_head *head;
+       struct nvkm_ior *ior;
        struct nvbios_connE connE;
        struct dcb_output dcbE;
        u8  hpd = 0, ver, hdr;
@@ -399,6 +400,19 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
                        return ret;
        }
 
+       /* Enforce identity-mapped SOR assignment for panels, which have
+        * certain bits (i.e. backlight controls) wired to a specific SOR.
+        */
+       list_for_each_entry(outp, &disp->outp, head) {
+               if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
+                   outp->conn->info.type == DCB_CONNECTOR_eDP) {
+                       ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
+                       if (!WARN_ON(!ior))
+                               ior->identity = true;
+                       outp->identity = true;
+               }
+       }
+
        i = 0;
        list_for_each_entry(head, &disp->head, head)
                i = max(i, head->id + 1);
index 7c5bed29ffef164224825a3f9e0eaac7aa786239..5f301e632599b471c54291797b59c0d51e18be9a 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
+#include <subdev/gpio.h>
 #include <subdev/i2c.h>
 
 #include <nvif/event.h>
@@ -412,14 +413,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
 }
 
 static void
-nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
+nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
 {
        struct nvkm_dp *dp = nvkm_dp(outp);
 
-       /* Prevent link from being retrained if sink sends an IRQ. */
-       atomic_set(&dp->lt.done, 0);
-       ior->dp.nr = 0;
-
        /* Execute DisableLT script from DP Info Table. */
        nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
                init.outp = &dp->outp.info;
@@ -428,6 +425,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
        );
 }
 
+static void
+nvkm_dp_release(struct nvkm_outp *outp)
+{
+       struct nvkm_dp *dp = nvkm_dp(outp);
+
+       /* Prevent link from being retrained if sink sends an IRQ. */
+       atomic_set(&dp->lt.done, 0);
+       dp->outp.ior->dp.nr = 0;
+}
+
 static int
 nvkm_dp_acquire(struct nvkm_outp *outp)
 {
@@ -491,7 +498,7 @@ done:
        return ret;
 }
 
-static void
+static bool
 nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
 {
        struct nvkm_i2c_aux *aux = dp->aux;
@@ -505,7 +512,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
 
                if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
                                sizeof(dp->dpcd)))
-                       return;
+                       return true;
        }
 
        if (dp->present) {
@@ -515,6 +522,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
        }
 
        atomic_set(&dp->lt.done, 0);
+       return false;
 }
 
 static int
@@ -555,9 +563,38 @@ nvkm_dp_fini(struct nvkm_outp *outp)
 static void
 nvkm_dp_init(struct nvkm_outp *outp)
 {
+       struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
        struct nvkm_dp *dp = nvkm_dp(outp);
+
        nvkm_notify_put(&dp->outp.conn->hpd);
-       nvkm_dp_enable(dp, true);
+
+       /* eDP panels need powering on by us (if the VBIOS doesn't default it
+        * to on) before doing any AUX channel transactions.  LVDS panel power
+        * is handled by the SOR itself, and not required for LVDS DDC.
+        */
+       if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {
+               int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+               if (power == 0)
+                       nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+
+               /* We delay here unconditionally, even if already powered,
+                * because some laptop panels have a significant resume
+                * delay before they begin responding.
+                *
+                * This is likely a bit of a hack, but we have no better
+                * idea for handling this at the moment.
+                */
+               msleep(300);
+
+               /* If the eDP panel can't be detected, we need to restore
+                * the panel power GPIO to avoid breaking another output.
+                */
+               if (!nvkm_dp_enable(dp, true) && power == 0)
+                       nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
+       } else {
+               nvkm_dp_enable(dp, true);
+       }
+
        nvkm_notify_get(&dp->hpd);
 }
 
@@ -576,6 +613,7 @@ nvkm_dp_func = {
        .fini = nvkm_dp_fini,
        .acquire = nvkm_dp_acquire,
        .release = nvkm_dp_release,
+       .disable = nvkm_dp_disable,
 };
 
 static int
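
[Editor's note] nvkm_dp_init() now owns the eDP panel-power dance that used to live in nouveau_connector_ddc_detect(): power the panel up if it was off, wait for slow panels, and, if the panel still cannot be detected, restore the GPIO so other outputs are not confused by a panel left powered. The restore-on-failure structure, abstracted away from the nvkm types (all callbacks hypothetical):

#include <linux/delay.h>

/* Sketch: undo our power-up if the probe it enabled still fails. */
static bool sketch_probe_panel(void *ctx,
			       bool (*probe)(void *ctx),
			       int (*get_power)(void *ctx),
			       void (*set_power)(void *ctx, int on))
{
	int was_on = get_power(ctx);

	if (!was_on)
		set_power(ctx, 1);

	msleep(300);	/* some panels respond slowly after power-up */

	if (probe(ctx))
		return true;

	if (!was_on)
		set_power(ctx, 0);	/* restore the previous state */
	return false;
}
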
index e0b4e0c5704ee8560a5584832da069ae59f699e1..19911211a12aa55a200ac75bc2f88958598d96bd 100644 (file)
@@ -16,6 +16,7 @@ struct nvkm_ior {
        char name[8];
 
        struct list_head head;
+       bool identity;
 
        struct nvkm_ior_state {
                struct nvkm_outp *outp;
index f89c7b977aa5d269b3a91a0026610eb036c0463e..def005dd5fdaae020fd3ef7404e9f58b5369e082 100644 (file)
@@ -501,11 +501,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
        nv50_disp_super_ied_off(head, ior, 2);
 
        /* If we're shutting down the OR's only active head, execute
-        * the output path's release function.
+        * the output path's disable function.
         */
        if (ior->arm.head == (1 << head->id)) {
-               if ((outp = ior->arm.outp) && outp->func->release)
-                       outp->func->release(outp, ior);
+               if ((outp = ior->arm.outp) && outp->func->disable)
+                       outp->func->disable(outp, ior);
        }
 }
 
index be9e7f8c3b2392fa96643f91391e606196ad7a67..c62030c96fba0932decd996fd46f3f9a308864d9 100644 (file)
@@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user)
        if (ior) {
                outp->acquired &= ~user;
                if (!outp->acquired) {
+                       if (outp->func->release && outp->ior)
+                               outp->func->release(outp);
                        outp->ior->asy.outp = NULL;
                        outp->ior = NULL;
                }
@@ -127,17 +129,26 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
        if (proto == UNKNOWN)
                return -ENOSYS;
 
+       /* Deal with panels requiring identity-mapped SOR assignment. */
+       if (outp->identity) {
+               ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1);
+               if (WARN_ON(!ior))
+                       return -ENOSPC;
+               return nvkm_outp_acquire_ior(outp, user, ior);
+       }
+
        /* First preference is to reuse the OR that is currently armed
         * on HW, if any, in order to prevent unnecessary switching.
         */
        list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->asy.outp && ior->arm.outp == outp)
+               if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
                        return nvkm_outp_acquire_ior(outp, user, ior);
        }
 
        /* Failing that, a completely unused OR is the next best thing. */
        list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->asy.outp && ior->type == type && !ior->arm.outp &&
+               if (!ior->identity &&
+                   !ior->asy.outp && ior->type == type && !ior->arm.outp &&
                    (ior->func->route.set || ior->id == __ffs(outp->info.or)))
                        return nvkm_outp_acquire_ior(outp, user, ior);
        }
@@ -146,7 +157,7 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
         * but will be released during the next modeset.
         */
        list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->asy.outp && ior->type == type &&
+               if (!ior->identity && !ior->asy.outp && ior->type == type &&
                    (ior->func->route.set || ior->id == __ffs(outp->info.or)))
                        return nvkm_outp_acquire_ior(outp, user, ior);
        }
@@ -245,7 +256,6 @@ nvkm_outp_ctor(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
        outp->index = index;
        outp->info = *dcbE;
        outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
-       outp->or = ffs(outp->info.or) - 1;
 
        OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
                       "edid %x bus %d head %x",
index ea84d7d5741ad5a5e771483a225f6fea8a663223..6c8aa5cfed9d839b561a473e0883df827d16eb3c 100644 (file)
@@ -13,10 +13,10 @@ struct nvkm_outp {
        struct dcb_output info;
 
        struct nvkm_i2c_bus *i2c;
-       int or;
 
        struct list_head head;
        struct nvkm_conn *conn;
+       bool identity;
 
        /* Assembly state. */
 #define NVKM_OUTP_PRIV 1
@@ -41,7 +41,8 @@ struct nvkm_outp_func {
        void (*init)(struct nvkm_outp *);
        void (*fini)(struct nvkm_outp *);
        int (*acquire)(struct nvkm_outp *);
-       void (*release)(struct nvkm_outp *, struct nvkm_ior *);
+       void (*release)(struct nvkm_outp *);
+       void (*disable)(struct nvkm_outp *, struct nvkm_ior *);
 };
 
 #define OUTP_MSG(o,l,f,a...) do {                                              \
index b80618e354919b11b00b6c88b12dbc648ba4680f..17235e940ca9e354226836b4bb9e8582a48e1f90 100644 (file)
@@ -86,10 +86,8 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
        struct nvkm_bios *bios = subdev->device->bios;
        struct nvbios_pmuR pmu;
 
-       if (!nvbios_pmuRm(bios, type, &pmu)) {
-               nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
+       if (!nvbios_pmuRm(bios, type, &pmu))
                return -EINVAL;
-       }
 
        if (!post)
                return 0;
@@ -124,29 +122,30 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
                return -EINVAL;
        }
 
+       /* Upload DEVINIT application from VBIOS onto PMU. */
        ret = pmu_load(init, 0x04, post, &exec, &args);
-       if (ret)
+       if (ret) {
+               nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n");
                return ret;
+       }
 
-       /* upload first chunk of init data */
+       /* Upload tables required by opcodes in boot scripts. */
        if (post) {
-               // devinit tables
                u32 pmu = pmu_args(init, args + 0x08, 0x08);
                u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
                u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
                pmu_data(init, pmu, img, len);
        }
 
-       /* upload second chunk of init data */
+       /* Upload boot scripts. */
        if (post) {
-               // devinit boot scripts
                u32 pmu = pmu_args(init, args + 0x08, 0x10);
                u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
                u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
                pmu_data(init, pmu, img, len);
        }
 
-       /* execute init tables */
+       /* Execute DEVINIT. */
        if (post) {
                nvkm_wr32(device, 0x10a040, 0x00005000);
                pmu_exec(init, exec);
@@ -157,8 +156,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
                        return -ETIMEDOUT;
        }
 
-       /* load and execute some other ucode image (bios therm?) */
-       return pmu_load(init, 0x01, post, NULL, NULL);
+       /* Optional: Execute PRE_OS application on PMU, which should at
+        * least take care of fans until a full PMU has been loaded.
+        */
+       pmu_load(init, 0x01, post, NULL, NULL);
+       return 0;
 }
 
 static const struct nvkm_devinit_func
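
The rewritten tail of gm200_devinit_post() splits the PMU applications into a required and an optional stage: a missing DEVINIT image still fails the post with a diagnostic, but a missing PRE_OS image (fan control until a full PMU is loaded) no longer aborts device init. A small runnable sketch of that required-versus-optional staging, with a hypothetical stub standing in for pmu_load():

    #include <stdio.h>

    /* Hypothetical stand-in for pmu_load(): pretend only the DEVINIT
     * application (type 0x04) exists in this VBIOS.
     */
    static int load_app(int type)
    {
        return type == 0x04 ? 0 : -2; /* -ENOENT otherwise */
    }

    static int devinit_post(void)
    {
        int ret = load_app(0x04); /* DEVINIT: required, failure is fatal */
        if (ret)
            return ret;

        (void)load_app(0x01); /* PRE_OS: best effort, e.g. fan control */
        return 0;             /* its absence no longer fails the post */
    }

    int main(void)
    {
        printf("post -> %d\n", devinit_post()); /* post -> 0 */
        return 0;
    }
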
index de269eb482dd03cc7c2fd8b5a707d1566b1360e5..7459def78d504f006a2f7f0b625ba3372238b7ec 100644 (file)
@@ -1423,7 +1423,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
 void
 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
-       if (vmm->func->part && inst) {
+       if (inst && vmm->func->part) {
                mutex_lock(&vmm->mutex);
                vmm->func->part(vmm, inst);
                mutex_unlock(&vmm->mutex);
index 25b7bd56ae1156aa209980f65a00d2c5a6efebf2..1cb41992aaa1f650f89cbf5ace72bd46786d7abc 100644 (file)
@@ -335,7 +335,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                struct hid_field *field, struct hid_usage *usage,
                unsigned long **bit, int *max)
 {
-       if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
+       if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+                       usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
                /* The fn key on Apple USB keyboards */
                set_bit(EV_REP, hi->input->evbit);
                hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -472,6 +473,12 @@ static const struct hid_device_id apple_devices[] = {
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
                .driver_data = APPLE_HAS_FN },
+       { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+               .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
                .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
index 3da354af7a0aac6ca129124eb6e511ea32c9535b..44564f61e9cc3c85250e2e6d1e5ff47ed95dcd4d 100644 (file)
@@ -1000,7 +1000,7 @@ int hid_open_report(struct hid_device *device)
        parser = vzalloc(sizeof(struct hid_parser));
        if (!parser) {
                ret = -ENOMEM;
-               goto err;
+               goto alloc_err;
        }
 
        parser->device = device;
@@ -1039,6 +1039,7 @@ int hid_open_report(struct hid_device *device)
                                hid_err(device, "unbalanced delimiter at end of report description\n");
                                goto err;
                        }
+                       kfree(parser->collection_stack);
                        vfree(parser);
                        device->status |= HID_STAT_PARSED;
                        return 0;
@@ -1047,6 +1048,8 @@ int hid_open_report(struct hid_device *device)
 
        hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
 err:
+       kfree(parser->collection_stack);
+alloc_err:
        vfree(parser);
        hid_close_report(device);
        return ret;
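
The hid_open_report() change fixes a leak of parser->collection_stack: the parse-failure path now frees it before vfree(), while the new alloc_err label is entered only when the parser itself could not be allocated, so there is nothing inside it to free. A self-contained sketch of the two-label unwind, with userspace allocators standing in for vzalloc()/kcalloc():

    #include <stdbool.h>
    #include <stdlib.h>

    struct parser {
        int *collection_stack;
    };

    static int open_report(bool fail_parse)
    {
        int ret;
        struct parser *parser = calloc(1, sizeof(*parser));

        if (!parser) {
            ret = -12;      /* -ENOMEM */
            goto alloc_err; /* nothing inside parser to free yet */
        }

        /* The parse loop may grow a collection stack on demand. */
        parser->collection_stack = calloc(4, sizeof(int));
        if (fail_parse) {
            ret = -22; /* -EINVAL */
            goto err;
        }

        free(parser->collection_stack); /* the success path frees it too */
        free(parser);
        return 0;
    err:
        free(parser->collection_stack); /* leaked before the fix */
    alloc_err:
        free(parser);
        return ret;
    }

    int main(void)
    {
        return open_report(true) == -22 ? 0 : 1;
    }
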
index 79bdf0c7e3516bc0d5055309cac268145507f9d2..5146ee029db4bd6c35bb3a15cdd25b6caf54c64e 100644 (file)
@@ -88,6 +88,7 @@
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD  0x3101
 
 #define USB_VENDOR_ID_APPLE            0x05ac
+#define BT_VENDOR_ID_APPLE             0x004c
 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE        0x0304
 #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
 #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD      0x030e
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO   0x0256
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS   0x0257
 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI   0x0267
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI   0x026c
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI   0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO    0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS    0x0292
 #define I2C_VENDOR_ID_HANTICK          0x0911
 #define I2C_PRODUCT_ID_HANTICK_5288    0x5288
 
-#define I2C_VENDOR_ID_RAYD             0x2386
-#define I2C_PRODUCT_ID_RAYD_3118       0x3118
-
 #define USB_VENDOR_ID_HANWANG          0x0b57
 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST     0x5000
 #define USB_DEVICE_ID_HANWANG_TABLET_LAST      0x8fff
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000    0x0621
 #define USB_DEVICE_ID_SAITEK_RAT7_OLD  0x0ccb
+#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION    0x0ccd
 #define USB_DEVICE_ID_SAITEK_RAT7      0x0cd7
 #define USB_DEVICE_ID_SAITEK_RAT9      0x0cfa
 #define USB_DEVICE_ID_SAITEK_MMO7      0x0cd0
index 4e94ea3e280a3c66a00cdf0709569a131bf4ee49..a481eaf39e887bad41d89bf251c0dc4a8f2a2096 100644 (file)
@@ -1582,6 +1582,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
        input_dev->dev.parent = &hid->dev;
 
        hidinput->input = input_dev;
+       hidinput->application = application;
        list_add_tail(&hidinput->list, &hid->inputs);
 
        INIT_LIST_HEAD(&hidinput->reports);
@@ -1677,8 +1678,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
        struct hid_input *hidinput;
 
        list_for_each_entry(hidinput, &hid->inputs, list) {
-               if (hidinput->report &&
-                   hidinput->report->application == report->application)
+               if (hidinput->application == report->application)
                        return hidinput;
        }
 
@@ -1815,6 +1815,7 @@ void hidinput_disconnect(struct hid_device *hid)
                        input_unregister_device(hidinput->input);
                else
                        input_free_device(hidinput->input);
+               kfree(hidinput->name);
                kfree(hidinput);
        }
 
index 40fbb7c52723378eaf32d351f9541dd7ef0d61e3..da954f3f4da7fcf5071522eec3a6b5b62dd46eed 100644 (file)
@@ -1375,7 +1375,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
                                     struct hid_usage *usage,
                                     enum latency_mode latency,
                                     bool surface_switch,
-                                    bool button_switch)
+                                    bool button_switch,
+                                    bool *inputmode_found)
 {
        struct mt_device *td = hid_get_drvdata(hdev);
        struct mt_class *cls = &td->mtclass;
@@ -1387,6 +1388,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
 
        switch (usage->hid) {
        case HID_DG_INPUTMODE:
+               /*
+                * Some Elan panels wrongly declare two input mode features,
+                * and silently ignore the value we set in the second one.
+                * Skip the second feature and hope for the best.
+                */
+               if (*inputmode_found)
+                       return false;
+
                if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
                        report_len = hid_report_len(report);
                        buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -1402,6 +1411,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
                }
 
                field->value[index] = td->inputmode_value;
+               *inputmode_found = true;
                return true;
 
        case HID_DG_CONTACTMAX:
@@ -1439,6 +1449,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
        struct hid_usage *usage;
        int i, j;
        bool update_report;
+       bool inputmode_found = false;
 
        rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
        list_for_each_entry(rep, &rep_enum->report_list, list) {
@@ -1457,7 +1468,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
                                                             usage,
                                                             latency,
                                                             surface_switch,
-                                                            button_switch))
+                                                            button_switch,
+                                                            &inputmode_found))
                                        update_report = true;
                        }
                }
@@ -1685,6 +1697,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
         */
        hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
 
+       if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
+               hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+
        timer_setup(&td->release_timer, mt_expired_timeout, 0);
 
        ret = hid_parse(hdev);
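
mt_set_modes() visits every field of every feature report, so a panel that wrongly declares HID_DG_INPUTMODE twice would have the value written to both, and some Elan panels then ignore the write that matters. Threading inputmode_found through the walk makes the first declaration win. A reduced sketch of that first-hit-wins deduplication (the second usage value is an arbitrary placeholder):

    #include <stdbool.h>
    #include <stdio.h>

    #define HID_DG_INPUTMODE 0x000d0052 /* digitizer page, Input Mode */

    /* Apply a feature usage once; later duplicates are skipped. */
    static bool apply_usage(unsigned int usage, bool *inputmode_found)
    {
        if (usage == HID_DG_INPUTMODE) {
            if (*inputmode_found)
                return false; /* duplicate declaration: skip it */
            *inputmode_found = true;
            return true;      /* first declaration: write the value */
        }
        return false;
    }

    int main(void)
    {
        /* 0x000d00f0 is an arbitrary placeholder usage */
        unsigned int usages[] = { HID_DG_INPUTMODE, 0x000d00f0,
                                  HID_DG_INPUTMODE };
        bool inputmode_found = false;

        for (int i = 0; i < 3; i++)
            printf("usage %d applied: %d\n", i,
                   apply_usage(usages[i], &inputmode_found));
        /* only the first HID_DG_INPUTMODE reports "applied: 1" */
        return 0;
    }
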
index 39e642686ff0466322cdacf7ea410fd6306932e5..683861f324e3cd6842f30016f4911b4b088d8a2a 100644 (file)
@@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
+               .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
index 50af72baa5ca9a6dbf37505cda39fbd4290668c4..2b63487057c25b7fb931b8823db31d15f69667be 100644 (file)
@@ -579,6 +579,28 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
 }
 EXPORT_SYMBOL_GPL(sensor_hub_device_close);
 
+static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+               unsigned int *rsize)
+{
+       /*
+        * Check whether the report descriptor of the Thinkpad Helix 2 has
+        * a logical minimum for the magnetic flux axes greater than the
+        * maximum.
+        */
+       if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&
+               *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&
+               rdesc[915] == 0x81 && rdesc[916] == 0x08 &&
+               rdesc[917] == 0x00 && rdesc[918] == 0x27 &&
+               rdesc[921] == 0x07 && rdesc[922] == 0x00) {
+               /* Sets negative logical minimum for mag x, y and z */
+               rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;
+               rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;
+               rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;
+               rdesc[917] = rdesc[938] = rdesc[959] = 0xff;
+       }
+
+       return rdesc;
+}
+
 static int sensor_hub_probe(struct hid_device *hdev,
                                const struct hid_device_id *id)
 {
@@ -743,6 +765,7 @@ static struct hid_driver sensor_hub_driver = {
        .probe = sensor_hub_probe,
        .remove = sensor_hub_remove,
        .raw_event = sensor_hub_raw_event,
+       .report_fixup = sensor_hub_report_fixup,
 #ifdef CONFIG_PM
        .suspend = sensor_hub_suspend,
        .resume = sensor_hub_resume,
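
The guard bytes identify a long-form Logical Minimum item (prefix 0x17, four-byte operand) for the magnetic-flux fields, and the fixup rewrites its little-endian operand at offsets 914-917 (and the matching y/z items at 935+ and 956+). Decoding the before/after payloads shows the intent, assuming the offsets are as checked above:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the 4-byte little-endian operand of a HID long-form
     * "Logical Minimum" item (prefix 0x17), as patched by the fixup.
     */
    static int32_t hid_item_s32(const uint8_t *p)
    {
        return (int32_t)(p[0] | p[1] << 8 | (uint32_t)p[2] << 16 |
                         (uint32_t)p[3] << 24);
    }

    int main(void)
    {
        const uint8_t before[] = { 0x40, 0x81, 0x08, 0x00 };
        const uint8_t after[]  = { 0xc0, 0x7e, 0xf7, 0xff };

        printf("logical minimum before: %d\n", hid_item_s32(before));
        printf("logical minimum after:  %d\n", hid_item_s32(after));
        /* prints 557376 then -557376: the bogus positive minimum becomes
         * the matching negative bound for the mag x/y/z axes. */
        return 0;
    }
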
index 2ce194a84868e0fe5e133e241a0de368dfe3becf..f3076659361abcb0567804c298af25e278de8fa9 100644 (file)
@@ -170,8 +170,6 @@ static const struct i2c_hid_quirks {
                I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
        { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
                I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
-       { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
-               I2C_HID_QUIRK_RESEND_REPORT_DESCR },
        { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
                I2C_HID_QUIRK_RESEND_REPORT_DESCR },
        { 0, 0 }
@@ -1235,11 +1233,16 @@ static int i2c_hid_resume(struct device *dev)
        pm_runtime_enable(dev);
 
        enable_irq(client->irq);
-       ret = i2c_hid_hwreset(client);
+
+       /* Instead of resetting the device, simply power it on. This
+        * solves the "incomplete reports" issue on Raydium devices
+        * 2386:3118 and 2386:4B33.
+        */
+       ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
        if (ret)
                return ret;
 
-       /* RAYDIUM device (2386:3118) need to re-send report descr cmd
+       /* Some devices need to re-send the report descriptor command
         * after resume; after this they are back to normal,
         * otherwise they issue too many incomplete reports.
         */
index 97869b7410ebb83ec78c31f2c9a1b65b6597a613..da133716bed05b63dadef22e072f05b4e7b0f5da 100644 (file)
@@ -29,6 +29,7 @@
 #define CNL_Ax_DEVICE_ID       0x9DFC
 #define GLK_Ax_DEVICE_ID       0x31A2
 #define CNL_H_DEVICE_ID                0xA37C
+#define SPT_H_DEVICE_ID                0xA135
 
 #define        REVISION_ID_CHT_A0      0x6
 #define        REVISION_ID_CHT_Ax_SI   0x0
index 050f9872f5c0dc7d465c71f505dcb995b53c4321..a1125a5c7965a255f8b5480f47cc8a53b534b76f 100644 (file)
@@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
index b1b548a21f919b31f8e5d7f670352ee6f7d25f56..c71cc857b649ddc23f3289e23ec43e898901fa1a 100644 (file)
@@ -1291,6 +1291,9 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
        if (!attribute->show)
                return -EIO;
 
+       if (chan->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
+
        return attribute->show(chan, buf);
 }
 
index 7589f2ad1dae7de9fe04b5448b233fdda786db70..631360b14ca71c398e67a9425dff363de1eb255a 100644 (file)
@@ -187,12 +187,15 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
 
 int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
 {
-       u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask;
+       u16 fifo_watermark = ~0, cur_watermark, fifo_th_mask;
        struct st_lsm6dsx_hw *hw = sensor->hw;
        struct st_lsm6dsx_sensor *cur_sensor;
        int i, err, data;
        __le16 wdata;
 
+       if (!hw->sip)
+               return 0;
+
        for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
                cur_sensor = iio_priv(hw->iio_devs[i]);
 
@@ -203,14 +206,10 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
                                                       : cur_sensor->watermark;
 
                fifo_watermark = min_t(u16, fifo_watermark, cur_watermark);
-               sip += cur_sensor->sip;
        }
 
-       if (!sip)
-               return 0;
-
-       fifo_watermark = max_t(u16, fifo_watermark, sip);
-       fifo_watermark = (fifo_watermark / sip) * sip;
+       fifo_watermark = max_t(u16, fifo_watermark, hw->sip);
+       fifo_watermark = (fifo_watermark / hw->sip) * hw->sip;
        fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
 
        err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1,
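
With the per-call sip accumulation gone, the watermark is computed from the device-wide hw->sip in three steps: clamp up to at least one full sample pattern, round down to a whole number of patterns, then scale by the FIFO threshold word length. A runnable check of the arithmetic with hypothetical numbers:

    #include <stdio.h>

    static unsigned int fifo_watermark(unsigned int wm, unsigned int sip,
                                       unsigned int th_wl)
    {
        if (wm < sip)          /* max_t(u16, fifo_watermark, hw->sip) */
            wm = sip;
        wm = (wm / sip) * sip; /* round down to a whole pattern */
        return wm * th_wl;     /* scale by threshold word length */
    }

    int main(void)
    {
        /* e.g. 3 samples per pattern, threshold counted in 2-byte words */
        printf("%u\n", fifo_watermark(8, 3, 2)); /* (8/3)*3 = 6 -> 12 */
        printf("%u\n", fifo_watermark(2, 3, 2)); /* clamped to 3 -> 6 */
        return 0;
    }
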
index 54e383231d1edef07abf150793656fa0cb9e92e5..c31b9633f32d9b8bcac8d05d3822230d7725f5dc 100644 (file)
@@ -258,7 +258,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi)
 static const struct spi_device_id maxim_thermocouple_id[] = {
        {"max6675", MAX6675},
        {"max31855", MAX31855},
-       {"max31856", MAX31855},
        {},
 };
 MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
index f72677291b692511ce7abcf1e9e8da9bf8f06326..a36c94930c31de8a03652cbf07844c10bcc09877 100644 (file)
@@ -724,6 +724,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
        dgid = (union ib_gid *) &addr->sib_addr;
        pkey = ntohs(addr->sib_pkey);
 
+       mutex_lock(&lock);
        list_for_each_entry(cur_dev, &dev_list, list) {
                for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
                        if (!rdma_cap_af_ib(cur_dev->device, p))
@@ -750,18 +751,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
                                        cma_dev = cur_dev;
                                        sgid = gid;
                                        id_priv->id.port_num = p;
+                                       goto found;
                                }
                        }
                }
        }
-
-       if (!cma_dev)
-               return -ENODEV;
+       mutex_unlock(&lock);
+       return -ENODEV;
 
 found:
        cma_attach_to_dev(id_priv, cma_dev);
-       addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
-       memcpy(&addr->sib_addr, &sgid, sizeof sgid);
+       mutex_unlock(&lock);
+       addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+       memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
        cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
        return 0;
 }
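
The fix takes the mutex before walking dev_list and keeps holding it across cma_attach_to_dev(), with goto found replacing the old post-loop !cma_dev test; each exit path now drops the lock exactly once. A reduced model of that search-then-attach-under-lock shape, using a POSIX mutex in place of the kernel one:

    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static const int dev_list[] = { 3, 7, 9 };

    /* Search under the lock and keep holding it across the attach, so the
     * chosen device cannot disappear between "found" and "attached".
     */
    static int resolve_and_attach(int wanted)
    {
        pthread_mutex_lock(&lock);
        for (size_t i = 0; i < sizeof(dev_list) / sizeof(*dev_list); i++) {
            if (dev_list[i] == wanted)
                goto found;
        }
        pthread_mutex_unlock(&lock);
        return -19; /* -ENODEV: one unlock on the not-found path */
    found:
        /* cma_attach_to_dev() would run here, still under the lock */
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", resolve_and_attach(7), resolve_and_attach(5));
        return 0;
    }
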
index 6eb64c6f08028b6c4a92407435bb2339128fe48b..c4118bcd5103565e3b20b6a970e67322061d195f 100644 (file)
@@ -882,6 +882,8 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
                WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
                if (!uverbs_destroy_uobject(obj, reason))
                        ret = 0;
+               else
+                       atomic_set(&obj->usecnt, 0);
        }
        return ret;
 }
index ec8fb289621fb7590dd3e3f4000967fa2b6c9aae..5f437d1570fb02d516b1ee60f0a4cee42053da50 100644 (file)
@@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut);
 static DEFINE_IDR(ctx_idr);
 static DEFINE_IDR(multicast_idr);
 
+static const struct file_operations ucma_fops;
+
 static inline struct ucma_context *_ucma_find_context(int id,
                                                      struct ucma_file *file)
 {
@@ -1581,6 +1583,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
        f = fdget(cmd.fd);
        if (!f.file)
                return -ENOENT;
+       if (f.file->f_op != &ucma_fops) {
+               ret = -EINVAL;
+               goto file_put;
+       }
 
        /* Validate current fd and prevent destruction of id. */
        ctx = ucma_get_ctx(f.file->private_data, cmd.id);
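
The new f_op comparison is the usual way to verify that a user-supplied fd really refers to this driver's character device: every file opened through ucma shares one file_operations table, so a pointer compare rejects foreign files before private_data is interpreted as a struct ucma_file. A reduced sketch of the check:

    #include <stdio.h>

    struct file_operations { int dummy; };
    struct file {
        const struct file_operations *f_op;
        void *private_data;
    };

    static const struct file_operations ucma_fops  = { 0 };
    static const struct file_operations other_fops = { 0 };

    /* Only trust private_data if the file was opened through our fops. */
    static int migrate_id(struct file *f)
    {
        if (f->f_op != &ucma_fops)
            return -22; /* -EINVAL: fd belongs to something else */
        /* safe: private_data really is one of ours here */
        return 0;
    }

    int main(void)
    {
        struct file ours  = { &ucma_fops, NULL };
        struct file alien = { &other_fops, NULL };

        printf("%d %d\n", migrate_id(&ours), migrate_id(&alien));
        return 0;
    }
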
index 823beca448e10937d290b9ebaf0bf3aed444f05b..6d974e2363df249c4291a3331d9d16f0a14232a6 100644 (file)
@@ -1050,7 +1050,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
        uverbs_dev->num_comp_vectors = device->num_comp_vectors;
 
        if (ib_uverbs_create_uapi(device, uverbs_dev))
-               goto err;
+               goto err_uapi;
 
        cdev_init(&uverbs_dev->cdev, NULL);
        uverbs_dev->cdev.owner = THIS_MODULE;
@@ -1077,11 +1077,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
 
 err_class:
        device_destroy(uverbs_class, uverbs_dev->cdev.dev);
-
 err_cdev:
        cdev_del(&uverbs_dev->cdev);
+err_uapi:
        clear_bit(devnum, dev_map);
-
 err:
        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
index bbfb86eb2d24204565c78187faa6eb23984f24bf..bc2b9e03843903750aba02c0994430cd024cf4e3 100644 (file)
@@ -833,6 +833,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
                                "Failed to destroy Shadow QP");
                        return rc;
                }
+               bnxt_qplib_free_qp_res(&rdev->qplib_res,
+                                      &rdev->qp1_sqp->qplib_qp);
                mutex_lock(&rdev->qp_lock);
                list_del(&rdev->qp1_sqp->list);
                atomic_dec(&rdev->qp_count);
index e426b990c1dd5bf3fa08966f41ccd08e3f3c4e44..6ad0d46ab879a6a1bf6d231e22b6e766b796efd2 100644 (file)
@@ -196,7 +196,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
                                       struct bnxt_qplib_qp *qp)
 {
        struct bnxt_qplib_q *rq = &qp->rq;
-       struct bnxt_qplib_q *sq = &qp->rq;
+       struct bnxt_qplib_q *sq = &qp->sq;
        int rc = 0;
 
        if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
index b3203afa3b1de705c74da5aaec9ca55a3172b0d2..347fe18b1a41c0990c753aa29f2efaaea0cd119b 100644 (file)
@@ -1685,6 +1685,12 @@ static void flush_qp(struct c4iw_qp *qhp)
        schp = to_c4iw_cq(qhp->ibqp.send_cq);
 
        if (qhp->ibqp.uobject) {
+
+               /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+               if (qhp->wq.flushed)
+                       return;
+
+               qhp->wq.flushed = 1;
                t4_set_wq_in_error(&qhp->wq, 0);
                t4_set_cq_in_error(&rchp->cq);
                spin_lock_irqsave(&rchp->comp_handler_lock, flag);
index eec83757d55f94331936518804672a72b8e29898..6c967dde58e702c228835a84c3b1ea19abc1ea64 100644 (file)
@@ -893,14 +893,11 @@ static int trigger_sbr(struct hfi1_devdata *dd)
                }
 
        /*
-        * A secondary bus reset (SBR) issues a hot reset to our device.
-        * The following routine does a 1s wait after the reset is dropped
-        * per PCI Trhfa (recovery time).  PCIe 3.0 section 6.6.1 -
-        * Conventional Reset, paragraph 3, line 35 also says that a 1s
-        * delay after a reset is required.  Per spec requirements,
-        * the link is either working or not after that point.
+        * This is an end-around to do an SBR during probe time. A new API
+        * needs to be implemented to provide a cleaner interface, but this
+        * fixes the current brokenness.
         */
-       return pci_reset_bus(dev);
+       return pci_bridge_secondary_bus_reset(dev->bus->self);
 }
 
 /*
index ca0f1ee26091b38200b2447ccfe92324a3c0bd32..0bbeaaae47e07d2e99529925bb6236ab98be377b 100644 (file)
@@ -517,9 +517,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->page_size_cap       = dev->dev->caps.page_size_cap;
        props->max_qp              = dev->dev->quotas.qp;
        props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
-       props->max_send_sge        = dev->dev->caps.max_sq_sg;
-       props->max_recv_sge        = dev->dev->caps.max_rq_sg;
-       props->max_sge_rd          = MLX4_MAX_SGE_RD;
+       props->max_send_sge =
+               min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+       props->max_recv_sge =
+               min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+       props->max_sge_rd = MLX4_MAX_SGE_RD;
        props->max_cq              = dev->dev->quotas.cq;
        props->max_cqe             = dev->dev->caps.max_cqes;
        props->max_mr              = dev->dev->quotas.mpt;
index ea01b8dd2be606206193408ca7bf9c42e4ea4e39..3d5424f335cb06e1dcaad16a4f4c3c7910d3295e 100644 (file)
@@ -1027,12 +1027,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
 
        skb_queue_head_init(&skqueue);
 
+       netif_tx_lock_bh(p->dev);
        spin_lock_irq(&priv->lock);
        set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
        if (p->neigh)
                while ((skb = __skb_dequeue(&p->neigh->queue)))
                        __skb_queue_tail(&skqueue, skb);
        spin_unlock_irq(&priv->lock);
+       netif_tx_unlock_bh(p->dev);
 
        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = p->dev;
index f266c81f396fe7a82032827c92c95896f168deba..0481223b1deb826af43fa8f0da0b81e5b6654009 100644 (file)
@@ -332,7 +332,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
        int err;
 
        desc->tfm = essiv->hash_tfm;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
 
        err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
        shash_desc_zero(desc);
@@ -606,7 +606,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
        int i, r;
 
        desc->tfm = lmk->hash_tfm;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
 
        r = crypto_shash_init(desc);
        if (r)
@@ -768,7 +768,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
 
        /* calculate crc32 for every 32bit part and xor it */
        desc->tfm = tcw->crc32_tfm;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
        for (i = 0; i < 4; i++) {
                r = crypto_shash_init(desc);
                if (r)
@@ -1251,7 +1251,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
         * requests if driver request queue is full.
         */
        skcipher_request_set_callback(ctx->r.req,
-           CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+           CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
 }
 
@@ -1268,7 +1268,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
         * requests if driver request queue is full.
         */
        aead_request_set_callback(ctx->r.req_aead,
-           CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+           CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
 }
 
index 37887859946631cb40c42256f0ba3d4d5a471132..89ccb64342de7a4fa8e03d528f66ab9b726e0539 100644 (file)
@@ -532,7 +532,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
        unsigned j, size;
 
        desc->tfm = ic->journal_mac;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
 
        r = crypto_shash_init(desc);
        if (unlikely(r)) {
@@ -676,7 +676,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
 static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
 {
        int r;
-       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      complete_journal_encrypt, comp);
        if (likely(encrypt))
                r = crypto_skcipher_encrypt(req);
index cae689de75fd5a719c406f1093db076c201fcdef..5ba067fa0c729bc89b7789bd35648b1189003b27 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010-2011 Neil Brown
- * Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -29,9 +29,6 @@
  */
 #define        MIN_RAID456_JOURNAL_SPACE (4*2048)
 
-/* Global list of all raid sets */
-static LIST_HEAD(raid_sets);
-
 static bool devices_handle_discard_safely = false;
 
 /*
@@ -227,7 +224,6 @@ struct rs_layout {
 
 struct raid_set {
        struct dm_target *ti;
-       struct list_head list;
 
        uint32_t stripe_cache_entries;
        unsigned long ctr_flags;
@@ -273,19 +269,6 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
        mddev->new_chunk_sectors = l->new_chunk_sectors;
 }
 
-/* Find any raid_set in active slot for @rs on global list */
-static struct raid_set *rs_find_active(struct raid_set *rs)
-{
-       struct raid_set *r;
-       struct mapped_device *md = dm_table_get_md(rs->ti->table);
-
-       list_for_each_entry(r, &raid_sets, list)
-               if (r != rs && dm_table_get_md(r->ti->table) == md)
-                       return r;
-
-       return NULL;
-}
-
 /* raid10 algorithms (i.e. formats) */
 #define        ALGORITHM_RAID10_DEFAULT        0
 #define        ALGORITHM_RAID10_NEAR           1
@@ -764,7 +747,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
 
        mddev_init(&rs->md);
 
-       INIT_LIST_HEAD(&rs->list);
        rs->raid_disks = raid_devs;
        rs->delta_disks = 0;
 
@@ -782,9 +764,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
        for (i = 0; i < raid_devs; i++)
                md_rdev_init(&rs->dev[i].rdev);
 
-       /* Add @rs to global list. */
-       list_add(&rs->list, &raid_sets);
-
        /*
         * Remaining items to be initialized by further RAID params:
         *  rs->md.persistent
@@ -797,7 +776,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
        return rs;
 }
 
-/* Free all @rs allocations and remove it from global list. */
+/* Free all @rs allocations */
 static void raid_set_free(struct raid_set *rs)
 {
        int i;
@@ -815,8 +794,6 @@ static void raid_set_free(struct raid_set *rs)
                        dm_put_device(rs->ti, rs->dev[i].data_dev);
        }
 
-       list_del(&rs->list);
-
        kfree(rs);
 }
 
@@ -2649,7 +2626,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
                return 0;
        }
 
-       /* HM FIXME: get InSync raid_dev? */
+       /* HM FIXME: get In_Sync raid_dev? */
        rdev = &rs->dev[0].rdev;
 
        if (rs->delta_disks < 0) {
@@ -3149,6 +3126,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
                rs_set_new(rs);
        } else if (rs_is_recovering(rs)) {
+               /* Rebuild particular devices */
+               if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+                       set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+                       rs_setup_recovery(rs, MaxSector);
+               }
                /* A recovering raid set may be resized */
                ; /* skip setup rs */
        } else if (rs_is_reshaping(rs)) {
@@ -3242,6 +3224,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        /* Start raid set read-only and assumed clean to change in raid_resume() */
        rs->md.ro = 1;
        rs->md.in_sync = 1;
+
+       /* Keep array frozen */
        set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
 
        /* Has to be held on running the array */
@@ -3265,7 +3249,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        rs->callbacks.congested_fn = raid_is_congested;
        dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
-       /* If raid4/5/6 journal mode explictely requested (only possible with journal dev) -> set it */
+       /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
        if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
                r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
                if (r) {
@@ -3350,32 +3334,53 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_SUBMITTED;
 }
 
-/* Return string describing the current sync action of @mddev */
-static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery)
+/* Return sync state string for @state */
+enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
+static const char *sync_str(enum sync_state state)
+{
+       /* Has to be in above sync_state order! */
+       static const char *sync_strs[] = {
+               "frozen",
+               "reshape",
+               "resync",
+               "check",
+               "repair",
+               "recover",
+               "idle"
+       };
+
+       return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
+}
+
+/* Return enum sync_state for @mddev derived from @recovery flags */
+static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
 {
        if (test_bit(MD_RECOVERY_FROZEN, &recovery))
-               return "frozen";
+               return st_frozen;
 
-       /* The MD sync thread can be done with io but still be running */
+       /* The MD sync thread can be done with io or be interrupted but still be running */
        if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
            (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
                if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
-                       return "reshape";
+                       return st_reshape;
 
                if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
                        if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
-                               return "resync";
-                       else if (test_bit(MD_RECOVERY_CHECK, &recovery))
-                               return "check";
-                       return "repair";
+                               return st_resync;
+                       if (test_bit(MD_RECOVERY_CHECK, &recovery))
+                               return st_check;
+                       return st_repair;
                }
 
                if (test_bit(MD_RECOVERY_RECOVER, &recovery))
-                       return "recover";
+                       return st_recover;
+
+               if (mddev->reshape_position != MaxSector)
+                       return st_reshape;
        }
 
-       return "idle";
+       return st_idle;
 }
 
 /*
@@ -3409,6 +3414,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                                sector_t resync_max_sectors)
 {
        sector_t r;
+       enum sync_state state;
        struct mddev *mddev = &rs->md;
 
        clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
@@ -3419,20 +3425,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
        } else {
-               if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) &&
-                   !test_bit(MD_RECOVERY_INTR, &recovery) &&
-                   (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
-                    test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
-                    test_bit(MD_RECOVERY_RUNNING, &recovery)))
-                       r = mddev->curr_resync_completed;
-               else
+               state = decipher_sync_action(mddev, recovery);
+
+               if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
                        r = mddev->recovery_cp;
+               else
+                       r = mddev->curr_resync_completed;
 
-               if (r >= resync_max_sectors &&
-                   (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
-                    (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
-                     !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
-                     !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
+               if (state == st_idle && r >= resync_max_sectors) {
                        /*
                         * Sync complete.
                         */
@@ -3440,24 +3440,20 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                        if (test_bit(MD_RECOVERY_RECOVER, &recovery))
                                set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
-               } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) {
+               } else if (state == st_recover)
                        /*
                         * In case we are recovering, the array is not in sync
                         * and health chars should show the recovering legs.
                         */
                        ;
-
-               } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
-                          !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+               else if (state == st_resync)
                        /*
                         * If "resync" is occurring, the raid set
                         * is or may be out of sync hence the health
                         * characters shall be 'a'.
                         */
                        set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
-
-               } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
-                          !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+               else if (state == st_reshape)
                        /*
                         * If "reshape" is occurring, the raid set
                         * is or may be out of sync hence the health
@@ -3465,7 +3461,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                         */
                        set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
 
-               } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+               else if (state == st_check || state == st_repair)
                        /*
                         * If "check" or "repair" is occurring, the raid set has
                         * undergone an initial sync and the health characters
@@ -3473,12 +3469,12 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                         */
                        set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
-               else {
+               else {
                        struct md_rdev *rdev;
 
                        /*
                         * We are idle and recovery is needed, prevent 'A' chars race
-                        * caused by components still set to in-sync by constrcuctor.
+                        * caused by components still set to in-sync by constructor.
                         */
                        if (test_bit(MD_RECOVERY_NEEDED, &recovery))
                                set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
@@ -3542,7 +3538,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
                progress = rs_get_progress(rs, recovery, resync_max_sectors);
                resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
                                    atomic64_read(&mddev->resync_mismatches) : 0;
-               sync_action = decipher_sync_action(&rs->md, recovery);
+               sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
 
                /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
                for (i = 0; i < rs->raid_disks; i++)
@@ -3892,14 +3888,13 @@ static int rs_start_reshape(struct raid_set *rs)
        struct mddev *mddev = &rs->md;
        struct md_personality *pers = mddev->pers;
 
+       /* Don't allow the sync thread to work until the table gets reloaded. */
+       set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+
        r = rs_setup_reshape(rs);
        if (r)
                return r;
 
-       /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
-       if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
-               mddev_resume(mddev);
-
        /*
         * Check any reshape constraints enforced by the personality
         *
@@ -3923,10 +3918,6 @@ static int rs_start_reshape(struct raid_set *rs)
                }
        }
 
-       /* Suspend because a resume will happen in raid_resume() */
-       set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
-       mddev_suspend(mddev);
-
        /*
         * Now reshape got set up, update superblocks to
         * reflect the fact so that a table reload will
@@ -3947,29 +3938,6 @@ static int raid_preresume(struct dm_target *ti)
        if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
                return 0;
 
-       if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
-               struct raid_set *rs_active = rs_find_active(rs);
-
-               if (rs_active) {
-                       /*
-                        * In case no rebuilds have been requested
-                        * and an active table slot exists, copy
-                        * current resynchonization completed and
-                        * reshape position pointers across from
-                        * suspended raid set in the active slot.
-                        *
-                        * This resumes the new mapping at current
-                        * offsets to continue recover/reshape without
-                        * necessarily redoing a raid set partially or
-                        * causing data corruption in case of a reshape.
-                        */
-                       if (rs_active->md.curr_resync_completed != MaxSector)
-                               mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
-                       if (rs_active->md.reshape_position != MaxSector)
-                               mddev->reshape_position = rs_active->md.reshape_position;
-               }
-       }
-
        /*
         * The superblocks need to be updated on disk if the
         * array is new or new devices got added (thus zeroed
@@ -4046,7 +4014,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
        .name = "raid",
-       .version = {1, 13, 2},
+       .version = {1, 14, 0},
        .module = THIS_MODULE,
        .ctr = raid_ctr,
        .dtr = raid_dtr,
index 72142021b5c9a0410cfb6ccb04a93d613376fb53..74f6770c70b12404e965345ec561318bf2225fcb 100644 (file)
@@ -188,6 +188,12 @@ struct dm_pool_metadata {
        unsigned long flags;
        sector_t data_block_size;
 
+       /*
+        * We reserve a section of the metadata for commit overhead.
+        * All reported space does *not* include this.
+        */
+       dm_block_t metadata_reserve;
+
        /*
         * Set if a transaction has to be aborted but the attempt to roll back
         * to the previous (good) transaction failed.  The only pool metadata
@@ -816,6 +822,22 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
        return dm_tm_commit(pmd->tm, sblock);
 }
 
+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+       int r;
+       dm_block_t total;
+       dm_block_t max_blocks = 4096; /* 16M */
+
+       r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+       if (r) {
+               DMERR("could not get size of metadata device");
+               pmd->metadata_reserve = max_blocks;
+       } else {
+               sector_div(total, 10);
+               pmd->metadata_reserve = min(max_blocks, total);
+       }
+}
+
 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
                                               sector_t data_block_size,
                                               bool format_device)
@@ -849,6 +871,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
                return ERR_PTR(r);
        }
 
+       __set_metadata_reserve(pmd);
+
        return pmd;
 }
 
@@ -1820,6 +1844,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
        down_read(&pmd->root_lock);
        if (!pmd->fail_io)
                r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+       if (!r) {
+               if (*result < pmd->metadata_reserve)
+                       *result = 0;
+               else
+                       *result -= pmd->metadata_reserve;
+       }
        up_read(&pmd->root_lock);
 
        return r;
@@ -1932,8 +1963,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
        int r = -EINVAL;
 
        down_write(&pmd->root_lock);
-       if (!pmd->fail_io)
+       if (!pmd->fail_io) {
                r = __resize_space_map(pmd->metadata_sm, new_count);
+               if (!r)
+                       __set_metadata_reserve(pmd);
+       }
        up_write(&pmd->root_lock);
 
        return r;
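
__set_metadata_reserve() withholds the smaller of 4096 metadata blocks (16M at the 4K metadata block size) and a tenth of the metadata device, and dm_pool_get_free_metadata_block_count() now reports free space net of that reserve. A quick numeric check of the sizing rule:

    #include <stdio.h>

    static unsigned long long metadata_reserve(unsigned long long total)
    {
        unsigned long long max_blocks = 4096;  /* 16M of 4K blocks */
        unsigned long long tenth = total / 10; /* sector_div(total, 10) */

        return tenth < max_blocks ? tenth : max_blocks;
    }

    int main(void)
    {
        /* small metadata dev: reserve a tenth of it */
        printf("%llu\n", metadata_reserve(8192));   /* 819 */
        /* big metadata dev: reserve caps at 4096 blocks */
        printf("%llu\n", metadata_reserve(100000)); /* 4096 */
        return 0;
    }
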
index 7bd60a150f8faec85071cf1d984bd35d27b08391..aaf1ad481ee88e59b90273133b7a7bd90c69d3fb 100644 (file)
@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
 enum pool_mode {
        PM_WRITE,               /* metadata may be changed */
        PM_OUT_OF_DATA_SPACE,   /* metadata may be changed, though data may not be allocated */
+
+       /*
+        * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
+        */
+       PM_OUT_OF_METADATA_SPACE,
        PM_READ_ONLY,           /* metadata may not be changed */
+
        PM_FAIL,                /* all I/O fails */
 };
 
@@ -1371,7 +1377,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
 
 static void requeue_bios(struct pool *pool);
 
-static void check_for_space(struct pool *pool)
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+       return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+       return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
+{
+       int r;
+       const char *ooms_reason = NULL;
+       dm_block_t nr_free;
+
+       r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+       if (r)
+               ooms_reason = "Could not get free metadata blocks";
+       else if (!nr_free)
+               ooms_reason = "No free metadata blocks";
+
+       if (ooms_reason && !is_read_only(pool)) {
+               DMERR("%s", ooms_reason);
+               set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+       }
+}
+
+static void check_for_data_space(struct pool *pool)
 {
        int r;
        dm_block_t nr_free;
@@ -1397,14 +1431,16 @@ static int commit(struct pool *pool)
 {
        int r;
 
-       if (get_pool_mode(pool) >= PM_READ_ONLY)
+       if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
                return -EINVAL;
 
        r = dm_pool_commit_metadata(pool->pmd);
        if (r)
                metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
-       else
-               check_for_space(pool);
+       else {
+               check_for_metadata_space(pool);
+               check_for_data_space(pool);
+       }
 
        return r;
 }
@@ -1470,6 +1506,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
                return r;
        }
 
+       r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+       if (r) {
+               metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
+               return r;
+       }
+
+       if (!free_blocks) {
+               /* Let's commit before we use up the metadata reserve. */
+               r = commit(pool);
+               if (r)
+                       return r;
+       }
+
        return 0;
 }
 
@@ -1501,6 +1550,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool)
        case PM_OUT_OF_DATA_SPACE:
                return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
 
+       case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
        case PM_FAIL:
                return BLK_STS_IOERR;
@@ -2464,8 +2514,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                error_retry_list(pool);
                break;
 
+       case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
-               if (old_mode != new_mode)
+               if (!is_read_only_pool_mode(old_mode))
                        notify_of_pool_mode_change(pool, "read-only");
                dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_read_only;
@@ -3403,6 +3454,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
                DMINFO("%s: growing the metadata device from %llu to %llu blocks",
                       dm_device_name(pool->pool_md),
                       sb_metadata_dev_size, metadata_dev_size);
+
+               if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+                       set_pool_mode(pool, PM_WRITE);
+
                r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
                if (r) {
                        metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3707,7 +3762,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
 
-       if (get_pool_mode(pool) >= PM_READ_ONLY) {
+       if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
                DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
                      dm_device_name(pool->pool_md));
                return -EOPNOTSUPP;
@@ -3781,6 +3836,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
        dm_block_t nr_blocks_data;
        dm_block_t nr_blocks_metadata;
        dm_block_t held_root;
+       enum pool_mode mode;
        char buf[BDEVNAME_SIZE];
        char buf2[BDEVNAME_SIZE];
        struct pool_c *pt = ti->private;
@@ -3851,9 +3907,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
                else
                        DMEMIT("- ");
 
-               if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+               mode = get_pool_mode(pool);
+               if (mode == PM_OUT_OF_DATA_SPACE)
                        DMEMIT("out_of_data_space ");
-               else if (pool->pf.mode == PM_READ_ONLY)
+               else if (is_read_only_pool_mode(mode))
                        DMEMIT("ro ");
                else
                        DMEMIT("rw ");
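
Call sites such as commit() and pool_message() gate on get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE, so the new mode must sit between PM_OUT_OF_DATA_SPACE and PM_READ_ONLY for both the ordered comparison and is_read_only_pool_mode() to behave. A sketch showing how the enum ordering carries the policy:

    #include <stdbool.h>
    #include <stdio.h>

    enum pool_mode {
        PM_WRITE,                 /* metadata may be changed */
        PM_OUT_OF_DATA_SPACE,     /* metadata writable, data is not */
        PM_OUT_OF_METADATA_SPACE, /* read-only, but may return to WRITE */
        PM_READ_ONLY,             /* metadata may not be changed */
        PM_FAIL,                  /* all I/O fails */
    };

    static bool is_read_only_pool_mode(enum pool_mode m)
    {
        return m == PM_OUT_OF_METADATA_SPACE || m == PM_READ_ONLY;
    }

    static bool can_commit(enum pool_mode m)
    {
        /* commit() bails for every mode at or past out-of-metadata-space */
        return m < PM_OUT_OF_METADATA_SPACE;
    }

    int main(void)
    {
        for (enum pool_mode m = PM_WRITE; m <= PM_FAIL; m++)
            printf("mode %d: ro=%d commit=%d\n", m,
                   is_read_only_pool_mode(m), can_commit(m));
        return 0;
    }
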
index 12decdbd722d866ed48a0a6a8d8b8f2e0a6aefbc..fc65f0dedf7f702b31d6adf21b8a2b26238c8b43 100644 (file)
@@ -99,10 +99,26 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
 {
        struct scatterlist sg;
 
-       sg_init_one(&sg, data, len);
-       ahash_request_set_crypt(req, &sg, NULL, len);
-
-       return crypto_wait_req(crypto_ahash_update(req), wait);
+       if (likely(!is_vmalloc_addr(data))) {
+               sg_init_one(&sg, data, len);
+               ahash_request_set_crypt(req, &sg, NULL, len);
+               return crypto_wait_req(crypto_ahash_update(req), wait);
+       } else {
+               do {
+                       int r;
+                       size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
+                       flush_kernel_vmap_range((void *)data, this_step);
+                       sg_init_table(&sg, 1);
+                       sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
+                       ahash_request_set_crypt(req, &sg, NULL, this_step);
+                       r = crypto_wait_req(crypto_ahash_update(req), wait);
+                       if (unlikely(r))
+                               return r;
+                       data += this_step;
+                       len -= this_step;
+               } while (len);
+               return 0;
+       }
 }
 
 /*
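
vmalloc memory is contiguous only in virtual addresses, so it cannot be handed to the scatterlist as a single run; the new branch hashes it page by page, sizing each step so it never crosses a page boundary (with a cache flush per step on architectures that need it). A userspace model of the step calculation, assuming 4K pages:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

    int main(void)
    {
        /* hypothetical vmalloc'd buffer: starts 100 bytes into a page */
        uintptr_t data = 0x10000 + 100;
        size_t len = 8500;

        while (len) {
            size_t room = PAGE_SIZE - offset_in_page(data);
            size_t this_step = len < room ? len : room;

            /* one sg entry + ahash update per step in the real code */
            printf("hash %zu bytes at page offset %lu\n",
                   this_step, offset_in_page(data));
            data += this_step;
            len -= this_step;
        }
        /* steps: 3996, 4096, 408 - each confined to a single page */
        return 0;
    }
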
index eeb7eef62174c4dfb5c8854efbef2d798f161f1e..38f90e17992717d7cd0f12ad904490abbaa2d1a5 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/sysfs.h>
+#include <linux/nospec.h>
 
 static DEFINE_MUTEX(compass_mutex);
 
@@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count,
                return ret;
        if (val >= strlen(map))
                return -EINVAL;
+       val = array_index_nospec(val, strlen(map));
        mutex_lock(&compass_mutex);
        ret = compass_command(c, map[val]);
        mutex_unlock(&compass_mutex);
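
array_index_nospec() clamps val with a branchless mask so that even a mispredicted val >= strlen(map) check cannot let a speculatively executed load index map out of bounds. A sketch of the generic mask construction, not the arch-optimized kernel implementation, relying (as the kernel's generic version does) on arithmetic right shift of negative values:

    #include <stddef.h>
    #include <stdio.h>

    /* Generic nospec mask: all-ones when idx < size, all-zeroes otherwise,
     * computed without a conditional branch the CPU could mispredict.
     */
    static size_t index_nospec(size_t idx, size_t size)
    {
        size_t mask = (size_t)(~(long)(idx | (size - 1 - idx)) >>
                               (sizeof(long) * 8 - 1));
        return idx & mask;
    }

    int main(void)
    {
        /* strlen(map) == 4, as in the compass_store() bounds check */
        printf("%zu\n", index_nospec(2, 4)); /* in range: stays 2 */
        printf("%zu\n", index_nospec(9, 4)); /* out of range: forced to 0 */
        return 0;
    }
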
index 8f82bb9d11e2e3bfb6fbbf3764180a1b7fb0d0a2..b8aaa684c397b0b8be8fe0c5ae00a37b087b6997 100644 (file)
@@ -2131,7 +2131,7 @@ static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
        retrc = plpar_hcall_norets(H_REG_CRQ,
                                   vdev->unit_address,
                                   queue->msg_token, PAGE_SIZE);
-       retrc = rc;
+       rc = retrc;
 
        if (rc == H_RESOURCE)
                rc = ibmvmc_reset_crq_queue(adapter);
index 7bba62a72921ebe405f36306e9233afc910351b4..fc3872fe7b2510e1fa862ea508be53ad377a4bfd 100644 (file)
@@ -521,17 +521,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
 
        cl = cldev->cl;
 
+       mutex_lock(&bus->device_lock);
        if (cl->state == MEI_FILE_UNINITIALIZED) {
-               mutex_lock(&bus->device_lock);
                ret = mei_cl_link(cl);
-               mutex_unlock(&bus->device_lock);
                if (ret)
-                       return ret;
+                       goto out;
                /* update pointers */
                cl->cldev = cldev;
        }
 
-       mutex_lock(&bus->device_lock);
        if (mei_cl_is_connected(cl)) {
                ret = 0;
                goto out;
@@ -616,9 +614,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
        if (err < 0)
                dev_err(bus->dev, "Could not disconnect from the ME client\n");
 
-out:
        mei_cl_bus_module_put(cldev);
-
+out:
        /* Flush queues and remove any pending read */
        mei_cl_flush_queues(cl, NULL);
        mei_cl_unlink(cl);
@@ -876,12 +873,13 @@ static void mei_cl_bus_dev_release(struct device *dev)
 
        mei_me_cl_put(cldev->me_cl);
        mei_dev_bus_put(cldev->bus);
+       mei_cl_unlink(cldev->cl);
        kfree(cldev->cl);
        kfree(cldev);
 }
 
 static const struct device_type mei_cl_device_type = {
-       .release        = mei_cl_bus_dev_release,
+       .release = mei_cl_bus_dev_release,
 };
 
 /**
index 4ab6251d418ecf4fac7ec93a4b8101db313f5249..ebdcf0b450e25bb00be1db564fd09f806e87edad 100644 (file)
@@ -1767,7 +1767,7 @@ out:
                }
        }
 
-       rets = buf->size;
+       rets = len;
 err:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(dev->dev);
index 09e233d4c0de386eab9ea2f2ce178dedf4ebb480..e56f3e72d57a06cc4a9c2e8358bd3359113524c8 100644 (file)
@@ -1161,15 +1161,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                props_res = (struct hbm_props_response *)mei_msg;
 
-               if (props_res->status) {
+               if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
+                       dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
+                               props_res->me_addr);
+               } else if (props_res->status) {
                        dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
                                props_res->status,
                                mei_hbm_status_str(props_res->status));
                        return -EPROTO;
+               } else {
+                       mei_hbm_me_cl_add(dev, props_res);
                }
 
-               mei_hbm_me_cl_add(dev, props_res);
-
                /* request property for the next client */
                if (mei_hbm_prop_req(dev, props_res->me_addr + 1))
                        return -EIO;
index 09cb89645d06ebc97c77b886ff3c6a1be9b62c5c..2cfec33178c1fa20532270c04a1f634414a4ec19 100644 (file)
@@ -517,19 +517,23 @@ static struct mmc_host_ops meson_mx_mmc_ops = {
 static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
 {
        struct device_node *slot_node;
+       struct platform_device *pdev;
 
        /*
         * TODO: the MMC core framework currently does not support
         * controllers with multiple slots properly. So we only register
         * the first slot for now
         */
-       slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot");
+       slot_node = of_get_compatible_child(parent->of_node, "mmc-slot");
        if (!slot_node) {
                dev_warn(parent, "no 'mmc-slot' sub-node found\n");
                return ERR_PTR(-ENOENT);
        }
 
-       return of_platform_device_create(slot_node, NULL, parent);
+       pdev = of_platform_device_create(slot_node, NULL, parent);
+       of_node_put(slot_node);
+
+       return pdev;
 }
 
 static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
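
of_get_compatible_child() both narrows the search to direct children of parent (of_find_compatible_node() walked the whole tree from that point) and returns the node with its refcount raised, so the new of_node_put() is needed once the platform device has been created. A toy model of the get/put pairing with stub refcounting:

    #include <stdio.h>

    struct device_node { int refcount; };

    /* Stand-ins: of_get_* returns the node with its refcount raised,
     * of_node_put() drops it again.
     */
    static struct device_node *get_child_stub(struct device_node *n)
    {
        n->refcount++;
        return n;
    }

    static void node_put_stub(struct device_node *n)
    {
        n->refcount--;
    }

    int main(void)
    {
        struct device_node slot = { .refcount = 1 };

        struct device_node *child = get_child_stub(&slot);
        /* ... of_platform_device_create(child, ...) takes its own refs ... */
        node_put_stub(child); /* balance the get before returning */

        printf("refcount back to %d\n", slot.refcount); /* 1 */
        return 0;
    }
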
index 071693ebfe18914fe799b7da1d91df1e6f8b99b9..68760d4a5d3da1a53c655d1b33f1c2237273d136 100644 (file)
@@ -2177,6 +2177,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
        dma_release_channel(host->tx_chan);
        dma_release_channel(host->rx_chan);
 
+       dev_pm_clear_wake_irq(host->dev);
        pm_runtime_dont_use_autosuspend(host->dev);
        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
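
dev_pm_clear_wake_irq() balances the dev_pm_set_wake_irq() done at probe time; without it the wake IRQ stays bound to a device that is going away. A hedged sketch of the pairing (the foo_* names are hypothetical):

    static int foo_probe(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;
            device_init_wakeup(&pdev->dev, true);
            return dev_pm_set_wake_irq(&pdev->dev, irq);
    }

    static int foo_remove(struct platform_device *pdev)
    {
            /* unwind in reverse order; forgetting this is the leak
             * the hunk above fixes */
            dev_pm_clear_wake_irq(&pdev->dev);
            device_init_wakeup(&pdev->dev, false);
            return 0;
    }
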
index 17f12c18d225a50a21bba97eabf0670ac9f135e5..7635c38e77dd0ce43a960ffef0fdbaabc4f1422d 100644 (file)
@@ -459,12 +459,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
        cqe = &admin_queue->cq.entries[head_masked];
 
        /* Go over all the completions */
-       while ((cqe->acq_common_descriptor.flags &
+       while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
                        ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
                /* Do not read the rest of the completion entry before the
                 * phase bit was validated
                 */
-               rmb();
+               dma_rmb();
                ena_com_handle_single_admin_completion(admin_queue, cqe);
 
                head_masked++;
@@ -627,17 +627,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
        mmio_read_reg |= mmio_read->seq_num &
                        ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
 
-       /* make sure read_resp->req_id get updated before the hw can write
-        * there
-        */
-       wmb();
-
-       writel_relaxed(mmio_read_reg,
-                      ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+       writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 
-       mmiowb();
        for (i = 0; i < timeout; i++) {
-               if (read_resp->req_id == mmio_read->seq_num)
+               if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
                        break;
 
                udelay(1);
@@ -1796,8 +1789,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
        aenq_common = &aenq_e->aenq_common_desc;
 
        /* Go over all the events */
-       while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
-              phase) {
+       while ((READ_ONCE(aenq_common->flags) &
+               ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+               /* Make sure the phase bit (ownership) is as expected before
+                * reading the rest of the descriptor.
+                */
+               dma_rmb();
+
                pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
                         aenq_common->group, aenq_common->syndrom,
                         (u64)aenq_common->timestamp_low +
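
These hunks enforce the usual ownership-bit protocol for device-written rings: the hardware writes a descriptor and then flips its phase bit, so the CPU must read the flags with READ_ONCE() (no compiler re-reads or tearing) and separate the phase-bit check from the payload reads with dma_rmb(). A minimal sketch under a hypothetical descriptor layout (process() is a stand-in consumer):

    struct cdesc {
            u32 data;
            u8  flags;                      /* bit 0: phase (ownership) */
    };

    static void poll_completions(struct cdesc *ring, u32 mask,
                                 u32 *head, u8 *phase)
    {
            struct cdesc *d = &ring[*head & mask];

            while ((READ_ONCE(d->flags) & 1) == *phase) {
                    /* device wrote payload, then flipped the bit;
                     * mirror that order on the read side */
                    dma_rmb();
                    process(d->data);

                    if ((++*head & mask) == 0)
                            *phase ^= 1;    /* wrapped: expect flipped bit */
                    d = &ring[*head & mask];
            }
    }
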
index ea149c134e1538d6c888db712d9b6cdc2b01412f..1c682b76190f9eb9ecbe6e428735ae21c4ed8b71 100644 (file)
@@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
        if (desc_phase != expected_phase)
                return NULL;
 
+       /* Make sure we read the rest of the descriptor after the phase bit
+        * has been read
+        */
+       dma_rmb();
+
        return cdesc;
 }
 
@@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
        if (cdesc_phase != expected_phase)
                return -EAGAIN;
 
+       dma_rmb();
        if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
                pr_err("Invalid req id %d\n", cdesc->req_id);
                return -EINVAL;
index 6fdc753d948382e7a56aaeac801cf16ab3b8f833..2f7657227cfe9c60d77482c98df22fdb2f89e3c9 100644 (file)
@@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
        return io_sq->q_depth - 1 - cnt;
 }
 
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
-                                           bool relaxed)
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
 {
        u16 tail;
 
@@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
        pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
                 io_sq->qid, tail);
 
-       if (relaxed)
-               writel_relaxed(tail, io_sq->db_addr);
-       else
-               writel(tail, io_sq->db_addr);
+       writel(tail, io_sq->db_addr);
 
        return 0;
 }
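
The doorbell helper loses its relaxed variant because writel() already orders prior normal memory stores before the MMIO store on weakly-ordered architectures; the explicit wmb() duplicated that, and the mmiowb() is dropped as unneeded here. A hedged before/after sketch in fragment form:

    /* before: explicit barriers around a relaxed MMIO write */
    wmb();                          /* order descriptor stores ...   */
    writel_relaxed(tail, db_addr);  /* ... before the doorbell write */
    mmiowb();

    /* after: writel() implies the store ordering above */
    writel(tail, db_addr);
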
index c673ac2df65bdf3f9b4d03403be705b581505657..29b5774dd32d47e5ad2c43e78b599493f011d129 100644 (file)
@@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
 
 static int ena_rss_init_default(struct ena_adapter *adapter);
 static void check_for_admin_com_state(struct ena_adapter *adapter);
-static void ena_destroy_device(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
 static int ena_restore_device(struct ena_adapter *adapter);
 
 static void ena_tx_timeout(struct net_device *dev)
@@ -461,7 +461,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
                return -ENOMEM;
        }
 
-       dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+       dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
                           DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
                u64_stats_update_begin(&rx_ring->syncp);
@@ -478,7 +478,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
        rx_info->page_offset = 0;
        ena_buf = &rx_info->ena_buf;
        ena_buf->paddr = dma;
-       ena_buf->len = PAGE_SIZE;
+       ena_buf->len = ENA_PAGE_SIZE;
 
        return 0;
 }
@@ -495,7 +495,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
                return;
        }
 
-       dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+       dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
                       DMA_FROM_DEVICE);
 
        __free_page(page);
@@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
                            rx_ring->qid, i, num);
        }
 
-       if (likely(i)) {
-               /* Add memory barrier to make sure the desc were written before
-                * issue a doorbell
-                */
-               wmb();
-               ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
-               mmiowb();
-       }
+       /* ena_com_write_sq_doorbell issues a wmb() */
+       if (likely(i))
+               ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
 
        rx_ring->next_to_use = next_to_use;
 
@@ -916,10 +911,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
        do {
                dma_unmap_page(rx_ring->dev,
                               dma_unmap_addr(&rx_info->ena_buf, paddr),
-                              PAGE_SIZE, DMA_FROM_DEVICE);
+                              ENA_PAGE_SIZE, DMA_FROM_DEVICE);
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
-                               rx_info->page_offset, len, PAGE_SIZE);
+                               rx_info->page_offset, len, ENA_PAGE_SIZE);
 
                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx skb updated. len %d. data_len %d\n",
@@ -1900,7 +1895,7 @@ static int ena_close(struct net_device *netdev)
                          "Destroy failure, restarting device\n");
                ena_dump_stats_to_dmesg(adapter);
                /* rtnl lock already obtained in dev_ioctl() layer */
-               ena_destroy_device(adapter);
+               ena_destroy_device(adapter, false);
                ena_restore_device(adapter);
        }
 
@@ -2112,12 +2107,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
                tx_ring->ring_size);
 
-       /* This WMB is aimed to:
-        * 1 - perform smp barrier before reading next_to_completion
-        * 2 - make sure the desc were written before trigger DB
-        */
-       wmb();
-
        /* stop the queue when no more space available, the packet can have up
         * to sgl_size + 2. one for the meta descriptor and one for header
         * (if the header is larger than tx_max_header_size).
@@ -2136,10 +2125,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 * stop the queue but meanwhile clean_tx_irq updates
                 * next_to_completion and terminates.
                 * The queue will remain stopped forever.
-                * To solve this issue this function perform rmb, check
-                * the wakeup condition and wake up the queue if needed.
+                * To solve this issue, add an mb() to make sure that the
+                * netif_tx_stop_queue() write is visible before checking
+                * whether there is additional space in the queue.
                 */
-               smp_rmb();
+               smp_mb();
 
                if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
                                > ENA_TX_WAKEUP_THRESH) {
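
This is the classic lost-wakeup race between the transmit path stopping the queue and the completion path freeing space without waking it; the full smp_mb() makes the stopped state visible before the free-space recheck. A hedged sketch of the producer side (free_space() and the thresholds are hypothetical stand-ins):

    if (unlikely(free_space(ring) < MAX_SLOTS_PER_PACKET)) {
            netif_tx_stop_queue(txq);
            /* make the stopped bit visible before re-reading the
             * free count; pairs with the completion path */
            smp_mb();
            if (free_space(ring) > WAKEUP_THRESH)
                    netif_tx_wake_queue(txq);       /* recover the race */
    }
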
@@ -2151,8 +2141,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        if (netif_xmit_stopped(txq) || !skb->xmit_more) {
-               /* trigger the dma engine */
-               ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
+               /* trigger the dma engine. ena_com_write_sq_doorbell()
+                * has an mb()
+                */
+               ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
                u64_stats_update_begin(&tx_ring->syncp);
                tx_ring->tx_stats.doorbells++;
                u64_stats_update_end(&tx_ring->syncp);
@@ -2550,12 +2542,15 @@ err_disable_msix:
        return rc;
 }
 
-static void ena_destroy_device(struct ena_adapter *adapter)
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
 {
        struct net_device *netdev = adapter->netdev;
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        bool dev_up;
 
+       if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+               return;
+
        netif_carrier_off(netdev);
 
        del_timer_sync(&adapter->timer_service);
@@ -2563,7 +2558,8 @@ static void ena_destroy_device(struct ena_adapter *adapter)
        dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
        adapter->dev_up_before_reset = dev_up;
 
-       ena_com_set_admin_running_state(ena_dev, false);
+       if (!graceful)
+               ena_com_set_admin_running_state(ena_dev, false);
 
        if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
                ena_down(adapter);
@@ -2591,6 +2587,7 @@ static void ena_destroy_device(struct ena_adapter *adapter)
        adapter->reset_reason = ENA_REGS_RESET_NORMAL;
 
        clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+       clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
 }
 
 static int ena_restore_device(struct ena_adapter *adapter)
@@ -2635,6 +2632,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
                }
        }
 
+       set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
        mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
        dev_err(&pdev->dev, "Device reset completed successfully\n");
 
@@ -2665,7 +2663,7 @@ static void ena_fw_reset_device(struct work_struct *work)
                return;
        }
        rtnl_lock();
-       ena_destroy_device(adapter);
+       ena_destroy_device(adapter, false);
        ena_restore_device(adapter);
        rtnl_unlock();
 }
@@ -3409,30 +3407,24 @@ static void ena_remove(struct pci_dev *pdev)
                netdev->rx_cpu_rmap = NULL;
        }
 #endif /* CONFIG_RFS_ACCEL */
-
-       unregister_netdev(netdev);
        del_timer_sync(&adapter->timer_service);
 
        cancel_work_sync(&adapter->reset_task);
 
-       /* Reset the device only if the device is running. */
-       if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
-               ena_com_dev_reset(ena_dev, adapter->reset_reason);
+       unregister_netdev(netdev);
 
-       ena_free_mgmnt_irq(adapter);
+       /* If the device is running, make sure it is reset so that it does
+        * not issue any more events.
+        */
+       if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+               set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 
-       ena_disable_msix(adapter);
+       rtnl_lock();
+       ena_destroy_device(adapter, true);
+       rtnl_unlock();
 
        free_netdev(netdev);
 
-       ena_com_mmio_reg_read_request_destroy(ena_dev);
-
-       ena_com_abort_admin_commands(ena_dev);
-
-       ena_com_wait_for_abort_completion(ena_dev);
-
-       ena_com_admin_destroy(ena_dev);
-
        ena_com_rss_destroy(ena_dev);
 
        ena_com_delete_debug_area(ena_dev);
@@ -3467,7 +3459,7 @@ static int ena_suspend(struct pci_dev *pdev,  pm_message_t state)
                        "ignoring device reset request as the device is being suspended\n");
                clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
        }
-       ena_destroy_device(adapter);
+       ena_destroy_device(adapter, true);
        rtnl_unlock();
        return 0;
 }
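
Taken together, these hunks consolidate teardown: ena_destroy_device() is now idempotent (it bails out unless ENA_FLAG_DEVICE_RUNNING is set and clears the flag on exit), and a "graceful" flag decides whether the firmware is told the admin queue is going away. A hedged sketch of the pattern (the helper names are hypothetical):

    static void destroy_device(struct ena_adapter *a, bool graceful)
    {
            if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &a->flags))
                    return;                 /* already torn down */
            if (!graceful)
                    stop_admin_queue(a);    /* abrupt: notify firmware */
            teardown_io_queues(a);
            clear_bit(ENA_FLAG_DEVICE_RUNNING, &a->flags);
    }
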
index f1972b5ab650332f9fd7c6aedbb949756785c644..7c7ae56c52cfddb11a8e1b096738dfc499ea57dc 100644 (file)
@@ -355,4 +355,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 
 int ena_get_sset_count(struct net_device *netdev, int sset);
 
+/* The ENA buffer length field is 16 bit long, so when PAGE_SIZE == 64kB the
+ * driver would pass 0.
+ * Since the max packet size the ENA handles is ~9kB, limit the buffer length
+ * to 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
 #endif /* !(ENA_H) */
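
The truncation the comment describes is plain 16-bit arithmetic; a tiny userspace illustration (not driver code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t full = (uint16_t)(64 * 1024);  /* 64kB: wraps to 0 */
            uint16_t cap  = (uint16_t)(16 * 1024);  /* 16kB: 16384, fits */

            printf("%u %u\n", full, cap);           /* prints "0 16384" */
            return 0;
    }
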
index ff92ab1daeb80cec7779e1f00faa5f0174fcf727..1e9d882c04ef1a8d989c56f14f0db271d6c4027d 100644 (file)
@@ -4500,7 +4500,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
                                port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
                        }
                }
-               return status;
+               goto err;
        }
 
        pcie = be_get_pcie_desc(resp->func_param, desc_count,
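
Replacing the early "return status" with "goto err" routes the success-but-early exit through the function's unwind path, which frees what the function allocated. A hedged sketch of the idiom (alloc_buf/do_query/parse_result/free_buf are hypothetical):

    static int get_config(struct ctx *c)
    {
            void *buf;
            int status;

            buf = alloc_buf(c);
            if (!buf)
                    return -ENOMEM;

            status = do_query(c, buf);
            if (status)
                    goto err;

            status = parse_result(c, buf);
    err:
            free_buf(buf);          /* runs on every post-alloc exit */
            return status;
    }
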
index 7a637b51c7d2302b63f2eb579adea4e0459e7430..e08301d833e2ed91bda977109404175f556f563f 100644 (file)
@@ -274,6 +274,7 @@ ltq_etop_hw_init(struct net_device *dev)
                struct ltq_etop_chan *ch = &priv->ch[i];
 
                ch->idx = ch->dma.nr = i;
+               ch->dma.dev = &priv->pdev->dev;
 
                if (IS_TX(i)) {
                        ltq_dma_alloc_tx(&ch->dma);
index b994b80d5714ad142a3af4711c65d62b7b1d5a6c..37ba7c78859db17aa7ecfa76648ca54adb71790b 100644 (file)
@@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
        delayed_event_start(priv);
 
        dev_ctx->context = intf->add(dev);
-       set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
-       if (intf->attach)
-               set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
-
        if (dev_ctx->context) {
+               set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+               if (intf->attach)
+                       set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+
                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);
 
@@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
        if (intf->attach) {
                if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
                        goto out;
-               intf->attach(dev, dev_ctx->context);
+               if (intf->attach(dev, dev_ctx->context))
+                       goto out;
+
                set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
        } else {
                if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
                        goto out;
                dev_ctx->context = intf->add(dev);
+               if (!dev_ctx->context)
+                       goto out;
+
                set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
        }
 
@@ -391,16 +396,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
                }
 }
 
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
 {
-       return (u16)((dev->pdev->bus->number << 8) |
+       return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
+                    (dev->pdev->bus->number << 8) |
                     PCI_SLOT(dev->pdev->devfn));
 }
 
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
-       u16 pci_id = mlx5_gen_pci_id(dev);
+       u32 pci_id = mlx5_gen_pci_id(dev);
        struct mlx5_core_dev *res = NULL;
        struct mlx5_core_dev *tmp_dev;
        struct mlx5_priv *priv;
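
Widening the generated ID from u16 to u32 makes room for the PCI domain (segment) number; before this, two devices with identical bus/slot numbers in different domains would collide. The packing:

    /* bits [31:16] PCI domain, [15:8] bus number, [7:0] device (slot) */
    static u32 gen_pci_id(struct pci_dev *pdev)
    {
            return (u32)((pci_domain_nr(pdev->bus) << 16) |
                         (pdev->bus->number << 8) |
                         PCI_SLOT(pdev->devfn));
    }
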
index 75bb981e00b7205ad10b467dc64a821a4f8867a9..41cde926cdab6d3d061d62d424ed1bff8c860c9d 100644 (file)
@@ -191,7 +191,7 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
 {
        if (psrc_m) {
                MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
-               MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v));
+               MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
        }
 
        if (pdst_m) {
index f72b5c9dcfe95f98cc388676462207bf76247504..3028e8d90920e2940cc3247775bd7a498fd906ef 100644 (file)
@@ -663,6 +663,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
        if (err)
                goto miss_rule_err;
 
+       kvfree(flow_group_in);
        return 0;
 
 miss_rule_err:
index f418541af7cf13f96ea24613648150bb727397be..37d114c668b7ba70ca968f76c88c42af84967f25 100644 (file)
@@ -1578,6 +1578,33 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
        return version;
 }
 
+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g,
+                 u32 *match_value,
+                 bool take_write)
+{
+       struct fs_fte *fte_tmp;
+
+       if (take_write)
+               nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+       else
+               nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+       fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+                                        rhash_fte);
+       if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+               fte_tmp = NULL;
+               goto out;
+       }
+
+       nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+out:
+       if (take_write)
+               up_write_ref_node(&g->node);
+       else
+               up_read_ref_node(&g->node);
+       return fte_tmp;
+}
+
 static struct mlx5_flow_handle *
 try_add_to_existing_fg(struct mlx5_flow_table *ft,
                       struct list_head *match_head,
@@ -1600,10 +1627,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
        if (IS_ERR(fte))
                return  ERR_PTR(-ENOMEM);
 
-       list_for_each_entry(iter, match_head, list) {
-               nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
-       }
-
 search_again_locked:
        version = matched_fgs_get_version(match_head);
        /* Try to find a fg that already contains a matching fte */
@@ -1611,20 +1634,9 @@ search_again_locked:
                struct fs_fte *fte_tmp;
 
                g = iter->g;
-               fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
-                                                rhash_fte);
-               if (!fte_tmp || !tree_get_node(&fte_tmp->node))
+               fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
+               if (!fte_tmp)
                        continue;
-
-               nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-               if (!take_write) {
-                       list_for_each_entry(iter, match_head, list)
-                               up_read_ref_node(&iter->g->node);
-               } else {
-                       list_for_each_entry(iter, match_head, list)
-                               up_write_ref_node(&iter->g->node);
-               }
-
                rule = add_rule_fg(g, spec->match_value,
                                   flow_act, dest, dest_num, fte_tmp);
                up_write_ref_node(&fte_tmp->node);
@@ -1633,19 +1645,6 @@ search_again_locked:
                return rule;
        }
 
-       /* No group with matching fte found. Try to add a new fte to any
-        * matching fg.
-        */
-
-       if (!take_write) {
-               list_for_each_entry(iter, match_head, list)
-                       up_read_ref_node(&iter->g->node);
-               list_for_each_entry(iter, match_head, list)
-                       nested_down_write_ref_node(&iter->g->node,
-                                                  FS_LOCK_PARENT);
-               take_write = true;
-       }
-
        /* Check the ft version, for case that new flow group
         * was added while the fgs weren't locked
         */
@@ -1657,27 +1656,30 @@ search_again_locked:
        /* Check the fgs version, for case the new FTE with the
         * same values was added while the fgs weren't locked
         */
-       if (version != matched_fgs_get_version(match_head))
+       if (version != matched_fgs_get_version(match_head)) {
+               take_write = true;
                goto search_again_locked;
+       }
 
        list_for_each_entry(iter, match_head, list) {
                g = iter->g;
 
                if (!g->node.active)
                        continue;
+
+               nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
                err = insert_fte(g, fte);
                if (err) {
+                       up_write_ref_node(&g->node);
                        if (err == -ENOSPC)
                                continue;
-                       list_for_each_entry(iter, match_head, list)
-                               up_write_ref_node(&iter->g->node);
                        kmem_cache_free(steering->ftes_cache, fte);
                        return ERR_PTR(err);
                }
 
                nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
-               list_for_each_entry(iter, match_head, list)
-                       up_write_ref_node(&iter->g->node);
+               up_write_ref_node(&g->node);
                rule = add_rule_fg(g, spec->match_value,
                                   flow_act, dest, dest_num, fte);
                up_write_ref_node(&fte->node);
@@ -1686,8 +1688,6 @@ search_again_locked:
        }
        rule = ERR_PTR(-ENOENT);
 out:
-       list_for_each_entry(iter, match_head, list)
-               up_write_ref_node(&iter->g->node);
        kmem_cache_free(steering->ftes_cache, fte);
        return rule;
 }
@@ -1726,6 +1726,8 @@ search_again_locked:
        if (err) {
                if (take_write)
                        up_write_ref_node(&ft->node);
+               else
+                       up_read_ref_node(&ft->node);
                return ERR_PTR(err);
        }
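
The rework replaces "read-lock every matching group up front" with per-group locking inside lookup_fte_locked(), validated by the version checks: search optimistically, and if a concurrent insert bumped the version while the groups were unlocked, retry with write locks. A hedged sketch of that retry loop (the helper names are hypothetical):

    static struct rule *optimistic_add(struct list_head *head, void *key)
    {
            bool take_write = false;
            struct fte *fte;
            u64 version;

    retry:
            version = read_version(head);
            fte = lookup_locked(head, key, take_write); /* per-group lock */
            if (fte)
                    return attach_rule(fte);
            if (version != read_version(head)) {
                    take_write = true;      /* raced: escalate and retry */
                    goto retry;
            }
            return insert_new(head, key);
    }
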
 
index d39b0b7011b2d9cf194180813a5a30d9fe226b6d..9f39aeca863f321fe169fb3d69ef1cff98bbaf18 100644 (file)
@@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
        add_timer(&health->timer);
 }
 
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
 {
        struct mlx5_core_health *health = &dev->priv.health;
+       unsigned long flags;
+
+       if (disable_health) {
+               spin_lock_irqsave(&health->wq_lock, flags);
+               set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+               set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+               spin_unlock_irqrestore(&health->wq_lock, flags);
+       }
 
        del_timer_sync(&health->timer);
 }
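
The new disable_health argument closes a requeue window: the DROP_NEW_* bits are set under wq_lock before the timer is killed synchronously, so no health work can be scheduled after the poll stops for good. A minimal sketch of the ordering (the names follow the hunk; the wrapper is hypothetical):

    static void stop_health(struct mlx5_core_health *health, bool disable)
    {
            unsigned long flags;

            if (disable) {
                    /* block new work first, under the same lock the
                     * requeue path takes */
                    spin_lock_irqsave(&health->wq_lock, flags);
                    set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
                    set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
                    spin_unlock_irqrestore(&health->wq_lock, flags);
            }
            del_timer_sync(&health->timer); /* then kill the timer */
    }
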
index cf3e4a6590524e1c9cdd9fb2da7ab512abbb2efc..b5e9f664fc66758d5642b18e2396503baf351415 100644 (file)
@@ -878,8 +878,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        priv->numa_node = dev_to_node(&dev->pdev->dev);
 
        priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
-       if (!priv->dbg_root)
+       if (!priv->dbg_root) {
+               dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
                return -ENOMEM;
+       }
 
        err = mlx5_pci_enable_device(dev);
        if (err) {
@@ -928,7 +930,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
        mlx5_pci_disable_device(dev);
-       debugfs_remove(priv->dbg_root);
+       debugfs_remove_recursive(priv->dbg_root);
 }
 
 static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
@@ -1286,7 +1288,7 @@ err_cleanup_once:
                mlx5_cleanup_once(dev);
 
 err_stop_poll:
-       mlx5_stop_health_poll(dev);
+       mlx5_stop_health_poll(dev, boot);
        if (mlx5_cmd_teardown_hca(dev)) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
                goto out_err;
@@ -1346,7 +1348,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        mlx5_free_irq_vectors(dev);
        if (cleanup)
                mlx5_cleanup_once(dev);
-       mlx5_stop_health_poll(dev);
+       mlx5_stop_health_poll(dev, cleanup);
        err = mlx5_cmd_teardown_hca(dev);
        if (err) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
@@ -1608,7 +1610,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
         * with the HCA, so the health poll is no longer needed.
         */
        mlx5_drain_health_wq(dev);
-       mlx5_stop_health_poll(dev);
+       mlx5_stop_health_poll(dev, false);
 
        ret = mlx5_cmd_force_teardown_hca(dev);
        if (ret) {
index c8c315eb512804033fd88723c217504007010453..68e7f8df2a6d310989a2b6ee9b2e6c488c3b17e0 100644 (file)
@@ -39,9 +39,9 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
        return (u32)wq->fbc.sz_m1 + 1;
 }
 
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
 {
-       return (u32)wq->fbc.frag_sz_m1 + 1;
+       return wq->fbc.frag_sz_m1 + 1;
 }
 
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
@@ -138,7 +138,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
                      struct mlx5_wq_ctrl *wq_ctrl)
 {
-       u32 sq_strides_offset;
+       u16 sq_strides_offset;
        u32 rq_pg_remainder;
        int err;
 
index 2bd4c3184eba21d866ea8df66699a41145e3ec10..3a1a170bb2d7f3244e7761a6acf6c1fb4a3534c3 100644 (file)
@@ -80,7 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                       void *wqc, struct mlx5_wq_cyc *wq,
                       struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
index 4327487553c5338581b200469136cc1077d028bf..3589432d164375240e013d476df8cd57e1c7c0ed 100644 (file)
@@ -337,14 +337,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
        MLXSW_SP_SB_CM(1500, 9, 0),
        MLXSW_SP_SB_CM(1500, 9, 0),
        MLXSW_SP_SB_CM(1500, 9, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
        MLXSW_SP_SB_CM(1, 0xff, 0),
 };
 
index 9044496803e627e8a992fbcac23eb8e0201e1e76..46ba0cf257c6d8cdb80b0d84e80e078618d11c36 100644 (file)
--- a/