Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 25 May 2018 16:35:11 +0000 (09:35 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 25 May 2018 16:35:11 +0000 (09:35 -0700)
Pull more arm64 fixes from Will Deacon:

 - fix application of read-only permissions to kernel section mappings

 - sanitise reported ESR values for signals delivered on a kernel
   address

 - ensure tishift GCC helpers are exported to modules

 - fix inline asm constraints for some LSE atomics

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Make sure permission updates happen for pmd/pud
  arm64: fault: Don't leak data in ESR context for user fault on kernel VA
  arm64: export tishift functions to modules
  arm64: lse: Add early clobbers to some input/output asm operands

200 files changed:
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
Documentation/userspace-api/index.rst
Documentation/userspace-api/spec_ctrl.rst [new file with mode: 0644]
MAINTAINERS
arch/alpha/Kconfig
arch/alpha/include/asm/dma-mapping.h
arch/alpha/kernel/io.c
arch/alpha/kernel/pci-noop.c
arch/alpha/kernel/pci_iommu.c
arch/arm/mm/dma-mapping.c
arch/mips/boot/compressed/uart-16550.c
arch/mips/boot/dts/xilfpga/Makefile
arch/mips/generic/Platform
arch/mips/kernel/ptrace.c
arch/mips/kernel/ptrace32.c
arch/mips/kvm/mips.c
arch/mips/mm/c-r4k.c
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/feature-fixups.h
arch/powerpc/include/asm/security_features.h
arch/powerpc/kernel/cpu_setup_power.S
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/security.c
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/lib/feature-fixups.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/pseries/setup.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/spec-ctrl.h [new file with mode: 0644]
arch/x86/include/asm/thread_info.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/process.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
drivers/ata/ahci.c
drivers/ata/libata-core.c
drivers/base/cpu.c
drivers/base/power/main.c
drivers/block/loop.c
drivers/gpu/drm/rcar-du/rcar_lvds.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
drivers/infiniband/core/umem.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hns/hns_roce_cq.c
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/i40iw/i40iw.h
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_hw.c
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/i40iw/i40iw_verbs.h
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/sw/rxe/rxe_verbs.c
drivers/infiniband/ulp/srpt/Kconfig
drivers/mfd/cros_ec_spi.c
drivers/mmc/core/block.c
drivers/mmc/host/sdhci-iproc.c
drivers/net/dsa/bcm_sf2_cfp.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/8390/ne.c
drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ipvlan/ipvlan_main.c
drivers/net/phy/micrel.c
drivers/net/tun.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/nvme/host/Kconfig
drivers/nvme/target/Kconfig
drivers/platform/chrome/cros_ec_proto.c
drivers/s390/scsi/zfcp_dbf.c
drivers/s390/scsi/zfcp_ext.h
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/Makefile
drivers/scsi/sg.c
drivers/scsi/sr_ioctl.c
drivers/staging/lustre/lnet/Kconfig
drivers/target/target_core_user.c
drivers/xen/swiotlb-xen.c
fs/affs/namei.c
fs/aio.c
fs/befs/linuxvfs.c
fs/btrfs/inode.c
fs/cachefiles/namei.c
fs/cifs/Kconfig
fs/cramfs/inode.c
fs/dcache.c
fs/ecryptfs/inode.c
fs/ext2/inode.c
fs/ext2/namei.c
fs/ext4/namei.c
fs/f2fs/namei.c
fs/jffs2/dir.c
fs/jfs/namei.c
fs/kernfs/mount.c
fs/nfsd/vfs.c
fs/nilfs2/namei.c
fs/orangefs/namei.c
fs/proc/array.c
fs/reiserfs/namei.c
fs/super.c
fs/sysfs/mount.c
fs/udf/namei.c
fs/ufs/namei.c
include/linux/bpf_verifier.h
include/linux/cpu.h
include/linux/dcache.h
include/linux/memory_hotplug.h
include/linux/mlx5/driver.h
include/linux/mm.h
include/linux/nospec.h
include/linux/sched.h
include/linux/seccomp.h
include/net/netfilter/nf_tables.h
include/net/tls.h
include/rdma/ib_umem.h
include/rdma/uverbs_ioctl.h
include/uapi/linux/netfilter/nf_conntrack_tcp.h
include/uapi/linux/prctl.h
include/uapi/linux/seccomp.h
kernel/bpf/core.c
kernel/bpf/sockmap.c
kernel/bpf/verifier.c
kernel/seccomp.c
kernel/sys.c
lib/iov_iter.c
mm/cma.c
mm/compaction.c
mm/internal.h
mm/page_alloc.c
net/9p/Kconfig
net/bridge/netfilter/ebt_stp.c
net/core/dev.c
net/core/filter.c
net/core/sock.c
net/dsa/dsa2.c
net/ipv4/fib_frontend.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/route.c
net/ipv4/tcp_output.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/ip6_tables.c
net/netfilter/core.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nft_compat.c
net/netfilter/nft_immediate.c
net/netfilter/x_tables.c
net/packet/af_packet.c
net/rds/Kconfig
net/sched/act_vlan.c
net/sched/sch_red.c
net/sched/sch_tbf.c
net/smc/smc_pnet.c
net/sunrpc/Kconfig
net/tls/tls_sw.c
security/selinux/hooks.c
sound/core/timer.c
sound/pci/hda/hda_local.h
tools/lib/bpf/libbpf.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/seccomp/seccomp_bpf.c

index 025b7cf3768dc62f3e5e68129fcc36bdf994c659..bd4975e132d3438984d2f838daa814b1397fae5f 100644 (file)
@@ -478,6 +478,7 @@ What:               /sys/devices/system/cpu/vulnerabilities
                /sys/devices/system/cpu/vulnerabilities/meltdown
                /sys/devices/system/cpu/vulnerabilities/spectre_v1
                /sys/devices/system/cpu/vulnerabilities/spectre_v2
+               /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 Date:          January 2018
 Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:   Information about CPU vulnerabilities
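
The new sysfs entry reports the Speculative Store Bypass status in the same free-form text style as the existing vulnerability files. A minimal sketch of reading it from user space (assumes a kernel that exposes the file added above; the exact mitigation string varies by architecture and configuration):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

	if (!f) {
		perror("spec_store_bypass");	/* older kernels do not provide this file */
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);		/* e.g. "Not affected" or a "Mitigation: ..." string */
	fclose(f);
	return 0;
}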
index 11fc28ecdb6d9f2ea1ce28807caf59c96c93c164..f2040d46f0956bcc8c4502404d8f6d46892f0351 100644 (file)
                        allow data leaks with this option, which is equivalent
                        to spectre_v2=off.
 
+       nospec_store_bypass_disable
+                       [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+
        noxsave         [BUGS=X86] Disables x86 extended register state save
                        and restore using xsave. The kernel will fallback to
                        enabling legacy floating-point and sse state.
                        Not specifying this option is equivalent to
                        spectre_v2=auto.
 
+       spec_store_bypass_disable=
+                       [HW] Control Speculative Store Bypass (SSB) Disable mitigation
+                       (Speculative Store Bypass vulnerability)
+
+                       Certain CPUs are vulnerable to an exploit against a
+                       common industry wide performance optimization known
+                       as "Speculative Store Bypass" in which recent stores
+                       to the same memory location may not be observed by
+                       later loads during speculative execution. The idea
+                       is that such stores are unlikely and that they can
+                       be detected prior to instruction retirement at the
+                       end of a particular speculative execution window.
+
+                       In vulnerable processors, the speculatively forwarded
+                       store can be used in a cache side channel attack, for
+                       example to read memory to which the attacker does not
+                       directly have access (e.g. inside sandboxed code).
+
+                       This parameter controls whether the Speculative Store
+                       Bypass optimization is used.
+
+                       on      - Unconditionally disable Speculative Store Bypass
+                       off     - Unconditionally enable Speculative Store Bypass
+                       auto    - Kernel detects whether the CPU model contains an
+                                 implementation of Speculative Store Bypass and
+                                 picks the most appropriate mitigation. If the
+                                 CPU is not vulnerable, "off" is selected. If the
+                                 CPU is vulnerable the default mitigation is
+                                 architecture and Kconfig dependent. See below.
+                       prctl   - Control Speculative Store Bypass per thread
+                                 via prctl. Speculative Store Bypass is enabled
+                                 for a process by default. The state of the control
+                                 is inherited on fork.
+                       seccomp - Same as "prctl" above, but all seccomp threads
+                                 will disable SSB unless they explicitly opt out.
+
+                       Not specifying this option is equivalent to
+                       spec_store_bypass_disable=auto.
+
+                       Default mitigations:
+                       X86:    If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
+
        spia_io_base=   [HW,MTD]
        spia_fio_base=
        spia_pedr=
index 42a248301615d9e69915a424104f1fc932605175..e22d8cfea687435550f72e56385da9b07a0b739e 100644 (file)
@@ -57,6 +57,13 @@ KSZ9031:
       - txd2-skew-ps : Skew control of TX data 2 pad
       - txd3-skew-ps : Skew control of TX data 3 pad
 
+    - micrel,force-master:
+        Boolean, force phy to master mode. Only set this option if the phy
+        reference clock provided at CLK125_NDO pin is used as MAC reference
+        clock because the clock jitter in slave mode is too high (errata#2).
+        Attention: The link partner must be configurable as slave, otherwise
+        no link will be established.
+
 Examples:
 
        mdio {
index 7b2eb1b7d4cab3f68b2a7569977a5fa8b7a39f23..a3233da7fa88ed94bf73aebceaf2b12a6a1169fc 100644 (file)
@@ -19,6 +19,7 @@ place where this information is gathered.
    no_new_privs
    seccomp_filter
    unshare
+   spec_ctrl
 
 .. only::  subproject and html
 
diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
new file mode 100644 (file)
index 0000000..32f3d55
--- /dev/null
+++ b/Documentation/userspace-api/spec_ctrl.rst
@@ -0,0 +1,94 @@
+===================
+Speculation Control
+===================
+
+A number of CPUs have speculation-related misfeatures which are in
+fact vulnerabilities, causing data leaks in various forms even across
+privilege domains.
+
+The kernel provides mitigation for such vulnerabilities in various
+forms. Some of these mitigations are compile-time configurable and some
+can be supplied on the kernel command line.
+
+There is also a class of mitigations which are very expensive, but they can
+be restricted to a certain set of processes or tasks in controlled
+environments. The mechanism to control these mitigations is via
+:manpage:`prctl(2)`.
+
+There are two prctl options which are related to this:
+
+ * PR_GET_SPECULATION_CTRL
+
+ * PR_SET_SPECULATION_CTRL
+
+PR_GET_SPECULATION_CTRL
+-----------------------
+
+PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
+which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
+the following meaning:
+
+==== ===================== ===================================================
+Bit  Define                Description
+==== ===================== ===================================================
+0    PR_SPEC_PRCTL         Mitigation can be controlled per task by
+                           PR_SET_SPECULATION_CTRL.
+1    PR_SPEC_ENABLE        The speculation feature is enabled, mitigation is
+                           disabled.
+2    PR_SPEC_DISABLE       The speculation feature is disabled, mitigation is
+                           enabled.
+3    PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
+                           subsequent prctl(..., PR_SPEC_ENABLE) will fail.
+==== ===================== ===================================================
+
+If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
+available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
+misfeature will fail.
+
+PR_SET_SPECULATION_CTRL
+-----------------------
+
+PR_SET_SPECULATION_CTRL allows per-task control of the speculation
+misfeature selected by arg2 of :manpage:`prctl(2)`. arg3 is used to hand
+in the control value, i.e. either PR_SPEC_ENABLE, PR_SPEC_DISABLE or
+PR_SPEC_FORCE_DISABLE.
+
+Common error codes
+------------------
+======= =================================================================
+Value   Meaning
+======= =================================================================
+EINVAL  The prctl is not implemented by the architecture or unused
+        prctl(2) arguments are not 0.
+
+ENODEV  arg2 selects a speculation misfeature which is not supported.
+======= =================================================================
+
+PR_SET_SPECULATION_CTRL error codes
+-----------------------------------
+======= =================================================================
+Value   Meaning
+======= =================================================================
+0       Success
+
+ERANGE  arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
+        PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
+
+ENXIO   Control of the selected speculation misfeature is not possible.
+        See PR_GET_SPECULATION_CTRL.
+
+EPERM   Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
+        tried to enable it again.
+======= =================================================================
+
+Speculation misfeature controls
+-------------------------------
+- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
+
+  Invocations:
+   * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
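
A minimal user-space sketch of the prctl interface documented above (assumes <sys/prctl.h> and <linux/prctl.h> headers that carry the PR_SPEC_* and PR_*_SPECULATION_CTRL definitions; error handling kept short):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Query the current Speculative Store Bypass state of this task. */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		fprintf(stderr, "PR_GET_SPECULATION_CTRL: %s\n", strerror(errno));
		return 1;
	}
	if (!(state & PR_SPEC_PRCTL)) {
		printf("per-task control not available (state %#x)\n", state);
		return 0;
	}

	/* Disable the misfeature, i.e. enable the mitigation, for this task.
	 * The setting is inherited across fork() as described above. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0) != 0) {
		fprintf(stderr, "PR_SET_SPECULATION_CTRL: %s\n", strerror(errno));
		return 1;
	}
	printf("Speculative Store Bypass disabled for this task\n");
	return 0;
}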
index 078fd80f664fb652b5de36a45920f9e31d56e0a6..a9ca122957e977bd80e24f2bd2ca71839066542e 100644 (file)
@@ -5388,7 +5388,6 @@ S:        Maintained
 F:     drivers/iommu/exynos-iommu.c
 
 EZchip NPS platform support
-M:     Elad Kanfi <eladkan@mellanox.com>
 M:     Vineet Gupta <vgupta@synopsys.com>
 S:     Supported
 F:     arch/arc/plat-eznps
@@ -9021,7 +9020,6 @@ Q:        http://patchwork.ozlabs.org/project/netdev/list/
 F:     drivers/net/ethernet/mellanox/mlx5/core/en_*
 
 MELLANOX ETHERNET INNOVA DRIVER
-M:     Ilan Tayari <ilant@mellanox.com>
 R:     Boris Pismenny <borisp@mellanox.com>
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -9031,7 +9029,6 @@ F:        drivers/net/ethernet/mellanox/mlx5/core/fpga/*
 F:     include/linux/mlx5/mlx5_ifc_fpga.h
 
 MELLANOX ETHERNET INNOVA IPSEC DRIVER
-M:     Ilan Tayari <ilant@mellanox.com>
 R:     Boris Pismenny <borisp@mellanox.com>
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -9087,7 +9084,6 @@ F:        include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX5 core VPI driver
 M:     Saeed Mahameed <saeedm@mellanox.com>
-M:     Matan Barak <matanb@mellanox.com>
 M:     Leon Romanovsky <leonro@mellanox.com>
 L:     netdev@vger.kernel.org
 L:     linux-rdma@vger.kernel.org
@@ -9098,7 +9094,6 @@ F:        drivers/net/ethernet/mellanox/mlx5/core/
 F:     include/linux/mlx5/
 
 MELLANOX MLX5 IB driver
-M:     Matan Barak <matanb@mellanox.com>
 M:     Leon Romanovsky <leonro@mellanox.com>
 L:     linux-rdma@vger.kernel.org
 W:     http://www.mellanox.com
@@ -9832,7 +9827,6 @@ F:        net/netfilter/xt_CONNSECMARK.c
 F:     net/netfilter/xt_SECMARK.c
 
 NETWORKING [TLS]
-M:     Ilya Lesokhin <ilyal@mellanox.com>
 M:     Aviad Yehezkel <aviadye@mellanox.com>
 M:     Dave Watson <davejwatson@fb.com>
 L:     netdev@vger.kernel.org
index b2022885ced8ab05f9f1837b2c6cfde62f37a249..f19dc31288c83e11fec0e8850835b2fc53c50fc4 100644 (file)
@@ -211,6 +211,7 @@ config ALPHA_EIGER
 config ALPHA_JENSEN
        bool "Jensen"
        depends on BROKEN
+       select DMA_DIRECT_OPS
        help
          DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
          of the first-generation Alpha systems. A number of these systems
index b78f61f20796b2ea20e6b5b17777f44193783a5d..8beeafd4f68e45c8e7e1a6a006719f549cf70ca3 100644 (file)
@@ -2,11 +2,15 @@
 #ifndef _ALPHA_DMA_MAPPING_H
 #define _ALPHA_DMA_MAPPING_H
 
-extern const struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops alpha_pci_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-       return dma_ops;
+#ifdef CONFIG_ALPHA_JENSEN
+       return &dma_direct_ops;
+#else
+       return &alpha_pci_ops;
+#endif
 }
 
 #endif /* _ALPHA_DMA_MAPPING_H */
index 3e3d49c254c52ef09d80c1b964530c9289d00ec7..c025a3e5e3578beb3ecaa521333e3b1d97e98760 100644 (file)
@@ -37,20 +37,20 @@ unsigned int ioread32(void __iomem *addr)
 
 void iowrite8(u8 b, void __iomem *addr)
 {
-       IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
        mb();
+       IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
 }
 
 void iowrite16(u16 b, void __iomem *addr)
 {
-       IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
        mb();
+       IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
 }
 
 void iowrite32(u32 b, void __iomem *addr)
 {
-       IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
        mb();
+       IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
 }
 
 EXPORT_SYMBOL(ioread8);
@@ -176,26 +176,26 @@ u64 readq(const volatile void __iomem *addr)
 
 void writeb(u8 b, volatile void __iomem *addr)
 {
-       __raw_writeb(b, addr);
        mb();
+       __raw_writeb(b, addr);
 }
 
 void writew(u16 b, volatile void __iomem *addr)
 {
-       __raw_writew(b, addr);
        mb();
+       __raw_writew(b, addr);
 }
 
 void writel(u32 b, volatile void __iomem *addr)
 {
-       __raw_writel(b, addr);
        mb();
+       __raw_writel(b, addr);
 }
 
 void writeq(u64 b, volatile void __iomem *addr)
 {
-       __raw_writeq(b, addr);
        mb();
+       __raw_writeq(b, addr);
 }
 
 EXPORT_SYMBOL(readb);
index b6ebb65127a80e8bfc3dfce3580655bcc7d0a540..c7c5879869d35092d45fcb9c35a1245f916fbdf4 100644 (file)
@@ -102,36 +102,3 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
        else
                return -ENODEV;
 }
-
-static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
-                                      dma_addr_t *dma_handle, gfp_t gfp,
-                                      unsigned long attrs)
-{
-       void *ret;
-
-       if (!dev || *dev->dma_mask >= 0xffffffffUL)
-               gfp &= ~GFP_DMA;
-       ret = (void *)__get_free_pages(gfp, get_order(size));
-       if (ret) {
-               memset(ret, 0, size);
-               *dma_handle = virt_to_phys(ret);
-       }
-       return ret;
-}
-
-static int alpha_noop_supported(struct device *dev, u64 mask)
-{
-       return mask < 0x00ffffffUL ? 0 : 1;
-}
-
-const struct dma_map_ops alpha_noop_ops = {
-       .alloc                  = alpha_noop_alloc_coherent,
-       .free                   = dma_noop_free_coherent,
-       .map_page               = dma_noop_map_page,
-       .map_sg                 = dma_noop_map_sg,
-       .mapping_error          = dma_noop_mapping_error,
-       .dma_supported          = alpha_noop_supported,
-};
-
-const struct dma_map_ops *dma_ops = &alpha_noop_ops;
-EXPORT_SYMBOL(dma_ops);
index 83b34b9188ea192517ce72a0d9d260abb5b2e2ac..6923b0d9c1e195d1751d19e4335271b40d3c0226 100644 (file)
@@ -950,6 +950,4 @@ const struct dma_map_ops alpha_pci_ops = {
        .mapping_error          = alpha_pci_mapping_error,
        .dma_supported          = alpha_pci_supported,
 };
-
-const struct dma_map_ops *dma_ops = &alpha_pci_ops;
-EXPORT_SYMBOL(dma_ops);
+EXPORT_SYMBOL(alpha_pci_ops);
index 8c398fedbbb6af30b461fb09cabd89b04cc09648..ada8eb206a90b6824427d24c5019100895d225f2 100644 (file)
@@ -466,12 +466,6 @@ void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
 void __init dma_contiguous_remap(void)
 {
        int i;
-
-       if (!dma_mmu_remap_num)
-               return;
-
-       /* call flush_cache_all() since CMA area would be large enough */
-       flush_cache_all();
        for (i = 0; i < dma_mmu_remap_num; i++) {
                phys_addr_t start = dma_mmu_remap[i].base;
                phys_addr_t end = start + dma_mmu_remap[i].size;
@@ -504,15 +498,7 @@ void __init dma_contiguous_remap(void)
                flush_tlb_kernel_range(__phys_to_virt(start),
                                       __phys_to_virt(end));
 
-               /*
-                * All the memory in CMA region will be on ZONE_MOVABLE.
-                * If that zone is considered as highmem, the memory in CMA
-                * region is also considered as highmem even if it's
-                * physical address belong to lowmem. In this case,
-                * re-mapping isn't required.
-                */
-               if (!is_highmem_idx(ZONE_MOVABLE))
-                       iotable_init(&map, 1);
+               iotable_init(&map, 1);
        }
 }
 
index b3043c08f7694244604af5a09065800e7a97f587..aee8d7b8f09143fd8e4ce30a9552bf827ec357e4 100644 (file)
@@ -18,9 +18,9 @@
 #define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
 #endif
 
-#if defined(CONFIG_MACH_JZ4740) || defined(CONFIG_MACH_JZ4780)
-#include <asm/mach-jz4740/base.h>
-#define PORT(offset) (CKSEG1ADDR(JZ4740_UART0_BASE_ADDR) + (4 * offset))
+#ifdef CONFIG_MACH_INGENIC
+#define INGENIC_UART0_BASE_ADDR        0x10030000
+#define PORT(offset) (CKSEG1ADDR(INGENIC_UART0_BASE_ADDR) + (4 * offset))
 #endif
 
 #ifdef CONFIG_CPU_XLR
index 9987e0e378c50c6f19eb0457eae914f827f9688b..69ca00590b8de6cbf1b3fb3397e8e4116c690ebf 100644 (file)
@@ -1,4 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
 dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA)    += nexys4ddr.dtb
-
-obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
index b51432dd10b6fa3789ff01b1d0b902ac200fe89d..0dd0d5d460a5fc7988b03f856f47563589d7e995 100644 (file)
@@ -16,3 +16,4 @@ all-$(CONFIG_MIPS_GENERIC)    := vmlinux.gz.itb
 its-y                                  := vmlinux.its.S
 its-$(CONFIG_FIT_IMAGE_FDT_BOSTON)     += board-boston.its.S
 its-$(CONFIG_FIT_IMAGE_FDT_NI169445)   += board-ni169445.its.S
+its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA)    += board-xilfpga.its.S
index 0b23b1ad99e65f1e21d1810340f9dd306483b8d3..8d098b9f395c13746a4f0855f54a6303bfc21098 100644 (file)
@@ -463,7 +463,7 @@ static int fpr_get_msa(struct task_struct *target,
 /*
  * Copy the floating-point context to the supplied NT_PRFPREG buffer.
  * Choose the appropriate helper for general registers, and then copy
- * the FCSR register separately.
+ * the FCSR and FIR registers separately.
  */
 static int fpr_get(struct task_struct *target,
                   const struct user_regset *regset,
@@ -471,6 +471,7 @@ static int fpr_get(struct task_struct *target,
                   void *kbuf, void __user *ubuf)
 {
        const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+       const int fir_pos = fcr31_pos + sizeof(u32);
        int err;
 
        if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
@@ -483,6 +484,12 @@ static int fpr_get(struct task_struct *target,
        err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fpu.fcr31,
                                  fcr31_pos, fcr31_pos + sizeof(u32));
+       if (err)
+               return err;
+
+       err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 &boot_cpu_data.fpu_id,
+                                 fir_pos, fir_pos + sizeof(u32));
 
        return err;
 }
@@ -531,7 +538,8 @@ static int fpr_set_msa(struct task_struct *target,
 /*
  * Copy the supplied NT_PRFPREG buffer to the floating-point context.
  * Choose the appropriate helper for general registers, and then copy
- * the FCSR register separately.
+ * the FCSR register separately.  Ignore the incoming FIR register
+ * contents though, as the register is read-only.
  *
  * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
  * which is supposed to have been guaranteed by the kernel before
@@ -545,6 +553,7 @@ static int fpr_set(struct task_struct *target,
                   const void *kbuf, const void __user *ubuf)
 {
        const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+       const int fir_pos = fcr31_pos + sizeof(u32);
        u32 fcr31;
        int err;
 
@@ -572,6 +581,11 @@ static int fpr_set(struct task_struct *target,
                ptrace_setfcr31(target, fcr31);
        }
 
+       if (count > 0)
+               err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+                                               fir_pos,
+                                               fir_pos + sizeof(u32));
+
        return err;
 }
 
@@ -793,7 +807,7 @@ long arch_ptrace(struct task_struct *child, long request,
                        fregs = get_fpu_regs(child);
 
 #ifdef CONFIG_32BIT
-                       if (test_thread_flag(TIF_32BIT_FPREGS)) {
+                       if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
                                /*
                                 * The odd registers are actually the high
                                 * order bits of the values stored in the even
@@ -888,7 +902,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
                        init_fp_ctx(child);
 #ifdef CONFIG_32BIT
-                       if (test_thread_flag(TIF_32BIT_FPREGS)) {
+                       if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
                                /*
                                 * The odd registers are actually the high
                                 * order bits of the values stored in the even
index 2b9260f92ccd3019fe3d733c96a631faa7f59e2b..656a137c1fe2c4dcaa9fb410d5787c9e9b39ce1f 100644 (file)
@@ -99,7 +99,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                                break;
                        }
                        fregs = get_fpu_regs(child);
-                       if (test_thread_flag(TIF_32BIT_FPREGS)) {
+                       if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
                                /*
                                 * The odd registers are actually the high
                                 * order bits of the values stored in the even
@@ -212,7 +212,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                                       sizeof(child->thread.fpu));
                                child->thread.fpu.fcr31 = 0;
                        }
-                       if (test_thread_flag(TIF_32BIT_FPREGS)) {
+                       if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
                                /*
                                 * The odd registers are actually the high
                                 * order bits of the values stored in the even
index 2549fdd27ee16842c1ce7dd2bd422f27a2d3a769..0f725e9cee8f69230ca7ddff5f6023c30294395c 100644 (file)
@@ -45,7 +45,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "cache",        VCPU_STAT(cache_exits),        KVM_STAT_VCPU },
        { "signal",       VCPU_STAT(signal_exits),       KVM_STAT_VCPU },
        { "interrupt",    VCPU_STAT(int_exits),          KVM_STAT_VCPU },
-       { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+       { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
        { "tlbmod",       VCPU_STAT(tlbmod_exits),       KVM_STAT_VCPU },
        { "tlbmiss_ld",   VCPU_STAT(tlbmiss_ld_exits),   KVM_STAT_VCPU },
        { "tlbmiss_st",   VCPU_STAT(tlbmiss_st_exits),   KVM_STAT_VCPU },
index 6f534b2099717da8c2d7be70bfa035a05ed5aede..e12dfa48b478dd3ec51369236bb84040c044bd82 100644 (file)
@@ -851,9 +851,12 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
        /*
         * Either no secondary cache or the available caches don't have the
         * subset property so we have to flush the primary caches
-        * explicitly
+        * explicitly.
+        * If we would need IPI to perform an INDEX-type operation, then
+        * we have to use the HIT-type alternative as IPI cannot be used
+        * here due to interrupts possibly being disabled.
         */
-       if (size >= dcache_size) {
+       if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
@@ -890,7 +893,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
                return;
        }
 
-       if (size >= dcache_size) {
+       if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
index 471b2274fbeba815f04c1957d975f0f1a74bcdbe..c40b4380951cb45518656a0e1030280d9253d852 100644 (file)
  */
 #define EX_R3          EX_DAR
 
+#define STF_ENTRY_BARRIER_SLOT                                         \
+       STF_ENTRY_BARRIER_FIXUP_SECTION;                                \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define STF_EXIT_BARRIER_SLOT                                          \
+       STF_EXIT_BARRIER_FIXUP_SECTION;                                 \
+       nop;                                                            \
+       nop;                                                            \
+       nop;                                                            \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+/*
+ * r10 must be free to use, r13 must be paca
+ */
+#define INTERRUPT_TO_KERNEL                                            \
+       STF_ENTRY_BARRIER_SLOT
+
 /*
  * Macros for annotating the expected destination of (h)rfid
  *
        rfid
 
 #define RFI_TO_USER                                                    \
+       STF_EXIT_BARRIER_SLOT;                                          \
        RFI_FLUSH_SLOT;                                                 \
        rfid;                                                           \
        b       rfi_flush_fallback
 
 #define RFI_TO_USER_OR_KERNEL                                          \
+       STF_EXIT_BARRIER_SLOT;                                          \
        RFI_FLUSH_SLOT;                                                 \
        rfid;                                                           \
        b       rfi_flush_fallback
 
 #define RFI_TO_GUEST                                                   \
+       STF_EXIT_BARRIER_SLOT;                                          \
        RFI_FLUSH_SLOT;                                                 \
        rfid;                                                           \
        b       rfi_flush_fallback
        hrfid
 
 #define HRFI_TO_USER                                                   \
+       STF_EXIT_BARRIER_SLOT;                                          \
        RFI_FLUSH_SLOT;                                                 \
        hrfid;                                                          \
        b       hrfi_flush_fallback
 
 #define HRFI_TO_USER_OR_KERNEL                                         \
+       STF_EXIT_BARRIER_SLOT;                                          \
        RFI_FLUSH_SLOT;                                                 \
        hrfid;                                                          \
        b       hrfi_flush_fallback
 
 #define HRFI_TO_GUEST                                                  \
+       STF_EXIT_BARRIER_SLOT;                                          \
        RFI_FLUSH_SLOT;                                                 \
        hrfid;                                                          \
        b       hrfi_flush_fallback
 
 #define HRFI_TO_UNKNOWN                                                        \
+       STF_EXIT_BARRIER_SLOT;                                          \
        RFI_FLUSH_SLOT;                                                 \
        hrfid;                                                          \
        b       hrfi_flush_fallback
@@ -254,6 +282,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define __EXCEPTION_PROLOG_1_PRE(area)                                 \
        OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);         \
        OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);          \
+       INTERRUPT_TO_KERNEL;                                            \
        SAVE_CTR(r10, area);                                            \
        mfcr    r9;
 
index 1e82eb3caabd19c69289957da188b563d0bcd0d6..a9b64df34e2a365a6916c89786d3398f4311413b 100644 (file)
@@ -187,6 +187,22 @@ label##3:                                          \
        FTR_ENTRY_OFFSET label##1b-label##3b;           \
        .popsection;
 
+#define STF_ENTRY_BARRIER_FIXUP_SECTION                        \
+953:                                                   \
+       .pushsection __stf_entry_barrier_fixup,"a";     \
+       .align 2;                                       \
+954:                                                   \
+       FTR_ENTRY_OFFSET 953b-954b;                     \
+       .popsection;
+
+#define STF_EXIT_BARRIER_FIXUP_SECTION                 \
+955:                                                   \
+       .pushsection __stf_exit_barrier_fixup,"a";      \
+       .align 2;                                       \
+956:                                                   \
+       FTR_ENTRY_OFFSET 955b-956b;                     \
+       .popsection;
+
 #define RFI_FLUSH_FIXUP_SECTION                                \
 951:                                                   \
        .pushsection __rfi_flush_fixup,"a";             \
@@ -199,6 +215,9 @@ label##3:                                           \
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 
+extern long stf_barrier_fallback;
+extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
+extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
 
 void apply_feature_fixups(void);
index fa4d2e1cf772c883ec4bf77822c660bfe167711a..44989b22383c24b92caaf3dbb3d9831c79cd967f 100644 (file)
 extern unsigned long powerpc_security_features;
 extern bool rfi_flush;
 
+/* These are bit flags */
+enum stf_barrier_type {
+       STF_BARRIER_NONE        = 0x1,
+       STF_BARRIER_FALLBACK    = 0x2,
+       STF_BARRIER_EIEIO       = 0x4,
+       STF_BARRIER_SYNC_ORI    = 0x8,
+};
+
+void setup_stf_barrier(void);
+void do_stf_barrier_fixups(enum stf_barrier_type types);
+
 static inline void security_ftr_set(unsigned long feature)
 {
        powerpc_security_features |= feature;
index 3f30c994e9316a1476086334de4fa84edba9de0e..458b928dbd8447008a7f6c83ff9db27d06d7508d 100644 (file)
@@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7)
        beqlr
        li      r0,0
        mtspr   SPRN_LPID,r0
+       mtspr   SPRN_PCR,r0
        mfspr   r3,SPRN_LPCR
        li      r4,(LPCR_LPES1 >> LPCR_LPES_SH)
        bl      __init_LPCR_ISA206
@@ -41,6 +42,7 @@ _GLOBAL(__restore_cpu_power7)
        beqlr
        li      r0,0
        mtspr   SPRN_LPID,r0
+       mtspr   SPRN_PCR,r0
        mfspr   r3,SPRN_LPCR
        li      r4,(LPCR_LPES1 >> LPCR_LPES_SH)
        bl      __init_LPCR_ISA206
@@ -57,6 +59,7 @@ _GLOBAL(__setup_cpu_power8)
        beqlr
        li      r0,0
        mtspr   SPRN_LPID,r0
+       mtspr   SPRN_PCR,r0
        mfspr   r3,SPRN_LPCR
        ori     r3, r3, LPCR_PECEDH
        li      r4,0 /* LPES = 0 */
@@ -78,6 +81,7 @@ _GLOBAL(__restore_cpu_power8)
        beqlr
        li      r0,0
        mtspr   SPRN_LPID,r0
+       mtspr   SPRN_PCR,r0
        mfspr   r3,SPRN_LPCR
        ori     r3, r3, LPCR_PECEDH
        li      r4,0 /* LPES = 0 */
@@ -99,6 +103,7 @@ _GLOBAL(__setup_cpu_power9)
        mtspr   SPRN_PSSCR,r0
        mtspr   SPRN_LPID,r0
        mtspr   SPRN_PID,r0
+       mtspr   SPRN_PCR,r0
        mfspr   r3,SPRN_LPCR
        LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE  | LPCR_HEIC)
        or      r3, r3, r4
@@ -123,6 +128,7 @@ _GLOBAL(__restore_cpu_power9)
        mtspr   SPRN_PSSCR,r0
        mtspr   SPRN_LPID,r0
        mtspr   SPRN_PID,r0
+       mtspr   SPRN_PCR,r0
        mfspr   r3,SPRN_LPCR
        LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
        or      r3, r3, r4
index 8ab51f6ca03af52c4d9cff18e552b456022e459e..c904477abaf38d33c63ee5d0822714f1d36f9b85 100644 (file)
@@ -101,6 +101,7 @@ static void __restore_cpu_cpufeatures(void)
        if (hv_mode) {
                mtspr(SPRN_LPID, 0);
                mtspr(SPRN_HFSCR, system_registers.hfscr);
+               mtspr(SPRN_PCR, 0);
        }
        mtspr(SPRN_FSCR, system_registers.fscr);
 
index ae6a849db60b1ae8440abcc776b8a5b59e57a641..f283958129f27165b1f1c72219479652941268a1 100644 (file)
@@ -885,7 +885,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
 #endif
 
 
-EXC_REAL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
+EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
 EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
 TRAMP_KVM(PACA_EXGEN, 0x900)
 EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
@@ -961,6 +961,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
        mtctr   r13;                                                    \
        GET_PACA(r13);                                                  \
        std     r10,PACA_EXGEN+EX_R10(r13);                             \
+       INTERRUPT_TO_KERNEL;                                            \
        KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
        HMT_MEDIUM;                                                     \
        mfctr   r9;
@@ -969,7 +970,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
 #define SYSCALL_KVMTEST                                                        \
        HMT_MEDIUM;                                                     \
        mr      r9,r13;                                                 \
-       GET_PACA(r13);
+       GET_PACA(r13);                                                  \
+       INTERRUPT_TO_KERNEL;
 #endif
        
 #define LOAD_SYSCALL_HANDLER(reg)                                      \
@@ -1507,6 +1509,19 @@ masked_##_H##interrupt:                                  \
        b       .;                                      \
        MASKED_DEC_HANDLER(_H)
 
+TRAMP_REAL_BEGIN(stf_barrier_fallback)
+       std     r9,PACA_EXRFI+EX_R9(r13)
+       std     r10,PACA_EXRFI+EX_R10(r13)
+       sync
+       ld      r9,PACA_EXRFI+EX_R9(r13)
+       ld      r10,PACA_EXRFI+EX_R10(r13)
+       ori     31,31,0
+       .rept 14
+       b       1f
+1:
+       .endr
+       blr
+
 TRAMP_REAL_BEGIN(rfi_flush_fallback)
        SET_SCRATCH0(r13);
        GET_PACA(r13);
index bab5a27ea8056c8317340716d33ff084e08d3b2b..b98a722da9151bd41351de9448b21bfbc417cd6b 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/device.h>
 #include <linux/seq_buf.h>
 
+#include <asm/debugfs.h>
 #include <asm/security_features.h>
 
 
@@ -86,3 +87,151 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
 
        return s.len;
 }
+
+/*
+ * Store-forwarding barrier support.
+ */
+
+static enum stf_barrier_type stf_enabled_flush_types;
+static bool no_stf_barrier;
+bool stf_barrier;
+
+static int __init handle_no_stf_barrier(char *p)
+{
+       pr_info("stf-barrier: disabled on command line.");
+       no_stf_barrier = true;
+       return 0;
+}
+
+early_param("no_stf_barrier", handle_no_stf_barrier);
+
+/* This is the generic flag used by other architectures */
+static int __init handle_ssbd(char *p)
+{
+       if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
+               /* Until firmware tells us, we have the barrier with auto */
+               return 0;
+       } else if (strncmp(p, "off", 3) == 0) {
+               handle_no_stf_barrier(NULL);
+               return 0;
+       } else
+               return 1;
+
+       return 0;
+}
+early_param("spec_store_bypass_disable", handle_ssbd);
+
+/* This is the generic flag used by other architectures */
+static int __init handle_no_ssbd(char *p)
+{
+       handle_no_stf_barrier(NULL);
+       return 0;
+}
+early_param("nospec_store_bypass_disable", handle_no_ssbd);
+
+static void stf_barrier_enable(bool enable)
+{
+       if (enable)
+               do_stf_barrier_fixups(stf_enabled_flush_types);
+       else
+               do_stf_barrier_fixups(STF_BARRIER_NONE);
+
+       stf_barrier = enable;
+}
+
+void setup_stf_barrier(void)
+{
+       enum stf_barrier_type type;
+       bool enable, hv;
+
+       hv = cpu_has_feature(CPU_FTR_HVMODE);
+
+       /* Default to fallback in case fw-features are not available */
+       if (cpu_has_feature(CPU_FTR_ARCH_300))
+               type = STF_BARRIER_EIEIO;
+       else if (cpu_has_feature(CPU_FTR_ARCH_207S))
+               type = STF_BARRIER_SYNC_ORI;
+       else if (cpu_has_feature(CPU_FTR_ARCH_206))
+               type = STF_BARRIER_FALLBACK;
+       else
+               type = STF_BARRIER_NONE;
+
+       enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+               (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
+                (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
+
+       if (type == STF_BARRIER_FALLBACK) {
+               pr_info("stf-barrier: fallback barrier available\n");
+       } else if (type == STF_BARRIER_SYNC_ORI) {
+               pr_info("stf-barrier: hwsync barrier available\n");
+       } else if (type == STF_BARRIER_EIEIO) {
+               pr_info("stf-barrier: eieio barrier available\n");
+       }
+
+       stf_enabled_flush_types = type;
+
+       if (!no_stf_barrier)
+               stf_barrier_enable(enable);
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
+               const char *type;
+               switch (stf_enabled_flush_types) {
+               case STF_BARRIER_EIEIO:
+                       type = "eieio";
+                       break;
+               case STF_BARRIER_SYNC_ORI:
+                       type = "hwsync";
+                       break;
+               case STF_BARRIER_FALLBACK:
+                       type = "fallback";
+                       break;
+               default:
+                       type = "unknown";
+               }
+               return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
+       }
+
+       if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
+           !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
+               return sprintf(buf, "Not affected\n");
+
+       return sprintf(buf, "Vulnerable\n");
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int stf_barrier_set(void *data, u64 val)
+{
+       bool enable;
+
+       if (val == 1)
+               enable = true;
+       else if (val == 0)
+               enable = false;
+       else
+               return -EINVAL;
+
+       /* Only do anything if we're changing state */
+       if (enable != stf_barrier)
+               stf_barrier_enable(enable);
+
+       return 0;
+}
+
+static int stf_barrier_get(void *data, u64 *val)
+{
+       *val = stf_barrier ? 1 : 0;
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
+
+static __init int stf_barrier_debugfs_init(void)
+{
+       debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
+       return 0;
+}
+device_initcall(stf_barrier_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
index c8af90ff49f0526630ffb938c9c5d48cd0279933..b8d82678f8b41b04027df27642f15a01be7321e2 100644 (file)
@@ -133,6 +133,20 @@ SECTIONS
        RO_DATA(PAGE_SIZE)
 
 #ifdef CONFIG_PPC64
+       . = ALIGN(8);
+       __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
+               __start___stf_entry_barrier_fixup = .;
+               *(__stf_entry_barrier_fixup)
+               __stop___stf_entry_barrier_fixup = .;
+       }
+
+       . = ALIGN(8);
+       __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
+               __start___stf_exit_barrier_fixup = .;
+               *(__stf_exit_barrier_fixup)
+               __stop___stf_exit_barrier_fixup = .;
+       }
+
        . = ALIGN(8);
        __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
                __start___rfi_flush_fixup = .;
index 288fe4f0db4ea326321915e1904da3771380978d..e1bcdc32a851cf6439e9cba8aa47d7cb2bf31475 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/page.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/security_features.h>
 #include <asm/firmware.h>
 
 struct fixup_entry {
@@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 }
 
 #ifdef CONFIG_PPC_BOOK3S_64
+void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
+{
+       unsigned int instrs[3], *dest;
+       long *start, *end;
+       int i;
+
+       start = PTRRELOC(&__start___stf_entry_barrier_fixup),
+       end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
+
+       instrs[0] = 0x60000000; /* nop */
+       instrs[1] = 0x60000000; /* nop */
+       instrs[2] = 0x60000000; /* nop */
+
+       i = 0;
+       if (types & STF_BARRIER_FALLBACK) {
+               instrs[i++] = 0x7d4802a6; /* mflr r10           */
+               instrs[i++] = 0x60000000; /* branch patched below */
+               instrs[i++] = 0x7d4803a6; /* mtlr r10           */
+       } else if (types & STF_BARRIER_EIEIO) {
+               instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
+       } else if (types & STF_BARRIER_SYNC_ORI) {
+               instrs[i++] = 0x7c0004ac; /* hwsync             */
+               instrs[i++] = 0xe94d0000; /* ld r10,0(r13)      */
+               instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+       }
+
+       for (i = 0; start < end; start++, i++) {
+               dest = (void *)start + *start;
+
+               pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+               patch_instruction(dest, instrs[0]);
+
+               if (types & STF_BARRIER_FALLBACK)
+                       patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
+                                    BRANCH_SET_LINK);
+               else
+                       patch_instruction(dest + 1, instrs[1]);
+
+               patch_instruction(dest + 2, instrs[2]);
+       }
+
+       printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
+               (types == STF_BARRIER_NONE)                  ? "no" :
+               (types == STF_BARRIER_FALLBACK)              ? "fallback" :
+               (types == STF_BARRIER_EIEIO)                 ? "eieio" :
+               (types == (STF_BARRIER_SYNC_ORI))            ? "hwsync"
+                                                          : "unknown");
+}
+
+void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
+{
+       unsigned int instrs[6], *dest;
+       long *start, *end;
+       int i;
+
+       start = PTRRELOC(&__start___stf_exit_barrier_fixup),
+       end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
+
+       instrs[0] = 0x60000000; /* nop */
+       instrs[1] = 0x60000000; /* nop */
+       instrs[2] = 0x60000000; /* nop */
+       instrs[3] = 0x60000000; /* nop */
+       instrs[4] = 0x60000000; /* nop */
+       instrs[5] = 0x60000000; /* nop */
+
+       i = 0;
+       if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
+               if (cpu_has_feature(CPU_FTR_HVMODE)) {
+                       instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
+                       instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
+               } else {
+                       instrs[i++] = 0x7db243a6; /* mtsprg 2,r13       */
+                       instrs[i++] = 0x7db142a6; /* mfsprg r13,1    */
+               }
+               instrs[i++] = 0x7c0004ac; /* hwsync             */
+               instrs[i++] = 0xe9ad0000; /* ld r13,0(r13)      */
+               instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+               if (cpu_has_feature(CPU_FTR_HVMODE)) {
+                       instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
+               } else {
+                       instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
+               }
+       } else if (types & STF_BARRIER_EIEIO) {
+               instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
+       }
+
+       for (i = 0; start < end; start++, i++) {
+               dest = (void *)start + *start;
+
+               pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+               patch_instruction(dest, instrs[0]);
+               patch_instruction(dest + 1, instrs[1]);
+               patch_instruction(dest + 2, instrs[2]);
+               patch_instruction(dest + 3, instrs[3]);
+               patch_instruction(dest + 4, instrs[4]);
+               patch_instruction(dest + 5, instrs[5]);
+       }
+       printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
+               (types == STF_BARRIER_NONE)                  ? "no" :
+               (types == STF_BARRIER_FALLBACK)              ? "fallback" :
+               (types == STF_BARRIER_EIEIO)                 ? "eieio" :
+               (types == (STF_BARRIER_SYNC_ORI))            ? "hwsync"
+                                                          : "unknown");
+}
+
+
+void do_stf_barrier_fixups(enum stf_barrier_type types)
+{
+       do_stf_entry_barrier_fixups(types);
+       do_stf_exit_barrier_fixups(types);
+}
+
 void do_rfi_flush_fixups(enum l1d_flush_type types)
 {
        unsigned int instrs[3], *dest;
index ef8c9ce53a616910d264f4875b9ec72311a110a8..a6648ec99ca76ce30e9e34cb70e0bde63fe93fbc 100644 (file)
@@ -131,6 +131,7 @@ static void __init pnv_setup_arch(void)
        set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
 
        pnv_setup_rfi_flush();
+       setup_stf_barrier();
 
        /* Initialize SMP */
        pnv_smp_init();
index b55ad4286dc7f81f0c9d2e7d130860757773c3b3..fdb32e056ef42a66553387f127e20d19c2e7a666 100644 (file)
@@ -710,6 +710,7 @@ static void __init pSeries_setup_arch(void)
        fwnmi_init();
 
        pseries_setup_rfi_flush();
+       setup_stf_barrier();
 
        /* By default, only probe PCI (can be overridden by rtas_pci) */
        pci_add_flags(PCI_PROBE_ONLY);
index 578793e97431da25b0d5f3cbc20ae4c0655db075..fb00a2fca9901eb02ea7b730ddbac957e8ecc947 100644 (file)
 #define X86_FEATURE_CAT_L2             ( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3             ( 7*32+ 6) /* Code and Data Prioritization L3 */
 #define X86_FEATURE_INVPCID_SINGLE     ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
-
 #define X86_FEATURE_HW_PSTATE          ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK      ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME                        ( 7*32+10) /* AMD Secure Memory Encryption */
 #define X86_FEATURE_RETPOLINE_AMD      ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN         ( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2             ( 7*32+15) /* Code and Data Prioritization L2 */
-
+#define X86_FEATURE_MSR_SPEC_CTRL      ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD               ( 7*32+17) /* Speculative Store Bypass Disable */
 #define X86_FEATURE_MBA                        ( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_RSB_CTXSW          ( 7*32+19) /* "" Fill RSB on context switches */
 #define X86_FEATURE_SEV                        ( 7*32+20) /* AMD Secure Encrypted Virtualization */
-
 #define X86_FEATURE_USE_IBPB           ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW                ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE  ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_LS_CFG_SSBD                ( 7*32+24)  /* "" AMD SSBD implementation via LS_CFG MSR */
+#define X86_FEATURE_IBRS               ( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB               ( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP              ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN                        ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF             (13*32+ 1) /* Instructions Retired Count */
 #define X86_FEATURE_XSAVEERPTR         (13*32+ 2) /* Always save/restore FP error pointers */
-#define X86_FEATURE_IBPB               (13*32+12) /* Indirect Branch Prediction Barrier */
-#define X86_FEATURE_IBRS               (13*32+14) /* Indirect Branch Restricted Speculation */
-#define X86_FEATURE_STIBP              (13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_IBPB           (13*32+12) /* "" Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS           (13*32+14) /* "" Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP          (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD          (13*32+25) /* Virtualized Speculative Store Bypass Disable */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
 #define X86_FEATURE_SPEC_CTRL          (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP                (18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES  (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_SPEC_CTRL_SSBD     (18*32+31) /* "" Speculative Store Bypass Disable */
 
 /*
  * BUG word(s)
 #define X86_BUG_CPU_MELTDOWN           X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
 #define X86_BUG_SPECTRE_V1             X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
 #define X86_BUG_SPECTRE_V2             X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS      X86_BUG(17) /* CPU is affected by speculative store bypass attack */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index c25775fad4ed19d73452f530f4fecfb8d2373176..f4b2588865e9f7ad16696d3e70255a2b794d26b3 100644 (file)
@@ -924,7 +924,7 @@ struct kvm_x86_ops {
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */
        bool (*cpu_has_accelerated_tpr)(void);
-       bool (*cpu_has_high_real_mode_segbase)(void);
+       bool (*has_emulated_msr)(int index);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
        struct kvm *(*vm_alloc)(void);
index 53d5b1b9255eb8c924b5b1e650d740b0a574f68f..fda2114197b36935558f0a376644dcd0a0c268ce 100644 (file)
@@ -42,6 +42,8 @@
 #define MSR_IA32_SPEC_CTRL             0x00000048 /* Speculation Control */
 #define SPEC_CTRL_IBRS                 (1 << 0)   /* Indirect Branch Restricted Speculation */
 #define SPEC_CTRL_STIBP                        (1 << 1)   /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_SSBD_SHIFT           2          /* Speculative Store Bypass Disable bit */
+#define SPEC_CTRL_SSBD                 (1 << SPEC_CTRL_SSBD_SHIFT)   /* Speculative Store Bypass Disable */
 
 #define MSR_IA32_PRED_CMD              0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB                  (1 << 0)   /* Indirect Branch Prediction Barrier */
 #define MSR_IA32_ARCH_CAPABILITIES     0x0000010a
 #define ARCH_CAP_RDCL_NO               (1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL              (1 << 1)   /* Enhanced IBRS support */
+#define ARCH_CAP_SSB_NO                        (1 << 4)   /*
+                                                   * Not susceptible to Speculative Store Bypass
+                                                   * attack, so no Speculative Store Bypass
+                                                   * control required.
+                                                   */
 
 #define MSR_IA32_BBL_CR_CTL            0x00000119
 #define MSR_IA32_BBL_CR_CTL3           0x0000011e
 #define MSR_AMD64_SEV_ENABLED_BIT      0
 #define MSR_AMD64_SEV_ENABLED          BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
 
+#define MSR_AMD64_VIRT_SPEC_CTRL       0xc001011f
+
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF                        0xc00000e9
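
SPEC_CTRL_SSBD (bit 2) joins IBRS (bit 0) and STIBP (bit 1) as the architecturally defined bits of MSR_IA32_SPEC_CTRL. A minimal user-space sketch, illustration only and not part of the patch, of the reserved-bits check that vmx_set_msr() applies further down when a guest writes this MSR:

    #include <stdint.h>
    #include <stdio.h>

    #define SPEC_CTRL_IBRS   (1ULL << 0)
    #define SPEC_CTRL_STIBP  (1ULL << 1)
    #define SPEC_CTRL_SSBD   (1ULL << 2)   /* new in this series */

    /* Mirrors the "data & ~(IBRS | STIBP | SSBD)" validity test. */
    static int spec_ctrl_value_valid(uint64_t data)
    {
            return !(data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD));
    }

    int main(void)
    {
            printf("%d %d\n",
                   spec_ctrl_value_valid(SPEC_CTRL_IBRS | SPEC_CTRL_SSBD), /* 1 */
                   spec_ctrl_value_valid(1ULL << 3));                      /* 0: reserved bit */
            return 0;
    }
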
 
index f928ad9b143fedea1085dedc508658fa745b4ceb..8b38df98548e8dfd1176f564b02379f6a728da49 100644 (file)
@@ -217,6 +217,14 @@ enum spectre_v2_mitigation {
        SPECTRE_V2_IBRS,
 };
 
+/* The Speculative Store Bypass disable variants */
+enum ssb_mitigation {
+       SPEC_STORE_BYPASS_NONE,
+       SPEC_STORE_BYPASS_DISABLE,
+       SPEC_STORE_BYPASS_PRCTL,
+       SPEC_STORE_BYPASS_SECCOMP,
+};
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
@@ -241,22 +249,27 @@ static inline void vmexit_fill_RSB(void)
 #endif
 }
 
-#define alternative_msr_write(_msr, _val, _feature)            \
-       asm volatile(ALTERNATIVE("",                            \
-                                "movl %[msr], %%ecx\n\t"       \
-                                "movl %[val], %%eax\n\t"       \
-                                "movl $0, %%edx\n\t"           \
-                                "wrmsr",                       \
-                                _feature)                      \
-                    : : [msr] "i" (_msr), [val] "i" (_val)     \
-                    : "eax", "ecx", "edx", "memory")
+static __always_inline
+void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+{
+       asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
+               : : "c" (msr),
+                   "a" ((u32)val),
+                   "d" ((u32)(val >> 32)),
+                   [feature] "i" (feature)
+               : "memory");
+}
 
 static inline void indirect_branch_prediction_barrier(void)
 {
-       alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
-                             X86_FEATURE_USE_IBPB);
+       u64 val = PRED_CMD_IBPB;
+
+       alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
 }
 
+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
 /*
  * With retpoline, we must use IBRS to restrict branch prediction
  * before calling into firmware.
@@ -265,14 +278,18 @@ static inline void indirect_branch_prediction_barrier(void)
  */
 #define firmware_restrict_branch_speculation_start()                   \
 do {                                                                   \
+       u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;                  \
+                                                                       \
        preempt_disable();                                              \
-       alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS,       \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
                              X86_FEATURE_USE_IBRS_FW);                 \
 } while (0)
 
 #define firmware_restrict_branch_speculation_end()                     \
 do {                                                                   \
-       alternative_msr_write(MSR_IA32_SPEC_CTRL, 0,                    \
+       u64 val = x86_spec_ctrl_base;                                   \
+                                                                       \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
                              X86_FEATURE_USE_IBRS_FW);                 \
        preempt_enable();                                               \
 } while (0)
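
The rewrite of alternative_msr_write() above is not cosmetic: the old macro only accepted a constant value and always wrote zero to EDX, whereas the inline function takes a runtime u64 (such as the cached x86_spec_ctrl_base) and the "a"/"d" constraints split it across EAX and EDX as WRMSR expects. A small stand-alone sketch of that split, illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* How a 64-bit MSR value maps onto the EAX/EDX register pair for WRMSR. */
    static void split_msr_value(uint64_t val, uint32_t *eax, uint32_t *edx)
    {
            *eax = (uint32_t)val;           /* low 32 bits  */
            *edx = (uint32_t)(val >> 32);   /* high 32 bits */
    }

    int main(void)
    {
            uint32_t eax, edx;

            split_msr_value(0x123456789ULL, &eax, &edx);
            printf("eax=%#x edx=%#x\n", eax, edx);  /* eax=0x23456789 edx=0x1 */
            return 0;
    }
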
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
new file mode 100644 (file)
index 0000000..ae7c2c5
--- /dev/null
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_SPECCTRL_H_
+#define _ASM_X86_SPECCTRL_H_
+
+#include <linux/thread_info.h>
+#include <asm/nospec-branch.h>
+
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
+ * the guest's version of VIRT_SPEC_CTRL, if emulated.
+ */
+extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
+
+/**
+ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
+ * @guest_spec_ctrl:           The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:      The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *                             (may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+       x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
+}
+
+/**
+ * x86_spec_ctrl_restore_host - Restore host speculation control registers
+ * @guest_spec_ctrl:           The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:      The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *                             (may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+       x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
+}
+
+/* AMD specific Speculative Store Bypass MSR data */
+extern u64 x86_amd_ls_cfg_base;
+extern u64 x86_amd_ls_cfg_ssbd_mask;
+
+static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+{
+       BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+       return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
+static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+       BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+       return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
+static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+{
+       return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+}
+
+#ifdef CONFIG_SMP
+extern void speculative_store_bypass_ht_init(void);
+#else
+static inline void speculative_store_bypass_ht_init(void) { }
+#endif
+
+extern void speculative_store_bypass_update(unsigned long tif);
+
+static inline void speculative_store_bypass_update_current(void)
+{
+       speculative_store_bypass_update(current_thread_info()->flags);
+}
+
+#endif
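
The two conversion helpers above work because TIF_SSBD (bit 5, added to thread_info.h below) sits higher than SPEC_CTRL_SSBD_SHIFT (bit 2), so a single shift by the difference moves the flag between the thread-flag and MSR encodings; the BUILD_BUG_ON guards that ordering. A worked user-space sketch of the arithmetic, illustration only:

    #include <stdint.h>
    #include <stdio.h>

    #define TIF_SSBD              5    /* thread flag bit */
    #define _TIF_SSBD             (1UL << TIF_SSBD)
    #define SPEC_CTRL_SSBD_SHIFT  2    /* MSR bit position */
    #define SPEC_CTRL_SSBD        (1ULL << SPEC_CTRL_SSBD_SHIFT)

    static uint64_t ssbd_tif_to_spec_ctrl(uint64_t tifn)
    {
            return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
    }

    static unsigned long ssbd_spec_ctrl_to_tif(uint64_t spec_ctrl)
    {
            return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
    }

    int main(void)
    {
            /* 0x20 (TIF encoding) <-> 0x4 (SPEC_CTRL encoding) */
            printf("%#llx %#lx\n",
                   (unsigned long long)ssbd_tif_to_spec_ctrl(_TIF_SSBD),
                   ssbd_spec_ctrl_to_tif(SPEC_CTRL_SSBD));
            return 0;
    }
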
index a5d9521bb2cbaa1732bf8d377c2efe04b3764405..2ff2a30a264f4c5f02a01b3b87e4148e8992dc5a 100644 (file)
@@ -79,6 +79,7 @@ struct thread_info {
 #define TIF_SIGPENDING         2       /* signal pending */
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 #define TIF_SINGLESTEP         4       /* reenable singlestep on user return*/
+#define TIF_SSBD                       5       /* Reduced data speculation */
 #define TIF_SYSCALL_EMU                6       /* syscall emulation active */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_SECCOMP            8       /* secure computing */
@@ -105,6 +106,7 @@ struct thread_info {
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
+#define _TIF_SSBD              (1 << TIF_SSBD)
 #define _TIF_SYSCALL_EMU       (1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
@@ -144,7 +146,7 @@ struct thread_info {
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW                                                        \
-       (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
+       (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
index 12bc0a1139dac57fefe8b299d1ef9685fa0e3147..1b18be3f35a8e8fdb34db6feff1ee8e416a73c96 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
+#include <asm/spec-ctrl.h>
 #include <asm/smp.h>
 #include <asm/pci-direct.h>
 #include <asm/delay.h>
@@ -554,6 +555,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                rdmsrl(MSR_FAM10H_NODE_ID, value);
                nodes_per_socket = ((value >> 3) & 7) + 1;
        }
+
+       if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+               unsigned int bit;
+
+               switch (c->x86) {
+               case 0x15: bit = 54; break;
+               case 0x16: bit = 33; break;
+               case 0x17: bit = 10; break;
+               default: return;
+               }
+               /*
+                * Try to cache the base value so further operations can
+                * avoid RMW. If that faults, do not enable SSBD.
+                */
+               if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+                       setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
+                       setup_force_cpu_cap(X86_FEATURE_SSBD);
+                       x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
+               }
+       }
 }
 
 static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
@@ -791,6 +812,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 
 static void init_amd_zn(struct cpuinfo_x86 *c)
 {
+       set_cpu_cap(c, X86_FEATURE_ZEN);
        /*
         * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
         * all up to and including B1.
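
The family-dependent bit selected in bsp_init_amd() above becomes x86_amd_ls_cfg_ssbd_mask, the bit that is later ORed into MSR_AMD64_LS_CFG to disable speculative store bypass. A tiny sketch of the mapping, illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* LS_CFG SSBD bit per AMD family, as chosen in bsp_init_amd(). */
    static uint64_t amd_ls_cfg_ssbd_mask(unsigned int family)
    {
            switch (family) {
            case 0x15: return 1ULL << 54;
            case 0x16: return 1ULL << 33;
            case 0x17: return 1ULL << 10;
            default:   return 0;    /* no known LS_CFG control bit */
            }
    }

    int main(void)
    {
            printf("fam 0x17 mask = %#llx\n",
                   (unsigned long long)amd_ls_cfg_ssbd_mask(0x17));  /* 0x400 */
            return 0;
    }
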
index bfca937bdcc36ce8d9523f03dcc92e93d3c39d5c..7416fc206b4a0e3f17be821e932d9fd840c03079 100644 (file)
 #include <linux/utsname.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
 
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
 #include <asm/bugs.h>
 #include <asm/processor.h>
 #include <asm/intel-family.h>
 
 static void __init spectre_v2_select_mitigation(void);
+static void __init ssb_select_mitigation(void);
+
+/*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+u64 __ro_after_init x86_spec_ctrl_base;
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+/*
+ * The vendor and possibly platform specific bits which can be modified in
+ * x86_spec_ctrl_base.
+ */
+static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+
+/*
+ * AMD specific MSR info for Speculative Store Bypass control.
+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
+ */
+u64 __ro_after_init x86_amd_ls_cfg_base;
+u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
 
 void __init check_bugs(void)
 {
@@ -37,9 +60,27 @@ void __init check_bugs(void)
                print_cpu_info(&boot_cpu_data);
        }
 
+       /*
+        * Read the SPEC_CTRL MSR to account for reserved bits which may
+        * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+        * init code as it is not enumerated and depends on the family.
+        */
+       if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+               rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+       /* Allow STIBP in MSR_SPEC_CTRL if supported */
+       if (boot_cpu_has(X86_FEATURE_STIBP))
+               x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();
 
+       /*
+        * Select proper mitigation for any exposure to the Speculative Store
+        * Bypass vulnerability.
+        */
+       ssb_select_mitigation();
+
 #ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
@@ -93,7 +134,76 @@ static const char *spectre_v2_strings[] = {
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 
-static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+       SPECTRE_V2_NONE;
+
+void
+x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+{
+       u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+       struct thread_info *ti = current_thread_info();
+
+       /* Is MSR_SPEC_CTRL implemented ? */
+       if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+               /*
+                * Restrict guest_spec_ctrl to supported values. Clear the
+                * modifiable bits in the host base value and OR in the
+                * modifiable bits from the guest value.
+                */
+               guestval = hostval & ~x86_spec_ctrl_mask;
+               guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+
+               /* SSBD controlled in MSR_SPEC_CTRL */
+               if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+                       hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
+
+               if (hostval != guestval) {
+                       msrval = setguest ? guestval : hostval;
+                       wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
+               }
+       }
+
+       /*
+        * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
+        * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
+        */
+       if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+           !static_cpu_has(X86_FEATURE_VIRT_SSBD))
+               return;
+
+       /*
+        * If the host has SSBD mitigation enabled, force it in the host's
+        * virtual MSR value. If it's not permanently enabled, evaluate
+        * current's TIF_SSBD thread flag.
+        */
+       if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
+               hostval = SPEC_CTRL_SSBD;
+       else
+               hostval = ssbd_tif_to_spec_ctrl(ti->flags);
+
+       /* Sanitize the guest value */
+       guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
+
+       if (hostval != guestval) {
+               unsigned long tif;
+
+               tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
+                                ssbd_spec_ctrl_to_tif(hostval);
+
+               speculative_store_bypass_update(tif);
+       }
+}
+EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
+
+static void x86_amd_ssb_disable(void)
+{
+       u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+       if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+               wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+       else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+               wrmsrl(MSR_AMD64_LS_CFG, msrval);
+}
 
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
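
x86_virt_spec_ctrl() above reconciles the host and guest views of MSR_SPEC_CTRL around VM entry and exit: guest-modifiable bits (x86_spec_ctrl_mask) come from the guest value, everything else from x86_spec_ctrl_base, and the MSR is only rewritten when the two resulting values differ. A simplified user-space model of just that merge, illustration only; the real function also folds the current task's TIF_SSBD state into the host value and falls back to the AMD LS_CFG/VIRT_SPEC_CTRL paths:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool reconcile_spec_ctrl(uint64_t host_base, uint64_t modifiable_mask,
                                    uint64_t guest_val, bool setguest,
                                    uint64_t *msr_to_write)
    {
            uint64_t hostval = host_base;
            uint64_t guestval = (host_base & ~modifiable_mask) |
                                (guest_val & modifiable_mask);

            if (hostval == guestval)
                    return false;           /* nothing to write */

            *msr_to_write = setguest ? guestval : hostval;
            return true;
    }

    int main(void)
    {
            uint64_t msr;

            /* Host base 0, guest sets IBRS (bit 0), which is guest-modifiable. */
            if (reconcile_spec_ctrl(0x0, 0x1, 0x1, true, &msr))
                    printf("write %#llx on VM entry\n", (unsigned long long)msr);
            return 0;
    }
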
@@ -312,32 +422,289 @@ retpoline_auto:
 }
 
 #undef pr_fmt
+#define pr_fmt(fmt)    "Speculative Store Bypass: " fmt
+
+static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
+
+/* The kernel command line selection */
+enum ssb_mitigation_cmd {
+       SPEC_STORE_BYPASS_CMD_NONE,
+       SPEC_STORE_BYPASS_CMD_AUTO,
+       SPEC_STORE_BYPASS_CMD_ON,
+       SPEC_STORE_BYPASS_CMD_PRCTL,
+       SPEC_STORE_BYPASS_CMD_SECCOMP,
+};
+
+static const char *ssb_strings[] = {
+       [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
+       [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
+       [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
+       [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
+};
+
+static const struct {
+       const char *option;
+       enum ssb_mitigation_cmd cmd;
+} ssb_mitigation_options[] = {
+       { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
+       { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
+       { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
+       { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
+       { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
+};
+
+static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+{
+       enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
+       char arg[20];
+       int ret, i;
+
+       if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+               return SPEC_STORE_BYPASS_CMD_NONE;
+       } else {
+               ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
+                                         arg, sizeof(arg));
+               if (ret < 0)
+                       return SPEC_STORE_BYPASS_CMD_AUTO;
+
+               for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
+                       if (!match_option(arg, ret, ssb_mitigation_options[i].option))
+                               continue;
+
+                       cmd = ssb_mitigation_options[i].cmd;
+                       break;
+               }
+
+               if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
+                       pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+                       return SPEC_STORE_BYPASS_CMD_AUTO;
+               }
+       }
+
+       return cmd;
+}
+
+static enum ssb_mitigation __init __ssb_select_mitigation(void)
+{
+       enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
+       enum ssb_mitigation_cmd cmd;
+
+       if (!boot_cpu_has(X86_FEATURE_SSBD))
+               return mode;
+
+       cmd = ssb_parse_cmdline();
+       if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
+           (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
+            cmd == SPEC_STORE_BYPASS_CMD_AUTO))
+               return mode;
+
+       switch (cmd) {
+       case SPEC_STORE_BYPASS_CMD_AUTO:
+       case SPEC_STORE_BYPASS_CMD_SECCOMP:
+               /*
+                * Choose prctl+seccomp as the default mode if seccomp is
+                * enabled.
+                */
+               if (IS_ENABLED(CONFIG_SECCOMP))
+                       mode = SPEC_STORE_BYPASS_SECCOMP;
+               else
+                       mode = SPEC_STORE_BYPASS_PRCTL;
+               break;
+       case SPEC_STORE_BYPASS_CMD_ON:
+               mode = SPEC_STORE_BYPASS_DISABLE;
+               break;
+       case SPEC_STORE_BYPASS_CMD_PRCTL:
+               mode = SPEC_STORE_BYPASS_PRCTL;
+               break;
+       case SPEC_STORE_BYPASS_CMD_NONE:
+               break;
+       }
+
+       /*
+        * We have three CPU feature flags that are in play here:
+        *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+        *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
+        *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+        */
+       if (mode == SPEC_STORE_BYPASS_DISABLE) {
+               setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+               /*
+                * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
+                * a completely different MSR and bit dependent on family.
+                */
+               switch (boot_cpu_data.x86_vendor) {
+               case X86_VENDOR_INTEL:
+                       x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+                       x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+                       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+                       break;
+               case X86_VENDOR_AMD:
+                       x86_amd_ssb_disable();
+                       break;
+               }
+       }
+
+       return mode;
+}
+
+static void ssb_select_mitigation(void)
+{
+       ssb_mode = __ssb_select_mitigation();
+
+       if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+               pr_info("%s\n", ssb_strings[ssb_mode]);
+}
+
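For reference, the two command line hooks parsed above are nospec_store_bypass_disable, which forces SPEC_STORE_BYPASS_CMD_NONE, and spec_store_bypass_disable= with the values from ssb_mitigation_options[]: auto, on, off, prctl or seccomp (for example, booting with spec_store_bypass_disable=prctl leaves the mitigation off by default but lets tasks opt in via prctl). With auto, __ssb_select_mitigation() picks the seccomp mode when CONFIG_SECCOMP is enabled and the prctl mode otherwise.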
+#undef pr_fmt
+#define pr_fmt(fmt)     "Speculation prctl: " fmt
+
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+       bool update;
+
+       if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
+           ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
+               return -ENXIO;
+
+       switch (ctrl) {
+       case PR_SPEC_ENABLE:
+               /* If speculation is force disabled, enable is not allowed */
+               if (task_spec_ssb_force_disable(task))
+                       return -EPERM;
+               task_clear_spec_ssb_disable(task);
+               update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+               break;
+       case PR_SPEC_DISABLE:
+               task_set_spec_ssb_disable(task);
+               update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+               break;
+       case PR_SPEC_FORCE_DISABLE:
+               task_set_spec_ssb_disable(task);
+               task_set_spec_ssb_force_disable(task);
+               update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       /*
+        * If being set on a non-current task, delay setting the CPU
+        * mitigation until it is next scheduled.
+        */
+       if (task == current && update)
+               speculative_store_bypass_update_current();
+
+       return 0;
+}
+
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+                            unsigned long ctrl)
+{
+       switch (which) {
+       case PR_SPEC_STORE_BYPASS:
+               return ssb_prctl_set(task, ctrl);
+       default:
+               return -ENODEV;
+       }
+}
+
+#ifdef CONFIG_SECCOMP
+void arch_seccomp_spec_mitigate(struct task_struct *task)
+{
+       if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
+               ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+}
+#endif
+
+static int ssb_prctl_get(struct task_struct *task)
+{
+       switch (ssb_mode) {
+       case SPEC_STORE_BYPASS_DISABLE:
+               return PR_SPEC_DISABLE;
+       case SPEC_STORE_BYPASS_SECCOMP:
+       case SPEC_STORE_BYPASS_PRCTL:
+               if (task_spec_ssb_force_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+               if (task_spec_ssb_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+               return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+       default:
+               if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+                       return PR_SPEC_ENABLE;
+               return PR_SPEC_NOT_AFFECTED;
+       }
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+       switch (which) {
+       case PR_SPEC_STORE_BYPASS:
+               return ssb_prctl_get(task);
+       default:
+               return -ENODEV;
+       }
+}
+
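
The arch_prctl_spec_ctrl_set()/get() hooks above back the new PR_SET_SPECULATION_CTRL / PR_GET_SPECULATION_CTRL prctl interface added elsewhere in this series. A minimal user-space sketch, assuming <linux/prctl.h> from a kernel with this series applied; when the selected ssb mode is not prctl or seccomp, the set call fails with ENXIO as handled above:

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>    /* PR_SET_SPECULATION_CTRL, PR_SPEC_* */

    int main(void)
    {
            /* Ask for the SSB mitigation to be applied to this task. */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                      PR_SPEC_DISABLE, 0, 0))
                    perror("PR_SET_SPECULATION_CTRL");

            /* Returns a PR_SPEC_* bitmask, e.g. PR_SPEC_PRCTL | PR_SPEC_DISABLE. */
            printf("state: %ld\n",
                   (long)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                               0, 0, 0));
            return 0;
    }
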
+void x86_spec_ctrl_setup_ap(void)
+{
+       if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+       if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+               x86_amd_ssb_disable();
+}
 
 #ifdef CONFIG_SYSFS
-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+
+static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+                              char *buf, unsigned int bug)
 {
-       if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+       if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");
-       if (boot_cpu_has(X86_FEATURE_PTI))
-               return sprintf(buf, "Mitigation: PTI\n");
+
+       switch (bug) {
+       case X86_BUG_CPU_MELTDOWN:
+               if (boot_cpu_has(X86_FEATURE_PTI))
+                       return sprintf(buf, "Mitigation: PTI\n");
+
+               break;
+
+       case X86_BUG_SPECTRE_V1:
+               return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+
+       case X86_BUG_SPECTRE_V2:
+               return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+                              boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+                              boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+                              spectre_v2_module_string());
+
+       case X86_BUG_SPEC_STORE_BYPASS:
+               return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+
+       default:
+               break;
+       }
+
        return sprintf(buf, "Vulnerable\n");
 }
 
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
+}
+
 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
-               return sprintf(buf, "Not affected\n");
-       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+       return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
 }
 
 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-               return sprintf(buf, "Not affected\n");
+       return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
+}
 
-       return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-                      boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
-                      boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-                      spectre_v2_module_string());
+ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
 }
 #endif
index ce243f7d2d4e0879c9cbff297ff112e236a6802c..78decc3e306712dbca3ffd4c71392f681ba9f5df 100644 (file)
@@ -757,17 +757,32 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
         * and they also have a different bit for STIBP support. Also,
         * a hypervisor might have set the individual AMD bits even on
         * Intel CPUs, for finer-grained selection of what's available.
-        *
-        * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
-        * features, which are visible in /proc/cpuinfo and used by the
-        * kernel. So set those accordingly from the Intel bits.
         */
        if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
                set_cpu_cap(c, X86_FEATURE_IBRS);
                set_cpu_cap(c, X86_FEATURE_IBPB);
+               set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
        }
+
        if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
                set_cpu_cap(c, X86_FEATURE_STIBP);
+
+       if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+           cpu_has(c, X86_FEATURE_VIRT_SSBD))
+               set_cpu_cap(c, X86_FEATURE_SSBD);
+
+       if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+               set_cpu_cap(c, X86_FEATURE_IBRS);
+               set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+       }
+
+       if (cpu_has(c, X86_FEATURE_AMD_IBPB))
+               set_cpu_cap(c, X86_FEATURE_IBPB);
+
+       if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+               set_cpu_cap(c, X86_FEATURE_STIBP);
+               set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+       }
 }
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
@@ -927,21 +942,55 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
        {}
 };
 
-static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
+static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_PINEVIEW        },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_LINCROFT        },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_PENWELL         },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_CLOVERVIEW      },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_CEDARVIEW       },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT1     },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT2     },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_MERRIFIELD      },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_CORE_YONAH           },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
+       { X86_VENDOR_CENTAUR,   5,                                      },
+       { X86_VENDOR_INTEL,     5,                                      },
+       { X86_VENDOR_NSC,       5,                                      },
+       { X86_VENDOR_AMD,       0x12,                                   },
+       { X86_VENDOR_AMD,       0x11,                                   },
+       { X86_VENDOR_AMD,       0x10,                                   },
+       { X86_VENDOR_AMD,       0xf,                                    },
+       { X86_VENDOR_ANY,       4,                                      },
+       {}
+};
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
        u64 ia32_cap = 0;
 
-       if (x86_match_cpu(cpu_no_meltdown))
-               return false;
-
        if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
+       if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+          !(ia32_cap & ARCH_CAP_SSB_NO))
+               setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+       if (x86_match_cpu(cpu_no_speculation))
+               return;
+
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
+       if (x86_match_cpu(cpu_no_meltdown))
+               return;
+
        /* Rogue Data Cache Load? No! */
        if (ia32_cap & ARCH_CAP_RDCL_NO)
-               return false;
+               return;
 
-       return true;
+       setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 }
 
 /*
@@ -992,12 +1041,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
-       if (!x86_match_cpu(cpu_no_speculation)) {
-               if (cpu_vulnerable_to_meltdown(c))
-                       setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-               setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
-               setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
-       }
+       cpu_set_bug_bits(c);
 
        fpu__init_system(c);
 
@@ -1359,6 +1403,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
 #endif
        mtrr_ap_init();
        validate_apic_and_package_id(c);
+       x86_spec_ctrl_setup_ap();
 }
 
 static __init int setup_noclflush(char *arg)
index e806b11a99af4c72c5868731c7a8555cfb3957d9..37672d299e357430f2d16941905e352e9e89f648 100644 (file)
@@ -50,4 +50,6 @@ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
 
 unsigned int aperfmperf_get_khz(int cpu);
 
+extern void x86_spec_ctrl_setup_ap(void);
+
 #endif /* ARCH_X86_CPU_H */
index 60d1897041da89c19b97f03ca2c577102ec3186c..577e7f7ae2733f293107f18b0673ed233850c914 100644 (file)
@@ -188,7 +188,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
                setup_clear_cpu_cap(X86_FEATURE_IBPB);
                setup_clear_cpu_cap(X86_FEATURE_STIBP);
                setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+               setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
                setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+               setup_clear_cpu_cap(X86_FEATURE_SSBD);
+               setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
        }
 
        /*
index 03408b942adbad2bd1bca42b89e559e2d198d9da..30ca2d1a92319726ff31d3ddb8264140bcec17cf 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/switch_to.h>
 #include <asm/desc.h>
 #include <asm/prctl.h>
+#include <asm/spec-ctrl.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -278,6 +279,148 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
        }
 }
 
+#ifdef CONFIG_SMP
+
+struct ssb_state {
+       struct ssb_state        *shared_state;
+       raw_spinlock_t          lock;
+       unsigned int            disable_state;
+       unsigned long           local_state;
+};
+
+#define LSTATE_SSB     0
+
+static DEFINE_PER_CPU(struct ssb_state, ssb_state);
+
+void speculative_store_bypass_ht_init(void)
+{
+       struct ssb_state *st = this_cpu_ptr(&ssb_state);
+       unsigned int this_cpu = smp_processor_id();
+       unsigned int cpu;
+
+       st->local_state = 0;
+
+       /*
+        * Shared state setup happens once on the first bringup
+        * of the CPU. It's not destroyed on CPU hotunplug.
+        */
+       if (st->shared_state)
+               return;
+
+       raw_spin_lock_init(&st->lock);
+
+       /*
+        * Go over HT siblings and check whether one of them has set up the
+        * shared state pointer already.
+        */
+       for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
+               if (cpu == this_cpu)
+                       continue;
+
+               if (!per_cpu(ssb_state, cpu).shared_state)
+                       continue;
+
+               /* Link it to the state of the sibling: */
+               st->shared_state = per_cpu(ssb_state, cpu).shared_state;
+               return;
+       }
+
+       /*
+        * First HT sibling to come up on the core.  Link shared state of
+        * the first HT sibling to itself. The siblings on the same core
+        * which come up later will see the shared state pointer and link
+        * themselves to the state of this CPU.
+        */
+       st->shared_state = st;
+}
+
+/*
+ * Logic is: First HT sibling enables SSBD for both siblings in the core
+ * and the last sibling to disable it disables it for the whole core. This is how
+ * MSR_SPEC_CTRL works in "hardware":
+ *
+ *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
+ */
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+       struct ssb_state *st = this_cpu_ptr(&ssb_state);
+       u64 msr = x86_amd_ls_cfg_base;
+
+       if (!static_cpu_has(X86_FEATURE_ZEN)) {
+               msr |= ssbd_tif_to_amd_ls_cfg(tifn);
+               wrmsrl(MSR_AMD64_LS_CFG, msr);
+               return;
+       }
+
+       if (tifn & _TIF_SSBD) {
+               /*
+                * Since this can race with prctl(), block reentry on the
+                * same CPU.
+                */
+               if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
+                       return;
+
+               msr |= x86_amd_ls_cfg_ssbd_mask;
+
+               raw_spin_lock(&st->shared_state->lock);
+               /* First sibling enables SSBD: */
+               if (!st->shared_state->disable_state)
+                       wrmsrl(MSR_AMD64_LS_CFG, msr);
+               st->shared_state->disable_state++;
+               raw_spin_unlock(&st->shared_state->lock);
+       } else {
+               if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
+                       return;
+
+               raw_spin_lock(&st->shared_state->lock);
+               st->shared_state->disable_state--;
+               if (!st->shared_state->disable_state)
+                       wrmsrl(MSR_AMD64_LS_CFG, msr);
+               raw_spin_unlock(&st->shared_state->lock);
+       }
+}
+#else
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+       u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+
+       wrmsrl(MSR_AMD64_LS_CFG, msr);
+}
+#endif
+
+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
+{
+       /*
+        * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
+        * so ssbd_tif_to_spec_ctrl() just works.
+        */
+       wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+}
+
+static __always_inline void intel_set_ssb_state(unsigned long tifn)
+{
+       u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+
+       wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+}
+
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+{
+       if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+               amd_set_ssb_virt_state(tifn);
+       else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+               amd_set_core_ssb_state(tifn);
+       else
+               intel_set_ssb_state(tifn);
+}
+
+void speculative_store_bypass_update(unsigned long tif)
+{
+       preempt_disable();
+       __speculative_store_bypass_update(tif);
+       preempt_enable();
+}
+
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
 {
@@ -309,6 +452,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 
        if ((tifp ^ tifn) & _TIF_NOCPUID)
                set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
+
+       if ((tifp ^ tifn) & _TIF_SSBD)
+               __speculative_store_bypass_update(tifn);
 }
 
 /*
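
__switch_to_xtra() above only calls into the SSBD update when the flag actually differs between the outgoing and incoming task; the (tifp ^ tifn) & _TIF_SSBD test keeps the comparatively expensive MSR write off context switches where nothing changed. A tiny sketch of that filter, illustration only:

    #include <stdio.h>

    #define TIF_SSBD   5
    #define _TIF_SSBD  (1UL << TIF_SSBD)

    /* Nonzero only when prev and next disagree on TIF_SSBD. */
    static unsigned long ssbd_update_needed(unsigned long tifp, unsigned long tifn)
    {
            return (tifp ^ tifn) & _TIF_SSBD;
    }

    int main(void)
    {
            printf("%lu %lu\n",
                   ssbd_update_needed(0, _TIF_SSBD),           /* nonzero: update MSR */
                   ssbd_update_needed(_TIF_SSBD, _TIF_SSBD));  /* 0: skip it */
            return 0;
    }
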
index 0f1cbb042f49b82e7a01b7ace0bce08061a8e6f1..9dd324ae4832914e5368911da65fc29cbce7f14e 100644 (file)
@@ -79,6 +79,7 @@
 #include <asm/qspinlock.h>
 #include <asm/intel-family.h>
 #include <asm/cpu_device_id.h>
+#include <asm/spec-ctrl.h>
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -244,6 +245,8 @@ static void notrace start_secondary(void *unused)
         */
        check_tsc_sync_target();
 
+       speculative_store_bypass_ht_init();
+
        /*
         * Lock vector_lock, set CPU online and bring the vector
         * allocator online. Online must be set with vector_lock held
@@ -1292,6 +1295,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        set_mtrr_aps_delayed_init();
 
        smp_quirk_init_udelay();
+
+       speculative_store_bypass_ht_init();
 }
 
 void arch_enable_nonboot_cpus_begin(void)
index 82055b90a8b31480e50eb6fe5ee57febc641d32c..ced851169730a0cd3910d05baae61293d1f56a4f 100644 (file)
@@ -379,7 +379,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 
        /* cpuid 0x80000008.ebx */
        const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-               F(IBPB) | F(IBRS);
+               F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
 
        /* cpuid 0xC0000001.edx */
        const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -407,7 +407,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 
        /* cpuid 7.0.edx*/
        const u32 kvm_cpuid_7_0_edx_x86_features =
-               F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
+               F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
                F(ARCH_CAPABILITIES);
 
        /* all calls to cpuid_count() should be made on the same cpu */
@@ -647,13 +647,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                        g_phys_as = phys_as;
                entry->eax = g_phys_as | (virt_as << 8);
                entry->edx = 0;
-               /* IBRS and IBPB aren't necessarily present in hardware cpuid */
-               if (boot_cpu_has(X86_FEATURE_IBPB))
-                       entry->ebx |= F(IBPB);
-               if (boot_cpu_has(X86_FEATURE_IBRS))
-                       entry->ebx |= F(IBRS);
+               /*
+                * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
+                * hardware cpuid
+                */
+               if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
+                       entry->ebx |= F(AMD_IBPB);
+               if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
+                       entry->ebx |= F(AMD_IBRS);
+               if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+                       entry->ebx |= F(VIRT_SSBD);
                entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
                cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+               if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+                       entry->ebx |= F(VIRT_SSBD);
                break;
        }
        case 0x80000019:
index 1fc05e428aba824e252bc3c2dcb9e1b6f4fb2e69..26110c202b19c44f6b52f4a105646b49ee7a23b5 100644 (file)
@@ -49,7 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -213,6 +213,12 @@ struct vcpu_svm {
        } host;
 
        u64 spec_ctrl;
+       /*
+        * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
+        * translated into the appropriate LS_CFG bits on the host to
+        * perform speculative control.
+        */
+       u64 virt_spec_ctrl;
 
        u32 *msrpm;
 
@@ -2060,6 +2066,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
        vcpu->arch.microcode_version = 0x01000065;
        svm->spec_ctrl = 0;
+       svm->virt_spec_ctrl = 0;
 
        if (!init_event) {
                svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
@@ -4108,11 +4115,18 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+                   !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
                        return 1;
 
                msr_info->data = svm->spec_ctrl;
                break;
+       case MSR_AMD64_VIRT_SPEC_CTRL:
+               if (!msr_info->host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
+                       return 1;
+
+               msr_info->data = svm->virt_spec_ctrl;
+               break;
        case MSR_F15H_IC_CFG: {
 
                int family, model;
@@ -4203,7 +4217,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+                   !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
                        return 1;
 
                /* The STIBP bit doesn't fault even if it's not advertised */
@@ -4230,7 +4244,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                break;
        case MSR_IA32_PRED_CMD:
                if (!msr->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
+                   !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
                        return 1;
 
                if (data & ~PRED_CMD_IBPB)
@@ -4244,6 +4258,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                        break;
                set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
                break;
+       case MSR_AMD64_VIRT_SPEC_CTRL:
+               if (!msr->host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
+                       return 1;
+
+               if (data & ~SPEC_CTRL_SSBD)
+                       return 1;
+
+               svm->virt_spec_ctrl = data;
+               break;
        case MSR_STAR:
                svm->vmcb->save.star = data;
                break;
@@ -5557,8 +5581,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         * is no need to worry about the conditional branch over the wrmsr
         * being speculatively taken.
         */
-       if (svm->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+       x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
        asm volatile (
                "push %%" _ASM_BP "; \n\t"
@@ -5652,6 +5675,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
                );
 
+       /* Eliminate branch target predictions from guest mode */
+       vmexit_fill_RSB();
+
+#ifdef CONFIG_X86_64
+       wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+#else
+       loadsegment(fs, svm->host.fs);
+#ifndef CONFIG_X86_32_LAZY_GS
+       loadsegment(gs, svm->host.gs);
+#endif
+#endif
+
        /*
         * We do not use IBRS in the kernel. If this vCPU has used the
         * SPEC_CTRL MSR it may have left it on; save the value and
@@ -5670,20 +5705,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
                svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-       if (svm->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
-
-       /* Eliminate branch target predictions from guest mode */
-       vmexit_fill_RSB();
-
-#ifdef CONFIG_X86_64
-       wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-#else
-       loadsegment(fs, svm->host.fs);
-#ifndef CONFIG_X86_32_LAZY_GS
-       loadsegment(gs, svm->host.gs);
-#endif
-#endif
+       x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
        reload_tss(vcpu);
 
@@ -5786,7 +5808,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
        return false;
 }
 
-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
 {
        return true;
 }
@@ -7012,7 +7034,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,
        .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
-       .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+       .has_emulated_msr = svm_has_emulated_msr,
 
        .vcpu_create = svm_create_vcpu,
        .vcpu_free = svm_free_vcpu,
index 3f1696570b41475848ae7684565da935888e6363..40aa29204baf80aee54056dffb69519cc6cb5f89 100644 (file)
@@ -51,7 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 #include <asm/mshyperv.h>
 
 #include "trace.h"
@@ -3529,7 +3529,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                return kvm_get_msr_common(vcpu, msr_info);
        case MSR_IA32_SPEC_CTRL:
                if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
                        return 1;
 
@@ -3648,12 +3647,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
                        return 1;
 
                /* The STIBP bit doesn't fault even if it's not advertised */
-               if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+               if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
                        return 1;
 
                vmx->spec_ctrl = data;
@@ -3679,7 +3677,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_IA32_PRED_CMD:
                if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
                        return 1;
 
@@ -9488,9 +9485,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 }
 STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
 
-static bool vmx_has_high_real_mode_segbase(void)
+static bool vmx_has_emulated_msr(int index)
 {
-       return enable_unrestricted_guest || emulate_invalid_guest_state;
+       switch (index) {
+       case MSR_IA32_SMBASE:
+               /*
+                * We cannot do SMM unless we can run the guest in big
+                * real mode.
+                */
+               return enable_unrestricted_guest || emulate_invalid_guest_state;
+       case MSR_AMD64_VIRT_SPEC_CTRL:
+               /* This is AMD only.  */
+               return false;
+       default:
+               return true;
+       }
 }
 
 static bool vmx_mpx_supported(void)
@@ -9722,8 +9731,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * is no need to worry about the conditional branch over the wrmsr
         * being speculatively taken.
         */
-       if (vmx->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+       x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
 
        vmx->__launched = vmx->loaded_vmcs->launched;
 
@@ -9871,8 +9879,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
                vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-       if (vmx->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+       x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
 
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();
@@ -12632,7 +12639,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .hardware_enable = hardware_enable,
        .hardware_disable = hardware_disable,
        .cpu_has_accelerated_tpr = report_flexpriority,
-       .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+       .has_emulated_msr = vmx_has_emulated_msr,
 
        .vm_init = vmx_vm_init,
        .vm_alloc = vmx_vm_alloc,
index 59371de5d722d9687f72e39bd32161ddd75fa04d..22a183aac1c63f333efb446f1376d1ac09b85cd0 100644 (file)
@@ -1061,6 +1061,7 @@ static u32 emulated_msrs[] = {
        MSR_SMI_COUNT,
        MSR_PLATFORM_INFO,
        MSR_MISC_FEATURES_ENABLES,
+       MSR_AMD64_VIRT_SPEC_CTRL,
 };
 
 static unsigned num_emulated_msrs;
@@ -2906,7 +2907,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                 * fringe case that is not enabled except via specific settings
                 * of the module parameters.
                 */
-               r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+               r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
                break;
        case KVM_CAP_VAPIC:
                r = !kvm_x86_ops->cpu_has_accelerated_tpr();
@@ -4606,14 +4607,8 @@ static void kvm_init_msr_list(void)
        num_msrs_to_save = j;
 
        for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
-               switch (emulated_msrs[i]) {
-               case MSR_IA32_SMBASE:
-                       if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
-                               continue;
-                       break;
-               default:
-                       break;
-               }
+               if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+                       continue;
 
                if (j < i)
                        emulated_msrs[j] = emulated_msrs[i];
index 6389c88b3500a0bb220b92efa85531cf564a974c..738fb22978ddcd14ad1956c5119972f19b17d2a6 100644 (file)
@@ -334,6 +334,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
        { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
        { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
+       { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */
        { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
        { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
        { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
index 68596bd4cf06c2362692f7ded557b669efc7f983..346b163f6e89eacc24cfe0a205fa2f35f7d7cce6 100644 (file)
@@ -4493,6 +4493,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
        { "C300-CTFDDAC128MAG", "0001",         ATA_HORKAGE_NONCQ, },
 
+       /* Some Sandisk SSDs lock up hard with NCQ enabled.  Reported on
+          SD7SN6S256G and SD8SN8U256G */
+       { "SanDisk SD[78]SN*G", NULL,           ATA_HORKAGE_NONCQ, },
+
        /* devices which puke on READ_NATIVE_MAX */
        { "HDS724040KLSA80",    "KFAOA20N",     ATA_HORKAGE_BROKEN_HPA, },
        { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
@@ -4549,13 +4553,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                                ATA_HORKAGE_ZERO_AFTER_TRIM |
                                                ATA_HORKAGE_NOLPM, },
 
-       /* This specific Samsung model/firmware-rev does not handle LPM well */
+       /* These specific Samsung models/firmware-revs do not handle LPM well */
        { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
+       { "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
 
        /* Sandisk devices which are known to not handle LPM well */
        { "SanDisk SD7UB3Q*G1001",      NULL,   ATA_HORKAGE_NOLPM, },
 
        /* devices that don't properly handle queued TRIM commands */
+       { "Micron_M500IT_*",            "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Micron_M500_*",              NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Crucial_CT*M500*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
index 2da998baa75cb26714783bcce909332f8ae8e75e..30cc9c877ebb64c3e384f395daee6ebdc750ddef 100644 (file)
@@ -534,14 +534,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
        return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
+                                         struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
        &dev_attr_spectre_v1.attr,
        &dev_attr_spectre_v2.attr,
+       &dev_attr_spec_store_bypass.attr,
        NULL
 };
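With cpu_show_spec_store_bypass() implemented in bugs.c above and the attribute registered here, the mitigation state shows up next to the existing meltdown/spectre_v1/spectre_v2 entries, typically as /sys/devices/system/cpu/vulnerabilities/spec_store_bypass. Reading it returns one of the ssb_strings[] entries, for example "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", or "Not affected" from the weak default above on architectures that do not override it.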
 
index 02a497e7c78549a633d1079212da0936033075d1..e5e067091572e02243fde97f710c0a2d07a4d0b4 100644 (file)
@@ -1923,10 +1923,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
        dev->power.wakeup_path = false;
 
-       if (dev->power.no_pm_callbacks) {
-               ret = 1;        /* Let device go direct_complete */
+       if (dev->power.no_pm_callbacks)
                goto unlock;
-       }
 
        if (dev->pm_domain)
                callback = dev->pm_domain->ops.prepare;
@@ -1960,7 +1958,8 @@ unlock:
         */
        spin_lock_irq(&dev->power.lock);
        dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
-               pm_runtime_suspended(dev) && ret > 0 &&
+               ((pm_runtime_suspended(dev) && ret > 0) ||
+                dev->power.no_pm_callbacks) &&
                !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
        spin_unlock_irq(&dev->power.lock);
        return 0;
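Taken together, the two hunks let a device without PM callbacks take the direct_complete path even though ->prepare() no longer returns 1 for it; the decision can be restated as a standalone predicate (a sketch with the involved fields reduced to plain parameters):

#include <stdbool.h>

/* Sketch of the decision after this patch; names mirror the code above. */
static bool can_direct_complete(bool suspend_event, bool runtime_suspended,
				int prepare_ret, bool no_pm_callbacks,
				bool never_skip)
{
	return suspend_event &&
	       ((runtime_suspended && prepare_ret > 0) || no_pm_callbacks) &&
	       !never_skip;
}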
index 5d4e31655d9629732c42b02a602ebdc20bec2cda..55cf554bc91451d1ffe0eb0af50079de6b7fe13a 100644 (file)
@@ -1068,6 +1068,7 @@ static int loop_clr_fd(struct loop_device *lo)
        if (bdev) {
                bdput(bdev);
                invalidate_bdev(bdev);
+               bdev->bd_inode->i_mapping->wb_err = 0;
        }
        set_capacity(lo->lo_disk, 0);
        loop_sysfs_exit(lo);
index 3d2d3bbd1342052b2ddedcc0e95d0b4b84e6880c..155ad840f3c59d6cfae28caaba6a863ceb3880a9 100644 (file)
@@ -88,6 +88,9 @@ static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
        const struct drm_display_mode *panel_mode;
        struct drm_crtc_state *crtc_state;
 
+       if (!state->crtc)
+               return 0;
+
        if (list_empty(&connector->modes)) {
                dev_dbg(lvds->dev, "connector: empty modes list\n");
                return -EINVAL;
index 70e1a8820a7c01644cc1f5016d0a878b88c9dc23..8b770a8e02cdda09caee81be15ebc27434b4b67e 100644 (file)
@@ -1278,8 +1278,6 @@ static void vmw_master_drop(struct drm_device *dev,
        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);
-
-       vmw_fb_refresh(dev_priv);
 }
 
 /**
@@ -1483,7 +1481,6 @@ static int vmw_pm_freeze(struct device *kdev)
                        vmw_kms_resume(dev);
                if (dev_priv->enable_fb)
                        vmw_fb_on(dev_priv);
-               vmw_fb_refresh(dev_priv);
                return -EBUSY;
        }
 
@@ -1523,8 +1520,6 @@ static int vmw_pm_restore(struct device *kdev)
        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
 
-       vmw_fb_refresh(dev_priv);
-
        return 0;
 }
 
index f34f368c1a2ebc1713f35a7aab0f94c83061cbfa..5fcbe1620d50b34898815be80ec7560115c68997 100644 (file)
@@ -910,7 +910,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv);
 int vmw_fb_close(struct vmw_private *dev_priv);
 int vmw_fb_off(struct vmw_private *vmw_priv);
 int vmw_fb_on(struct vmw_private *vmw_priv);
-void vmw_fb_refresh(struct vmw_private *vmw_priv);
 
 /**
  * Kernel modesetting - vmwgfx_kms.c
index ba0cdb743c3e50d664848c021a0f63bc72953aa2..54e300365a5ccd04b2d5d6173fbb379bde860322 100644 (file)
@@ -866,21 +866,13 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = true;
        spin_unlock_irqrestore(&par->dirty.lock, flags);
-       return 0;
-}
 
-/**
- * vmw_fb_refresh - Refresh fb display
- *
- * @vmw_priv: Pointer to device private
- *
- * Call into kms to show the fbdev display(s).
- */
-void vmw_fb_refresh(struct vmw_private *vmw_priv)
-{
-       if (!vmw_priv->fb_info)
-               return;
+       /*
+        * Need to reschedule a dirty update, because otherwise that's
+        * only done in dirty_mark() if the previous coalesced
+        * dirty region was empty.
+        */
+       schedule_delayed_work(&par->local_work, 0);
 
-       vmw_fb_set_par(vmw_priv->fb_info);
+       return 0;
 }
index cdff99211602cc44901135f6dc0cdd781e849a21..21d746bdc922bc55ce27d25cd4d5c2ad935d595e 100644 (file)
@@ -329,8 +329,6 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
        struct rpc_channel channel;
        char *msg, *reply = NULL;
        size_t reply_len = 0;
-       int ret = 0;
-
 
        if (!vmw_msg_enabled)
                return -ENODEV;
@@ -344,15 +342,14 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
                return -ENOMEM;
        }
 
-       if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
-           vmw_send_msg(&channel, msg) ||
-           vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
-           vmw_close_channel(&channel)) {
-               DRM_ERROR("Failed to get %s", guest_info_param);
+       if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
+               goto out_open;
 
-               ret = -EINVAL;
-       }
+       if (vmw_send_msg(&channel, msg) ||
+           vmw_recv_msg(&channel, (void *) &reply, &reply_len))
+               goto out_msg;
 
+       vmw_close_channel(&channel);
        if (buffer && reply && reply_len > 0) {
                /* Remove reply code, which are the first 2 characters of
                 * the reply
@@ -369,7 +366,17 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
        kfree(reply);
        kfree(msg);
 
-       return ret;
+       return 0;
+
+out_msg:
+       vmw_close_channel(&channel);
+       kfree(reply);
+out_open:
+       *length = 0;
+       kfree(msg);
+       DRM_ERROR("Failed to get %s", guest_info_param);
+
+       return -EINVAL;
 }
 
 
@@ -400,15 +407,22 @@ int vmw_host_log(const char *log)
                return -ENOMEM;
        }
 
-       if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
-           vmw_send_msg(&channel, msg) ||
-           vmw_close_channel(&channel)) {
-               DRM_ERROR("Failed to send log\n");
+       if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
+               goto out_open;
 
-               ret = -EINVAL;
-       }
+       if (vmw_send_msg(&channel, msg))
+               goto out_msg;
 
+       vmw_close_channel(&channel);
        kfree(msg);
 
-       return ret;
+       return 0;
+
+out_msg:
+       vmw_close_channel(&channel);
+out_open:
+       kfree(msg);
+       DRM_ERROR("Failed to send log\n");
+
+       return -EINVAL;
 }
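Both message helpers are reworked from a single all-or-nothing condition into the usual goto-based unwind, so a failure after opening the channel still closes it, while a failure before that only frees the message. A generic, self-contained sketch of the idiom with hypothetical stand-ins for the open/send/close helpers (not the vmwgfx functions):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for vmw_open_channel()/vmw_send_msg()/vmw_close_channel(). */
struct channel { int id; };

static int chan_open(struct channel *ch)  { ch->id = 1; return 0; }
static int chan_send(struct channel *ch, const char *msg) { return puts(msg) < 0; }
static void chan_close(struct channel *ch) { ch->id = 0; }

static int send_message(const char *text)
{
	struct channel ch;
	char *msg = strdup(text);

	if (!msg)
		return -ENOMEM;

	if (chan_open(&ch))
		goto out_open;		/* only msg to free */

	if (chan_send(&ch, msg))
		goto out_msg;		/* channel must be closed as well */

	chan_close(&ch);
	free(msg);
	return 0;

out_msg:
	chan_close(&ch);
out_open:
	free(msg);
	return -EINVAL;
}

int main(void)
{
	return send_message("hello") ? 1 : 0;
}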
index 557a033fb610f1dfcb3568aceebd5592c97b3c60..8545488aa0cfbe1bf1b1d14514d6794c0077834a 100644 (file)
 
 #else
 
-/* In the 32-bit version of this macro, we use "m" because there is no
- * more register left for bp
+/*
+ * In the 32-bit version of this macro, we store bp in a memory location
+ * because we've run out of registers.
+ * That memory location can't be referenced once %esp or %ebp has been
+ * modified, so we push it on the stack just before pushing %ebp, and
+ * read it back from the stack when it is needed.
  */
 #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di,     \
                        port_num, magic, bp,            \
                        eax, ebx, ecx, edx, si, di)     \
 ({                                                     \
-       asm volatile ("push %%ebp;"                     \
-               "mov %12, %%ebp;"                       \
+       asm volatile ("push %12;"                       \
+               "push %%ebp;"                           \
+               "mov 0x04(%%esp), %%ebp;"               \
                "rep outsb;"                            \
-               "pop %%ebp;" :                          \
+               "pop %%ebp;"                            \
+               "add $0x04, %%esp;" :                   \
                "=a"(eax),                              \
                "=b"(ebx),                              \
                "=c"(ecx),                              \
                       port_num, magic, bp,             \
                       eax, ebx, ecx, edx, si, di)      \
 ({                                                     \
-       asm volatile ("push %%ebp;"                     \
-               "mov %12, %%ebp;"                       \
+       asm volatile ("push %12;"                       \
+               "push %%ebp;"                           \
+               "mov 0x04(%%esp), %%ebp;"               \
                "rep insb;"                             \
-               "pop %%ebp" :                           \
+               "pop %%ebp;"                            \
+               "add $0x04, %%esp;" :                   \
                "=a"(eax),                              \
                "=b"(ebx),                              \
                "=c"(ecx),                              \
index 9a4e899d94b30a843e54f3a06e975ad632428a26..2b6c9b5160705a95d779b22aec4292904ec3b040 100644 (file)
@@ -119,7 +119,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        umem->length     = size;
        umem->address    = addr;
        umem->page_shift = PAGE_SHIFT;
-       umem->pid        = get_task_pid(current, PIDTYPE_PID);
        /*
         * We ask for writable memory if any of the following
         * access flags are set.  "Local write" and "remote write"
@@ -132,7 +131,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
 
        if (access & IB_ACCESS_ON_DEMAND) {
-               put_pid(umem->pid);
                ret = ib_umem_odp_get(context, umem, access);
                if (ret) {
                        kfree(umem);
@@ -148,7 +146,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list) {
-               put_pid(umem->pid);
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }
@@ -231,7 +228,6 @@ out:
        if (ret < 0) {
                if (need_release)
                        __ib_umem_release(context->device, umem, 0);
-               put_pid(umem->pid);
                kfree(umem);
        } else
                current->mm->pinned_vm = locked;
@@ -274,8 +270,7 @@ void ib_umem_release(struct ib_umem *umem)
 
        __ib_umem_release(umem->context->device, umem, 1);
 
-       task = get_pid_task(umem->pid, PIDTYPE_PID);
-       put_pid(umem->pid);
+       task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
        if (!task)
                goto out;
        mm = get_task_mm(task);
index e90f2fd8dc16dfd0d5e1b0365fe15514b1f8dbe0..1445918e32392f28ae4ce9ea74e7df0feeddf371 100644 (file)
@@ -489,10 +489,10 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 err_dereg_mem:
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
-err_free_wr_wait:
-       c4iw_put_wr_wait(mhp->wr_waitp);
 err_free_skb:
        kfree_skb(mhp->dereg_skb);
+err_free_wr_wait:
+       c4iw_put_wr_wait(mhp->wr_waitp);
 err_free_mhp:
        kfree(mhp);
        return ERR_PTR(ret);
index e6a60fa59f2b5e32f240374e980c68558bdf3f86..e6bdd0c1e80a9b35fb5d0690b1ba198d628a30ab 100644 (file)
@@ -5944,6 +5944,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
        u64 status;
        u32 sw_index;
        int i = 0;
+       unsigned long irq_flags;
 
        sw_index = dd->hw_to_sw[hw_context];
        if (sw_index >= dd->num_send_contexts) {
@@ -5953,10 +5954,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
                return;
        }
        sci = &dd->send_contexts[sw_index];
+       spin_lock_irqsave(&dd->sc_lock, irq_flags);
        sc = sci->sc;
        if (!sc) {
                dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
                           sw_index, hw_context);
+               spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
                return;
        }
 
@@ -5978,6 +5981,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
         */
        if (sc->type != SC_USER)
                queue_work(dd->pport->hfi1_wq, &sc->halt_work);
+       spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
 
        /*
         * Update the counters for the corresponding status bits.
index 14734d0d0b76086dfc5ea76e84ae3dc27a2a58b9..3a485f50fede1d59d30bd73240c3517c50dd9450 100644 (file)
@@ -377,6 +377,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 
                        hr_cq->set_ci_db = hr_cq->db.db_record;
                        *hr_cq->set_ci_db = 0;
+                       hr_cq->db_en = 1;
                }
 
                /* Init mmt table and write buff address to mtt table */
index 47e1b6ac1e1acdd38e9f88cfec179a50471fee29..8013d69c5ac496fe5ddaa10b9e48ef80f5db3c5e 100644 (file)
@@ -722,6 +722,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
        free_mr->mr_free_pd = to_hr_pd(pd);
        free_mr->mr_free_pd->ibpd.device  = &hr_dev->ib_dev;
        free_mr->mr_free_pd->ibpd.uobject = NULL;
+       free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
        atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
 
        attr.qp_access_flags    = IB_ACCESS_REMOTE_WRITE;
@@ -1036,7 +1037,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 
        do {
                ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
-               if (ret < 0) {
+               if (ret < 0 && hr_qp) {
                        dev_err(dev,
                           "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
                           hr_qp->qpn, ret, hr_mr->key, ne);
index 25916e8522eda4e061c63e2b652db1fa8fdf5e41..1f0965bb64eedab6057ed511636ed4777c1fed30 100644 (file)
@@ -142,8 +142,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        unsigned long flags;
        unsigned int ind;
        void *wqe = NULL;
-       u32 tmp_len = 0;
        bool loopback;
+       u32 tmp_len;
        int ret = 0;
        u8 *smac;
        int nreq;
@@ -189,6 +189,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
                owner_bit =
                       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+               tmp_len = 0;
 
                /* Corresponding to the QP type, wqe process separately */
                if (ibqp->qp_type == IB_QPT_GSI) {
@@ -547,16 +548,20 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                }
 
                if (i < hr_qp->rq.max_gs) {
-                       dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
-                       dseg[i].addr = 0;
+                       dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+                       dseg->addr = 0;
                }
 
                /* rq support inline data */
-               sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
-               hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
-               for (i = 0; i < wr->num_sge; i++) {
-                       sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
-                       sge_list[i].len = wr->sg_list[i].length;
+               if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+                       sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
+                       hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
+                                                              (u32)wr->num_sge;
+                       for (i = 0; i < wr->num_sge; i++) {
+                               sge_list[i].addr =
+                                              (void *)(u64)wr->sg_list[i].addr;
+                               sge_list[i].len = wr->sg_list[i].length;
+                       }
                }
 
                hr_qp->rq.wrid[ind] = wr->wr_id;
@@ -613,6 +618,8 @@ static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
        dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
                         ring->desc_num * sizeof(struct hns_roce_cmq_desc),
                         DMA_BIDIRECTIONAL);
+
+       ring->desc_dma_addr = 0;
        kfree(ring->desc);
 }
 
@@ -1081,6 +1088,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
        if (ret) {
                dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
                        ret);
+               return ret;
        }
 
        /* Get pf resource owned by every pf */
@@ -1372,6 +1380,8 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 
        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
                     mr->type == MR_TYPE_MR ? 0 : 1);
+       roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
+                    1);
        mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
 
        mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
@@ -2169,6 +2179,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
                                    struct hns_roce_v2_qp_context *context,
                                    struct hns_roce_v2_qp_context *qpc_mask)
 {
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 
        /*
@@ -2281,7 +2292,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
        context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
        qpc_mask->rq_db_record_addr = 0;
 
-       roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
+       roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
+                   (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
        roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
 
        roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
@@ -4703,6 +4715,8 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
        {0, }
 };
 
+MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
+
 static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
                                  struct hnae3_handle *handle)
 {
index 9d48bc07a9e683a22f91c8cac02a273e7f768a9c..96fb6a9ed93c4eb5948629f21d85f3f9b3c40672 100644 (file)
@@ -199,7 +199,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
 
        memset(props, 0, sizeof(*props));
 
-       props->sys_image_guid = cpu_to_be32(hr_dev->sys_image_guid);
+       props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
        props->max_mr_size = (u64)(~(0ULL));
        props->page_size_cap = hr_dev->caps.page_size_cap;
        props->vendor_id = hr_dev->vendor_id;
index d4aad34c21e2ca8a9bc36db6ba1f682ac1f1972f..baaf906f7c2e4d2546c4d53333123eca3350c38b 100644 (file)
@@ -660,6 +660,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                                goto err_rq_sge_list;
                        }
                        *hr_qp->rdb.db_record = 0;
+                       hr_qp->rdb_en = 1;
                }
 
                /* Allocate QP buf */
@@ -955,7 +956,14 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        }
 
        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-               ret = 0;
+               if (hr_dev->caps.min_wqes) {
+                       ret = -EPERM;
+                       dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
+                               new_state);
+               } else {
+                       ret = 0;
+               }
+
                goto out;
        }
 
index d5d8c1be345a64ec0de46ddac9e54bc7d13fa95e..2f2b4426ded77569f563da934f9bce10fc67194b 100644 (file)
@@ -207,6 +207,7 @@ struct i40iw_msix_vector {
        u32 irq;
        u32 cpu_affinity;
        u32 ceq_id;
+       cpumask_t mask;
 };
 
 struct l2params_work {
index 4cfa8f4647e222ea3ef35fdc699fbe3e0ad9281e..f7c6fd9ff6e2c53101a15a8a81cdec94044afbb6 100644 (file)
@@ -2093,7 +2093,7 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
        if (netif_is_bond_slave(netdev))
                netdev = netdev_master_upper_dev_get(netdev);
 
-       neigh = dst_neigh_lookup(dst, &dst_addr);
+       neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
 
        rcu_read_lock();
        if (neigh) {
index 6139836fb533adf194ede3ee693aefcbe55471d8..c9f62ca7643c85ac1a076c6486c19f17ff3ea80a 100644 (file)
@@ -331,7 +331,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
                switch (info->ae_id) {
                case I40IW_AE_LLP_FIN_RECEIVED:
                        if (qp->term_flags)
-                               continue;
+                               break;
                        if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
                                iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
                                if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
@@ -360,7 +360,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
                        break;
                case I40IW_AE_LLP_CONNECTION_RESET:
                        if (atomic_read(&iwqp->close_timer_started))
-                               continue;
+                               break;
                        i40iw_cm_disconn(iwqp);
                        break;
                case I40IW_AE_QP_SUSPEND_COMPLETE:
index 9cd0d3ef90575adff6103e9e3a7cf544d986fe79..05001e6da1f8fd473598e37d9ce7011eb2a19f19 100644 (file)
@@ -687,7 +687,6 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
                                                         struct i40iw_msix_vector *msix_vec)
 {
        enum i40iw_status_code status;
-       cpumask_t mask;
 
        if (iwdev->msix_shared && !ceq_id) {
                tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
@@ -697,9 +696,9 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
                status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
        }
 
-       cpumask_clear(&mask);
-       cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
-       irq_set_affinity_hint(msix_vec->irq, &mask);
+       cpumask_clear(&msix_vec->mask);
+       cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
+       irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
 
        if (status) {
                i40iw_pr_err("ceq irq config fail\n");
index 40e4f5ab2b46b15abd21081b3bc4f9d2145f4c63..68679ad4c6daf7b83b98d2d1113df522260e0d52 100644 (file)
@@ -394,6 +394,7 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
 
        list_for_each_entry(iwpbl, pbl_list, list) {
                if (iwpbl->user_base == va) {
+                       iwpbl->on_list = false;
                        list_del(&iwpbl->list);
                        return iwpbl;
                }
@@ -614,6 +615,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
                return ERR_PTR(-ENOMEM);
 
        iwqp = (struct i40iw_qp *)mem;
+       iwqp->allocated_buffer = mem;
        qp = &iwqp->sc_qp;
        qp->back_qp = (void *)iwqp;
        qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
@@ -642,7 +644,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
                goto error;
        }
 
-       iwqp->allocated_buffer = mem;
        iwqp->iwdev = iwdev;
        iwqp->iwpd = iwpd;
        iwqp->ibqp.qp_num = qp_num;
@@ -1898,6 +1899,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
                        goto error;
                spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
                list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+               iwpbl->on_list = true;
                spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
                break;
        case IW_MEMREG_TYPE_CQ:
@@ -1908,6 +1910,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 
                spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
                list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+               iwpbl->on_list = true;
                spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
                break;
        case IW_MEMREG_TYPE_MEM:
@@ -2045,14 +2048,18 @@ static void i40iw_del_memlist(struct i40iw_mr *iwmr,
        switch (iwmr->type) {
        case IW_MEMREG_TYPE_CQ:
                spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-               if (!list_empty(&ucontext->cq_reg_mem_list))
+               if (iwpbl->on_list) {
+                       iwpbl->on_list = false;
                        list_del(&iwpbl->list);
+               }
                spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
                break;
        case IW_MEMREG_TYPE_QP:
                spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
-               if (!list_empty(&ucontext->qp_reg_mem_list))
+               if (iwpbl->on_list) {
+                       iwpbl->on_list = false;
                        list_del(&iwpbl->list);
+               }
                spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
                break;
        default:
index 9067443cd31151bb501a505e74b06bb56265f1b5..76cf173377ab24c9ff716ca31484b71d25652c80 100644 (file)
@@ -78,6 +78,7 @@ struct i40iw_pbl {
        };
 
        bool pbl_allocated;
+       bool on_list;
        u64 user_base;
        struct i40iw_pble_alloc pble_alloc;
        struct i40iw_mr *iwmr;
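The new on_list flag records whether a pbl is actually linked into one of the per-context lists, so i40iw_del_memlist() only unlinks entries that are really on a list instead of inferring that from the list head being non-empty. A condensed sketch of the guarded-removal pattern with simplified types (not the i40iw structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_del(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = NULL;
}

struct pbl_entry {
	struct list_node list;
	bool on_list;			/* set when added, cleared on removal */
};

/* Only unlink entries that are really linked; safe to call more than once. */
static void del_memlist(struct pbl_entry *e)
{
	if (e->on_list) {
		e->on_list = false;
		list_del(&e->list);
	}
}

int main(void)
{
	struct list_node head = { &head, &head };
	struct pbl_entry e = { .on_list = true };

	/* insert e after head */
	e.list.next = head.next;
	e.list.prev = &head;
	head.next->prev = &e.list;
	head.next = &e.list;

	del_memlist(&e);		/* unlinks the entry */
	del_memlist(&e);		/* second call is a harmless no-op */
	printf("list empty again: %d\n", head.next == &head && head.prev == &head);
	return 0;
}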
index b4d8ff8ab807a445ad93cc532ef1f4fe5d4242fb..69716a7ea9934a414594ffe2b1b86fdcb41a411d 100644 (file)
@@ -2416,7 +2416,7 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
        MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
 }
 
-static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
+static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
                           bool inner)
 {
        if (inner) {
index 87b7c1be2a117b0a0d7840ce13d36dabb5c4a13c..2193dc1765fb2581ceacc89300e4369e0765eb36 100644 (file)
@@ -484,11 +484,6 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
        return 1;
 }
 
-static int first_med_bfreg(void)
-{
-       return 1;
-}
-
 enum {
        /* this is the first blue flame register in the array of bfregs assigned
         * to a processes. Since we do not use it for blue flame but rather
@@ -514,6 +509,12 @@ static int num_med_bfreg(struct mlx5_ib_dev *dev,
        return n >= 0 ? n : 0;
 }
 
+static int first_med_bfreg(struct mlx5_ib_dev *dev,
+                          struct mlx5_bfreg_info *bfregi)
+{
+       return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
+}
+
 static int first_hi_bfreg(struct mlx5_ib_dev *dev,
                          struct mlx5_bfreg_info *bfregi)
 {
@@ -541,10 +542,13 @@ static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
 static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
                                 struct mlx5_bfreg_info *bfregi)
 {
-       int minidx = first_med_bfreg();
+       int minidx = first_med_bfreg(dev, bfregi);
        int i;
 
-       for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) {
+       if (minidx < 0)
+               return minidx;
+
+       for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
                if (bfregi->count[i] < bfregi->count[minidx])
                        minidx = i;
                if (!bfregi->count[minidx])
index 7d3763b2e01c99fd7be7d4c72db653c1365e088c..3f9afc02d166b6bce9a2c4db87ad85878c33b3e6 100644 (file)
@@ -401,49 +401,47 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
        struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
        struct qedr_dev *dev = get_qedr_dev(context->device);
-       unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
-       u64 unmapped_db = dev->db_phys_addr;
+       unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long len = (vma->vm_end - vma->vm_start);
-       int rc = 0;
-       bool found;
+       unsigned long dpi_start;
+
+       dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
 
        DP_DEBUG(dev, QEDR_MSG_INIT,
-                "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
-                vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
-       if (vma->vm_start & (PAGE_SIZE - 1)) {
-               DP_ERR(dev, "Vma_start not page aligned = %ld\n",
-                      vma->vm_start);
+                "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
+                (void *)vma->vm_start, (void *)vma->vm_end,
+                (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
+
+       if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
+               DP_ERR(dev,
+                      "failed mmap, addresses must be page aligned: start=0x%pK, end=0x%pK\n",
+                      (void *)vma->vm_start, (void *)vma->vm_end);
                return -EINVAL;
        }
 
-       found = qedr_search_mmap(ucontext, vm_page, len);
-       if (!found) {
-               DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
+       if (!qedr_search_mmap(ucontext, phys_addr, len)) {
+               DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
                       vma->vm_pgoff);
                return -EINVAL;
        }
 
-       DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
-
-       if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
-                                                    dev->db_size))) {
-               DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
-               if (vma->vm_flags & VM_READ) {
-                       DP_ERR(dev, "Trying to map doorbell bar for read\n");
-                       return -EPERM;
-               }
-
-               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       if (phys_addr < dpi_start ||
+           ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
+               DP_ERR(dev,
+                      "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
+                      (void *)phys_addr, (void *)dpi_start,
+                      ucontext->dpi_size);
+               return -EINVAL;
+       }
 
-               rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                                       PAGE_SIZE, vma->vm_page_prot);
-       } else {
-               DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
-               rc = remap_pfn_range(vma, vma->vm_start,
-                                    vma->vm_pgoff, len, vma->vm_page_prot);
+       if (vma->vm_flags & VM_READ) {
+               DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
+               return -EINVAL;
        }
-       DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
-       return rc;
+
+       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
+                                 vma->vm_page_prot);
 }
 
 struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
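The rewritten qedr_mmap() reduces the old region search and doorbell special-casing to three explicit checks: page alignment of start and length, an authorized-mapping lookup, and containment of the request inside the context's doorbell (dpi) window, followed by a single write-combined io_remap_pfn_range(). The containment check in isolation, as a sketch with plain integers and a 4 KiB page size assumed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static bool dpi_range_ok(uint64_t phys_addr, uint64_t len,
			 uint64_t dpi_start, uint32_t dpi_size)
{
	if ((phys_addr & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1)))
		return false;			/* must be page aligned */
	if (phys_addr < dpi_start)
		return false;			/* starts below the window */
	if (phys_addr + len > dpi_start + dpi_size)
		return false;			/* runs past the window */
	return true;
}

int main(void)
{
	/* fits entirely inside a two-page window: ok */
	printf("%d\n", dpi_range_ok(0x10000, PAGE_SIZE, 0x10000, 2 * PAGE_SIZE));
	/* starts inside but ends one page past the window: rejected */
	printf("%d\n", dpi_range_ok(0x11000, 2 * PAGE_SIZE, 0x10000, 2 * PAGE_SIZE));
	return 0;
}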
index 2cb52fd48cf12bdace50806228ec2c8c502cf1ae..73a00a1c06f62a80fa7522763ba4669f2c4e1c28 100644 (file)
@@ -761,7 +761,6 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
        unsigned int mask;
        unsigned int length = 0;
        int i;
-       int must_sched;
 
        while (wr) {
                mask = wr_opcode_mask(wr->opcode, qp);
@@ -791,14 +790,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
                wr = wr->next;
        }
 
-       /*
-        * Must sched in case of GSI QP because ib_send_mad() hold irq lock,
-        * and the requester call ip_local_out_sk() that takes spin_lock_bh.
-        */
-       must_sched = (qp_type(qp) == IB_QPT_GSI) ||
-                       (queue_count(qp->sq.queue) > 1);
-
-       rxe_run_task(&qp->req.task, must_sched);
+       rxe_run_task(&qp->req.task, 1);
        if (unlikely(qp->req.state == QP_STATE_ERROR))
                rxe_run_task(&qp->comp.task, 1);
 
index fb8b7182f05ebd7413058d54e8be7fca974dcc44..25bf6955b6d0273b7ed4978954274cad20c2512d 100644 (file)
@@ -1,6 +1,6 @@
 config INFINIBAND_SRPT
        tristate "InfiniBand SCSI RDMA Protocol target support"
-       depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
+       depends on INFINIBAND_ADDR_TRANS && TARGET_CORE
        ---help---
 
          Support for the SCSI RDMA Protocol (SRP) Target driver. The
index 1b52b8557034bcab08f46c85efa37eee9b3d6f34..2060d1483043d8ee52dba6e4a65d20ba18a2b97a 100644 (file)
@@ -419,10 +419,25 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
                /* Verify that EC can process command */
                for (i = 0; i < len; i++) {
                        rx_byte = rx_buf[i];
+                       /*
+                        * The PAST_END, RX_BAD_DATA, and NOT_READY markers
+                        * are all signs that the EC didn't fully receive our
+                        * command. E.g., if the EC is flashing
+                        * itself, it can't respond to any commands and instead
+                        * clocks out EC_SPI_PAST_END from its SPI hardware
+                        * buffer. Similar occurrences can happen if the AP is
+                        * too slow to clock out data after asserting CS -- the
+                        * EC will abort and fill its buffer with
+                        * EC_SPI_RX_BAD_DATA.
+                        *
+                        * In all cases, these errors should be safe to retry.
+                        * Report -EAGAIN and let the caller decide what to do
+                        * about that.
+                        */
                        if (rx_byte == EC_SPI_PAST_END  ||
                            rx_byte == EC_SPI_RX_BAD_DATA ||
                            rx_byte == EC_SPI_NOT_READY) {
-                               ret = -EREMOTEIO;
+                               ret = -EAGAIN;
                                break;
                        }
                }
@@ -431,7 +446,7 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
        if (!ret)
                ret = cros_ec_spi_receive_packet(ec_dev,
                                ec_msg->insize + sizeof(*response));
-       else
+       else if (ret != -EAGAIN)
                dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
 
        final_ret = terminate_request(ec_dev);
@@ -537,10 +552,11 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
                /* Verify that EC can process command */
                for (i = 0; i < len; i++) {
                        rx_byte = rx_buf[i];
+                       /* See comments in cros_ec_pkt_xfer_spi() */
                        if (rx_byte == EC_SPI_PAST_END  ||
                            rx_byte == EC_SPI_RX_BAD_DATA ||
                            rx_byte == EC_SPI_NOT_READY) {
-                               ret = -EREMOTEIO;
+                               ret = -EAGAIN;
                                break;
                        }
                }
@@ -549,7 +565,7 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
        if (!ret)
                ret = cros_ec_spi_receive_response(ec_dev,
                                ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
-       else
+       else if (ret != -EAGAIN)
                dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
 
        final_ret = terminate_request(ec_dev);
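Because the PAST_END/RX_BAD_DATA/NOT_READY cases now surface as -EAGAIN instead of -EREMOTEIO, a caller can retry the transfer a bounded number of times before giving up. A hypothetical caller-side retry loop (the names and the transfer hook are illustrative, not the cros_ec API):

#include <errno.h>
#include <stdio.h>

#define EC_XFER_RETRIES	3

/* Hypothetical transfer hook standing in for the cros_ec xfer callback. */
static int ec_do_xfer(void *msg)
{
	static int attempts;

	(void)msg;
	return ++attempts < 3 ? -EAGAIN : 0;	/* succeed on the third try */
}

static int ec_xfer_with_retry(void *msg)
{
	int ret, try;

	for (try = 0; try < EC_XFER_RETRIES; try++) {
		ret = ec_do_xfer(msg);
		if (ret != -EAGAIN)
			break;			/* success or a hard error */
	}
	return ret;
}

int main(void)
{
	printf("final result: %d\n", ec_xfer_with_retry(NULL));
	return 0;
}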
index 9e923cd1d80ebee9c96d70aceabb87b2a0935581..38a7586b00ccafd6bb0eed4307f9a40f91fee90a 100644 (file)
@@ -2485,7 +2485,7 @@ static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
                break;
        }
 
-       return 0;
+       return ret;
 }
 
 #ifdef CONFIG_COMPAT
index 0ef741bc515d9f354e03576bc2eb079b40a1d7e5..d0e83db42ae52614b5ab9f03d211290f7c100937 100644 (file)
@@ -33,6 +33,8 @@ struct sdhci_iproc_host {
        const struct sdhci_iproc_data *data;
        u32 shadow_cmd;
        u32 shadow_blk;
+       bool is_cmd_shadowed;
+       bool is_blk_shadowed;
 };
 
 #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
@@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
 
 static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
 {
-       u32 val = sdhci_iproc_readl(host, (reg & ~3));
-       u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
+       u32 val;
+       u16 word;
+
+       if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
+               /* Get the saved transfer mode */
+               val = iproc_host->shadow_cmd;
+       } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
+                  iproc_host->is_blk_shadowed) {
+               /* Get the saved block info */
+               val = iproc_host->shadow_blk;
+       } else {
+               val = sdhci_iproc_readl(host, (reg & ~3));
+       }
+       word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
        return word;
 }
 
@@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
 
        if (reg == SDHCI_COMMAND) {
                /* Write the block now as we are issuing a command */
-               if (iproc_host->shadow_blk != 0) {
+               if (iproc_host->is_blk_shadowed) {
                        sdhci_iproc_writel(host, iproc_host->shadow_blk,
                                SDHCI_BLOCK_SIZE);
-                       iproc_host->shadow_blk = 0;
+                       iproc_host->is_blk_shadowed = false;
                }
                oldval = iproc_host->shadow_cmd;
-       } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
+               iproc_host->is_cmd_shadowed = false;
+       } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
+                  iproc_host->is_blk_shadowed) {
                /* Block size and count are stored in shadow reg */
                oldval = iproc_host->shadow_blk;
        } else {
@@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
        if (reg == SDHCI_TRANSFER_MODE) {
                /* Save the transfer mode until the command is issued */
                iproc_host->shadow_cmd = newval;
+               iproc_host->is_cmd_shadowed = true;
        } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
                /* Save the block info until the command is issued */
                iproc_host->shadow_blk = newval;
+               iproc_host->is_blk_shadowed = true;
        } else {
                /* Command or other regular 32-bit write */
                sdhci_iproc_writel(host, newval, reg & ~3);
@@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
 
 static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
        .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
-       .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
+       .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
        .ops = &sdhci_iproc_32only_ops,
 };
 
@@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = {
        .caps1 = SDHCI_DRIVER_TYPE_C |
                 SDHCI_DRIVER_TYPE_D |
                 SDHCI_SUPPORT_DDR50,
-       .mmc_caps = MMC_CAP_1_8V_DDR,
 };
 
 static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
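The controller only supports 32-bit register accesses, so 16-bit writes to the transfer-mode and block registers are buffered ("shadowed") and flushed when SDHCI_COMMAND is written; the new is_cmd_shadowed/is_blk_shadowed flags say whether those buffered copies are valid, rather than treating a zero shadow value as "empty". A reduced sketch of the read side of that scheme (offsets follow the SDHCI layout, but the code is illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REG_BLOCK_SIZE		0x04
#define REG_BLOCK_COUNT		0x06
#define REG_TRANSFER_MODE	0x0c

struct shadow_host {
	uint32_t shadow_cmd, shadow_blk;
	bool is_cmd_shadowed, is_blk_shadowed;
};

static uint32_t hw_readl(int reg) { (void)reg; return 0xdeadbeef; }	/* fake MMIO read */

static uint16_t shadow_readw(struct shadow_host *h, int reg)
{
	uint32_t val;

	if (reg == REG_TRANSFER_MODE && h->is_cmd_shadowed)
		val = h->shadow_cmd;			/* not yet written to hardware */
	else if ((reg == REG_BLOCK_SIZE || reg == REG_BLOCK_COUNT) &&
		 h->is_blk_shadowed)
		val = h->shadow_blk;
	else
		val = hw_readl(reg & ~3);		/* fall back to the device */

	return (val >> ((reg & 3) * 8)) & 0xffff;
}

int main(void)
{
	struct shadow_host h = { .shadow_cmd = 0x33, .is_cmd_shadowed = true };

	printf("transfer mode reads back 0x%04x\n",
	       shadow_readw(&h, REG_TRANSFER_MODE));	/* 0x0033 from the shadow */
	return 0;
}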
index 23b45da784cb601a7abf84b212717aee7dc64403..b89acaee12d4364247a694ad989d2ae06ca640aa 100644 (file)
@@ -354,10 +354,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
        /* Locate the first rule available */
        if (fs->location == RX_CLS_LOC_ANY)
                rule_index = find_first_zero_bit(priv->cfp.used,
-                                                bcm_sf2_cfp_rule_size(priv));
+                                                priv->num_cfp_rules);
        else
                rule_index = fs->location;
 
+       if (rule_index > bcm_sf2_cfp_rule_size(priv))
+               return -ENOSPC;
+
        layout = &udf_tcpip4_layout;
        /* We only use one UDF slice for now */
        slice_num = bcm_sf2_get_slice_number(layout, 0);
@@ -562,19 +565,21 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
         * first half because the HW search is by incrementing addresses.
         */
        if (fs->location == RX_CLS_LOC_ANY)
-               rule_index[0] = find_first_zero_bit(priv->cfp.used,
-                                                   bcm_sf2_cfp_rule_size(priv));
+               rule_index[1] = find_first_zero_bit(priv->cfp.used,
+                                                   priv->num_cfp_rules);
        else
-               rule_index[0] = fs->location;
+               rule_index[1] = fs->location;
+       if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
+               return -ENOSPC;
 
        /* Flag it as used (cleared on error path) such that we can immediately
         * obtain a second one to chain from.
         */
-       set_bit(rule_index[0], priv->cfp.used);
+       set_bit(rule_index[1], priv->cfp.used);
 
-       rule_index[1] = find_first_zero_bit(priv->cfp.used,
-                                           bcm_sf2_cfp_rule_size(priv));
-       if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) {
+       rule_index[0] = find_first_zero_bit(priv->cfp.used,
+                                           priv->num_cfp_rules);
+       if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
                ret = -ENOSPC;
                goto out_err;
        }
@@ -712,14 +717,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
        /* Flag the second half rule as being used now, return it as the
         * location, and flag it as unique while dumping rules
         */
-       set_bit(rule_index[1], priv->cfp.used);
+       set_bit(rule_index[0], priv->cfp.used);
        set_bit(rule_index[1], priv->cfp.unique);
        fs->location = rule_index[1];
 
        return ret;
 
 out_err:
-       clear_bit(rule_index[0], priv->cfp.used);
+       clear_bit(rule_index[1], priv->cfp.used);
        return ret;
 }
 
@@ -785,10 +790,6 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
        int ret;
        u32 reg;
 
-       /* Refuse deletion of unused rules, and the default reserved rule */
-       if (!test_bit(loc, priv->cfp.used) || loc == 0)
-               return -EINVAL;
-
        /* Indicate which rule we want to read */
        bcm_sf2_cfp_rule_addr_set(priv, loc);
 
@@ -826,6 +827,13 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
        u32 next_loc = 0;
        int ret;
 
+       /* Refuse to delete unused rules, and those that are not unique, since
+        * that could leave an IPv6 rule with one of its chained rules still in
+        * the table.
+        */
+       if (!test_bit(loc, priv->cfp.unique) || loc == 0)
+               return -EINVAL;
+
        ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
        if (ret)
                return ret;
index 36c8950dbd2d80699f396f217f0f438479f68355..176861bd225258d6df955196d94966cfe5253cdd 100644 (file)
@@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
        vp->mii.reg_num_mask = 0x1f;
 
        /* Makes sure rings are at least 16 byte aligned. */
-       vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+       vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
                                           + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-                                          &vp->rx_ring_dma);
+                                          &vp->rx_ring_dma, GFP_KERNEL);
        retval = -ENOMEM;
        if (!vp->rx_ring)
                goto free_device;
@@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
                return 0;
 
 free_ring:
-       pci_free_consistent(pdev,
-                                               sizeof(struct boom_rx_desc) * RX_RING_SIZE
-                                                       + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-                                               vp->rx_ring,
-                                               vp->rx_ring_dma);
+       dma_free_coherent(&pdev->dev,
+               sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+               sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+               vp->rx_ring, vp->rx_ring_dma);
 free_device:
        free_netdev(dev);
        pr_err(PFX "vortex_probe1 fails.  Returns %d\n", retval);
@@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev)
                                break;                  /* Bad news!  */
 
                        skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
-                       dma = pci_map_single(VORTEX_PCI(vp), skb->data,
-                                            PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-                       if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+                       dma = dma_map_single(vp->gendev, skb->data,
+                                            PKT_BUF_SZ, DMA_FROM_DEVICE);
+                       if (dma_mapping_error(vp->gendev, dma))
                                break;
                        vp->rx_ring[i].addr = cpu_to_le32(dma);
                }
@@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (vp->bus_master) {
                /* Set the bus-master controller to transfer the packet. */
                int len = (skb->len + 3) & ~3;
-               vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
-                                               PCI_DMA_TODEVICE);
-               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+               vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
+                                               DMA_TO_DEVICE);
+               if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
                        dev_kfree_skb_any(skb);
                        dev->stats.tx_dropped++;
                        return NETDEV_TX_OK;
@@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
 
        if (!skb_shinfo(skb)->nr_frags) {
-               dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
-                                         PCI_DMA_TODEVICE);
-               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+               dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
+                                         DMA_TO_DEVICE);
+               if (dma_mapping_error(vp->gendev, dma_addr))
                        goto out_dma_err;
 
                vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
        } else {
                int i;
 
-               dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
-                                         skb_headlen(skb), PCI_DMA_TODEVICE);
-               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+               dma_addr = dma_map_single(vp->gendev, skb->data,
+                                         skb_headlen(skb), DMA_TO_DEVICE);
+               if (dma_mapping_error(vp->gendev, dma_addr))
                        goto out_dma_err;
 
                vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-                       dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+                       dma_addr = skb_frag_dma_map(vp->gendev, frag,
                                                    0,
                                                    frag->size,
                                                    DMA_TO_DEVICE);
-                       if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+                       if (dma_mapping_error(vp->gendev, dma_addr)) {
                                for(i = i-1; i >= 0; i--)
-                                       dma_unmap_page(&VORTEX_PCI(vp)->dev,
+                                       dma_unmap_page(vp->gendev,
                                                       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
                                                       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
                                                       DMA_TO_DEVICE);
 
-                               pci_unmap_single(VORTEX_PCI(vp),
+                               dma_unmap_single(vp->gendev,
                                                 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
                                                 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
-                                                PCI_DMA_TODEVICE);
+                                                DMA_TO_DEVICE);
 
                                goto out_dma_err;
                        }
@@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        }
 #else
-       dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
-       if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+       dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
+       if (dma_mapping_error(vp->gendev, dma_addr))
                goto out_dma_err;
        vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
        vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
@@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 out:
        return NETDEV_TX_OK;
 out_dma_err:
-       dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+       dev_err(vp->gendev, "Error mapping dma buffer\n");
        goto out;
 }
 
@@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id)
                if (status & DMADone) {
                        if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
                                iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
-                               pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+                               dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
                                pkts_compl++;
                                bytes_compl += vp->tx_skb->len;
                                dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
@@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id)
                                        struct sk_buff *skb = vp->tx_skbuff[entry];
 #if DO_ZEROCOPY
                                        int i;
-                                       pci_unmap_single(VORTEX_PCI(vp),
+                                       dma_unmap_single(vp->gendev,
                                                        le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
                                                        le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
-                                                       PCI_DMA_TODEVICE);
+                                                       DMA_TO_DEVICE);
 
                                        for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
-                                                       pci_unmap_page(VORTEX_PCI(vp),
+                                                       dma_unmap_page(vp->gendev,
                                                                                         le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
                                                                                         le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
-                                                                                        PCI_DMA_TODEVICE);
+                                                                                        DMA_TO_DEVICE);
 #else
-                                       pci_unmap_single(VORTEX_PCI(vp),
-                                               le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
+                                       dma_unmap_single(vp->gendev,
+                                               le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
 #endif
                                        pkts_compl++;
                                        bytes_compl += skb->len;
@@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev)
                                /* 'skb_put()' points to the start of sk_buff data area. */
                                if (vp->bus_master &&
                                        ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
-                                       dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
-                                                                          pkt_len, PCI_DMA_FROMDEVICE);
+                                       dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
+                                                                          pkt_len, DMA_FROM_DEVICE);
                                        iowrite32(dma, ioaddr + Wn7_MasterAddr);
                                        iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
                                        iowrite16(StartDMAUp, ioaddr + EL3_CMD);
                                        while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
                                                ;
-                                       pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
+                                       dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
                                } else {
                                        ioread32_rep(ioaddr + RX_FIFO,
                                                     skb_put(skb, pkt_len),
@@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev)
                        if (pkt_len < rx_copybreak &&
                            (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
-                               pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
                                /* 'skb_put()' points to the start of sk_buff data area. */
                                skb_put_data(skb, vp->rx_skbuff[entry]->data,
                                             pkt_len);
-                               pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
                                vp->rx_copy++;
                        } else {
                                /* Pre-allocate the replacement skb.  If it or its
@@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev)
                                        dev->stats.rx_dropped++;
                                        goto clear_complete;
                                }
-                               newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
-                                                       PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-                               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+                               newdma = dma_map_single(vp->gendev, newskb->data,
+                                                       PKT_BUF_SZ, DMA_FROM_DEVICE);
+                               if (dma_mapping_error(vp->gendev, newdma)) {
                                        dev->stats.rx_dropped++;
                                        consume_skb(newskb);
                                        goto clear_complete;
@@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev)
                                vp->rx_skbuff[entry] = newskb;
                                vp->rx_ring[entry].addr = cpu_to_le32(newdma);
                                skb_put(skb, pkt_len);
-                               pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+                               dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
                                vp->rx_nocopy++;
                        }
                        skb->protocol = eth_type_trans(skb, dev);
@@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev)
        if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
                for (i = 0; i < RX_RING_SIZE; i++)
                        if (vp->rx_skbuff[i]) {
-                               pci_unmap_single(       VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
-                                                                       PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+                               dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
+                                                                       PKT_BUF_SZ, DMA_FROM_DEVICE);
                                dev_kfree_skb(vp->rx_skbuff[i]);
                                vp->rx_skbuff[i] = NULL;
                        }
@@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev)
                                int k;
 
                                for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
-                                               pci_unmap_single(VORTEX_PCI(vp),
+                                               dma_unmap_single(vp->gendev,
                                                                                 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
                                                                                 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
-                                                                                PCI_DMA_TODEVICE);
+                                                                                DMA_TO_DEVICE);
 #else
-                               pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+                               dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
 #endif
                                dev_kfree_skb(skb);
                                vp->tx_skbuff[i] = NULL;
@@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
 
        pci_iounmap(pdev, vp->ioaddr);
 
-       pci_free_consistent(pdev,
-                                               sizeof(struct boom_rx_desc) * RX_RING_SIZE
-                                                       + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-                                               vp->rx_ring,
-                                               vp->rx_ring_dma);
+       dma_free_coherent(&pdev->dev,
+                       sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+                       sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+                       vp->rx_ring, vp->rx_ring_dma);
 
        pci_release_regions(pdev);
 
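The 3c59x hunks above follow the generic conversion from the legacy pci_map_single()/pci_unmap_single() wrappers to the dma_map_single()/dma_unmap_single() API, which takes a plain struct device (the driver's vp->gendev) rather than the PCI device. A minimal kernel-style sketch of that pattern follows; the function names are illustrative, this is not the driver's actual code, and it only builds inside a kernel tree:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Sketch: map an Rx buffer for the device to write into. */
static int example_map_rx(struct device *dev, void *buf, size_t len,
			  dma_addr_t *dma_out)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	*dma_out = dma;
	return 0;
}

/* Sketch: release the mapping once the hardware is done with it. */
static void example_unmap_rx(struct device *dev, dma_addr_t dma, size_t len)
{
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
}
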
index ac99d089ac7266c349fa974e4edabab1e127b90b..1c97e39b478e9f8957ff76f75bc47d819b248c03 100644 (file)
@@ -164,7 +164,9 @@ bad_clone_list[] __initdata = {
 #define NESM_START_PG  0x40    /* First page of TX buffer */
 #define NESM_STOP_PG   0x80    /* Last page +1 of RX ring */
 
-#if defined(CONFIG_ATARI)      /* 8-bit mode on Atari, normal on Q40 */
+#if defined(CONFIG_MACH_TX49XX)
+#  define DCR_VAL 0x48         /* 8-bit mode */
+#elif defined(CONFIG_ATARI)    /* 8-bit mode on Atari, normal on Q40 */
 #  define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49)
 #else
 #  define DCR_VAL 0x49
index b57acb8dc35bd1325d2e1932b9280d49c61d243f..dc25066c59a1052abe53491832a90e96ac3b26b8 100644 (file)
@@ -419,15 +419,15 @@ static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
        {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
        {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
        {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
-       {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
-       {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
-       {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
-       {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
-       {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
-       {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
-       {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
-       {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
-       {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
+       {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
+       {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
+       {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
+       {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
+       {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
+       {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
+       {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
+       {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
+       {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
 };
 
 static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
@@ -444,16 +444,6 @@ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
        {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
        {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
        {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
-       {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
-       {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
-       {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
-       {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
-       {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
-       {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
-       {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
-       {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
-       {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
-       {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
 };
 
 static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
index db92f1858060ec685d7b59740ada8422097f178f..b76447baccaf35968156747d662ade1d71c98dc5 100644 (file)
@@ -836,7 +836,7 @@ bool is_filter_exact_match(struct adapter *adap,
 {
        struct tp_params *tp = &adap->params.tp;
        u64 hash_filter_mask = tp->hash_filter_mask;
-       u32 mask;
+       u64 ntuple_mask = 0;
 
        if (!is_hashfilter(adap))
                return false;
@@ -865,73 +865,45 @@ bool is_filter_exact_match(struct adapter *adap,
        if (!fs->val.fport || fs->mask.fport != 0xffff)
                return false;
 
-       if (tp->fcoe_shift >= 0) {
-               mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W;
-               if (mask && !fs->mask.fcoe)
-                       return false;
-       }
+       /* calculate tuple mask and compare with mask configured in hw */
+       if (tp->fcoe_shift >= 0)
+               ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
 
-       if (tp->port_shift >= 0) {
-               mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W;
-               if (mask && !fs->mask.iport)
-                       return false;
-       }
+       if (tp->port_shift >= 0)
+               ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
 
        if (tp->vnic_shift >= 0) {
-               mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W;
-
-               if ((adap->params.tp.ingress_config & VNIC_F)) {
-                       if (mask && !fs->mask.pfvf_vld)
-                               return false;
-               } else {
-                       if (mask && !fs->mask.ovlan_vld)
-                               return false;
-               }
+               if ((adap->params.tp.ingress_config & VNIC_F))
+                       ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
+               else
+                       ntuple_mask |= (u64)fs->mask.ovlan_vld <<
+                               tp->vnic_shift;
        }
 
-       if (tp->vlan_shift >= 0) {
-               mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W;
-               if (mask && !fs->mask.ivlan)
-                       return false;
-       }
+       if (tp->vlan_shift >= 0)
+               ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
 
-       if (tp->tos_shift >= 0) {
-               mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W;
-               if (mask && !fs->mask.tos)
-                       return false;
-       }
+       if (tp->tos_shift >= 0)
+               ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
 
-       if (tp->protocol_shift >= 0) {
-               mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W;
-               if (mask && !fs->mask.proto)
-                       return false;
-       }
+       if (tp->protocol_shift >= 0)
+               ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
 
-       if (tp->ethertype_shift >= 0) {
-               mask = (hash_filter_mask >> tp->ethertype_shift) &
-                       FT_ETHERTYPE_W;
-               if (mask && !fs->mask.ethtype)
-                       return false;
-       }
+       if (tp->ethertype_shift >= 0)
+               ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
 
-       if (tp->macmatch_shift >= 0) {
-               mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W;
-               if (mask && !fs->mask.macidx)
-                       return false;
-       }
+       if (tp->macmatch_shift >= 0)
+               ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
+
+       if (tp->matchtype_shift >= 0)
+               ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
+
+       if (tp->frag_shift >= 0)
+               ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
+
+       if (ntuple_mask != hash_filter_mask)
+               return false;
 
-       if (tp->matchtype_shift >= 0) {
-               mask = (hash_filter_mask >> tp->matchtype_shift) &
-                       FT_MPSHITTYPE_W;
-               if (mask && !fs->mask.matchtype)
-                       return false;
-       }
-       if (tp->frag_shift >= 0) {
-               mask = (hash_filter_mask >> tp->frag_shift) &
-                       FT_FRAGMENTATION_W;
-               if (mask && !fs->mask.frag)
-                       return false;
-       }
        return true;
 }
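
The rewrite above replaces the per-field "masked in hardware but zero in the filter?" checks with a single accumulated tuple mask that must equal the configured hash_filter_mask exactly. A self-contained illustration of that accumulate-then-compare idea, with made-up field names, widths and shift values (not the cxgb4 layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: two fields with hypothetical shifts. */
struct filter_mask { uint64_t proto, ethtype; };

static int exact_match(uint64_t hw_mask, const struct filter_mask *m,
		       int proto_shift, int ethtype_shift)
{
	uint64_t ntuple_mask = 0;

	if (proto_shift >= 0)
		ntuple_mask |= m->proto << proto_shift;
	if (ethtype_shift >= 0)
		ntuple_mask |= m->ethtype << ethtype_shift;

	/* New scheme: the whole accumulated mask must match the hw mask. */
	return ntuple_mask == hw_mask;
}

int main(void)
{
	struct filter_mask m = { .proto = 0xff, .ethtype = 0xffff };

	printf("%d\n", exact_match(0x00ffffffULL, &m, 0, 8)); /* 1: matches  */
	printf("%d\n", exact_match(0x0000ffffULL, &m, 0, 8)); /* 0: mismatch */
	return 0;
}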
 
index 6e8d6a6f6aaf2c809a92349f64c83f406bd35e9a..4bb4646a5f92f5617b6a3306a5002e180fc9cca1 100644 (file)
@@ -192,6 +192,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
        if (adapter->fw_done_rc) {
                dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
                        adapter->fw_done_rc);
+               dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
                return -1;
        }
        return 0;
@@ -1821,9 +1822,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                        if (rc)
                                return rc;
                }
+               ibmvnic_disable_irqs(adapter);
        }
-
-       ibmvnic_disable_irqs(adapter);
        adapter->state = VNIC_CLOSED;
 
        if (reset_state == VNIC_CLOSED)
@@ -4586,14 +4586,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
                release_crq_queue(adapter);
        }
 
-       rc = init_stats_buffers(adapter);
-       if (rc)
-               return rc;
-
-       rc = init_stats_token(adapter);
-       if (rc)
-               return rc;
-
        return rc;
 }
 
@@ -4662,13 +4654,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
                        goto ibmvnic_init_fail;
        } while (rc == EAGAIN);
 
+       rc = init_stats_buffers(adapter);
+       if (rc)
+               goto ibmvnic_init_fail;
+
+       rc = init_stats_token(adapter);
+       if (rc)
+               goto ibmvnic_stats_fail;
+
        netdev->mtu = adapter->req_mtu - ETH_HLEN;
        netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
        netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
 
        rc = device_create_file(&dev->dev, &dev_attr_failover);
        if (rc)
-               goto ibmvnic_init_fail;
+               goto ibmvnic_dev_file_err;
 
        netif_carrier_off(netdev);
        rc = register_netdev(netdev);
@@ -4687,6 +4687,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 ibmvnic_register_fail:
        device_remove_file(&dev->dev, &dev_attr_failover);
 
+ibmvnic_dev_file_err:
+       release_stats_token(adapter);
+
+ibmvnic_stats_fail:
+       release_stats_buffers(adapter);
+
 ibmvnic_init_fail:
        release_sub_crqs(adapter, 1);
        release_crq_queue(adapter);
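
The probe-path change above hoists the stats allocations out of ibmvnic_init() and gives each one its own unwind label, so a later failure releases only what was actually set up. A self-contained sketch of that layered goto-unwind pattern, using made-up resources in place of the stats buffers and token:

#include <stdio.h>
#include <stdlib.h>

/* Made-up resources standing in for the stats buffers and stats token. */
static int alloc_buffers(void **p) { *p = malloc(64); return *p ? 0 : -1; }
static int alloc_token(void **p)   { *p = malloc(8);  return *p ? 0 : -1; }

static int probe(void)
{
	void *buffers = NULL, *token = NULL;
	int rc;

	rc = alloc_buffers(&buffers);
	if (rc)
		goto out;		/* nothing to unwind yet */

	rc = alloc_token(&token);
	if (rc)
		goto free_buffers;	/* release only what already exists */

	printf("probe ok\n");
	free(token);			/* demo cleanup; a real probe keeps these */
	free(buffers);
	return 0;

free_buffers:
	free(buffers);
out:
	return rc;
}

int main(void) { return probe(); }
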
index 211578ffc70d2ba12105e6f20d9ab7ecf7efd0ac..60172a38c4a43abfc0856b3159f9caeac452cf6c 100644 (file)
@@ -2929,6 +2929,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
                mlx4_err(dev, "Failed to create file for port %d\n", port);
                devlink_port_unregister(&info->devlink_port);
                info->port = -1;
+               return err;
        }
 
        sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
@@ -2950,9 +2951,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
                                   &info->port_attr);
                devlink_port_unregister(&info->devlink_port);
                info->port = -1;
+               return err;
        }
 
-       return err;
+       return 0;
 }
 
 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
index 1dc424685f4e772966ea9aa0bfc757cc0f41b18b..35fb31f682af4db9bae2efd946d7d2596fbd7eed 100644 (file)
@@ -335,7 +335,7 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
                return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);
 
        start = mem;
-       while (mem - start + 8 < nfp_cpp_area_size(area)) {
+       while (mem - start + 8 <= nfp_cpp_area_size(area)) {
                u8 __iomem *value;
                u32 type, length;
 
index 38502815d681d08086f02bbb49e340c06658766d..468c59d2e491aa0fd6616bd624c2cc956c5759b6 100644 (file)
@@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
        struct qed_ll2_tx_packet *p_pkt = NULL;
        struct qed_ll2_info *p_ll2_conn;
        struct qed_ll2_tx_queue *p_tx;
+       unsigned long flags = 0;
        dma_addr_t tx_frag;
 
        p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
@@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
        p_tx = &p_ll2_conn->tx_queue;
 
+       spin_lock_irqsave(&p_tx->lock, flags);
        while (!list_empty(&p_tx->active_descq)) {
                p_pkt = list_first_entry(&p_tx->active_descq,
                                         struct qed_ll2_tx_packet, list_entry);
@@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
                list_del(&p_pkt->list_entry);
                b_last_packet = list_empty(&p_tx->active_descq);
                list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+               spin_unlock_irqrestore(&p_tx->lock, flags);
                if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
                        struct qed_ooo_buffer *p_buffer;
 
@@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
                                                      b_last_frag,
                                                      b_last_packet);
                }
+               spin_lock_irqsave(&p_tx->lock, flags);
        }
+       spin_unlock_irqrestore(&p_tx->lock, flags);
 }
 
 static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
@@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
        struct qed_ll2_info *p_ll2_conn = NULL;
        struct qed_ll2_rx_packet *p_pkt = NULL;
        struct qed_ll2_rx_queue *p_rx;
+       unsigned long flags = 0;
 
        p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
        if (!p_ll2_conn)
@@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
        p_rx = &p_ll2_conn->rx_queue;
 
+       spin_lock_irqsave(&p_rx->lock, flags);
        while (!list_empty(&p_rx->active_descq)) {
                p_pkt = list_first_entry(&p_rx->active_descq,
                                         struct qed_ll2_rx_packet, list_entry);
                if (!p_pkt)
                        break;
-
                list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
+               spin_unlock_irqrestore(&p_rx->lock, flags);
 
                if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
                        struct qed_ooo_buffer *p_buffer;
@@ -588,7 +595,30 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
                                                      cookie,
                                                      rx_buf_addr, b_last);
                }
+               spin_lock_irqsave(&p_rx->lock, flags);
        }
+       spin_unlock_irqrestore(&p_rx->lock, flags);
+}
+
+static bool
+qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
+                               struct core_rx_slow_path_cqe *p_cqe)
+{
+       struct ooo_opaque *iscsi_ooo;
+       u32 cid;
+
+       if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
+               return false;
+
+       iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
+       if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
+               return false;
+
+       /* Need to make a flush */
+       cid = le32_to_cpu(iscsi_ooo->cid);
+       qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
+
+       return true;
 }
 
 static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
@@ -617,6 +647,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
                cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
                cqe_type = cqe->rx_cqe_sp.type;
 
+               if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
+                       if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
+                                                           &cqe->rx_cqe_sp))
+                               continue;
+
                if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
                        DP_NOTICE(p_hwfn,
                                  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
@@ -794,6 +829,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
        int rc;
 
+       if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+               return 0;
+
        rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
        if (rc)
                return rc;
@@ -814,6 +852,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        u16 new_idx = 0, num_bds = 0;
        int rc;
 
+       if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+               return 0;
+
        new_idx = le16_to_cpu(*p_tx->p_fw_cons);
        num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
 
@@ -1867,17 +1908,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
 
        /* Stop Tx & Rx of connection, if needed */
        if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+               p_ll2_conn->tx_queue.b_cb_registred = false;
+               smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
                rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
                if (rc)
                        goto out;
+
                qed_ll2_txq_flush(p_hwfn, connection_handle);
+               qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
        }
 
        if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+               p_ll2_conn->rx_queue.b_cb_registred = false;
+               smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
                rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
                if (rc)
                        goto out;
+
                qed_ll2_rxq_flush(p_hwfn, connection_handle);
+               qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
        }
 
        if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
@@ -1925,16 +1974,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
        if (!p_ll2_conn)
                return;
 
-       if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
-               p_ll2_conn->rx_queue.b_cb_registred = false;
-               qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
-       }
-
-       if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
-               p_ll2_conn->tx_queue.b_cb_registred = false;
-               qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
-       }
-
        kfree(p_ll2_conn->tx_queue.descq_mem);
        qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
 
index a01e7d6e5442f079e9006811b82b4feb02dc23bc..f6655e251bbd71c5bec0404062612738122f8f17 100644 (file)
@@ -1066,13 +1066,12 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 
        DP_INFO(edev, "Starting qede_remove\n");
 
+       qede_rdma_dev_remove(edev);
        unregister_netdev(ndev);
        cancel_delayed_work_sync(&edev->sp_task);
 
        qede_ptp_disable(edev);
 
-       qede_rdma_dev_remove(edev);
-
        edev->ops->common->set_power_state(cdev, PCI_D0);
 
        pci_set_drvdata(pdev, NULL);
index a5b792ce2ae7d046e78ec4c7bfa886a805bc00e8..1bf930d4a1e52c1891953f8c709355eb0e6a6be9 100644 (file)
@@ -163,7 +163,7 @@ enum {
 };
 
 /* Driver's parameters */
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_RENESAS)
 #define SH_ETH_RX_ALIGN                32
 #else
 #define SH_ETH_RX_ALIGN                2
index 450eec264a5ea53fe0e592e467de3626321a9f6d..4377c26f714d0522ebf5d1de6ac774b6e42024ea 100644 (file)
@@ -792,8 +792,10 @@ static int ipvlan_device_event(struct notifier_block *unused,
                break;
 
        case NETDEV_CHANGEADDR:
-               list_for_each_entry(ipvlan, &port->ipvlans, pnode)
+               list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
                        ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
+                       call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
+               }
                break;
 
        case NETDEV_PRE_TYPE_CHANGE:
index f41b224a9cdbf49ccf82d72b5052686548c005a7..ab195f0916d69e49719b24319821a0e908f31064 100644 (file)
@@ -573,9 +573,40 @@ static int ksz9031_config_init(struct phy_device *phydev)
                ksz9031_of_load_skew_values(phydev, of_node,
                                MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
                                tx_data_skews, 4);
+
+               /* Silicon Errata Sheet (DS80000691D or DS80000692D):
+                * When the device links in the 1000BASE-T slave mode only,
+                * the optional 125MHz reference output clock (CLK125_NDO)
+                * has wide duty cycle variation.
+                *
+                * The optional CLK125_NDO clock does not meet the RGMII
+                * 45/55 percent (min/max) duty cycle requirement and therefore
+                * cannot be used directly by the MAC side for clocking
+                * applications that have setup/hold time requirements on
+                * rising and falling clock edges.
+                *
+                * Workaround:
+                * Force the phy to be the master to receive a stable clock
+                * which meets the duty cycle requirement.
+                */
+               if (of_property_read_bool(of_node, "micrel,force-master")) {
+                       result = phy_read(phydev, MII_CTRL1000);
+                       if (result < 0)
+                               goto err_force_master;
+
+                       /* enable master mode, config & prefer master */
+                       result |= CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER;
+                       result = phy_write(phydev, MII_CTRL1000, result);
+                       if (result < 0)
+                               goto err_force_master;
+               }
        }
 
        return ksz9031_center_flp_timing(phydev);
+
+err_force_master:
+       phydev_err(phydev, "failed to force the phy to master mode\n");
+       return result;
 }
 
 #define KSZ8873MLL_GLOBAL_CONTROL_4    0x06
index ef33950a45d909b34dfe937396873ece728314f6..d45ac37e128740884d5570dd14cdbc11aac1fa6b 100644 (file)
@@ -681,15 +681,6 @@ static void tun_queue_purge(struct tun_file *tfile)
        skb_queue_purge(&tfile->sk.sk_error_queue);
 }
 
-static void tun_cleanup_tx_ring(struct tun_file *tfile)
-{
-       if (tfile->tx_ring.queue) {
-               ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
-               xdp_rxq_info_unreg(&tfile->xdp_rxq);
-               memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-       }
-}
-
 static void __tun_detach(struct tun_file *tfile, bool clean)
 {
        struct tun_file *ntfile;
@@ -736,7 +727,9 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
                            tun->dev->reg_state == NETREG_REGISTERED)
                                unregister_netdevice(tun->dev);
                }
-               tun_cleanup_tx_ring(tfile);
+               if (tun)
+                       xdp_rxq_info_unreg(&tfile->xdp_rxq);
+               ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
                sock_put(&tfile->sk);
        }
 }
@@ -783,14 +776,14 @@ static void tun_detach_all(struct net_device *dev)
                tun_napi_del(tun, tfile);
                /* Drop read queue */
                tun_queue_purge(tfile);
+               xdp_rxq_info_unreg(&tfile->xdp_rxq);
                sock_put(&tfile->sk);
-               tun_cleanup_tx_ring(tfile);
        }
        list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
                tun_enable_queue(tfile);
                tun_queue_purge(tfile);
+               xdp_rxq_info_unreg(&tfile->xdp_rxq);
                sock_put(&tfile->sk);
-               tun_cleanup_tx_ring(tfile);
        }
        BUG_ON(tun->numdisabled != 0);
 
@@ -834,7 +827,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
        }
 
        if (!tfile->detached &&
-           ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
+           ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
+                           GFP_KERNEL, tun_ptr_free)) {
                err = -ENOMEM;
                goto out;
        }
@@ -3219,6 +3213,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
                                            &tun_proto, 0);
        if (!tfile)
                return -ENOMEM;
+       if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
+               sk_free(&tfile->sk);
+               return -ENOMEM;
+       }
+
        RCU_INIT_POINTER(tfile->tun, NULL);
        tfile->flags = 0;
        tfile->ifindex = 0;
@@ -3239,8 +3238,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 
        sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
 
-       memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-
        return 0;
 }
 
index 9ebe2a689966d056a1b9a94e5ea557f0d0f9a55e..27a9bb8c9611ce1bb44c3372923248c7b7ccee6a 100644 (file)
@@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
 
        gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
        while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
+               /* Prevent any &gdesc->tcd field from being (speculatively)
+                * read before (&gdesc->tcd)->gen is read.
+                */
+               dma_rmb();
+
                completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
                                               &gdesc->tcd), tq, adapter->pdev,
                                               adapter);
@@ -1103,6 +1108,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                gdesc->txd.tci = skb_vlan_tag_get(skb);
        }
 
+       /* Ensure that the write to (&gdesc->txd)->gen will be observed after
+        * all other writes to &gdesc->txd.
+        */
+       dma_wmb();
+
        /* finally flips the GEN bit of the SOP desc. */
        gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
                                                  VMXNET3_TXD_GEN);
@@ -1298,6 +1308,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                         */
                        break;
                }
+
+               /* Prevent any rcd field from being (speculatively) read before
+                * rcd->gen is read.
+                */
+               dma_rmb();
+
                BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
                       rcd->rqID != rq->dataRingQid);
                idx = rcd->rxdIdx;
@@ -1528,6 +1544,12 @@ rcd_done:
                ring->next2comp = idx;
                num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
                ring = rq->rx_ring + ring_idx;
+
+               /* Ensure that the writes to rxd->gen bits will be observed
+                * after all other writes to rxd objects.
+                */
+               dma_wmb();
+
                while (num_to_alloc) {
                        vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
                                          &rxCmdDesc);
@@ -2688,7 +2710,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
 /* ==================== initialization and cleanup routines ============ */
 
 static int
-vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
+vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
 {
        int err;
        unsigned long mmio_start, mmio_len;
@@ -2700,30 +2722,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
                return err;
        }
 
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
-               if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
-                       dev_err(&pdev->dev,
-                               "pci_set_consistent_dma_mask failed\n");
-                       err = -EIO;
-                       goto err_set_mask;
-               }
-               *dma64 = true;
-       } else {
-               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
-                       dev_err(&pdev->dev,
-                               "pci_set_dma_mask failed\n");
-                       err = -EIO;
-                       goto err_set_mask;
-               }
-               *dma64 = false;
-       }
-
        err = pci_request_selected_regions(pdev, (1 << 2) - 1,
                                           vmxnet3_driver_name);
        if (err) {
                dev_err(&pdev->dev,
                        "Failed to request region for adapter: error %d\n", err);
-               goto err_set_mask;
+               goto err_enable_device;
        }
 
        pci_set_master(pdev);
@@ -2751,7 +2755,7 @@ err_bar1:
        iounmap(adapter->hw_addr0);
 err_ioremap:
        pci_release_selected_regions(pdev, (1 << 2) - 1);
-err_set_mask:
+err_enable_device:
        pci_disable_device(pdev);
        return err;
 }
@@ -3254,7 +3258,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 #endif
        };
        int err;
-       bool dma64 = false; /* stupid gcc */
+       bool dma64;
        u32 ver;
        struct net_device *netdev;
        struct vmxnet3_adapter *adapter;
@@ -3300,6 +3304,24 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
        adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
 
+       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+               if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+                       dev_err(&pdev->dev,
+                               "pci_set_consistent_dma_mask failed\n");
+                       err = -EIO;
+                       goto err_set_mask;
+               }
+               dma64 = true;
+       } else {
+               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+                       dev_err(&pdev->dev,
+                               "pci_set_dma_mask failed\n");
+                       err = -EIO;
+                       goto err_set_mask;
+               }
+               dma64 = false;
+       }
+
        spin_lock_init(&adapter->cmd_lock);
        adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
                                             sizeof(struct vmxnet3_adapter),
@@ -3307,7 +3329,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
                dev_err(&pdev->dev, "Failed to map dma\n");
                err = -EFAULT;
-               goto err_dma_map;
+               goto err_set_mask;
        }
        adapter->shared = dma_alloc_coherent(
                                &adapter->pdev->dev,
@@ -3358,7 +3380,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        }
 #endif /* VMXNET3_RSS */
 
-       err = vmxnet3_alloc_pci_resources(adapter, &dma64);
+       err = vmxnet3_alloc_pci_resources(adapter);
        if (err < 0)
                goto err_alloc_pci;
 
@@ -3504,7 +3526,7 @@ err_alloc_queue_desc:
 err_alloc_shared:
        dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
                         sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
-err_dma_map:
+err_set_mask:
        free_netdev(netdev);
        return err;
 }
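
The barrier comments added above spell out the descriptor-ring ordering rule: the producer may publish the gen/ownership bit only after all other descriptor fields are written, and the consumer must not read those fields until after it has observed the gen bit. A userspace analogue of the same ordering, using C11 fences where the driver uses dma_wmb()/dma_rmb(); the descriptor layout here is invented for the example:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct desc {
	uint32_t addr;
	uint32_t len;
	_Atomic uint32_t gen;	/* ownership bit, flipped last */
};

static void produce(struct desc *d, uint32_t addr, uint32_t len, uint32_t gen)
{
	d->addr = addr;
	d->len = len;
	atomic_thread_fence(memory_order_release);	/* dma_wmb() analogue */
	atomic_store_explicit(&d->gen, gen, memory_order_relaxed);
}

static int consume(struct desc *d, uint32_t expect_gen)
{
	if (atomic_load_explicit(&d->gen, memory_order_relaxed) != expect_gen)
		return -1;				/* not ours yet */
	atomic_thread_fence(memory_order_acquire);	/* dma_rmb() analogue */
	printf("addr=%u len=%u\n", d->addr, d->len);
	return 0;
}

int main(void)
{
	struct desc d = { 0 };

	produce(&d, 0x1000, 64, 1);
	return consume(&d, 1);
}
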
index a3326463b71f1e26cd74b33b4480044f57ec4f31..a2c554f8a61bc3262823d9ffd551af6ccedfc299 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.14.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.16.0-k"
 
-/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040e00
+/* Each byte of this 32-bit integer encodes a version number in
+ * VMXNET3_DRIVER_VERSION_STRING.
+ */
+#define VMXNET3_DRIVER_VERSION_NUM      0x01041000
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
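
The version bump above is consistent with the byte-per-component encoding the new comment describes: "1.4.16.0" packs as 0x01041000 (0x01, 0x04, 0x10 = 16, 0x00), just as "1.4.14.0" packed as 0x01040e00 (0x0e = 14). A one-line check:

#include <stdio.h>

int main(void)
{
	/* major.minor.patch.build, one byte each (16 == 0x10) */
	unsigned num = (1u << 24) | (4u << 16) | (16u << 8) | 0u;

	printf("0x%08x\n", num);	/* prints 0x01041000 */
	return 0;
}
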
index 88a8b5916624ae6b0805366ee8ed8325a3191a08..dbb7464c018cac028c2601d76811fd302f34e117 100644 (file)
@@ -27,7 +27,7 @@ config NVME_FABRICS
 
 config NVME_RDMA
        tristate "NVM Express over Fabrics RDMA host driver"
-       depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
+       depends on INFINIBAND_ADDR_TRANS && BLOCK
        select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
index 3c7b61ddb0d186a017196d56378184a9988d525a..7595664ee7531d8b4856091eb6beec11aaded673 100644 (file)
@@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
 
 config NVME_TARGET_RDMA
        tristate "NVMe over Fabrics RDMA target support"
-       depends on INFINIBAND && INFINIBAND_ADDR_TRANS
+       depends on INFINIBAND_ADDR_TRANS
        depends on NVME_TARGET
        select SGL_ALLOC
        help
index e7bbdf947bbcf93ce4acebbcecd13c17ff247d5c..8350ca2311c73c3af5dd52eb60c967737e44950d 100644 (file)
@@ -91,6 +91,8 @@ static int send_command(struct cros_ec_device *ec_dev,
                        usleep_range(10000, 11000);
 
                        ret = (*xfer_fxn)(ec_dev, status_msg);
+                       if (ret == -EAGAIN)
+                               continue;
                        if (ret < 0)
                                break;
 
index a8b831000b2d687b9608a9658ac90650c7131b8e..18c4f933e8b9a82c51fa20e113b6f6ca20566311 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Debug traces for zfcp.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -308,6 +308,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
+/**
+ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock must not be held.
+ */
+void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+                           struct zfcp_port *port, struct scsi_device *sdev,
+                           u8 want, u8 need)
+{
+       unsigned long flags;
+
+       read_lock_irqsave(&adapter->erp_lock, flags);
+       zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
+       read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
 
 /**
  * zfcp_dbf_rec_run_lvl - trace event related to running recovery
index bf8ea4df2bb8c9fa621da9061795dd99c56ee091..e5eed8aac0ce6ba1922e0585439f5d0c1426419a 100644 (file)
@@ -4,7 +4,7 @@
  *
  * External function declarations.
  *
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #ifndef ZFCP_EXT_H
@@ -35,6 +35,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
 extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
 extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
                              struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+                                  struct zfcp_port *port,
+                                  struct scsi_device *sdev, u8 want, u8 need);
 extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
 extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
                                 struct zfcp_erp_action *erp);
index 4d2ba5682493221bf32f0c4000021da54dc57044..22f9562f415cbb09a098a83318818c49217a8237 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -618,9 +618,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
        ids.port_id = port->d_id;
        ids.roles = FC_RPORT_ROLE_FCP_TARGET;
 
-       zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
-                         ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
-                         ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+       zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
+                              ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+                              ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
        rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
        if (!rport) {
                dev_err(&port->adapter->ccw_device->dev,
@@ -642,9 +642,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
        struct fc_rport *rport = port->rport;
 
        if (rport) {
-               zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
-                                 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
-                                 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+               zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
+                                      ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+                                      ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
                fc_remote_port_delete(rport);
                port->rport = NULL;
        }
index e29f9b8fd66db1b21167fd7d15eaf0723b59b826..56c940394729e896b46e504a14e5d0eadb324f0a 100644 (file)
@@ -182,7 +182,7 @@ zalon7xx-objs       := zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs      := NCR_Q720.o ncr53c8xx.o
 
 # Files generated that shall be removed upon make clean
-clean-files := 53c700_d.h 53c700_u.h
+clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
 
 $(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
 
index c198b96368dd69beba2dcffbe16efe7a3c74ae1d..5c40d809830f85916a7ed210798e54b2522fcda7 100644 (file)
@@ -1894,7 +1894,7 @@ retry:
                num = (rem_sz > scatter_elem_sz_prev) ?
                        scatter_elem_sz_prev : rem_sz;
 
-               schp->pages[k] = alloc_pages(gfp_mask, order);
+               schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
                if (!schp->pages[k])
                        goto out;
 
index 2a21f2d4859229693381e955f141beff3fa64fea..35fab1e18adc3414935b182fe1774c911d733291 100644 (file)
@@ -188,9 +188,13 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
        struct scsi_device *SDev;
        struct scsi_sense_hdr sshdr;
        int result, err = 0, retries = 0;
+       unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL;
 
        SDev = cd->device;
 
+       if (cgc->sense)
+               senseptr = sense_buffer;
+
       retry:
        if (!scsi_block_when_processing_errors(SDev)) {
                err = -ENODEV;
@@ -198,10 +202,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
        }
 
        result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
-                             cgc->buffer, cgc->buflen,
-                             (unsigned char *)cgc->sense, &sshdr,
+                             cgc->buffer, cgc->buflen, senseptr, &sshdr,
                              cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
 
+       if (cgc->sense)
+               memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense));
+
        /* Minimal error checking.  Ignore cases we know about, and report the rest. */
        if (driver_byte(result) != 0) {
                switch (sshdr.sense_key) {
index ad049e6f24e47a73b6894340be2742d95b21f692..f3b1ad4bd3dc77b41c1e6725470639251ba4829f 100644 (file)
@@ -34,7 +34,7 @@ config LNET_SELFTEST
 
 config LNET_XPRT_IB
        tristate "LNET infiniband support"
-       depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
+       depends on LNET && PCI && INFINIBAND_ADDR_TRANS
        default LNET && INFINIBAND
        help
          This option allows the LNET users to use infiniband as an
index 4ad89ea71a70118dad2e5d960f89f54431a1ad67..4f26bdc3d1dc5dcc98f1942bc9ab34ee814029f1 100644 (file)
@@ -2121,6 +2121,8 @@ static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
 
        if (val >= 0) {
                udev->qfull_time_out = val * MSEC_PER_SEC;
+       } else if (val == -1) {
+               udev->qfull_time_out = val;
        } else {
                printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
                return -EINVAL;
index e1c60899fdbc8de0b530078bb310f28bea33dd2c..a6f9ba85dc4ba8df4dd9519b317664b9e2ece94f 100644 (file)
@@ -351,7 +351,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
         * physical address */
        phys = xen_bus_to_phys(dev_addr);
 
-       if (((dev_addr + size - 1 > dma_mask)) ||
+       if (((dev_addr + size - 1 <= dma_mask)) ||
            range_straddles_page_boundary(phys, size))