Merge tag 'nfs-for-4.16-4' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
author	Linus Torvalds <torvalds@linux-foundation.org>
	Mon, 12 Mar 2018 17:47:03 +0000 (10:47 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Mon, 12 Mar 2018 17:47:03 +0000 (10:47 -0700)
Pull NFS client bugfixes from Trond Myklebust:
 "Hightlights include the following stable fixes:

   - NFS: Fix an incorrect type in struct nfs_direct_req

   - pNFS: Prevent the layout header refcount going to zero in
     pnfs_roc()

   - NFS: Fix unstable write completion"

* tag 'nfs-for-4.16-4' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  NFS: Fix unstable write completion
  pNFS: Prevent the layout header refcount going to zero in pnfs_roc()
  NFS: Fix an incorrect type in struct nfs_direct_req

178 files changed:
Documentation/accelerators/ocxl.rst
Documentation/devicetree/bindings/dma/mv-xor-v2.txt
Documentation/sphinx/kerneldoc.py
Makefile
arch/arm64/kernel/cpu_errata.c
arch/arm64/mm/mmu.c
arch/mips/ath25/board.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/kernel/smp-bmips.c
arch/mips/loongson64/Kconfig
arch/powerpc/boot/Makefile
arch/powerpc/kernel/prom_init.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/powerpc.c
arch/s390/kvm/kvm-s390.c
arch/x86/Kconfig
arch/x86/entry/entry_64_compat.S
arch/x86/entry/syscalls/syscall_32.tbl
arch/x86/entry/vsyscall/vsyscall_64.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/ia32/sys_ia32.c
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/sections.h
arch/x86/include/asm/sys_ia32.h
arch/x86/include/uapi/asm/mce.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/ioport.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/mm/pti.c
drivers/block/loop.c
drivers/block/xen-blkfront.c
drivers/clocksource/Kconfig
drivers/dma/mv_xor_v2.c
drivers/dma/sh/rcar-dmac.c
drivers/gpio/gpio-rcar.c
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/si.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
drivers/gpu/drm/amd/display/include/signal_types.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/sun4i/sun4i_crtc.c
drivers/gpu/drm/sun4i/sun4i_dotclock.c
drivers/gpu/drm/sun4i/sun4i_rgb.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun4i_tcon.h
drivers/infiniband/core/addr.c
drivers/infiniband/core/cq.c
drivers/infiniband/core/device.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/ucma.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/ib_verbs.h
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/bnxt_re/qplib_fp.h
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
drivers/infiniband/hw/bnxt_re/qplib_sp.c
drivers/infiniband/hw/bnxt_re/roce_hsi.h
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qedr/qedr_iw_cm.c
drivers/infiniband/hw/qedr/verbs.c
drivers/md/bcache/super.c
drivers/md/dm-bufio.c
drivers/md/dm-mpath.c
drivers/md/dm-raid.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/misc/ocxl/file.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/pci/dwc/pcie-designware-host.c
drivers/perf/arm_pmu.c
drivers/platform/chrome/chromeos_laptop.c
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/dell-smbios-base.c [new file with mode: 0644]
drivers/platform/x86/dell-smbios-smm.c
drivers/platform/x86/dell-smbios-wmi.c
drivers/platform/x86/dell-smbios.c [deleted file]
drivers/platform/x86/dell-smbios.h
drivers/video/fbdev/sbuslib.c
drivers/watchdog/f71808e_wdt.c
drivers/watchdog/hpwdt.c
drivers/watchdog/sbsa_gwdt.c
drivers/xen/xenbus/xenbus_probe.c
fs/overlayfs/Kconfig
fs/overlayfs/export.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/super.c
fs/xfs/xfs_iomap.c
include/linux/compat.h
include/linux/of_pci.h
include/uapi/misc/ocxl.h
kernel/compat.c
kernel/events/core.c
kernel/locking/rtmutex.c
kernel/panic.c
lib/bug.c
lib/test_kmod.c
mm/gup.c
mm/hugetlb.c
mm/memblock.c
mm/page_alloc.c
scripts/Makefile.lib
scripts/basic/fixdep.c
scripts/bloat-o-meter
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_fifo.c
sound/core/seq/seq_memory.c
sound/core/seq/seq_memory.h
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
tools/arch/x86/include/asm/cpufeatures.h
tools/include/uapi/linux/kvm.h
tools/objtool/check.c
tools/perf/Documentation/perf-kallsyms.txt
tools/perf/builtin-record.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/perf.h
tools/perf/ui/browsers/annotate.c
tools/perf/util/auxtrace.c
tools/perf/util/record.c
tools/perf/util/trigger.h
tools/testing/selftests/powerpc/mm/subpage_prot.c
tools/testing/selftests/powerpc/tm/Makefile
tools/testing/selftests/powerpc/tm/tm-trap.c
tools/testing/selftests/vm/run_vmtests
tools/testing/selftests/x86/test_vsyscall.c

index 4f7af841d935a261f5ffb96abd4f221364ee9e3d..ddcc58d01cfbce99a2da80a91101213203b83e82 100644 (file)
@@ -152,6 +152,11 @@ OCXL_IOCTL_IRQ_SET_FD:
   Associate an event fd to an AFU interrupt so that the user process
   can be notified when the AFU sends an interrupt.
 
+OCXL_IOCTL_GET_METADATA:
+
+  Obtains configuration information from the card, such as the size of
+  MMIO areas, the AFU version, and the PASID for the current context.
+
 
 mmap
 ----
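
A minimal userspace sketch of the new ioctl, assuming the struct ocxl_ioctl_metadata layout added to include/uapi/misc/ocxl.h in this series; the field names follow that header, and the AFU device path below is hypothetical (real node names are AFU-specific):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/ocxl.h>	/* OCXL_IOCTL_GET_METADATA, struct ocxl_ioctl_metadata */

int main(void)
{
	struct ocxl_ioctl_metadata md;
	/* hypothetical device node; the real name is AFU-specific */
	int fd = open("/dev/ocxl/IBM,AFU0.0004:00:00.1.0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, OCXL_IOCTL_GET_METADATA, &md) == 0)
		printf("AFU v%u.%u, PASID %u, pp MMIO %llu, global MMIO %llu\n",
		       md.afu_version_major, md.afu_version_minor, md.pasid,
		       (unsigned long long)md.pp_mmio_size,
		       (unsigned long long)md.global_mmio_size);
	close(fd);
	return 0;
}
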
index 217a90eaabe7f87db72539a2c9046f5ebc892b7c..9c38bbe7e6d7d86993be1f24ad011ee27a8e1d59 100644 (file)
@@ -11,7 +11,11 @@ Required properties:
   interrupts.
 
 Optional properties:
-- clocks: Optional reference to the clock used by the XOR engine.
+- clocks: Optional reference to the clocks used by the XOR engine.
+- clock-names: mandatory if there is a second clock; in that case, the
+   name must be "core" for the first clock and "reg" for the second
+   one.
+
 
 Example:
 
index 39aa9e8697ccf8f1ad3e0d7210294eff20bd9cb0..fbedcc39460bb28b156db85ac085fbb077d3fe6e 100644 (file)
@@ -36,8 +36,7 @@ import glob
 
 from docutils import nodes, statemachine
 from docutils.statemachine import ViewList
-from docutils.parsers.rst import directives
-from sphinx.util.compat import Directive
+from docutils.parsers.rst import directives, Directive
 from sphinx.ext.autodoc import AutodocReporter
 
 __version__  = '1.0'
index c4322dea3ca2eea7d7b82bd51724397c7358db62..e02d092bc2d6b503db5d91762adcb3248dac5921 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index 52f15cd896e11ad631ac3092d9709337a9629bb4..b5a28336c07712af8d10aa62f1669b8a798065d8 100644 (file)
@@ -178,7 +178,7 @@ static int enable_smccc_arch_workaround_1(void *data)
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-               if (res.a0)
+               if ((int)res.a0 < 0)
                        return 0;
                cb = call_hvc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_hvc_start;
@@ -188,7 +188,7 @@ static int enable_smccc_arch_workaround_1(void *data)
        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-               if (res.a0)
+               if ((int)res.a0 < 0)
                        return 0;
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc_start;
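
The two hunks above stop treating any nonzero return as failure: SMCCC v1.1 reports errors as small negative values (SMCCC_RET_NOT_SUPPORTED is -1) in an unsigned register, so a positive a0 still means the firmware workaround is available. A standalone sketch of why the signed cast matters:

#include <stdio.h>

#define SMCCC_RET_NOT_SUPPORTED	(-1)	/* per the SMCCC v1.1 spec */

/* res.a0 in struct arm_smccc_res is an unsigned long. */
static int old_supported(unsigned long a0) { return a0 == 0; }
static int new_supported(unsigned long a0) { return (int)a0 >= 0; }

int main(void)
{
	unsigned long positive = 1;	/* hypothetical positive success value */
	unsigned long nosupp = (unsigned long)SMCCC_RET_NOT_SUPPORTED;

	/* The old check misreads a positive success value as failure. */
	printf("a0 = 1:  old=%d new=%d\n",
	       old_supported(positive), new_supported(positive));
	printf("a0 = -1: old=%d new=%d\n",
	       old_supported(nosupp), new_supported(nosupp));
	return 0;
}
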
index 84a019f5502293dbe0c28cc078465e4ddd6c2096..8c704f1e53c22352ffbdaf0bc56b91887d8d6347 100644 (file)
@@ -108,7 +108,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
         * The following mapping attributes may be updated in live
         * kernel mappings without the need for break-before-make.
         */
-       static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
+       static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
 
        /* creating or taking down mappings is always safe */
        if (old == 0 || new == 0)
@@ -118,9 +118,9 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
        if ((old | new) & PTE_CONT)
                return false;
 
-       /* Transitioning from Global to Non-Global is safe */
-       if (((old ^ new) == PTE_NG) && (new & PTE_NG))
-               return true;
+       /* Transitioning from Non-Global to Global is unsafe */
+       if (old & ~new & PTE_NG)
+               return false;
 
        return ((old ^ new) & ~mask) == 0;
 }
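
The hunk pair above inverts the old special case: instead of whitelisting the one global-to-non-global transition, PTE_NG joins the attributes that may change freely, and only the reverse direction is rejected. The expression old & ~new & PTE_NG is nonzero exactly when the nG bit was set in the old entry and is being cleared in the new one. A standalone illustration (the bit position is taken from the arm64 descriptor format, for demonstration only):

#include <stdio.h>

#define PTE_NG (1UL << 11)	/* arm64 "not global" attribute bit */

/* Nonzero only for a non-global -> global transition. */
static unsigned long ng_cleared(unsigned long old, unsigned long new)
{
	return old & ~new & PTE_NG;
}

int main(void)
{
	printf("nG -> global: %lu (unsafe)\n", ng_cleared(PTE_NG, 0));
	printf("global -> nG: %lu (safe)\n", ng_cleared(0, PTE_NG));
	printf("nG kept:      %lu (safe)\n", ng_cleared(PTE_NG, PTE_NG));
	return 0;
}
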
index 9ab48ff80c1c8de3a5de0050e2d4d5f22dc0c773..6d11ae581ea775bc2e919e16bdd63cdb6b06d86f 100644 (file)
@@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
        }
 
        board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
+       if (!board_data)
+               goto error;
        ath25_board.config = (struct ath25_boarddata *)board_data;
        memcpy_fromio(board_data, bcfg, 0x100);
        if (broken_boarddata) {
index 5b3a3f6a9ad31fd845bd18cdd3197d9c75833c84..d99f5242169e7acb31f8cfa71cd6e14d24e94c82 100644 (file)
@@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
        }
 
        host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+       if (!host_data)
+               return -ENOMEM;
        raw_spin_lock_init(&host_data->lock);
 
        addr = of_get_address(ciu_node, 0, NULL, NULL);
index 9d41732a9146a31545b9114812cb12c669196478..159e83add4bb3e6b43f105b521761eb9cb80491b 100644 (file)
@@ -168,11 +168,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
                return;
        }
 
-       if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
-                       "smp_ipi0", NULL))
+       if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
+                       IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
                panic("Can't request IPI0 interrupt");
-       if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
-                       "smp_ipi1", NULL))
+       if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
+                       IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
                panic("Can't request IPI1 interrupt");
 }
 
index bc2fdbfa8223c343e6ed455f44bf6393d8d43d0a..72af0c18396983df62327d5a4dcf000bf557d326 100644 (file)
@@ -7,6 +7,8 @@ choice
 config LEMOTE_FULOONG2E
        bool "Lemote Fuloong(2e) mini-PC"
        select ARCH_SPARSEMEM_ENABLE
+       select ARCH_MIGHT_HAVE_PC_PARPORT
+       select ARCH_MIGHT_HAVE_PC_SERIO
        select CEVT_R4K
        select CSRC_R4K
        select SYS_HAS_CPU_LOONGSON2E
@@ -33,6 +35,8 @@ config LEMOTE_FULOONG2E
 config LEMOTE_MACH2F
        bool "Lemote Loongson 2F family machines"
        select ARCH_SPARSEMEM_ENABLE
+       select ARCH_MIGHT_HAVE_PC_PARPORT
+       select ARCH_MIGHT_HAVE_PC_SERIO
        select BOARD_SCACHE
        select BOOT_ELF32
        select CEVT_R4K if ! MIPS_EXTERNAL_TIMER
@@ -62,6 +66,8 @@ config LEMOTE_MACH2F
 config LOONGSON_MACH3X
        bool "Generic Loongson 3 family machines"
        select ARCH_SPARSEMEM_ENABLE
+       select ARCH_MIGHT_HAVE_PC_PARPORT
+       select ARCH_MIGHT_HAVE_PC_SERIO
        select GENERIC_ISA_DMA_SUPPORT_BROKEN
        select BOOT_ELF32
        select BOARD_SCACHE
index ef6549e5715717003bf3fe9fc3a3c869e1fd5b2f..26d5d2a5b8e99bc923eeb2a2fabd67ce5b3af1ab 100644 (file)
@@ -101,7 +101,8 @@ $(addprefix $(obj)/,$(zlib-y)): \
 libfdt       := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
 libfdtheader := fdt.h libfdt.h libfdt_internal.h
 
-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
+$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \
+       treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
        $(addprefix $(obj)/,$(libfdtheader))
 
 src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
index d22c41c26bb309f7e5319994dab91c60eff3e14f..acf4b2e0530cb671df1e80d56927e8650f0c2f84 100644 (file)
@@ -874,7 +874,6 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
                .mmu = 0,
                .hash_ext = 0,
                .radix_ext = 0,
-               .byte22 = 0,
        },
 
        /* option vector 6: IBM PAPR hints */
index 0c854816e653e25238f87c1cf9c44a8ed911df44..5cb4e4687107e1204667e3314bee2ce49de32de7 100644 (file)
@@ -195,6 +195,12 @@ static void kvmppc_pte_free(pte_t *ptep)
        kmem_cache_free(kvm_pte_cache, ptep);
 }
 
+/* Like pmd_huge() and pmd_large(), but works regardless of config options */
+static inline int pmd_is_leaf(pmd_t pmd)
+{
+       return !!(pmd_val(pmd) & _PAGE_PTE);
+}
+
 static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
                             unsigned int level, unsigned long mmu_seq)
 {
@@ -219,7 +225,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
        else
                new_pmd = pmd_alloc_one(kvm->mm, gpa);
 
-       if (level == 0 && !(pmd && pmd_present(*pmd)))
+       if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
                new_ptep = kvmppc_pte_alloc();
 
        /* Check if we might have been invalidated; let the guest retry if so */
@@ -244,12 +250,30 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
                new_pmd = NULL;
        }
        pmd = pmd_offset(pud, gpa);
-       if (pmd_large(*pmd)) {
-               /* Someone else has instantiated a large page here; retry */
-               ret = -EAGAIN;
-               goto out_unlock;
-       }
-       if (level == 1 && !pmd_none(*pmd)) {
+       if (pmd_is_leaf(*pmd)) {
+               unsigned long lgpa = gpa & PMD_MASK;
+
+               /*
+                * If we raced with another CPU which has just put
+                * a 2MB pte in after we saw a pte page, try again.
+                */
+               if (level == 0 && !new_ptep) {
+                       ret = -EAGAIN;
+                       goto out_unlock;
+               }
+               /* Valid 2MB page here already, remove it */
+               old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
+                                             ~0UL, 0, lgpa, PMD_SHIFT);
+               kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
+               if (old & _PAGE_DIRTY) {
+                       unsigned long gfn = lgpa >> PAGE_SHIFT;
+                       struct kvm_memory_slot *memslot;
+                       memslot = gfn_to_memslot(kvm, gfn);
+                       if (memslot && memslot->dirty_bitmap)
+                               kvmppc_update_dirty_map(memslot,
+                                                       gfn, PMD_SIZE);
+               }
+       } else if (level == 1 && !pmd_none(*pmd)) {
                /*
                 * There's a page table page here, but we wanted
                 * to install a large page.  Tell the caller and let
@@ -412,28 +436,24 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        } else {
                page = pages[0];
                pfn = page_to_pfn(page);
-               if (PageHuge(page)) {
-                       page = compound_head(page);
-                       pte_size <<= compound_order(page);
+               if (PageCompound(page)) {
+                       pte_size <<= compound_order(compound_head(page));
                        /* See if we can insert a 2MB large-page PTE here */
                        if (pte_size >= PMD_SIZE &&
-                           (gpa & PMD_MASK & PAGE_MASK) ==
-                           (hva & PMD_MASK & PAGE_MASK)) {
+                           (gpa & (PMD_SIZE - PAGE_SIZE)) ==
+                           (hva & (PMD_SIZE - PAGE_SIZE))) {
                                level = 1;
                                pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
                        }
                }
                /* See if we can provide write access */
                if (writing) {
-                       /*
-                        * We assume gup_fast has set dirty on the host PTE.
-                        */
                        pgflags |= _PAGE_WRITE;
                } else {
                        local_irq_save(flags);
                        ptep = find_current_mm_pte(current->mm->pgd,
                                                   hva, NULL, NULL);
-                       if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
+                       if (ptep && pte_write(*ptep))
                                pgflags |= _PAGE_WRITE;
                        local_irq_restore(flags);
                }
@@ -459,18 +479,15 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                pte = pfn_pte(pfn, __pgprot(pgflags));
                ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
        }
-       if (ret == 0 || ret == -EAGAIN)
-               ret = RESUME_GUEST;
 
        if (page) {
-               /*
-                * We drop pages[0] here, not page because page might
-                * have been set to the head page of a compound, but
-                * we have to drop the reference on the correct tail
-                * page to match the get inside gup()
-                */
-               put_page(pages[0]);
+               if (!ret && (pgflags & _PAGE_WRITE))
+                       set_page_dirty_lock(page);
+               put_page(page);
        }
+
+       if (ret == 0 || ret == -EAGAIN)
+               ret = RESUME_GUEST;
        return ret;
 }
 
@@ -644,7 +661,7 @@ void kvmppc_free_radix(struct kvm *kvm)
                                continue;
                        pmd = pmd_offset(pud, 0);
                        for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
-                               if (pmd_huge(*pmd)) {
+                               if (pmd_is_leaf(*pmd)) {
                                        pmd_clear(pmd);
                                        continue;
                                }
index 89707354c2efd89e95d1d1f861a170e8b6bfe51a..9cb9448163c4bf7021822d6632fb6c94452187ed 100644 (file)
@@ -2885,7 +2885,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
         */
        trace_hardirqs_on();
 
-       guest_enter();
+       guest_enter_irqoff();
 
        srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
@@ -2893,8 +2893,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 
        srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
-       guest_exit();
-
        trace_hardirqs_off();
        set_irq_happened(trap);
 
@@ -2937,6 +2935,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        kvmppc_set_host_core(pcpu);
 
        local_irq_enable();
+       guest_exit();
 
        /* Let secondaries go back to the offline loop */
        for (i = 0; i < controlled_threads; ++i) {
@@ -3656,15 +3655,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
                goto up_out;
 
        psize = vma_kernel_pagesize(vma);
-       porder = __ilog2(psize);
 
        up_read(&current->mm->mmap_sem);
 
        /* We can handle 4k, 64k or 16M pages in the VRMA */
-       err = -EINVAL;
-       if (!(psize == 0x1000 || psize == 0x10000 ||
-             psize == 0x1000000))
-               goto out_srcu;
+       if (psize >= 0x1000000)
+               psize = 0x1000000;
+       else if (psize >= 0x10000)
+               psize = 0x10000;
+       else
+               psize = 0x1000;
+       porder = __ilog2(psize);
 
        senc = slb_pgsize_encoding(psize);
        kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
index 403e642c78f5170b81855ef329e7148f454bfa3b..52c2053739862d2e7c53210458b0fbc9943f510b 100644 (file)
@@ -1345,7 +1345,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
                unsigned int rt, int is_default_endian)
 {
-       enum emulation_result emulated;
+       enum emulation_result emulated = EMULATE_DONE;
 
        while (vcpu->arch.mmio_vmx_copy_nums) {
                emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
@@ -1608,7 +1608,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
        kvm_sigset_deactivate(vcpu);
 
+#ifdef CONFIG_ALTIVEC
 out:
+#endif
        vcpu_put(vcpu);
        return r;
 }
index 77d7818130db49cb0108acf68a3d737e064cb918..339ac0964590a1337935ea55ee0cbc7d7c2e441e 100644 (file)
@@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
+       { "deliver_io_interrupt", VCPU_STAT(deliver_io_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_epsw", VCPU_STAT(instruction_epsw) },
        { "instruction_gs", VCPU_STAT(instruction_gs) },
@@ -2146,6 +2147,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
                /* we still need the basic sca for the ipte control */
                vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+               return;
        }
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
index eb7f43f235211257bca63e83ad0e33d9fa4d7ba3..0fa71a78ec99a9ae2e4dcbbadfa74773a5ebc031 100644 (file)
@@ -2307,7 +2307,7 @@ choice
          it can be used to assist security vulnerability exploitation.
 
          This setting can be changed at boot time via the kernel command
-         line parameter vsyscall=[native|emulate|none].
+         line parameter vsyscall=[emulate|none].
 
          On a system with recent enough glibc (2.14 or newer) and no
          static binaries, you can say None without a performance penalty
@@ -2315,15 +2315,6 @@ choice
 
          If unsure, select "Emulate".
 
-       config LEGACY_VSYSCALL_NATIVE
-               bool "Native"
-               help
-                 Actual executable code is located in the fixed vsyscall
-                 address mapping, implementing time() efficiently. Since
-                 this makes the mapping executable, it can be used during
-                 security vulnerability exploitation (traditionally as
-                 ROP gadgets). This configuration is not recommended.
-
        config LEGACY_VSYSCALL_EMULATE
                bool "Emulate"
                help
index e811dd9c5e99e1a1e61d20cce0fb9ba3cf23f535..08425c42f8b7c726e0daa70bb4e112582a513c20 100644 (file)
@@ -363,9 +363,7 @@ ENTRY(entry_INT80_compat)
        pushq   2*8(%rdi)               /* regs->ip */
        pushq   1*8(%rdi)               /* regs->orig_ax */
 
-       movq    (%rdi), %rdi            /* restore %rdi */
-
-       pushq   %rdi                    /* pt_regs->di */
+       pushq   (%rdi)                  /* pt_regs->di */
        pushq   %rsi                    /* pt_regs->si */
        pushq   %rdx                    /* pt_regs->dx */
        pushq   %rcx                    /* pt_regs->cx */
@@ -406,15 +404,3 @@ ENTRY(entry_INT80_compat)
        TRACE_IRQS_ON
        jmp     swapgs_restore_regs_and_return_to_usermode
 END(entry_INT80_compat)
-
-ENTRY(stub32_clone)
-       /*
-        * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
-        * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
-        *
-        * The native 64-bit kernel's sys_clone() implements the latter,
-        * so we need to swap arguments here before calling it:
-        */
-       xchg    %r8, %rcx
-       jmp     sys_clone
-ENDPROC(stub32_clone)
index 448ac2161112b7fd05c4388ff1327294499884f5..2a5e99cff8597278412685867f512858254f2b0c 100644 (file)
@@ -8,12 +8,12 @@
 #
 0      i386    restart_syscall         sys_restart_syscall
 1      i386    exit                    sys_exit
-2      i386    fork                    sys_fork                        sys_fork
+2      i386    fork                    sys_fork
 3      i386    read                    sys_read
 4      i386    write                   sys_write
 5      i386    open                    sys_open                        compat_sys_open
 6      i386    close                   sys_close
-7      i386    waitpid                 sys_waitpid                     sys32_waitpid
+7      i386    waitpid                 sys_waitpid                     compat_sys_x86_waitpid
 8      i386    creat                   sys_creat
 9      i386    link                    sys_link
 10     i386    unlink                  sys_unlink
@@ -78,7 +78,7 @@
 69     i386    ssetmask                sys_ssetmask
 70     i386    setreuid                sys_setreuid16
 71     i386    setregid                sys_setregid16
-72     i386    sigsuspend              sys_sigsuspend                  sys_sigsuspend
+72     i386    sigsuspend              sys_sigsuspend
 73     i386    sigpending              sys_sigpending                  compat_sys_sigpending
 74     i386    sethostname             sys_sethostname
 75     i386    setrlimit               sys_setrlimit                   compat_sys_setrlimit
@@ -96,7 +96,7 @@
 87     i386    swapon                  sys_swapon
 88     i386    reboot                  sys_reboot
 89     i386    readdir                 sys_old_readdir                 compat_sys_old_readdir
-90     i386    mmap                    sys_old_mmap                    sys32_mmap
+90     i386    mmap                    sys_old_mmap                    compat_sys_x86_mmap
 91     i386    munmap                  sys_munmap
 92     i386    truncate                sys_truncate                    compat_sys_truncate
 93     i386    ftruncate               sys_ftruncate                   compat_sys_ftruncate
 117    i386    ipc                     sys_ipc                         compat_sys_ipc
 118    i386    fsync                   sys_fsync
 119    i386    sigreturn               sys_sigreturn                   sys32_sigreturn
-120    i386    clone                   sys_clone                       stub32_clone
+120    i386    clone                   sys_clone                       compat_sys_x86_clone
 121    i386    setdomainname           sys_setdomainname
 122    i386    uname                   sys_newuname
 123    i386    modify_ldt              sys_modify_ldt
 177    i386    rt_sigtimedwait         sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait
 178    i386    rt_sigqueueinfo         sys_rt_sigqueueinfo             compat_sys_rt_sigqueueinfo
 179    i386    rt_sigsuspend           sys_rt_sigsuspend
-180    i386    pread64                 sys_pread64                     sys32_pread
-181    i386    pwrite64                sys_pwrite64                    sys32_pwrite
+180    i386    pread64                 sys_pread64                     compat_sys_x86_pread
+181    i386    pwrite64                sys_pwrite64                    compat_sys_x86_pwrite
 182    i386    chown                   sys_chown16
 183    i386    getcwd                  sys_getcwd
 184    i386    capget                  sys_capget
 187    i386    sendfile                sys_sendfile                    compat_sys_sendfile
 188    i386    getpmsg
 189    i386    putpmsg
-190    i386    vfork                   sys_vfork                       sys_vfork
+190    i386    vfork                   sys_vfork
 191    i386    ugetrlimit              sys_getrlimit                   compat_sys_getrlimit
 192    i386    mmap2                   sys_mmap_pgoff
-193    i386    truncate64              sys_truncate64                  sys32_truncate64
-194    i386    ftruncate64             sys_ftruncate64                 sys32_ftruncate64
-195    i386    stat64                  sys_stat64                      sys32_stat64
-196    i386    lstat64                 sys_lstat64                     sys32_lstat64
-197    i386    fstat64                 sys_fstat64                     sys32_fstat64
+193    i386    truncate64              sys_truncate64                  compat_sys_x86_truncate64
+194    i386    ftruncate64             sys_ftruncate64                 compat_sys_x86_ftruncate64
+195    i386    stat64                  sys_stat64                      compat_sys_x86_stat64
+196    i386    lstat64                 sys_lstat64                     compat_sys_x86_lstat64
+197    i386    fstat64                 sys_fstat64                     compat_sys_x86_fstat64
 198    i386    lchown32                sys_lchown
 199    i386    getuid32                sys_getuid
 200    i386    getgid32                sys_getgid
 # 222 is unused
 # 223 is unused
 224    i386    gettid                  sys_gettid
-225    i386    readahead               sys_readahead                   sys32_readahead
+225    i386    readahead               sys_readahead                   compat_sys_x86_readahead
 226    i386    setxattr                sys_setxattr
 227    i386    lsetxattr               sys_lsetxattr
 228    i386    fsetxattr               sys_fsetxattr
 247    i386    io_getevents            sys_io_getevents                compat_sys_io_getevents
 248    i386    io_submit               sys_io_submit                   compat_sys_io_submit
 249    i386    io_cancel               sys_io_cancel
-250    i386    fadvise64               sys_fadvise64                   sys32_fadvise64
+250    i386    fadvise64               sys_fadvise64                   compat_sys_x86_fadvise64
 # 251 is available for reuse (was briefly sys_set_zone_reclaim)
 252    i386    exit_group              sys_exit_group
 253    i386    lookup_dcookie          sys_lookup_dcookie              compat_sys_lookup_dcookie
 269    i386    fstatfs64               sys_fstatfs64                   compat_sys_fstatfs64
 270    i386    tgkill                  sys_tgkill
 271    i386    utimes                  sys_utimes                      compat_sys_utimes
-272    i386    fadvise64_64            sys_fadvise64_64                sys32_fadvise64_64
+272    i386    fadvise64_64            sys_fadvise64_64                compat_sys_x86_fadvise64_64
 273    i386    vserver
 274    i386    mbind                   sys_mbind
 275    i386    get_mempolicy           sys_get_mempolicy               compat_sys_get_mempolicy
 297    i386    mknodat                 sys_mknodat
 298    i386    fchownat                sys_fchownat
 299    i386    futimesat               sys_futimesat                   compat_sys_futimesat
-300    i386    fstatat64               sys_fstatat64                   sys32_fstatat
+300    i386    fstatat64               sys_fstatat64                   compat_sys_x86_fstatat
 301    i386    unlinkat                sys_unlinkat
 302    i386    renameat                sys_renameat
 303    i386    linkat                  sys_linkat
 311    i386    set_robust_list         sys_set_robust_list             compat_sys_set_robust_list
 312    i386    get_robust_list         sys_get_robust_list             compat_sys_get_robust_list
 313    i386    splice                  sys_splice
-314    i386    sync_file_range         sys_sync_file_range             sys32_sync_file_range
+314    i386    sync_file_range         sys_sync_file_range             compat_sys_x86_sync_file_range
 315    i386    tee                     sys_tee
 316    i386    vmsplice                sys_vmsplice                    compat_sys_vmsplice
 317    i386    move_pages              sys_move_pages                  compat_sys_move_pages
 321    i386    signalfd                sys_signalfd                    compat_sys_signalfd
 322    i386    timerfd_create          sys_timerfd_create
 323    i386    eventfd                 sys_eventfd
-324    i386    fallocate               sys_fallocate                   sys32_fallocate
+324    i386    fallocate               sys_fallocate                   compat_sys_x86_fallocate
 325    i386    timerfd_settime         sys_timerfd_settime             compat_sys_timerfd_settime
 326    i386    timerfd_gettime         sys_timerfd_gettime             compat_sys_timerfd_gettime
 327    i386    signalfd4               sys_signalfd4                   compat_sys_signalfd4
index 577fa8adb785baf5ea1c993a2bbc88adf43fbbcc..8560ef68a9d631163934a40415df65d2a495f2bc 100644 (file)
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
-#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
-       NATIVE;
-#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
+static enum { EMULATE, NONE } vsyscall_mode =
+#ifdef CONFIG_LEGACY_VSYSCALL_NONE
        NONE;
 #else
        EMULATE;
@@ -56,8 +54,6 @@ static int __init vsyscall_setup(char *str)
        if (str) {
                if (!strcmp("emulate", str))
                        vsyscall_mode = EMULATE;
-               else if (!strcmp("native", str))
-                       vsyscall_mode = NATIVE;
                else if (!strcmp("none", str))
                        vsyscall_mode = NONE;
                else
@@ -139,10 +135,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        WARN_ON_ONCE(address != regs->ip);
 
-       /* This should be unreachable in NATIVE mode. */
-       if (WARN_ON(vsyscall_mode == NATIVE))
-               return false;
-
        if (vsyscall_mode == NONE) {
                warn_bad_vsyscall(KERN_INFO, regs,
                                  "vsyscall attempted with vsyscall=none");
@@ -370,9 +362,7 @@ void __init map_vsyscall(void)
 
        if (vsyscall_mode != NONE) {
                __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
-                            vsyscall_mode == NATIVE
-                            ? PAGE_KERNEL_VSYSCALL
-                            : PAGE_KERNEL_VVAR);
+                            PAGE_KERNEL_VVAR);
                set_vsyscall_pgtable_user_bits(swapper_pg_dir);
        }
 
index 6d8044ab10607b6c668bfee0d6366266401e7e2f..22ec65bc033a93c0acd850b76bbb59d202f8c452 100644 (file)
@@ -3606,7 +3606,7 @@ static struct intel_uncore_type skx_uncore_imc = {
 };
 
 static struct attribute *skx_upi_uncore_formats_attr[] = {
-       &format_attr_event_ext.attr,
+       &format_attr_event.attr,
        &format_attr_umask_ext.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
index 96cd33bbfc85494f52e4131f50b37129393d3b8a..6512498bbef69ced1f98c72afb5b11ade91ef4a5 100644 (file)
 #define AA(__x)                ((unsigned long)(__x))
 
 
-asmlinkage long sys32_truncate64(const char __user *filename,
-                                unsigned long offset_low,
-                                unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_truncate64, const char __user *, filename,
+                      unsigned long, offset_low, unsigned long, offset_high)
 {
        return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
 }
 
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
-                                 unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_ftruncate64, unsigned int, fd,
+                      unsigned long, offset_low, unsigned long, offset_high)
 {
        return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
 }
@@ -96,8 +95,8 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
        return 0;
 }
 
-asmlinkage long sys32_stat64(const char __user *filename,
-                            struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_stat64, const char __user *, filename,
+                      struct stat64 __user *, statbuf)
 {
        struct kstat stat;
        int ret = vfs_stat(filename, &stat);
@@ -107,8 +106,8 @@ asmlinkage long sys32_stat64(const char __user *filename,
        return ret;
 }
 
-asmlinkage long sys32_lstat64(const char __user *filename,
-                             struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_lstat64, const char __user *, filename,
+                      struct stat64 __user *, statbuf)
 {
        struct kstat stat;
        int ret = vfs_lstat(filename, &stat);
@@ -117,7 +116,8 @@ asmlinkage long sys32_lstat64(const char __user *filename,
        return ret;
 }
 
-asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_fstat64, unsigned int, fd,
+                      struct stat64 __user *, statbuf)
 {
        struct kstat stat;
        int ret = vfs_fstat(fd, &stat);
@@ -126,8 +126,9 @@ asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
        return ret;
 }
 
-asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename,
-                             struct stat64 __user *statbuf, int flag)
+COMPAT_SYSCALL_DEFINE4(x86_fstatat, unsigned int, dfd,
+                      const char __user *, filename,
+                      struct stat64 __user *, statbuf, int, flag)
 {
        struct kstat stat;
        int error;
@@ -153,7 +154,7 @@ struct mmap_arg_struct32 {
        unsigned int offset;
 };
 
-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
+COMPAT_SYSCALL_DEFINE1(x86_mmap, struct mmap_arg_struct32 __user *, arg)
 {
        struct mmap_arg_struct32 a;
 
@@ -167,22 +168,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
                               a.offset>>PAGE_SHIFT);
 }
 
-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
-                             int options)
+COMPAT_SYSCALL_DEFINE3(x86_waitpid, compat_pid_t, pid, unsigned int __user *,
+                      stat_addr, int, options)
 {
        return compat_sys_wait4(pid, stat_addr, options, NULL);
 }
 
 /* warning: next two assume little endian */
-asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
-                           u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pread, unsigned int, fd, char __user *, ubuf,
+                      u32, count, u32, poslo, u32, poshi)
 {
        return sys_pread64(fd, ubuf, count,
                         ((loff_t)AA(poshi) << 32) | AA(poslo));
 }
 
-asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
-                            u32 count, u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pwrite, unsigned int, fd, const char __user *, ubuf,
+                      u32, count, u32, poslo, u32, poshi)
 {
        return sys_pwrite64(fd, ubuf, count,
                          ((loff_t)AA(poshi) << 32) | AA(poslo));
@@ -193,8 +194,9 @@ asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
  * Some system calls need sign-extended arguments. This could be
  * done by a generic wrapper.
  */
-long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
-                       __u32 len_low, __u32 len_high, int advice)
+COMPAT_SYSCALL_DEFINE6(x86_fadvise64_64, int, fd, __u32, offset_low,
+                      __u32, offset_high, __u32, len_low, __u32, len_high,
+                      int, advice)
 {
        return sys_fadvise64_64(fd,
                               (((u64)offset_high)<<32) | offset_low,
@@ -202,31 +204,43 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
                                advice);
 }
 
-asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
-                                  size_t count)
+COMPAT_SYSCALL_DEFINE4(x86_readahead, int, fd, unsigned int, off_lo,
+                      unsigned int, off_hi, size_t, count)
 {
        return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
 }
 
-asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
-                                     unsigned n_low, unsigned n_hi,  int flags)
+COMPAT_SYSCALL_DEFINE6(x86_sync_file_range, int, fd, unsigned int, off_low,
+                      unsigned int, off_hi, unsigned int, n_low,
+                      unsigned int, n_hi, int, flags)
 {
        return sys_sync_file_range(fd,
                                   ((u64)off_hi << 32) | off_low,
                                   ((u64)n_hi << 32) | n_low, flags);
 }
 
-asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
-                               size_t len, int advice)
+COMPAT_SYSCALL_DEFINE5(x86_fadvise64, int, fd, unsigned int, offset_lo,
+                      unsigned int, offset_hi, size_t, len, int, advice)
 {
        return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
                                len, advice);
 }
 
-asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
-                               unsigned offset_hi, unsigned len_lo,
-                               unsigned len_hi)
+COMPAT_SYSCALL_DEFINE6(x86_fallocate, int, fd, int, mode,
+                      unsigned int, offset_lo, unsigned int, offset_hi,
+                      unsigned int, len_lo, unsigned int, len_hi)
 {
        return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
                             ((u64)len_hi << 32) | len_lo);
 }
+
+/*
+ * The 32-bit clone ABI is CONFIG_CLONE_BACKWARDS
+ */
+COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags,
+                      unsigned long, newsp, int __user *, parent_tidptr,
+                      unsigned long, tls_val, int __user *, child_tidptr)
+{
+       return sys_clone(clone_flags, newsp, parent_tidptr, child_tidptr,
+                       tls_val);
+}
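
This C wrapper replaces the asm stub32_clone deleted from entry_64_compat.S earlier in this diff; the removed stub's comment spells out the argument-order difference the wrapper now handles:

/*
 * 32-bit (CONFIG_CLONE_BACKWARDS) argument order:
 *     clone(flags, newsp, parent_tidptr, tls_val, child_tidptr)
 * native 64-bit sys_clone() argument order:
 *     clone(flags, newsp, parent_tidptr, child_tidptr, tls_val)
 *
 * so compat_sys_x86_clone() merely swaps the last two arguments,
 * doing in C what "xchg %r8, %rcx" did in the deleted stub.
 */
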
index 246f15b4e64ced7b9f70fd789d1a2270214fb7a2..acfe755562a6aa85ecd74294fa1d063093223976 100644 (file)
@@ -174,7 +174,6 @@ enum page_cache_mode {
 #define __PAGE_KERNEL_RO               (__PAGE_KERNEL & ~_PAGE_RW)
 #define __PAGE_KERNEL_RX               (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
 #define __PAGE_KERNEL_NOCACHE          (__PAGE_KERNEL | _PAGE_NOCACHE)
-#define __PAGE_KERNEL_VSYSCALL         (__PAGE_KERNEL_RX | _PAGE_USER)
 #define __PAGE_KERNEL_VVAR             (__PAGE_KERNEL_RO | _PAGE_USER)
 #define __PAGE_KERNEL_LARGE            (__PAGE_KERNEL | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC       (__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -206,7 +205,6 @@ enum page_cache_mode {
 #define PAGE_KERNEL_NOCACHE    __pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE      __pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
-#define PAGE_KERNEL_VSYSCALL   __pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC)
 #define PAGE_KERNEL_VVAR       __pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
 
 #define PAGE_KERNEL_IO         __pgprot(__PAGE_KERNEL_IO)
index d6baf23782bcc23811c12bac8a6a181c2a6cdb5b..5c019d23d06b1168da0ea965d7c35bebd4d02307 100644 (file)
@@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[];
 
 #if defined(CONFIG_X86_64)
 extern char __end_rodata_hpage_align[];
+extern char __entry_trampoline_start[], __entry_trampoline_end[];
 #endif
 
 #endif /* _ASM_X86_SECTIONS_H */
index 82c34ee25a651760c9950ce6c54625896fd9ea2f..906794aa034e732ec57d32a8be0ef77085a553a6 100644 (file)
 #include <asm/ia32.h>
 
 /* ia32/sys_ia32.c */
-asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long);
-asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long);
+asmlinkage long compat_sys_x86_truncate64(const char __user *, unsigned long,
+                                         unsigned long);
+asmlinkage long compat_sys_x86_ftruncate64(unsigned int, unsigned long,
+                                          unsigned long);
 
-asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *);
-asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *);
-asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *);
-asmlinkage long sys32_fstatat(unsigned int, const char __user *,
+asmlinkage long compat_sys_x86_stat64(const char __user *,
+                                     struct stat64 __user *);
+asmlinkage long compat_sys_x86_lstat64(const char __user *,
+                                      struct stat64 __user *);
+asmlinkage long compat_sys_x86_fstat64(unsigned int, struct stat64 __user *);
+asmlinkage long compat_sys_x86_fstatat(unsigned int, const char __user *,
                              struct stat64 __user *, int);
 struct mmap_arg_struct32;
-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *);
+asmlinkage long compat_sys_x86_mmap(struct mmap_arg_struct32 __user *);
 
-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
+asmlinkage long compat_sys_x86_waitpid(compat_pid_t, unsigned int __user *,
+                                      int);
 
-asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32);
-asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
+asmlinkage long compat_sys_x86_pread(unsigned int, char __user *, u32, u32,
+                                    u32);
+asmlinkage long compat_sys_x86_pwrite(unsigned int, const char __user *, u32,
+                                     u32, u32);
 
-long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
-long sys32_vm86_warning(void);
+asmlinkage long compat_sys_x86_fadvise64_64(int, __u32, __u32, __u32, __u32,
+                                           int);
 
-asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t);
-asmlinkage long sys32_sync_file_range(int, unsigned, unsigned,
-                                     unsigned, unsigned, int);
-asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int);
-asmlinkage long sys32_fallocate(int, int, unsigned,
-                               unsigned, unsigned, unsigned);
+asmlinkage ssize_t compat_sys_x86_readahead(int, unsigned int, unsigned int,
+                                           size_t);
+asmlinkage long compat_sys_x86_sync_file_range(int, unsigned int, unsigned int,
+                                              unsigned int, unsigned int,
+                                              int);
+asmlinkage long compat_sys_x86_fadvise64(int, unsigned int, unsigned int,
+                                        size_t, int);
+asmlinkage long compat_sys_x86_fallocate(int, int, unsigned int, unsigned int,
+                                        unsigned int, unsigned int);
+asmlinkage long compat_sys_x86_clone(unsigned long, unsigned long, int __user *,
+                                    unsigned long, int __user *);
 
 /* ia32/ia32_signal.c */
 asmlinkage long sys32_sigreturn(void);
index 91723461dc1feb0d63352c653fe0dafe625af379..435db58a7badec77e38e3d9e7f6ead5954efc4cc 100644 (file)
@@ -30,6 +30,7 @@ struct mce {
        __u64 synd;     /* MCA_SYND MSR: only valid on SMCA systems */
        __u64 ipid;     /* MCA_IPID MSR: only valid on SMCA systems */
        __u64 ppin;     /* Protected Processor Inventory Number */
+       __u32 microcode;        /* Microcode revision */
 };
 
 #define MCE_GET_RECORD_LEN   _IOR('M', 1, int)
index d19e903214b403289aaf304eba85cc585c100c5e..4aa9fd3793905924d1fde4ec23e3e35231545596 100644 (file)
@@ -144,6 +144,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 {
        int i;
 
+       /*
+        * We know that hypervisors lie to us about the microcode version,
+        * so we may as well hope that they are running the correct version.
+        */
+       if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+               return false;
+
        for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
                if (c->x86_model == spectre_bad_microcodes[i].model &&
                    c->x86_stepping == spectre_bad_microcodes[i].stepping)
index 8ff94d1e2dce54e87cc72c63812365d610476ec8..466f47301334ba0c8e64c17fd8fa5b7903cca44f 100644 (file)
@@ -56,6 +56,9 @@
 
 static DEFINE_MUTEX(mce_log_mutex);
 
+/* sysfs synchronization */
+static DEFINE_MUTEX(mce_sysfs_mutex);
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>
 
@@ -130,6 +133,8 @@ void mce_setup(struct mce *m)
 
        if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
                rdmsrl(MSR_PPIN, m->ppin);
+
+       m->microcode = boot_cpu_data.microcode;
 }
 
 DEFINE_PER_CPU(struct mce, injectm);
@@ -262,7 +267,7 @@ static void __print_mce(struct mce *m)
         */
        pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
                m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
-               cpu_data(m->extcpu).microcode);
+               m->microcode);
 }
 
 static void print_mce(struct mce *m)
@@ -2086,6 +2091,7 @@ static ssize_t set_ignore_ce(struct device *s,
        if (kstrtou64(buf, 0, &new) < 0)
                return -EINVAL;
 
+       mutex_lock(&mce_sysfs_mutex);
        if (mca_cfg.ignore_ce ^ !!new) {
                if (new) {
                        /* disable ce features */
@@ -2098,6 +2104,8 @@ static ssize_t set_ignore_ce(struct device *s,
                        on_each_cpu(mce_enable_ce, (void *)1, 1);
                }
        }
+       mutex_unlock(&mce_sysfs_mutex);
+
        return size;
 }
 
@@ -2110,6 +2118,7 @@ static ssize_t set_cmci_disabled(struct device *s,
        if (kstrtou64(buf, 0, &new) < 0)
                return -EINVAL;
 
+       mutex_lock(&mce_sysfs_mutex);
        if (mca_cfg.cmci_disabled ^ !!new) {
                if (new) {
                        /* disable cmci */
@@ -2121,6 +2130,8 @@ static ssize_t set_cmci_disabled(struct device *s,
                        on_each_cpu(mce_enable_ce, NULL, 1);
                }
        }
+       mutex_unlock(&mce_sysfs_mutex);
+
        return size;
 }
 
@@ -2128,8 +2139,19 @@ static ssize_t store_int_with_restart(struct device *s,
                                      struct device_attribute *attr,
                                      const char *buf, size_t size)
 {
-       ssize_t ret = device_store_int(s, attr, buf, size);
+       unsigned long old_check_interval = check_interval;
+       ssize_t ret = device_store_ulong(s, attr, buf, size);
+
+       if (check_interval == old_check_interval)
+               return ret;
+
+       if (check_interval < 1)
+               check_interval = 1;
+
+       mutex_lock(&mce_sysfs_mutex);
        mce_restart();
+       mutex_unlock(&mce_sysfs_mutex);
+
        return ret;
 }
 
index aa1b9a422f2be0daf18a7084837165172ef0fafb..70ecbc8099c94f3f5f1205b0fe9c841f4754d82b 100644 (file)
 #define pr_fmt(fmt) "microcode: " fmt
 
 #include <linux/platform_device.h>
+#include <linux/stop_machine.h>
 #include <linux/syscore_ops.h>
 #include <linux/miscdevice.h>
 #include <linux/capability.h>
 #include <linux/firmware.h>
 #include <linux/kernel.h>
+#include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/cpu.h>
+#include <linux/nmi.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 
@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
  */
 static DEFINE_MUTEX(microcode_mutex);
 
+/*
+ * Serialize late loading so that CPUs get updated one-by-one.
+ */
+static DEFINE_SPINLOCK(update_lock);
+
 struct ucode_cpu_info          ucode_cpu_info[NR_CPUS];
 
 struct cpu_info_ctx {
@@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu)
        return ret;
 }
 
-struct apply_microcode_ctx {
-       enum ucode_state err;
-};
-
 static void apply_microcode_local(void *arg)
 {
-       struct apply_microcode_ctx *ctx = arg;
+       enum ucode_state *err = arg;
 
-       ctx->err = microcode_ops->apply_microcode(smp_processor_id());
+       *err = microcode_ops->apply_microcode(smp_processor_id());
 }
 
 static int apply_microcode_on_target(int cpu)
 {
-       struct apply_microcode_ctx ctx = { .err = 0 };
+       enum ucode_state err;
        int ret;
 
-       ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
-       if (!ret)
-               ret = ctx.err;
-
+       ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
+       if (!ret) {
+               if (err == UCODE_ERROR)
+                       ret = 1;
+       }
        return ret;
 }
 
@@ -489,19 +494,100 @@ static void __exit microcode_dev_exit(void)
 /* fake device for request_firmware */
 static struct platform_device  *microcode_pdev;
 
-static enum ucode_state reload_for_cpu(int cpu)
+/*
+ * Late loading dance. Why the heavy-handed stop_machine() effort?
+ *
+ * - HT siblings must be idle and not execute other code while the other sibling
+ *   is loading microcode in order to avoid any negative interactions caused by
+ *   the loading.
+ *
+ * - In addition, microcode update on the cores must be serialized until this
+ *   requirement can be relaxed in the future. Right now, this is conservative
+ *   and good.
+ */
+#define SPINUNIT 100 /* 100 nsec */
+
+static int check_online_cpus(void)
 {
-       struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-       enum ucode_state ustate;
+       if (num_online_cpus() == num_present_cpus())
+               return 0;
 
-       if (!uci->valid)
-               return UCODE_OK;
+       pr_err("Not all CPUs online, aborting microcode update.\n");
+
+       return -EINVAL;
+}
+
+static atomic_t late_cpus;
+
+/*
+ * Returns:
+ * < 0 - on error
+ *   0 - no update done
+ *   1 - microcode was updated
+ */
+static int __reload_late(void *info)
+{
+       unsigned int timeout = NSEC_PER_SEC;
+       int all_cpus = num_online_cpus();
+       int cpu = smp_processor_id();
+       enum ucode_state err;
+       int ret = 0;
+
+       atomic_dec(&late_cpus);
+
+       /*
+        * Wait for all CPUs to arrive. A load will not be attempted unless all
+        * CPUs show up.
+        */
+       while (atomic_read(&late_cpus)) {
+               if (timeout < SPINUNIT) {
+                       pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+                               atomic_read(&late_cpus));
+                       return -1;
+               }
 
-       ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
-       if (ustate != UCODE_OK)
-               return ustate;
+               ndelay(SPINUNIT);
+               timeout -= SPINUNIT;
 
-       return apply_microcode_on_target(cpu);
+               touch_nmi_watchdog();
+       }
+
+       spin_lock(&update_lock);
+       apply_microcode_local(&err);
+       spin_unlock(&update_lock);
+
+       if (err > UCODE_NFOUND) {
+               pr_warn("Error reloading microcode on CPU %d\n", cpu);
+               ret = -1;
+       } else if (err == UCODE_UPDATED) {
+               ret = 1;
+       }
+
+       atomic_inc(&late_cpus);
+
+       while (atomic_read(&late_cpus) != all_cpus)
+               cpu_relax();
+
+       return ret;
+}
+
+/*
+ * Reload microcode late on all CPUs. Wait up to a second for them
+ * to all gather together.
+ */
+static int microcode_reload_late(void)
+{
+       int ret;
+
+       atomic_set(&late_cpus, num_online_cpus());
+
+       ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+       if (ret < 0)
+               return ret;
+       else if (ret > 0)
+               microcode_check();
+
+       return ret;
 }
 
 static ssize_t reload_store(struct device *dev,
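
(The rendezvous in __reload_late() above is a two-phase barrier: count
down and spin until every CPU has arrived, do the work, then count back
up and spin until every CPU is done, so no core runs ahead of or behind
its siblings. A minimal userspace sketch of the same idea with C11
atomics and pthreads -- illustrative only, omitting the kernel's timeout,
ndelay() pacing, NMI-watchdog feeding and update_lock serialization:)

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdio.h>

        #define NTHREADS 4

        static atomic_int late_cpus = NTHREADS;

        static void *reload_one(void *arg)
        {
                int id = *(int *)arg;

                atomic_fetch_sub(&late_cpus, 1);
                while (atomic_load(&late_cpus))
                        ;       /* phase 1: wait for all to arrive */

                printf("thread %d: apply the update here\n", id);

                atomic_fetch_add(&late_cpus, 1);
                while (atomic_load(&late_cpus) != NTHREADS)
                        ;       /* phase 2: wait for all to finish */
                return NULL;
        }

        int main(void)
        {
                pthread_t t[NTHREADS];
                int id[NTHREADS];

                for (int i = 0; i < NTHREADS; i++) {
                        id[i] = i;
                        pthread_create(&t[i], NULL, reload_one, &id[i]);
                }
                for (int i = 0; i < NTHREADS; i++)
                        pthread_join(t[i], NULL);
                return 0;
        }
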
@@ -509,10 +595,9 @@ static ssize_t reload_store(struct device *dev,
                            const char *buf, size_t size)
 {
        enum ucode_state tmp_ret = UCODE_OK;
-       bool do_callback = false;
+       int bsp = boot_cpu_data.cpu_index;
        unsigned long val;
        ssize_t ret = 0;
-       int cpu;
 
        ret = kstrtoul(buf, 0, &val);
        if (ret)
@@ -521,29 +606,24 @@ static ssize_t reload_store(struct device *dev,
        if (val != 1)
                return size;
 
-       get_online_cpus();
-       mutex_lock(&microcode_mutex);
-       for_each_online_cpu(cpu) {
-               tmp_ret = reload_for_cpu(cpu);
-               if (tmp_ret > UCODE_NFOUND) {
-                       pr_warn("Error reloading microcode on CPU %d\n", cpu);
-
-                       /* set retval for the first encountered reload error */
-                       if (!ret)
-                               ret = -EINVAL;
-               }
+       tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+       if (tmp_ret != UCODE_OK)
+               return size;
 
-               if (tmp_ret == UCODE_UPDATED)
-                       do_callback = true;
-       }
+       get_online_cpus();
 
-       if (!ret && do_callback)
-               microcode_check();
+       ret = check_online_cpus();
+       if (ret)
+               goto put;
 
+       mutex_lock(&microcode_mutex);
+       ret = microcode_reload_late();
        mutex_unlock(&microcode_mutex);
+
+put:
        put_online_cpus();
 
-       if (!ret)
+       if (ret >= 0)
                ret = size;
 
        return ret;
index 923054a6b76013a1474372b7810749f577b0c811..2aded9db1d425c299d0be8998bfbc5c1cc619547 100644 (file)
@@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
        if (!mc)
                return 0;
 
+       /*
+        * Save us the MSR write below - which is a particularly expensive
+        * operation - when the other hyperthread has updated the microcode
+        * already.
+        */
+       rev = intel_get_microcode_revision();
+       if (rev >= mc->hdr.rev) {
+               uci->cpu_sig.rev = rev;
+               return UCODE_OK;
+       }
+
+       /*
+        * Writeback and invalidate caches before updating microcode to avoid
+        * internal issues depending on what the microcode is updating.
+        */
+       native_wbinvd();
+
        /* write microcode via MSR 0x79 */
        native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
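
(The early-return added above leans on intel_get_microcode_revision(),
which reads the loaded revision out of MSR 0x8B; roughly the following,
as a simplified sketch rather than the exact kernel helper:)

        static u32 get_ucode_rev(void)
        {
                u32 dummy, rev;

                /* request a fresh revision report, then serialize */
                native_wrmsrl(MSR_IA32_UCODE_REV, 0);
                sync_core();
                /* the current revision is reported in EDX */
                native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
                return rev;
        }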
 
@@ -774,9 +791,9 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 
 static enum ucode_state apply_microcode_intel(int cpu)
 {
+       struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_intel *mc;
-       struct ucode_cpu_info *uci;
-       struct cpuinfo_x86 *c;
        static int prev_rev;
        u32 rev;
 
@@ -784,15 +801,32 @@ static enum ucode_state apply_microcode_intel(int cpu)
        if (WARN_ON(raw_smp_processor_id() != cpu))
                return UCODE_ERROR;
 
-       uci = ucode_cpu_info + cpu;
-       mc = uci->mc;
+       /* Look for a newer patch in our cache: */
+       mc = find_patch(uci);
        if (!mc) {
-               /* Look for a newer patch in our cache: */
-               mc = find_patch(uci);
+               mc = uci->mc;
                if (!mc)
                        return UCODE_NFOUND;
        }
 
+       /*
+        * Save us the MSR write below - which is a particularly expensive
+        * operation - when the other hyperthread has updated the microcode
+        * already.
+        */
+       rev = intel_get_microcode_revision();
+       if (rev >= mc->hdr.rev) {
+               uci->cpu_sig.rev = rev;
+               c->microcode = rev;
+               return UCODE_OK;
+       }
+
+       /*
+        * Writeback and invalidate caches before updating microcode to avoid
+        * internal issues depending on what the microcode is updating.
+        */
+       native_wbinvd();
+
        /* write microcode via MSR 0x79 */
        wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 
@@ -813,8 +847,6 @@ static enum ucode_state apply_microcode_intel(int cpu)
                prev_rev = rev;
        }
 
-       c = &cpu_data(cpu);
-
        uci->cpu_sig.rev = rev;
        c->microcode = rev;
 
index 2f723301eb58fc5ad0d6796b342446ae2ee0c9e6..38deafebb21b726227fb2a12a7386f49603189fe 100644 (file)
@@ -23,7 +23,7 @@
 /*
  * this changes the io permissions bitmap in the current task.
  */
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
 {
        struct thread_struct *t = &current->thread;
        struct tss_struct *tss;
index bd36f3c33cd0f96f47b61034b02c205301fe87d1..0715f827607c4a2742e140f8d9a656ed4514d226 100644 (file)
@@ -1168,10 +1168,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler);
 
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
+       bool is_in_entry_trampoline_section = false;
+
+#ifdef CONFIG_X86_64
+       is_in_entry_trampoline_section =
+               (addr >= (unsigned long)__entry_trampoline_start &&
+                addr < (unsigned long)__entry_trampoline_end);
+#endif
        return  (addr >= (unsigned long)__kprobes_text_start &&
                 addr < (unsigned long)__kprobes_text_end) ||
                (addr >= (unsigned long)__entry_text_start &&
-                addr < (unsigned long)__entry_text_end);
+                addr < (unsigned long)__entry_text_end) ||
+               is_in_entry_trampoline_section;
 }
 
 int __init arch_init_kprobes(void)
index 9b138a06c1a468e6a6d3fe41748abef3a436ace3..b854ebf5851b7c8fb6225b53e7d3a81b16ec43db 100644 (file)
@@ -118,9 +118,11 @@ SECTIONS
 
 #ifdef CONFIG_X86_64
                . = ALIGN(PAGE_SIZE);
+               VMLINUX_SYMBOL(__entry_trampoline_start) = .;
                _entry_trampoline = .;
                *(.entry_trampoline)
                . = ALIGN(PAGE_SIZE);
+               VMLINUX_SYMBOL(__entry_trampoline_end) = .;
                ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
 #endif
 
index ce38f165489b5a13d92091c8671879f30ce44e20..631507f0c1980cc03367c161b7164a16c40de496 100644 (file)
@@ -332,7 +332,7 @@ static void __init pti_clone_user_shared(void)
 }
 
 /*
- * Clone the ESPFIX P4D into the user space visinble page table
+ * Clone the ESPFIX P4D into the user space visible page table
  */
 static void __init pti_setup_espfix64(void)
 {
index 87855b5123a6307cb014f78af2ae07b43ef77cf4..ee62d2d517bf4537c60cdd83c70363382403fc57 100644 (file)
@@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
        struct iov_iter i;
        ssize_t bw;
 
-       iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
+       iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
 
        file_start_write(file);
        bw = vfs_iter_write(file, &i, ppos, 0);
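
(The fix above matters because an iov_iter is tagged with its data
direction at init time: WRITE for a data source, READ for a data sink.
The read-side twin of this helper would be tagged the other way round --
a sketch, not the actual loop.c code:)

        iov_iter_bvec(&i, ITER_BVEC | READ, bvec, 1, bvec->bv_len);
        len = vfs_iter_read(file, &i, ppos, 0);
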
index e126e4cac2ca499566da91a6e3da01d0b1e4381e..92ec1bbece51d31c44f88eb6a2037333dd7a9f40 100644 (file)
@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock);
 
 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
 static void blkfront_gather_backend_features(struct blkfront_info *info);
+static int negotiate_mq(struct blkfront_info *info);
 
 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
 {
@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev,
        unsigned int i, max_page_order;
        unsigned int ring_page_order;
 
+       if (!info)
+               return -ENODEV;
+
        max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
                                              "max-ring-page-order", 0);
        ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
        info->nr_ring_pages = 1 << ring_page_order;
 
+       err = negotiate_mq(info);
+       if (err)
+               goto destroy_blkring;
+
        for (i = 0; i < info->nr_rings; i++) {
                struct blkfront_ring_info *rinfo = &info->rinfo[i];
 
@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev,
        }
 
        info->xbdev = dev;
-       err = negotiate_mq(info);
-       if (err) {
-               kfree(info);
-               return err;
-       }
 
        mutex_init(&info->mutex);
        info->vdevice = vdevice;
@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev)
 
        blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
-       err = negotiate_mq(info);
-       if (err)
-               return err;
-
        err = talk_to_blkback(dev, info);
        if (!err)
                blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
index b3b4ed9b68742bfbb8219af9c24c9d542d6f02ec..d2e5382821a43621310994ea1611e2ad27e798a8 100644 (file)
@@ -386,6 +386,7 @@ config ATMEL_PIT
 
 config ATMEL_ST
        bool "Atmel ST timer support" if COMPILE_TEST
+       depends on HAS_IOMEM
        select TIMER_OF
        select MFD_SYSCON
        help
index f652a0e0f5a2a46d78bece1d41cd0895dfc5d593..3548caa9e9339f17208a62066ad055c842491e3b 100644 (file)
@@ -163,6 +163,7 @@ struct mv_xor_v2_device {
        void __iomem *dma_base;
        void __iomem *glob_base;
        struct clk *clk;
+       struct clk *reg_clk;
        struct tasklet_struct irq_tasklet;
        struct list_head free_sw_desc;
        struct dma_device dmadev;
@@ -749,13 +750,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
+       if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
+               if (!IS_ERR(xor_dev->reg_clk)) {
+                       ret = clk_prepare_enable(xor_dev->reg_clk);
+                       if (ret)
+                               return ret;
+               } else {
+                       return PTR_ERR(xor_dev->reg_clk);
+               }
+       }
+
        xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
+       if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto disable_reg_clk;
+       }
        if (!IS_ERR(xor_dev->clk)) {
                ret = clk_prepare_enable(xor_dev->clk);
                if (ret)
-                       return ret;
+                       goto disable_reg_clk;
        }
 
        ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
@@ -866,8 +880,9 @@ free_hw_desq:
 free_msi_irqs:
        platform_msi_domain_free_irqs(&pdev->dev);
 disable_clk:
-       if (!IS_ERR(xor_dev->clk))
-               clk_disable_unprepare(xor_dev->clk);
+       clk_disable_unprepare(xor_dev->clk);
+disable_reg_clk:
+       clk_disable_unprepare(xor_dev->reg_clk);
        return ret;
 }
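
(The reg_clk handling above is the usual "optional clock" pattern:
-ENOENT means the clock simply isn't described for this device and is
tolerated, while every other error -- including -EPROBE_DEFER -- is
propagated. The same logic reads more naturally unnested; a sketch with
illustrative variable names (newer kernels wrap this whole dance as
devm_clk_get_optional()):)

        clk = devm_clk_get(dev, "reg");
        if (IS_ERR(clk)) {
                if (PTR_ERR(clk) != -ENOENT)
                        return PTR_ERR(clk);    /* incl. probe deferral */
                clk = NULL;                     /* clock is optional */
        }
        if (clk) {
                ret = clk_prepare_enable(clk);
                if (ret)
                        return ret;
        }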
 
index e3ff162c03fc6a011cb0139a4ffbda2d2600fd32..d0cacdb0713eca47360e4f5ceedc8dc6428145bb 100644 (file)
@@ -917,7 +917,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
 
        rcar_dmac_chan_configure_desc(chan, desc);
 
-       max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
+       max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
 
        /*
         * Allocate and fill the transfer chunk descriptors. We own the only
index e76de57dd617d7e2c918c057dcc0ced6636be7b2..ebaea8b1594b7fc4919a71fdac7ed9ef8418df7b 100644 (file)
@@ -14,7 +14,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/init.h>
@@ -37,10 +36,9 @@ struct gpio_rcar_priv {
        struct platform_device *pdev;
        struct gpio_chip gpio_chip;
        struct irq_chip irq_chip;
-       struct clk *clk;
        unsigned int irq_parent;
+       atomic_t wakeup_path;
        bool has_both_edge_trigger;
-       bool needs_clk;
 };
 
 #define IOINTSEL 0x00  /* General IO/Interrupt Switching Register */
@@ -186,13 +184,10 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
                }
        }
 
-       if (!p->clk)
-               return 0;
-
        if (on)
-               clk_enable(p->clk);
+               atomic_inc(&p->wakeup_path);
        else
-               clk_disable(p->clk);
+               atomic_dec(&p->wakeup_path);
 
        return 0;
 }
@@ -330,17 +325,14 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
 
 struct gpio_rcar_info {
        bool has_both_edge_trigger;
-       bool needs_clk;
 };
 
 static const struct gpio_rcar_info gpio_rcar_info_gen1 = {
        .has_both_edge_trigger = false,
-       .needs_clk = false,
 };
 
 static const struct gpio_rcar_info gpio_rcar_info_gen2 = {
        .has_both_edge_trigger = true,
-       .needs_clk = true,
 };
 
 static const struct of_device_id gpio_rcar_of_table[] = {
@@ -403,7 +395,6 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
        ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
        *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
        p->has_both_edge_trigger = info->has_both_edge_trigger;
-       p->needs_clk = info->needs_clk;
 
        if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
                dev_warn(&p->pdev->dev,
@@ -440,16 +431,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, p);
 
-       p->clk = devm_clk_get(dev, NULL);
-       if (IS_ERR(p->clk)) {
-               if (p->needs_clk) {
-                       dev_err(dev, "unable to get clock\n");
-                       ret = PTR_ERR(p->clk);
-                       goto err0;
-               }
-               p->clk = NULL;
-       }
-
        pm_runtime_enable(dev);
 
        irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -531,11 +512,24 @@ static int gpio_rcar_remove(struct platform_device *pdev)
        return 0;
 }
 
+static int __maybe_unused gpio_rcar_suspend(struct device *dev)
+{
+       struct gpio_rcar_priv *p = dev_get_drvdata(dev);
+
+       if (atomic_read(&p->wakeup_path))
+               device_set_wakeup_path(dev);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, NULL);
+
 static struct platform_driver gpio_rcar_device_driver = {
        .probe          = gpio_rcar_probe,
        .remove         = gpio_rcar_remove,
        .driver         = {
                .name   = "gpio_rcar",
+               .pm     = &gpio_rcar_pm_ops,
                .of_match_table = of_match_ptr(gpio_rcar_of_table),
        }
 };
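
(SIMPLE_DEV_PM_OPS() above wires gpio_rcar_suspend into the system sleep
entry hooks, and device_set_wakeup_path() tells the PM core the device
sits in the wakeup path so its power domain stays powered across suspend.
Roughly what the macro produces -- approximate shape, not the literal
expansion:)

        static const struct dev_pm_ops gpio_rcar_pm_ops = {
                .suspend  = gpio_rcar_suspend,
                .freeze   = gpio_rcar_suspend,
                .poweroff = gpio_rcar_suspend,
                /* the resume-side hooks stay NULL here */
        };
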
index 57afad79f55d086b673cd6c61c688d532620a033..8fa850a070e0fe8ea7a67823008ff7e543d8d1dc 100644 (file)
@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
        size_t size;
        u32 retry = 3;
 
+       if (amdgpu_acpi_pcie_notify_device_ready(adev))
+               return -EINVAL;
+
        /* Get the device handle */
        handle = ACPI_HANDLE(&adev->pdev->dev);
        if (!handle)
index 13044e66dcaf4e6ab0b79fab23aef8db987170db..561d3312af3280a56bccaee8c04dc7eab9017710 100644 (file)
@@ -481,7 +481,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
        result = 0;
 
        if (*pos < 12) {
-               early[0] = amdgpu_ring_get_rptr(ring);
+               early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
                early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
                early[2] = ring->wptr & ring->buf_mask;
                for (i = *pos / 4; i < 3 && size; i++) {
index b2eae86bf906abe6ed0bd7e89f67f2d9acbfa809..5c26a8e806b93dfe4a0aced5838f20b0fa234c80 100644 (file)
@@ -299,12 +299,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 
        cancel_delayed_work_sync(&adev->uvd.idle_work);
 
-       for (i = 0; i < adev->uvd.max_handles; ++i)
-               if (atomic_read(&adev->uvd.handles[i]))
-                       break;
+       /* only valid for physical mode */
+       if (adev->asic_type < CHIP_POLARIS10) {
+               for (i = 0; i < adev->uvd.max_handles; ++i)
+                       if (atomic_read(&adev->uvd.handles[i]))
+                               break;
 
-       if (i == AMDGPU_MAX_UVD_HANDLES)
-               return 0;
+               if (i == adev->uvd.max_handles)
+                       return 0;
+       }
 
        size = amdgpu_bo_size(adev->uvd.vcpu_bo);
        ptr = adev->uvd.cpu_addr;
index bd2c4f727df661866733d3b21254769220f7101c..a712f4b285f6c2d636b5546c841ce528b114c884 100644 (file)
@@ -3093,7 +3093,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
                tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
                WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
                schedule_work(&adev->hotplug_work);
-               DRM_INFO("IH: HPD%d\n", hpd + 1);
+               DRM_DEBUG("IH: HPD%d\n", hpd + 1);
        }
 
        return 0;
index a066c5eda135a782d8e01f730cfc10172210c7c9..a4309698e76c898fc038b362ed1d124a8e3d6806 100644 (file)
@@ -4384,34 +4384,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
        case CHIP_KAVERI:
                adev->gfx.config.max_shader_engines = 1;
                adev->gfx.config.max_tile_pipes = 4;
-               if ((adev->pdev->device == 0x1304) ||
-                   (adev->pdev->device == 0x1305) ||
-                   (adev->pdev->device == 0x130C) ||
-                   (adev->pdev->device == 0x130F) ||
-                   (adev->pdev->device == 0x1310) ||
-                   (adev->pdev->device == 0x1311) ||
-                   (adev->pdev->device == 0x131C)) {
-                       adev->gfx.config.max_cu_per_sh = 8;
-                       adev->gfx.config.max_backends_per_se = 2;
-               } else if ((adev->pdev->device == 0x1309) ||
-                          (adev->pdev->device == 0x130A) ||
-                          (adev->pdev->device == 0x130D) ||
-                          (adev->pdev->device == 0x1313) ||
-                          (adev->pdev->device == 0x131D)) {
-                       adev->gfx.config.max_cu_per_sh = 6;
-                       adev->gfx.config.max_backends_per_se = 2;
-               } else if ((adev->pdev->device == 0x1306) ||
-                          (adev->pdev->device == 0x1307) ||
-                          (adev->pdev->device == 0x130B) ||
-                          (adev->pdev->device == 0x130E) ||
-                          (adev->pdev->device == 0x1315) ||
-                          (adev->pdev->device == 0x131B)) {
-                       adev->gfx.config.max_cu_per_sh = 4;
-                       adev->gfx.config.max_backends_per_se = 1;
-               } else {
-                       adev->gfx.config.max_cu_per_sh = 3;
-                       adev->gfx.config.max_backends_per_se = 1;
-               }
+               adev->gfx.config.max_cu_per_sh = 8;
+               adev->gfx.config.max_backends_per_se = 2;
                adev->gfx.config.max_sh_per_se = 1;
                adev->gfx.config.max_texture_channel_caches = 4;
                adev->gfx.config.max_gprs = 256;
index 543101d5a5edd053c83beea6de7db5adbf9844eb..2095173aaabf864345c0643170a8975ef08866b4 100644 (file)
@@ -31,6 +31,7 @@
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
 #include "atom.h"
+#include "amd_pcie.h"
 #include "amdgpu_powerplay.h"
 #include "sid.h"
 #include "si_ih.h"
@@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 {
        struct pci_dev *root = adev->pdev->bus->self;
        int bridge_pos, gpu_pos;
-       u32 speed_cntl, mask, current_data_rate;
-       int ret, i;
+       u32 speed_cntl, current_data_rate;
+       int i;
        u16 tmp16;
 
        if (pci_is_root_bus(adev->pdev->bus))
@@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
        if (adev->flags & AMD_IS_APU)
                return;
 
-       ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-       if (ret != 0)
-               return;
-
-       if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+       if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+                                       CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
                return;
 
        speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
        current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
                LC_CURRENT_DATA_RATE_SHIFT;
-       if (mask & DRM_PCIE_SPEED_80) {
+       if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
                if (current_data_rate == 2) {
                        DRM_INFO("PCIE gen 3 link speeds already enabled\n");
                        return;
                }
                DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
-       } else if (mask & DRM_PCIE_SPEED_50) {
+       } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
                if (current_data_rate == 1) {
                        DRM_INFO("PCIE gen 2 link speeds already enabled\n");
                        return;
@@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
        if (!gpu_pos)
                return;
 
-       if (mask & DRM_PCIE_SPEED_80) {
+       if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
                if (current_data_rate != 2) {
                        u16 bridge_cfg, gpu_cfg;
                        u16 bridge_cfg2, gpu_cfg2;
@@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 
        pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
        tmp16 &= ~0xf;
-       if (mask & DRM_PCIE_SPEED_80)
+       if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
                tmp16 |= 3;
-       else if (mask & DRM_PCIE_SPEED_50)
+       else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
                tmp16 |= 2;
        else
                tmp16 |= 1;
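
(The CAIL_PCIE_LINK_SPEED_SUPPORT_* values used throughout this hunk are
bit flags in adev->pm.pcie_gen_mask, so "highest supported gen" decisions
reduce to mask tests. An illustrative helper, not part of amd_pcie.h:)

        static int max_supported_pcie_gen(u32 mask)
        {
                if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
                        return 3;
                if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
                        return 2;
                return 1;
        }
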
index ce675a7f179a6a0f3b9f7b254589694541aea3cd..22f0b7ff3ac9731d05643b9c6af9ce7ef2ee3208 100644 (file)
@@ -26,6 +26,7 @@
 #include "amdgpu_pm.h"
 #include "amdgpu_dpm.h"
 #include "amdgpu_atombios.h"
+#include "amd_pcie.h"
 #include "sid.h"
 #include "r600_dpm.h"
 #include "si_dpm.h"
@@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
        }
 }
 
-static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
-                                              u32 sys_mask,
-                                              enum amdgpu_pcie_gen asic_gen,
-                                              enum amdgpu_pcie_gen default_gen)
-{
-       switch (asic_gen) {
-       case AMDGPU_PCIE_GEN1:
-               return AMDGPU_PCIE_GEN1;
-       case AMDGPU_PCIE_GEN2:
-               return AMDGPU_PCIE_GEN2;
-       case AMDGPU_PCIE_GEN3:
-               return AMDGPU_PCIE_GEN3;
-       default:
-               if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
-                       return AMDGPU_PCIE_GEN3;
-               else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
-                       return AMDGPU_PCIE_GEN2;
-               else
-                       return AMDGPU_PCIE_GEN1;
-       }
-       return AMDGPU_PCIE_GEN1;
-}
-
 static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
                            u32 *p, u32 *u)
 {
@@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
                                                              table->ACPIState.levels[0].vddc.index,
                                                              &table->ACPIState.levels[0].std_vddc);
                }
-               table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
-                                                                                   si_pi->sys_pcie_mask,
-                                                                                   si_pi->boot_pcie_gen,
-                                                                                   AMDGPU_PCIE_GEN1);
+               table->ACPIState.levels[0].gen2PCIE =
+                       (u8)amdgpu_get_pcie_gen_support(adev,
+                                                       si_pi->sys_pcie_mask,
+                                                       si_pi->boot_pcie_gen,
+                                                       AMDGPU_PCIE_GEN1);
 
                if (si_pi->vddc_phase_shed_control)
                        si_populate_phase_shedding_value(adev,
@@ -7168,10 +7147,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
        pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
        pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
        pl->flags = le32_to_cpu(clock_info->si.ulFlags);
-       pl->pcie_gen = r600_get_pcie_gen_support(adev,
-                                                si_pi->sys_pcie_mask,
-                                                si_pi->boot_pcie_gen,
-                                                clock_info->si.ucPCIEGen);
+       pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
+                                                  si_pi->sys_pcie_mask,
+                                                  si_pi->boot_pcie_gen,
+                                                  clock_info->si.ucPCIEGen);
 
        /* patch up vddc if necessary */
        ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7326,7 +7305,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
        struct si_power_info *si_pi;
        struct atom_clock_dividers dividers;
        int ret;
-       u32 mask;
 
        si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
        if (si_pi == NULL)
@@ -7336,11 +7314,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
        eg_pi = &ni_pi->eg;
        pi = &eg_pi->rv7xx;
 
-       ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-       if (ret)
-               si_pi->sys_pcie_mask = 0;
-       else
-               si_pi->sys_pcie_mask = mask;
+       si_pi->sys_pcie_mask =
+               (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+               CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
        si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
        si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
 
index 862835dc054e1ce66c76f114c5c9ccc2cf098d38..c345e645f1d72c763f027faaa36be21da75aedbc 100644 (file)
@@ -1037,6 +1037,10 @@ static void handle_hpd_rx_irq(void *param)
                        !is_mst_root_connector) {
                /* Downstream Port status changed. */
                if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+
+                       if (aconnector->fake_enable)
+                               aconnector->fake_enable = false;
+
                        amdgpu_dm_update_connector_after_detect(aconnector);
 
 
@@ -2012,30 +2016,32 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
        dst.width = stream->timing.h_addressable;
        dst.height = stream->timing.v_addressable;
 
-       rmx_type = dm_state->scaling;
-       if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
-               if (src.width * dst.height <
-                               src.height * dst.width) {
-                       /* height needs less upscaling/more downscaling */
-                       dst.width = src.width *
-                                       dst.height / src.height;
-               } else {
-                       /* width needs less upscaling/more downscaling */
-                       dst.height = src.height *
-                                       dst.width / src.width;
+       if (dm_state) {
+               rmx_type = dm_state->scaling;
+               if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+                       if (src.width * dst.height <
+                                       src.height * dst.width) {
+                               /* height needs less upscaling/more downscaling */
+                               dst.width = src.width *
+                                               dst.height / src.height;
+                       } else {
+                               /* width needs less upscaling/more downscaling */
+                               dst.height = src.height *
+                                               dst.width / src.width;
+                       }
+               } else if (rmx_type == RMX_CENTER) {
+                       dst = src;
                }
-       } else if (rmx_type == RMX_CENTER) {
-               dst = src;
-       }
 
-       dst.x = (stream->timing.h_addressable - dst.width) / 2;
-       dst.y = (stream->timing.v_addressable - dst.height) / 2;
+               dst.x = (stream->timing.h_addressable - dst.width) / 2;
+               dst.y = (stream->timing.v_addressable - dst.height) / 2;
 
-       if (dm_state->underscan_enable) {
-               dst.x += dm_state->underscan_hborder / 2;
-               dst.y += dm_state->underscan_vborder / 2;
-               dst.width -= dm_state->underscan_hborder;
-               dst.height -= dm_state->underscan_vborder;
+               if (dm_state->underscan_enable) {
+                       dst.x += dm_state->underscan_hborder / 2;
+                       dst.y += dm_state->underscan_vborder / 2;
+                       dst.width -= dm_state->underscan_hborder;
+                       dst.height -= dm_state->underscan_vborder;
+               }
        }
 
        stream->src = src;
@@ -2360,12 +2366,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        if (aconnector == NULL) {
                DRM_ERROR("aconnector is NULL!\n");
-               goto drm_connector_null;
-       }
-
-       if (dm_state == NULL) {
-               DRM_ERROR("dm_state is NULL!\n");
-               goto dm_state_null;
+               return stream;
        }
 
        drm_connector = &aconnector->base;
@@ -2377,18 +2378,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                 */
                if (aconnector->mst_port) {
                        dm_dp_mst_dc_sink_create(drm_connector);
-                       goto mst_dc_sink_create_done;
+                       return stream;
                }
 
                if (create_fake_sink(aconnector))
-                       goto stream_create_fail;
+                       return stream;
        }
 
        stream = dc_create_stream_for_sink(aconnector->dc_sink);
 
        if (stream == NULL) {
                DRM_ERROR("Failed to create stream for sink!\n");
-               goto stream_create_fail;
+               return stream;
        }
 
        list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
@@ -2414,9 +2415,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        } else {
                decide_crtc_timing_for_drm_display_mode(
                                &mode, preferred_mode,
-                               dm_state->scaling != RMX_OFF);
+                               dm_state ? (dm_state->scaling != RMX_OFF) : false);
        }
 
+       if (!dm_state)
+               drm_mode_set_crtcinfo(&mode, 0);
+
        fill_stream_properties_from_drm_display_mode(stream,
                        &mode, &aconnector->base);
        update_stream_scaling_settings(&mode, dm_state, stream);
@@ -2426,10 +2430,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                drm_connector,
                aconnector->dc_sink);
 
-stream_create_fail:
-dm_state_null:
-drm_connector_null:
-mst_dc_sink_create_done:
+       update_stream_signal(stream);
+
        return stream;
 }
 
@@ -2497,6 +2499,27 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
        return &state->base;
 }
 
+
+static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+{
+       enum dc_irq_source irq_source;
+       struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+
+       irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+       return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+}
+
+static int dm_enable_vblank(struct drm_crtc *crtc)
+{
+       return dm_set_vblank(crtc, true);
+}
+
+static void dm_disable_vblank(struct drm_crtc *crtc)
+{
+       dm_set_vblank(crtc, false);
+}
+
 /* Implemented only the options currently available for the driver */
 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
        .reset = dm_crtc_reset_state,
@@ -2506,6 +2529,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
        .page_flip = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = dm_crtc_duplicate_state,
        .atomic_destroy_state = dm_crtc_destroy_state,
+       .enable_vblank = dm_enable_vblank,
+       .disable_vblank = dm_disable_vblank,
 };
 
 static enum drm_connector_status
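
(With enable_vblank/disable_vblank wired into the CRTC funcs above, the
DRM core can switch the display interrupt on and off around its vblank
reference counting. Typical consumer-side shape -- sketch:)

        if (drm_crtc_vblank_get(crtc) == 0) {   /* may call ->enable_vblank() */
                /* ... wait for or count vblank events ... */
                drm_crtc_vblank_put(crtc);      /* may call ->disable_vblank() */
        }
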
@@ -2800,7 +2825,7 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
                goto fail;
        }
 
-       stream = dc_create_stream_for_sink(dc_sink);
+       stream = create_stream_for_sink(aconnector, mode, NULL);
        if (stream == NULL) {
                DRM_ERROR("Failed to create stream for sink!\n");
                goto fail;
@@ -3060,6 +3085,9 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
        if (!dm_plane_state->dc_state)
                return 0;
 
+       if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
+               return -EINVAL;
+
        if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
                return 0;
 
@@ -4632,8 +4660,6 @@ static int dm_update_planes_state(struct dc *dc,
        bool pflip_needed  = !state->allow_modeset;
        int ret = 0;
 
-       if (pflip_needed)
-               return ret;
 
        /* Add new planes */
        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
@@ -4648,6 +4674,8 @@ static int dm_update_planes_state(struct dc *dc,
 
                /* Remove any changed/removed planes */
                if (!enable) {
+                       if (pflip_needed)
+                               continue;
 
                        if (!old_plane_crtc)
                                continue;
@@ -4679,6 +4707,7 @@ static int dm_update_planes_state(struct dc *dc,
                        *lock_and_validation_needed = true;
 
                } else { /* Add new planes */
+                       struct dc_plane_state *dc_new_plane_state;
 
                        if (drm_atomic_plane_disabling(plane->state, new_plane_state))
                                continue;
@@ -4692,38 +4721,50 @@ static int dm_update_planes_state(struct dc *dc,
                        if (!dm_new_crtc_state->stream)
                                continue;
 
+                       if (pflip_needed)
+                               continue;
 
                        WARN_ON(dm_new_plane_state->dc_state);
 
-                       dm_new_plane_state->dc_state = dc_create_plane_state(dc);
-
-                       DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
-                                       plane->base.id, new_plane_crtc->base.id);
-
-                       if (!dm_new_plane_state->dc_state) {
+                       dc_new_plane_state = dc_create_plane_state(dc);
+                       if (!dc_new_plane_state) {
                                ret = -EINVAL;
                                return ret;
                        }
 
+                       DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
+                                       plane->base.id, new_plane_crtc->base.id);
+
                        ret = fill_plane_attributes(
                                new_plane_crtc->dev->dev_private,
-                               dm_new_plane_state->dc_state,
+                               dc_new_plane_state,
                                new_plane_state,
                                new_crtc_state);
-                       if (ret)
+                       if (ret) {
+                               dc_plane_state_release(dc_new_plane_state);
                                return ret;
+                       }
 
-
+                       /*
+                        * Any atomic check errors that occur after this will
+                        * not need a release. The plane state will be attached
+                        * to the stream, and therefore part of the atomic
+                        * state. It'll be released when the atomic state is
+                        * cleaned.
+                        */
                        if (!dc_add_plane_to_context(
                                        dc,
                                        dm_new_crtc_state->stream,
-                                       dm_new_plane_state->dc_state,
+                                       dc_new_plane_state,
                                        dm_state->context)) {
 
+                               dc_plane_state_release(dc_new_plane_state);
                                ret = -EINVAL;
                                return ret;
                        }
 
+                       dm_new_plane_state->dc_state = dc_new_plane_state;
+
                        /* Tell DC to do a full surface update every time there
                         * is a plane change. Inefficient, but works for now.
                         */
@@ -4737,6 +4778,30 @@ static int dm_update_planes_state(struct dc *dc,
        return ret;
 }
 
+static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
+                                         struct drm_crtc *crtc)
+{
+       struct drm_plane *plane;
+       struct drm_crtc_state *crtc_state;
+
+       WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+
+       drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+               struct drm_plane_state *plane_state =
+                       drm_atomic_get_plane_state(state, plane);
+
+               if (IS_ERR(plane_state))
+                       return -EDEADLK;
+
+               crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+               if (crtc->primary == plane && crtc_state->active) {
+                       if (!plane_state->fb)
+                               return -EINVAL;
+               }
+       }
+       return 0;
+}
+
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state)
 {
@@ -4760,6 +4825,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                goto fail;
 
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+               ret = dm_atomic_check_plane_state_fb(state, crtc);
+               if (ret)
+                       goto fail;
+
                if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
                    !new_crtc_state->color_mgmt_changed)
                        continue;
index 1874b6cee6afa1dd81aeb0177c605c69c1b68e65..422055080df4a1b59b5fa4c66a12d6b5bd90f5ac 100644 (file)
@@ -683,10 +683,8 @@ static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
 
 void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
 {
-       if (adev->mode_info.num_crtc > 0)
-               adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
-       else
-               adev->crtc_irq.num_types = 0;
+
+       adev->crtc_irq.num_types = adev->mode_info.num_crtc;
        adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
 
        adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
index f3d87f418d2efffd349358fd08b3b52e2a3cca5d..93421dad21bd3fdeba7f7d19946bf4c0005a4921 100644 (file)
@@ -189,6 +189,12 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 
+       /*
+        * TODO: figure out why ddc.algo is NULL while the MST port exists
+        */
+       if (!aconnector->port || !aconnector->port->aux.ddc.algo)
+               return;
+
        edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
 
        if (!edid) {
index 35e84ed031de08f5edfadbfb71e550291ae8ebef..12868c769606b8de546b193412d5c28052353dd5 100644 (file)
@@ -1358,13 +1358,13 @@ enum dc_irq_source dc_interrupt_to_irq_source(
        return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
 }
 
-void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
 {
 
        if (dc == NULL)
-               return;
+               return false;
 
-       dal_irq_service_set(dc->res_pool->irqs, src, enable);
+       return dal_irq_service_set(dc->res_pool->irqs, src, enable);
 }
 
 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
index a3742827157361ac0c3c57733eb845cd34ea4720..be5546181fa84d0dbbf91935878cae6e10129154 100644 (file)
@@ -1749,8 +1749,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
                        link->link_enc,
                        pipe_ctx->clock_source->id,
                        display_color_depth,
-                       pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
-                       pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
+                       pipe_ctx->stream->signal,
                        stream->phy_pix_clk);
 
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
index 95b8dd0e53c6951a1e28e988fd4cb13dcaf04f0d..4d07ffebfd3112f937d0082b4482df4e4330451d 100644 (file)
@@ -1360,9 +1360,6 @@ bool dc_is_stream_scaling_unchanged(
        return true;
 }
 
-/* Maximum TMDS single link pixel clock 165MHz */
-#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
-
 static void update_stream_engine_usage(
                struct resource_context *res_ctx,
                const struct resource_pool *pool,
index 539c3e0a62922b7c7b774a1d8a115d6fcc4ab2b4..cd5819789d76aa7b1106dc8991685a17b1e4c6af 100644 (file)
@@ -33,8 +33,7 @@
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
-#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
-static void update_stream_signal(struct dc_stream_state *stream)
+void update_stream_signal(struct dc_stream_state *stream)
 {
 
        struct dc_sink *dc_sink = stream->sink;
@@ -45,8 +44,9 @@ static void update_stream_signal(struct dc_stream_state *stream)
                stream->signal = dc_sink->sink_signal;
 
        if (dc_is_dvi_signal(stream->signal)) {
-               if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
-                       stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+               if (stream->ctx->dc->caps.dual_link_dvi &&
+                   stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK &&
+                   stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
                        stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
                else
                        stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
@@ -193,6 +193,7 @@ bool dc_stream_set_cursor_attributes(
 
        core_dc = stream->ctx->dc;
        res_ctx = &core_dc->current_state->res_ctx;
+       stream->cursor_attributes = *attributes;
 
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -204,34 +205,8 @@ bool dc_stream_set_cursor_attributes(
                        continue;
 
 
-               if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL)
-                       pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
-                                               pipe_ctx->plane_res.ipp, attributes);
-
-               if (pipe_ctx->plane_res.hubp != NULL &&
-                               pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
-                       pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
-                                       pipe_ctx->plane_res.hubp, attributes);
-
-               if (pipe_ctx->plane_res.mi != NULL &&
-                               pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
-                       pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
-                                       pipe_ctx->plane_res.mi, attributes);
-
-
-               if (pipe_ctx->plane_res.xfm != NULL &&
-                               pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
-                       pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
-                               pipe_ctx->plane_res.xfm, attributes);
-
-               if (pipe_ctx->plane_res.dpp != NULL &&
-                               pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
-                       pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
-                               pipe_ctx->plane_res.dpp, attributes->color_format);
+               core_dc->hwss.set_cursor_attribute(pipe_ctx);
        }
-
-       stream->cursor_attributes = *attributes;
-
        return true;
 }
 
@@ -255,21 +230,10 @@ bool dc_stream_set_cursor_position(
 
        core_dc = stream->ctx->dc;
        res_ctx = &core_dc->current_state->res_ctx;
+       stream->cursor_position = *position;
 
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-               struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
-               struct mem_input *mi = pipe_ctx->plane_res.mi;
-               struct hubp *hubp = pipe_ctx->plane_res.hubp;
-               struct dpp *dpp = pipe_ctx->plane_res.dpp;
-               struct dc_cursor_position pos_cpy = *position;
-               struct dc_cursor_mi_param param = {
-                       .pixel_clk_khz = stream->timing.pix_clk_khz,
-                       .ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
-                       .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
-                       .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
-                       .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
-               };
 
                if (pipe_ctx->stream != stream ||
                                (!pipe_ctx->plane_res.mi  && !pipe_ctx->plane_res.hubp) ||
@@ -278,33 +242,9 @@ bool dc_stream_set_cursor_position(
                                !pipe_ctx->plane_res.ipp)
                        continue;
 
-               if (pipe_ctx->plane_state->address.type
-                               == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
-                       pos_cpy.enable = false;
-
-               if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
-                       pos_cpy.enable = false;
-
-
-               if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
-                       ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
-
-               if (mi != NULL && mi->funcs->set_cursor_position != NULL)
-                       mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
-
-               if (!hubp)
-                       continue;
-
-               if (hubp->funcs->set_cursor_position != NULL)
-                       hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
-
-               if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
-                       dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
-
+               core_dc->hwss.set_cursor_position(pipe_ctx);
        }
 
-       stream->cursor_position = *position;
-
        return true;
 }
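
(After this refactor the per-IP cursor programming that used to be
open-coded here sits behind two hw-sequencer hooks; the stream merely
caches the last requested state and the hook runs once per matching pipe.
The relevant entries in the hook table have this shape -- a sketch, see
dc/inc/hw_sequencer.h for the real struct:)

        struct hw_sequencer_funcs {
                /* ... other hooks ... */
                void (*set_cursor_position)(struct pipe_ctx *pipe_ctx);
                void (*set_cursor_attribute)(struct pipe_ctx *pipe_ctx);
        };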
 
index e2e3c9df79ea0f976dbbc6f6bf889169ccce0484..d6d56611604eb417e47d009e1f0119305cb2ab91 100644 (file)
@@ -62,6 +62,7 @@ struct dc_caps {
        bool dcc_const_color;
        bool dynamic_audio;
        bool is_apu;
+       bool dual_link_dvi;
 };
 
 struct dc_dcc_surface_param {
@@ -672,7 +673,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
                struct dc *dc,
                uint32_t src_id,
                uint32_t ext_id);
-void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
 enum dc_irq_source dc_get_hpd_irq_source_at_index(
                struct dc *dc, uint32_t link_index);
index 01c60f11b2bdeec208f5df9e5cfd2092a614eac5..456e4d29eaddcc4b19f0b28290efdeefd8a2f3e8 100644 (file)
@@ -237,6 +237,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
  */
 struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
 
+void update_stream_signal(struct dc_stream_state *stream);
+
 void dc_stream_retain(struct dc_stream_state *dc_stream);
 void dc_stream_release(struct dc_stream_state *dc_stream);
 
index b73db9e784375618f87422f143cf36d00e1ce552..a993279a8f2d85181cf4193d99b0916a977339ae 100644 (file)
        SR(D2VGA_CONTROL), \
        SR(D3VGA_CONTROL), \
        SR(D4VGA_CONTROL), \
+       SR(VGA_TEST_CONTROL), \
        SR(DC_IP_REQUEST_CNTL), \
        BL_REG_LIST()
 
@@ -337,6 +338,7 @@ struct dce_hwseq_registers {
        uint32_t D2VGA_CONTROL;
        uint32_t D3VGA_CONTROL;
        uint32_t D4VGA_CONTROL;
+       uint32_t VGA_TEST_CONTROL;
        /* MMHUB registers. read only. temporary hack */
        uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
        uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
@@ -493,6 +495,9 @@ struct dce_hwseq_registers {
        HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
        HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
        HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+       HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
+       HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
+       HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
        HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
        HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
 
@@ -583,7 +588,10 @@ struct dce_hwseq_registers {
        type DCFCLK_GATE_DIS; \
        type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
        type DENTIST_DPPCLK_WDIVIDER; \
-       type DENTIST_DISPCLK_WDIVIDER;
+       type DENTIST_DISPCLK_WDIVIDER; \
+       type VGA_TEST_ENABLE; \
+       type VGA_TEST_RENDER_START; \
+       type D1VGA_MODE_ENABLE;
 
 struct dce_hwseq_shift {
        HWSEQ_REG_FIELD_LIST(uint8_t)
index a266e3f5e75fd7ae82a46f7d3e09f900660c72cc..e4741f1a2b01b2f4d31ede97dcc8be7ad71e2b64 100644 (file)
 #define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
 #define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40
 
-/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
-#define TMDS_MIN_PIXEL_CLOCK 25000
-/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
-#define TMDS_MAX_PIXEL_CLOCK 165000
-/* For current ASICs pixel clock - 600MHz */
-#define MAX_ENCODER_CLOCK 600000
-
 enum {
        DP_MST_UPDATE_MAX_RETRY = 50
 };
@@ -683,6 +676,7 @@ void dce110_link_encoder_construct(
 {
        struct bp_encoder_cap_info bp_cap_info = {0};
        const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+       enum bp_result result = BP_RESULT_OK;
 
        enc110->base.funcs = &dce110_lnk_enc_funcs;
        enc110->base.ctx = init_data->ctx;
@@ -757,15 +751,24 @@ void dce110_link_encoder_construct(
                enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
        }
 
+       /* default to one to mirror Windows behavior */
+       enc110->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+       result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
+                                               enc110->base.id, &bp_cap_info);
+
        /* Override features with DCE-specific values */
-       if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
-                       enc110->base.ctx->dc_bios, enc110->base.id,
-                       &bp_cap_info)) {
+       if (BP_RESULT_OK == result) {
                enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
                                bp_cap_info.DP_HBR2_EN;
                enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
                                bp_cap_info.DP_HBR3_EN;
                enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+       } else {
+               dm_logger_write(enc110->base.ctx->logger, LOG_WARNING,
+                               "%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+                               __func__,
+                               result);
        }
 }
 
@@ -904,8 +907,7 @@ void dce110_link_encoder_enable_tmds_output(
        struct link_encoder *enc,
        enum clock_source_id clock_source,
        enum dc_color_depth color_depth,
-       bool hdmi,
-       bool dual_link,
+       enum signal_type signal,
        uint32_t pixel_clock)
 {
        struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
@@ -919,16 +921,12 @@ void dce110_link_encoder_enable_tmds_output(
        cntl.engine_id = enc->preferred_engine;
        cntl.transmitter = enc110->base.transmitter;
        cntl.pll_id = clock_source;
-       if (hdmi) {
-               cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
-               cntl.lanes_number = 4;
-       } else if (dual_link) {
-               cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+       cntl.signal = signal;
+       if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)
                cntl.lanes_number = 8;
-       } else {
-               cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+       else
                cntl.lanes_number = 4;
-       }
+
        cntl.hpd_sel = enc110->base.hpd_source;
 
        cntl.pixel_clock = pixel_clock;
index 8ca9afe47a2b268af39b502cc8547f5a0ba7e3cc..0ec3433d34b622cdc9ebd6251cbf75a0c3261002 100644 (file)
@@ -210,8 +210,7 @@ void dce110_link_encoder_enable_tmds_output(
        struct link_encoder *enc,
        enum clock_source_id clock_source,
        enum dc_color_depth color_depth,
-       bool hdmi,
-       bool dual_link,
+       enum signal_type signal,
        uint32_t pixel_clock);
 
 /* enables DP PHY output */
index 3ea43e2a9450ce562cd01b8a5c18a9ee060d2a62..442dd2d93618d57eeff5c21ae576750f86c50416 100644 (file)
@@ -852,6 +852,7 @@ static bool construct(
        dc->caps.max_downscale_ratio = 200;
        dc->caps.i2c_speed_in_khz = 40;
        dc->caps.max_cursor_size = 128;
+       dc->caps.dual_link_dvi = true;
 
        for (i = 0; i < pool->base.pipe_count; i++) {
                pool->base.timing_generators[i] =
index 86cdd7b4811fb7f1195ce7d8e73db9e8e42c6c3a..6f382a3ac90f19a3c210ef33c97dca43ac55df42 100644 (file)
@@ -688,15 +688,22 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
        struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
        struct dc_link *link = pipe_ctx->stream->sink->link;
 
-       /* 1. update AVI info frame (HDMI, DP)
-        * we always need to update info frame
-       */
+
        uint32_t active_total_with_borders;
        uint32_t early_control = 0;
        struct timing_generator *tg = pipe_ctx->stream_res.tg;
 
-       /* TODOFPGA may change to hwss.update_info_frame */
+       /* For MST, multiple streams can share a single link.
+        * Connect the DIG back end to the front end in enable_stream and
+        * disconnect them in disable_stream.
+        * This keeps the stream/link separation logically clean. */
+       link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+                                                   pipe_ctx->stream_res.stream_enc->id, true);
+
+       /* update AVI info frame (HDMI, DP) */
+       /* TODO: FPGA may change to hwss.update_info_frame */
        dce110_update_info_frame(pipe_ctx);
+
        /* enable early control to avoid corruption on DP monitor*/
        active_total_with_borders =
                        timing->h_addressable
@@ -717,12 +724,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
                        pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
        }
 
-       /* For MST, there are multiply stream go to only one link.
-        * connect DIG back_end to front_end while enable_stream and
-        * disconnect them during disable_stream
-        * BY this, it is logic clean to separate stream and link */
-       link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
-                                                   pipe_ctx->stream_res.stream_enc->id, true);
+
+
 
 }
 
@@ -1690,9 +1693,13 @@ static void apply_min_clocks(
  *  Check if FBC can be enabled
  */
 static bool should_enable_fbc(struct dc *dc,
-                             struct dc_state *context)
+                             struct dc_state *context,
+                             uint32_t *pipe_idx)
 {
-       struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
+       uint32_t i;
+       struct pipe_ctx *pipe_ctx = NULL;
+       struct resource_context *res_ctx = &context->res_ctx;
+
 
        ASSERT(dc->fbc_compressor);
 
@@ -1704,6 +1711,14 @@ static bool should_enable_fbc(struct dc *dc,
        if (context->stream_count != 1)
                return false;
 
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               if (res_ctx->pipe_ctx[i].stream) {
+                       pipe_ctx = &res_ctx->pipe_ctx[i];
+                       *pipe_idx = i;
+                       break;
+               }
+       }
+
        /* Only supports eDP */
        if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
                return false;
@@ -1729,11 +1744,14 @@ static bool should_enable_fbc(struct dc *dc,
 static void enable_fbc(struct dc *dc,
                       struct dc_state *context)
 {
-       if (should_enable_fbc(dc, context)) {
+       uint32_t pipe_idx = 0;
+
+       if (should_enable_fbc(dc, context, &pipe_idx)) {
                /* Program GRPH COMPRESSED ADDRESS and PITCH */
                struct compr_addr_and_pitch_params params = {0, 0, 0};
                struct compressor *compr = dc->fbc_compressor;
-               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
 
                params.source_view_width = pipe_ctx->stream->timing.h_addressable;
                params.source_view_height = pipe_ctx->stream->timing.v_addressable;
@@ -2915,6 +2933,49 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
        }
 }
 
+void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
+{
+       struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
+       struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
+       struct mem_input *mi = pipe_ctx->plane_res.mi;
+       struct dc_cursor_mi_param param = {
+               .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+               .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+               .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+               .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+               .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+       };
+
+       if (pipe_ctx->plane_state->address.type
+                       == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+               pos_cpy.enable = false;
+
+       if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+               pos_cpy.enable = false;
+
+       if (ipp->funcs->ipp_cursor_set_position)
+               ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
+       if (mi->funcs->set_cursor_position)
+               mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
+}
+
+void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+{
+       struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
+
+       if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
+               pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
+                               pipe_ctx->plane_res.ipp, attributes);
+
+       if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
+               pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
+                               pipe_ctx->plane_res.mi, attributes);
+
+       if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
+               pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
+                               pipe_ctx->plane_res.xfm, attributes);
+}
+
 static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
 
 static void optimize_shared_resources(struct dc *dc) {}
@@ -2957,6 +3018,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .edp_backlight_control = hwss_edp_backlight_control,
        .edp_power_control = hwss_edp_power_control,
        .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+       .set_cursor_position = dce110_set_cursor_position,
+       .set_cursor_attribute = dce110_set_cursor_attribute
 };
 
 void dce110_hw_sequencer_construct(struct dc *dc)
index 7c4779578fb76528caef31a1a531f73214c86059..00f18c485e1e7b5a7d38e7bfb50df0d92d207a71 100644 (file)
@@ -846,6 +846,16 @@ static bool dce110_validate_bandwidth(
        return result;
 }
 
+enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
+                                    struct dc_caps *caps)
+{
+       if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
+           ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
+               return DC_FAIL_SURFACE_VALIDATE;
+
+       return DC_OK;
+}
+
 static bool dce110_validate_surface_sets(
                struct dc_state *context)
 {
@@ -869,6 +879,13 @@ static bool dce110_validate_surface_sets(
                                        plane->src_rect.height > 1080))
                                        return false;
 
+                               /* We don't have the logic to support an
+                                * underlay-only configuration yet, so block
+                                * the use case where an NV12 plane is the
+                                * top layer.
+                                */
+                               if (j == 0)
+                                       return false;
+
                                /* irrespective of plane format,
                                 * stream should be RGB encoded
                                 */
@@ -1021,6 +1038,7 @@ static const struct resource_funcs dce110_res_pool_funcs = {
        .link_enc_create = dce110_link_encoder_create,
        .validate_guaranteed = dce110_validate_guaranteed,
        .validate_bandwidth = dce110_validate_bandwidth,
+       .validate_plane = dce110_validate_plane,
        .acquire_idle_pipe_for_layer = dce110_acquire_underlay,
        .add_stream_to_ctx = dce110_add_stream_to_ctx,
        .validate_global = dce110_validate_global
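
Worked numbers for the 200% cap enforced by dce110_validate_plane() above (illustrative values, not from the patch): downscaling a 1920x1080 source to 960x540 is exactly 2x and passes, since 960 * 2 == 1920 and 540 * 2 == 1080, while a 900-wide destination fails, since 900 * 2 < 1920, returning DC_FAIL_SURFACE_VALIDATE. This matches the dc->caps.max_downscale_ratio = 200 set in the construct() functions in this series.

    /* illustration of the bound, assuming the struct layout used above */
    struct dc_plane_state ps = {
            .src_rect = { .width = 1920, .height = 1080 },
            .dst_rect = { .width =  960, .height =  540 },  /* 2x: DC_OK */
    };
    /* .dst_rect.width = 900 would trip (900 * 2) < 1920 and yield
     * DC_FAIL_SURFACE_VALIDATE */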
index 663e0a047a4becc5bae59b672ad951df24cedcfc..98d9cd0109e1f3cf5dbfa7b9111353c163aa4ad4 100644 (file)
@@ -1103,6 +1103,8 @@ static bool construct(
        dc->caps.max_downscale_ratio = 200;
        dc->caps.i2c_speed_in_khz = 100;
        dc->caps.max_cursor_size = 128;
+       dc->caps.dual_link_dvi = true;
+
 
        /*************************************************
         *  Create resources                             *
index 57cd67359567b5bd80a61bf2df3697a765d92dd5..5aab01db28ee78e1cd1c83fef2b240a3911cca52 100644 (file)
@@ -835,6 +835,8 @@ static bool construct(
        dc->caps.max_downscale_ratio = 200;
        dc->caps.i2c_speed_in_khz = 100;
        dc->caps.max_cursor_size = 128;
+       dc->caps.dual_link_dvi = true;
+
        dc->debug = debug_defaults;
 
        /*************************************************
index 8f2bd56f3461d665e8dd7c15e6280b32b0f135c1..25d7eb1567aeb10bc61d23df101f6b3303fe107c 100644 (file)
@@ -793,6 +793,7 @@ static bool dce80_construct(
        dc->caps.max_downscale_ratio = 200;
        dc->caps.i2c_speed_in_khz = 40;
        dc->caps.max_cursor_size = 128;
+       dc->caps.dual_link_dvi = true;
 
        /*************************************************
         *  Create resources                             *
index 82572863acab747759e3d8c2787a1e2cffc1f8df..072e4485e85e8f4c473e96dedb4b6665f3941206 100644 (file)
@@ -238,10 +238,24 @@ static void enable_power_gating_plane(
 static void disable_vga(
        struct dce_hwseq *hws)
 {
+       unsigned int in_vga_mode = 0;
+
+       REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga_mode);
+
+       if (in_vga_mode == 0)
+               return;
+
        REG_WRITE(D1VGA_CONTROL, 0);
-       REG_WRITE(D2VGA_CONTROL, 0);
-       REG_WRITE(D3VGA_CONTROL, 0);
-       REG_WRITE(D4VGA_CONTROL, 0);
+
+       /* HW Engineer's Notes:
+        *  During the switch from VGA to extended mode, setting VGA_TEST_ENABLE
+        *  and then hitting VGA_TEST_RENDER_START gets the DCHUBP timing
+        *  updated correctly.
+        *
+        *  The vBIOS will then poll for VGA_TEST_RENDER_DONE and unset
+        *  VGA_TEST_ENABLE, leaving the state as it was before.
+        */
+       REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
+       REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
 }
 
 static void dpp_pg_control(
@@ -1761,6 +1775,11 @@ static void update_dchubp_dpp(
                        &pipe_ctx->plane_res.scl_data.viewport_c);
        }
 
+       if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+               dc->hwss.set_cursor_position(pipe_ctx);
+               dc->hwss.set_cursor_attribute(pipe_ctx);
+       }
+
        if (plane_state->update_flags.bits.full_update) {
                /*gamut remap*/
                program_gamut_remap(pipe_ctx);
@@ -2296,7 +2315,7 @@ static bool dcn10_dummy_display_power_gating(
        return true;
 }
 
-void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
 {
        struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        struct timing_generator *tg = pipe_ctx->stream_res.tg;
@@ -2316,12 +2335,46 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
        }
 }
 
-void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
+static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
 {
        if (hws->ctx->dc->res_pool->hubbub != NULL)
                hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
 }
 
+static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+{
+       struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
+       struct hubp *hubp = pipe_ctx->plane_res.hubp;
+       struct dpp *dpp = pipe_ctx->plane_res.dpp;
+       struct dc_cursor_mi_param param = {
+               .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+               .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+               .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+               .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+               .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+       };
+
+       if (pipe_ctx->plane_state->address.type
+                       == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+               pos_cpy.enable = false;
+
+       if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+               pos_cpy.enable = false;
+
+       hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+       dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
+}
+
+static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+{
+       struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
+
+       pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
+                       pipe_ctx->plane_res.hubp, attributes);
+       pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
+               pipe_ctx->plane_res.dpp, attributes->color_format);
+}
+
 static const struct hw_sequencer_funcs dcn10_funcs = {
        .program_gamut_remap = program_gamut_remap,
        .program_csc_matrix = program_csc_matrix,
@@ -2362,6 +2415,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .edp_backlight_control = hwss_edp_backlight_control,
        .edp_power_control = hwss_edp_power_control,
        .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+       .set_cursor_position = dcn10_set_cursor_position,
+       .set_cursor_attribute = dcn10_set_cursor_attribute
 };
 
 
index 0fd329deacd8a047c6302e092075972698191fff..54d8a13861423483dfc01050a78edc1d190424da 100644 (file)
@@ -123,8 +123,7 @@ struct link_encoder_funcs {
        void (*enable_tmds_output)(struct link_encoder *enc,
                enum clock_source_id clock_source,
                enum dc_color_depth color_depth,
-               bool hdmi,
-               bool dual_link,
+               enum signal_type signal,
                uint32_t pixel_clock);
        void (*enable_dp_output)(struct link_encoder *enc,
                const struct dc_link_settings *link_settings,
index 4c0aa56f7bae255e42b18db495efba17b9868971..379c6ecd271a5919b87640dff21edb96caf36933 100644 (file)
@@ -198,6 +198,9 @@ struct hw_sequencer_funcs {
                        bool enable);
        void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
 
+       void (*set_cursor_position)(struct pipe_ctx *pipe);
+       void (*set_cursor_attribute)(struct pipe_ctx *pipe);
+
 };
 
 void color_space_to_black_color(
index f7e40b292dfbbbdc5b81dfd8a444a63f42542a5b..d3e1923b01a8d0eaa076b6617ba659c459c4810e 100644 (file)
@@ -217,7 +217,7 @@ bool dce110_vblank_set(
                        core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
 
        if (enable) {
-               if (!tg->funcs->arm_vert_intr(tg, 2)) {
+               if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
                        DC_ERROR("Failed to get VBLANK!\n");
                        return false;
                }
index 57a54a7b89e5f73d5f67dcb8177368ccc0f8acd0..1c079ba37c3006cd7f51c49ea74599575ed46e05 100644 (file)
@@ -42,8 +42,7 @@ static void virtual_link_encoder_enable_tmds_output(
        struct link_encoder *enc,
        enum clock_source_id clock_source,
        enum dc_color_depth color_depth,
-       bool hdmi,
-       bool dual_link,
+       enum signal_type signal,
        uint32_t pixel_clock) {}
 
 static void virtual_link_encoder_enable_dp_output(
index 7a9b43f84a31636bbb958597a3b66cbabf792810..36bbad5942674bcda382624f2d13b2fe3f6b304e 100644 (file)
@@ -419,11 +419,6 @@ struct bios_event_info {
        bool backlight_changed;
 };
 
-enum {
-       HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
-       TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
-};
-
 /*
  * DFS-bypass flag
  */
index b5ebde642207431080f62db9adbbf18491f6a33b..199c5db67cbca1e0ed7aeba6a721c92d4ba25468 100644 (file)
 #ifndef __DC_SIGNAL_TYPES_H__
 #define __DC_SIGNAL_TYPES_H__
 
+/* Minimum pixel clock, in kHz. For a TMDS signal this is 25.00 MHz. */
+#define TMDS_MIN_PIXEL_CLOCK 25000
+/* Maximum pixel clock, in kHz. For a TMDS signal this is 165.00 MHz. */
+#define TMDS_MAX_PIXEL_CLOCK 165000
+
 enum signal_type {
        SIGNAL_TYPE_NONE                = 0L,           /* no signal */
        SIGNAL_TYPE_DVI_SINGLE_LINK     = (1 << 0),
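
A hedged usage sketch for the new bounds (the helper name is illustrative): single-link TMDS is only valid inside this window, which is what the dual-link DVI plumbing above exists to work around.

    static bool tmds_pixel_clock_in_range(uint32_t pix_clk_khz)
    {
            return pix_clk_khz >= TMDS_MIN_PIXEL_CLOCK &&
                   pix_clk_khz <= TMDS_MAX_PIXEL_CLOCK;
    }

    /* e.g. 148500 kHz (1080p60) fits; 297000 kHz (the old
     * HDMI_PIXEL_CLOCK_IN_KHZ_297 constant removed above) does not. */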
index dd89abd2263d20334403ae4c4b1ef61af5482135..66ee9d888d1626dc79284f8fb07fcd65c489b13a 100644 (file)
@@ -3205,8 +3205,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
         * rolling the global seqno forward (since this would complete requests
         * for which we haven't set the fence error to EIO yet).
         */
-       for_each_engine(engine, i915, id)
+       for_each_engine(engine, i915, id) {
+               i915_gem_reset_prepare_engine(engine);
                engine->submit_request = nop_submit_request;
+       }
 
        /*
         * Make sure no one is running the old callback before we proceed with
@@ -3244,6 +3246,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
                intel_engine_init_global_seqno(engine,
                                               intel_engine_last_submit(engine));
                spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
+               i915_gem_reset_finish_engine(engine);
        }
 
        set_bit(I915_WEDGED, &i915->gpu_error.flags);
index 0be50e43507de0e15ef06245335940eed75d3f6b..f8fe5ffcdcfff8cb27667efdcec2914eb3b7b06b 100644 (file)
@@ -1303,9 +1303,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
         */
        mutex_lock(&dev_priv->drm.struct_mutex);
        dev_priv->perf.oa.exclusive_stream = NULL;
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-
        dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 
        free_oa_buffer(dev_priv);
 
@@ -1756,22 +1755,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
  * Note: it's only the RCS/Render context that has any OA state.
  */
 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
-                                      const struct i915_oa_config *oa_config,
-                                      bool interruptible)
+                                      const struct i915_oa_config *oa_config)
 {
        struct i915_gem_context *ctx;
        int ret;
        unsigned int wait_flags = I915_WAIT_LOCKED;
 
-       if (interruptible) {
-               ret = i915_mutex_lock_interruptible(&dev_priv->drm);
-               if (ret)
-                       return ret;
-
-               wait_flags |= I915_WAIT_INTERRUPTIBLE;
-       } else {
-               mutex_lock(&dev_priv->drm.struct_mutex);
-       }
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
        /* Switch away from any user context. */
        ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
@@ -1819,8 +1809,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
        }
 
  out:
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-
        return ret;
 }
 
@@ -1863,7 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
         * to make sure all slices/subslices are ON before writing to NOA
         * registers.
         */
-       ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
+       ret = gen8_configure_all_contexts(dev_priv, oa_config);
        if (ret)
                return ret;
 
@@ -1878,7 +1866,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
 {
        /* Reset all contexts' slices/subslices configurations. */
-       gen8_configure_all_contexts(dev_priv, NULL, false);
+       gen8_configure_all_contexts(dev_priv, NULL);
 
        I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
                                      ~GT_NOA_ENABLE));
@@ -1888,7 +1876,7 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
 static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
 {
        /* Reset all contexts' slices/subslices configurations. */
-       gen8_configure_all_contexts(dev_priv, NULL, false);
+       gen8_configure_all_contexts(dev_priv, NULL);
 
        /* Make sure we disable noa to save power. */
        I915_WRITE(RPM_CONFIG1,
@@ -2138,6 +2126,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
        if (ret)
                goto err_oa_buf_alloc;
 
+       ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+       if (ret)
+               goto err_lock;
+
        ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
                                                      stream->oa_config);
        if (ret)
@@ -2145,23 +2137,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 
        stream->ops = &i915_oa_stream_ops;
 
-       /* Lock device for exclusive_stream access late because
-        * enable_metric_set() might lock as well on gen8+.
-        */
-       ret = i915_mutex_lock_interruptible(&dev_priv->drm);
-       if (ret)
-               goto err_lock;
-
        dev_priv->perf.oa.exclusive_stream = stream;
 
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        return 0;
 
-err_lock:
+err_enable:
        dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 
-err_enable:
+err_lock:
        free_oa_buffer(dev_priv);
 
 err_oa_buf_alloc:
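
The label swap above matches the new lock ordering: struct_mutex is now taken before enable_metric_set(), which lets gen8_configure_all_contexts() simply lockdep_assert_held() instead of conditionally locking, and the unwind runs strictly in reverse. A sketch of the resulting order (an illustrative summary, not a literal quote of the function):

    /* setup:  alloc OA buffer
     *         -> i915_mutex_lock_interruptible()
     *         -> enable_metric_set()
     *         -> publish exclusive_stream, unlock, return 0
     * unwind: err_enable: disable_metric_set(), unlock
     *         err_lock:   free_oa_buffer()
     */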
index 7ece2f061b9e8087ce7f5b22179377435d50af9b..e0fca035ff789be49a9811eade75cd92a67eea3a 100644 (file)
@@ -719,6 +719,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        struct rb_node *rb;
        unsigned long flags;
 
+       GEM_TRACE("%s\n", engine->name);
+
        spin_lock_irqsave(&engine->timeline->lock, flags);
 
        /* Cancel the requests on the HW and clear the ELSP tracker. */
@@ -765,6 +767,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
         */
        clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 
+       /* Mark all CS interrupts as complete */
+       execlists->active = 0;
+
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
index d3045a371a557261776ff32a88aca99b8836c538..7c73bc7e2f854e815aeb249bb4f80dbb932794f4 100644 (file)
@@ -3221,35 +3221,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
        case CHIP_KAVERI:
                rdev->config.cik.max_shader_engines = 1;
                rdev->config.cik.max_tile_pipes = 4;
-               if ((rdev->pdev->device == 0x1304) ||
-                   (rdev->pdev->device == 0x1305) ||
-                   (rdev->pdev->device == 0x130C) ||
-                   (rdev->pdev->device == 0x130F) ||
-                   (rdev->pdev->device == 0x1310) ||
-                   (rdev->pdev->device == 0x1311) ||
-                   (rdev->pdev->device == 0x131C)) {
-                       rdev->config.cik.max_cu_per_sh = 8;
-                       rdev->config.cik.max_backends_per_se = 2;
-               } else if ((rdev->pdev->device == 0x1309) ||
-                          (rdev->pdev->device == 0x130A) ||
-                          (rdev->pdev->device == 0x130D) ||
-                          (rdev->pdev->device == 0x1313) ||
-                          (rdev->pdev->device == 0x131D)) {
-                       rdev->config.cik.max_cu_per_sh = 6;
-                       rdev->config.cik.max_backends_per_se = 2;
-               } else if ((rdev->pdev->device == 0x1306) ||
-                          (rdev->pdev->device == 0x1307) ||
-                          (rdev->pdev->device == 0x130B) ||
-                          (rdev->pdev->device == 0x130E) ||
-                          (rdev->pdev->device == 0x1315) ||
-                          (rdev->pdev->device == 0x1318) ||
-                          (rdev->pdev->device == 0x131B)) {
-                       rdev->config.cik.max_cu_per_sh = 4;
-                       rdev->config.cik.max_backends_per_se = 1;
-               } else {
-                       rdev->config.cik.max_cu_per_sh = 3;
-                       rdev->config.cik.max_backends_per_se = 1;
-               }
+               rdev->config.cik.max_cu_per_sh = 8;
+               rdev->config.cik.max_backends_per_se = 2;
                rdev->config.cik.max_sh_per_se = 1;
                rdev->config.cik.max_texture_channel_caches = 4;
                rdev->config.cik.max_gprs = 256;
index 5decae0069d0bfda0f45a0c78ea033f1186282c6..78cbc3145e44063426ed40c09b5fadbb35bbdd75 100644 (file)
@@ -93,6 +93,8 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
 
        DRM_DEBUG_DRIVER("Disabling the CRTC\n");
 
+       drm_crtc_vblank_off(crtc);
+
        sun4i_tcon_set_status(scrtc->tcon, encoder, false);
 
        if (crtc->state->event && !crtc->state->active) {
@@ -113,6 +115,8 @@ static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
        DRM_DEBUG_DRIVER("Enabling the CRTC\n");
 
        sun4i_tcon_set_status(scrtc->tcon, encoder, true);
+
+       drm_crtc_vblank_on(crtc);
 }
 
 static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)
index 023f39bda633de70825243b3a28335e2686091ed..e36004fbe45360deb9487fa80cdd564c33fa030e 100644 (file)
@@ -132,10 +132,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw)
 static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
 {
        struct sun4i_dclk *dclk = hw_to_dclk(hw);
+       u32 val = degrees / 120;
+
+       val <<= 28;
 
        regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
                           GENMASK(29, 28),
-                          degrees / 120);
+                          val);
 
        return 0;
 }
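
The extra shift matters because regmap_update_bits(map, reg, mask, val) writes (orig & ~mask) | (val & mask): the value must already sit at the field position. For the three legal phases here:

    u32 mask  = GENMASK(29, 28);   /* field occupies bits 29:28 */
    u32 field = degrees / 120;     /* 0, 1 or 2 */

    /* before: field sat in bits 1:0, so (field & mask) == 0 and the
     *         phase was never programmed;
     * after:  (field << 28) & mask == field << 28, landing the phase
     *         in bits 29:28 as intended. */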
index 832f8f9bc47fd046baebde3af7c4d406b301a40c..b8da5a50a61d3b820d8ee5c8badc26132efe0b2b 100644 (file)
@@ -92,6 +92,8 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
 
        DRM_DEBUG_DRIVER("Vertical parameters OK\n");
 
+       tcon->dclk_min_div = 6;
+       tcon->dclk_max_div = 127;
        rounded_rate = clk_round_rate(tcon->dclk, rate);
        if (rounded_rate < rate)
                return MODE_CLOCK_LOW;
index b3960118deb9eb22da1134ca9525e0c87dc17275..2de586b7c98b58ea4d03470f720744840d3a026f 100644 (file)
@@ -101,10 +101,12 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
                return;
        }
 
-       if (enabled)
+       if (enabled) {
                clk_prepare_enable(clk);
-       else
+       } else {
+               clk_rate_exclusive_put(clk);
                clk_disable_unprepare(clk);
+       }
 }
 
 static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
@@ -873,52 +875,56 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
                return ret;
        }
 
-       /*
-        * This can only be made optional since we've had DT nodes
-        * without the LVDS reset properties.
-        *
-        * If the property is missing, just disable LVDS, and print a
-        * warning.
-        */
-       tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
-       if (IS_ERR(tcon->lvds_rst)) {
-               dev_err(dev, "Couldn't get our reset line\n");
-               return PTR_ERR(tcon->lvds_rst);
-       } else if (tcon->lvds_rst) {
-               has_lvds_rst = true;
-               reset_control_reset(tcon->lvds_rst);
-       } else {
-               has_lvds_rst = false;
-       }
+       if (tcon->quirks->supports_lvds) {
+               /*
+                * This can only be made optional since we've had DT
+                * nodes without the LVDS reset properties.
+                *
+                * If the property is missing, just disable LVDS, and
+                * print a warning.
+                */
+               tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
+               if (IS_ERR(tcon->lvds_rst)) {
+                       dev_err(dev, "Couldn't get our reset line\n");
+                       return PTR_ERR(tcon->lvds_rst);
+               } else if (tcon->lvds_rst) {
+                       has_lvds_rst = true;
+                       reset_control_reset(tcon->lvds_rst);
+               } else {
+                       has_lvds_rst = false;
+               }
 
-       /*
-        * This can only be made optional since we've had DT nodes
-        * without the LVDS reset properties.
-        *
-        * If the property is missing, just disable LVDS, and print a
-        * warning.
-        */
-       if (tcon->quirks->has_lvds_alt) {
-               tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
-               if (IS_ERR(tcon->lvds_pll)) {
-                       if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
-                               has_lvds_alt = false;
+               /*
+                * This can only be made optional since we've had DT
+                * nodes without the LVDS reset properties.
+                *
+                * If the property is missing, just disable LVDS, and
+                * print a warning.
+                */
+               if (tcon->quirks->has_lvds_alt) {
+                       tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
+                       if (IS_ERR(tcon->lvds_pll)) {
+                               if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
+                                       has_lvds_alt = false;
+                               } else {
+                                       dev_err(dev, "Couldn't get the LVDS PLL\n");
+                                       return PTR_ERR(tcon->lvds_pll);
+                               }
                        } else {
-                               dev_err(dev, "Couldn't get the LVDS PLL\n");
-                               return PTR_ERR(tcon->lvds_pll);
+                               has_lvds_alt = true;
                        }
-               } else {
-                       has_lvds_alt = true;
                }
-       }
 
-       if (!has_lvds_rst || (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
-               dev_warn(dev,
-                        "Missing LVDS properties, Please upgrade your DT\n");
-               dev_warn(dev, "LVDS output disabled\n");
-               can_lvds = false;
+               if (!has_lvds_rst ||
+                   (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
+                       dev_warn(dev, "Missing LVDS properties, Please upgrade your DT\n");
+                       dev_warn(dev, "LVDS output disabled\n");
+                       can_lvds = false;
+               } else {
+                       can_lvds = true;
+               }
        } else {
-               can_lvds = true;
+               can_lvds = false;
        }
 
        ret = sun4i_tcon_init_clocks(dev, tcon);
@@ -1137,7 +1143,7 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
 };
 
 static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
-       /* nothing is supported */
+       .supports_lvds          = true,
 };
 
 static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
index b761c7b823c560536b0f44a3c6eab6337d3e32d7..278700c7bf9f6f71ec3c85686c81919362aa40fa 100644 (file)
@@ -175,6 +175,7 @@ struct sun4i_tcon_quirks {
        bool    has_channel_1;  /* a33 does not have channel 1 */
        bool    has_lvds_alt;   /* Does the LVDS clock have a parent other than the TCON clock? */
        bool    needs_de_be_mux; /* sun6i needs mux to select backend */
+       bool    supports_lvds;   /* Does the TCON support an LVDS output? */
 
        /* callback to handle tcon muxing options */
        int     (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
index a5b4cf030c11b74291baa6859ee370295fd0c42f..9183d148d644484c11f4e26fc87ff332c4fbb4eb 100644 (file)
@@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in,
                dst_release(dst);
        }
 
-       if (ndev->flags & IFF_LOOPBACK) {
-               ret = rdma_translate_ip(dst_in, addr);
-               /*
-                * Put the loopback device and get the translated
-                * device instead.
-                */
+       if (ndev) {
+               if (ndev->flags & IFF_LOOPBACK)
+                       ret = rdma_translate_ip(dst_in, addr);
+               else
+                       addr->bound_dev_if = ndev->ifindex;
                dev_put(ndev);
-               ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
-       } else {
-               addr->bound_dev_if = ndev->ifindex;
        }
-       dev_put(ndev);
 
        return ret;
 }
index bc79ca8215d7c25ff22c2a901db04b209b49052a..af5ad6a56ae404d1cd2aae64f95e59d7ccded0ac 100644 (file)
@@ -17,6 +17,7 @@
 
 /* # of WCs to poll for with a single call to ib_poll_cq */
 #define IB_POLL_BATCH                  16
+#define IB_POLL_BATCH_DIRECT           8
 
 /* # of WCs to iterate over before yielding */
 #define IB_POLL_BUDGET_IRQ             256
 #define IB_POLL_FLAGS \
        (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
 
-static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
+                          int batch)
 {
        int i, n, completed = 0;
-       struct ib_wc *wcs = poll_wc ? : cq->wc;
 
        /*
         * budget might be (-1) if the caller does not
         * want to bound this call, thus we need unsigned
         * minimum here.
         */
-       while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
-                       budget - completed), wcs)) > 0) {
+       while ((n = ib_poll_cq(cq, min_t(u32, batch,
+                                        budget - completed), wcs)) > 0) {
                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = &wcs[i];
 
@@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
 
                completed += n;
 
-               if (n != IB_POLL_BATCH ||
-                   (budget != -1 && completed >= budget))
+               if (n != batch || (budget != -1 && completed >= budget))
                        break;
        }
 
@@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
  */
 int ib_process_cq_direct(struct ib_cq *cq, int budget)
 {
-       struct ib_wc wcs[IB_POLL_BATCH];
+       struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
 
-       return __ib_process_cq(cq, budget, wcs);
+       return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
 }
 EXPORT_SYMBOL(ib_process_cq_direct);
 
@@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
        struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
        int completed;
 
-       completed = __ib_process_cq(cq, budget, NULL);
+       completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
        if (completed < budget) {
                irq_poll_complete(&cq->iop);
                if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work)
        struct ib_cq *cq = container_of(work, struct ib_cq, work);
        int completed;
 
-       completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
+       completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
+                                   IB_POLL_BATCH);
        if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
            ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
                queue_work(ib_comp_wq, &cq->work);
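
With the dedicated IB_POLL_BATCH_DIRECT array, the direct path caps its stack footprint at 8 WCs, while the IRQ and workqueue paths keep using the larger preallocated cq->wc at IB_POLL_BATCH. A budget above the batch size just means more iterations of the inner ib_poll_cq() loop, so a caller can still drain freely; a hedged sketch (the function name is illustrative):

    static int drain_cq_until_empty(struct ib_cq *cq)
    {
            int n, total = 0;

            while ((n = ib_process_cq_direct(cq, 64)) > 0)
                    total += n;

            return total;
    }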
index e8010e73a1cf4fe27ec9e5d3c25190003d55107a..bb065c9449be46617bd82e2b26d7648aa00212a3 100644 (file)
@@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device,
        ret = device->query_device(device, &device->attrs, &uhw);
        if (ret) {
                pr_warn("Couldn't query the device attributes\n");
-               goto cache_cleanup;
+               goto cg_cleanup;
        }
 
        ret = ib_device_register_sysfs(device, port_callback);
        if (ret) {
                pr_warn("Couldn't register device %s with driver model\n",
                        device->name);
-               goto cache_cleanup;
+               goto cg_cleanup;
        }
 
        device->reg_state = IB_DEV_REGISTERED;
@@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device,
        mutex_unlock(&device_mutex);
        return 0;
 
+cg_cleanup:
+       ib_device_unregister_rdmacg(device);
 cache_cleanup:
        ib_cache_cleanup_one(device);
        ib_cache_release_one(device);
index 8cf15d4a8ac438db82971ad3d3bc16649a5ed6da..9f029a1ca5ea9555b2ab366012d5eabd04168e3c 100644 (file)
@@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
 
                resolved_dev = dev_get_by_index(dev_addr.net,
                                                dev_addr.bound_dev_if);
-               if (resolved_dev->flags & IFF_LOOPBACK) {
-                       dev_put(resolved_dev);
-                       resolved_dev = idev;
-                       dev_hold(resolved_dev);
+               if (!resolved_dev) {
+                       dev_put(idev);
+                       return -ENODEV;
                }
                ndev = ib_get_ndev_from_path(rec);
                rcu_read_lock();
index f015f1bf88c9c8c52c2a739bb130c2949348f0e4..3a9d0f5b5881c9b316435b5f8ba18ad5fefcbea8 100644 (file)
@@ -1149,6 +1149,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;
 
+       if (cmd.qp_state > IB_QPS_ERR)
+               return -EINVAL;
+
        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
@@ -1294,6 +1297,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
+       if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
+               return -EINVAL;
+
        optval = memdup_user((void __user *) (unsigned long) cmd.optval,
                             cmd.optlen);
        if (IS_ERR(optval)) {
index 643174d949a8c979a1f5878a258933ba7a939645..0dd75f449872404701b5294db1a02737c85523fb 100644 (file)
@@ -785,7 +785,7 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
        return 0;
 }
 
-static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
        __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
 {
        unsigned long flags;
@@ -799,8 +799,8 @@ static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
        return flags;
 }
 
-static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
-                              unsigned long flags)
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+                       unsigned long flags)
        __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
 {
        if (qp->rcq != qp->scq)
@@ -1606,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
        int status;
        union ib_gid sgid;
        struct ib_gid_attr sgid_attr;
+       unsigned int flags;
        u8 nw_type;
 
        qp->qplib_qp.modify_flags = 0;
@@ -1634,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
                        dev_dbg(rdev_to_dev(rdev),
                                "Move QP = %p to flush list\n",
                                qp);
+                       flags = bnxt_re_lock_cqs(qp);
                        bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+                       bnxt_re_unlock_cqs(qp, flags);
                }
                if (!qp->sumem &&
                    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
                        dev_dbg(rdev_to_dev(rdev),
                                "Move QP = %p out of flush list\n",
                                qp);
+                       flags = bnxt_re_lock_cqs(qp);
                        bnxt_qplib_clean_qp(&qp->qplib_qp);
+                       bnxt_re_unlock_cqs(qp, flags);
                }
        }
        if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
@@ -2227,10 +2232,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
        wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
        wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
 
+       /* Need unconditional fence for local invalidate
+        * opcode to work as expected.
+        */
+       wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
        if (wr->send_flags & IB_SEND_SIGNALED)
                wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
-       if (wr->send_flags & IB_SEND_FENCE)
-               wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
        if (wr->send_flags & IB_SEND_SOLICITED)
                wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
 
@@ -2251,8 +2259,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
        wqe->frmr.levels = qplib_frpl->hwq.level + 1;
        wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
 
-       if (wr->wr.send_flags & IB_SEND_FENCE)
-               wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+       /* Need unconditional fence for reg_mr
+        * opcode to function as expected.
+        */
+
+       wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
        if (wr->wr.send_flags & IB_SEND_SIGNALED)
                wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 
index b88a48d43a9dddd49c210d0a16779f3d4d0369b9..e62b7c2c7da6a1953605fbe7d089a3d9b6f9a50a 100644 (file)
@@ -222,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
                                           struct ib_udata *udata);
 int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
 #endif /* __BNXT_RE_IB_VERBS_H__ */
index 33a448036c2ebd99217a40b9bb9d6372a798af0d..f6e361750466f50acab01a4f1a397260e3a84472 100644 (file)
@@ -730,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
                                         struct bnxt_re_qp *qp)
 {
        struct ib_event event;
+       unsigned int flags;
+
+       if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+               flags = bnxt_re_lock_cqs(qp);
+               bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+               bnxt_re_unlock_cqs(qp, flags);
+       }
 
        memset(&event, 0, sizeof(event));
        if (qp->qplib_qp.srq) {
@@ -1416,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work)
        switch (re_work->event) {
        case NETDEV_REGISTER:
                rc = bnxt_re_ib_reg(rdev);
-               if (rc)
+               if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to register with IB: %#x", rc);
+                       bnxt_re_remove_one(rdev);
+                       bnxt_re_dev_unreg(rdev);
+               }
                break;
        case NETDEV_UP:
                bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
index 3ea5b9624f6b79c3f54c6d069b4af7ab133d1f31..06b42c880fd4594b20bc50263dc4add1db17460d 100644 (file)
@@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
        }
 }
 
-void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
-                                unsigned long *flags)
-       __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
+static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
+                                      unsigned long *flags)
+       __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
 {
-       spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
+       spin_lock_irqsave(&qp->scq->flush_lock, *flags);
        if (qp->scq == qp->rcq)
-               __acquire(&qp->rcq->hwq.lock);
+               __acquire(&qp->rcq->flush_lock);
        else
-               spin_lock(&qp->rcq->hwq.lock);
+               spin_lock(&qp->rcq->flush_lock);
 }
 
-void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
-                                unsigned long *flags)
-       __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
+static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
+                                      unsigned long *flags)
+       __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
 {
        if (qp->scq == qp->rcq)
-               __release(&qp->rcq->hwq.lock);
+               __release(&qp->rcq->flush_lock);
        else
-               spin_unlock(&qp->rcq->hwq.lock);
-       spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
-}
-
-static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
-                                                     struct bnxt_qplib_cq *cq)
-{
-       struct bnxt_qplib_cq *buddy_cq = NULL;
-
-       if (qp->scq == qp->rcq)
-               buddy_cq = NULL;
-       else if (qp->scq == cq)
-               buddy_cq = qp->rcq;
-       else
-               buddy_cq = qp->scq;
-       return buddy_cq;
-}
-
-static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
-                                    struct bnxt_qplib_cq *cq)
-       __acquires(&buddy_cq->hwq.lock)
-{
-       struct bnxt_qplib_cq *buddy_cq = NULL;
-
-       buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
-       if (!buddy_cq)
-               __acquire(&cq->hwq.lock);
-       else
-               spin_lock(&buddy_cq->hwq.lock);
-}
-
-static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
-                                      struct bnxt_qplib_cq *cq)
-       __releases(&buddy_cq->hwq.lock)
-{
-       struct bnxt_qplib_cq *buddy_cq = NULL;
-
-       buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
-       if (!buddy_cq)
-               __release(&cq->hwq.lock);
-       else
-               spin_unlock(&buddy_cq->hwq.lock);
+               spin_unlock(&qp->rcq->flush_lock);
+       spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
 }
 
 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
 {
        unsigned long flags;
 
-       bnxt_qplib_acquire_cq_locks(qp, &flags);
+       bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
        __bnxt_qplib_add_flush_qp(qp);
-       bnxt_qplib_release_cq_locks(qp, &flags);
+       bnxt_qplib_release_cq_flush_locks(qp, &flags);
 }
 
 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
@@ -177,7 +137,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 {
        unsigned long flags;
 
-       bnxt_qplib_acquire_cq_locks(qp, &flags);
+       bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
        __clean_cq(qp->scq, (u64)(unsigned long)qp);
        qp->sq.hwq.prod = 0;
        qp->sq.hwq.cons = 0;
@@ -186,7 +146,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
        qp->rq.hwq.cons = 0;
 
        __bnxt_qplib_del_flush_qp(qp);
-       bnxt_qplib_release_cq_locks(qp, &flags);
+       bnxt_qplib_release_cq_flush_locks(qp, &flags);
 }
 
 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
@@ -2107,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
        /* Must block new posting of SQ and RQ */
        qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
        bnxt_qplib_cancel_phantom_processing(qp);
-
-       /* Add qp to flush list of the CQ */
-       __bnxt_qplib_add_flush_qp(qp);
 }
 
 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
@@ -2285,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
                                sw_sq_cons, cqe->wr_id, cqe->status);
                        cqe++;
                        (*budget)--;
-                       bnxt_qplib_lock_buddy_cq(qp, cq);
                        bnxt_qplib_mark_qp_error(qp);
-                       bnxt_qplib_unlock_buddy_cq(qp, cq);
+                       /* Add qp to flush list of the CQ */
+                       bnxt_qplib_add_flush_qp(qp);
                } else {
                        if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
                                /* Before we complete, do WA 9060 */
@@ -2403,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
                if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
                        qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
                        /* Add qp to flush list of the CQ */
-                       bnxt_qplib_lock_buddy_cq(qp, cq);
-                       __bnxt_qplib_add_flush_qp(qp);
-                       bnxt_qplib_unlock_buddy_cq(qp, cq);
+                       bnxt_qplib_add_flush_qp(qp);
                }
        }
 
@@ -2489,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
                if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
                        qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
                        /* Add qp to flush list of the CQ */
-                       bnxt_qplib_lock_buddy_cq(qp, cq);
-                       __bnxt_qplib_add_flush_qp(qp);
-                       bnxt_qplib_unlock_buddy_cq(qp, cq);
+                       bnxt_qplib_add_flush_qp(qp);
                }
        }
 done:
@@ -2501,11 +2454,9 @@ done:
 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
 {
        struct cq_base *hw_cqe, **hw_cqe_ptr;
-       unsigned long flags;
        u32 sw_cons, raw_cons;
        bool rc = true;
 
-       spin_lock_irqsave(&cq->hwq.lock, flags);
        raw_cons = cq->hwq.cons;
        sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
        hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
@@ -2513,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
 
         /* Check for Valid bit. If the CQE is valid, return false */
        rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
-       spin_unlock_irqrestore(&cq->hwq.lock, flags);
        return rc;
 }
 
@@ -2602,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
                if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
                        qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
                        /* Add qp to flush list of the CQ */
-                       bnxt_qplib_lock_buddy_cq(qp, cq);
-                       __bnxt_qplib_add_flush_qp(qp);
-                       bnxt_qplib_unlock_buddy_cq(qp, cq);
+                       bnxt_qplib_add_flush_qp(qp);
                }
        }
 
@@ -2719,9 +2667,7 @@ do_rq:
         */
 
        /* Add qp to flush list of the CQ */
-       bnxt_qplib_lock_buddy_cq(qp, cq);
-       __bnxt_qplib_add_flush_qp(qp);
-       bnxt_qplib_unlock_buddy_cq(qp, cq);
+       bnxt_qplib_add_flush_qp(qp);
 done:
        return rc;
 }
@@ -2750,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
        u32 budget = num_cqes;
        unsigned long flags;
 
-       spin_lock_irqsave(&cq->hwq.lock, flags);
+       spin_lock_irqsave(&cq->flush_lock, flags);
        list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
                dev_dbg(&cq->hwq.pdev->dev,
                        "QPLIB: FP: Flushing SQ QP= %p",
@@ -2764,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
                        qp);
                __flush_rq(&qp->rq, qp, &cqe, &budget);
        }
-       spin_unlock_irqrestore(&cq->hwq.lock, flags);
+       spin_unlock_irqrestore(&cq->flush_lock, flags);
 
        return num_cqes - budget;
 }
@@ -2773,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
                       int num_cqes, struct bnxt_qplib_qp **lib_qp)
 {
        struct cq_base *hw_cqe, **hw_cqe_ptr;
-       unsigned long flags;
        u32 sw_cons, raw_cons;
        int budget, rc = 0;
 
-       spin_lock_irqsave(&cq->hwq.lock, flags);
        raw_cons = cq->hwq.cons;
        budget = num_cqes;
 
@@ -2853,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
                bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
        }
 exit:
-       spin_unlock_irqrestore(&cq->hwq.lock, flags);
        return num_cqes - budget;
 }
 
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&cq->hwq.lock, flags);
        if (arm_type)
                bnxt_qplib_arm_cq(cq, arm_type);
        /* Using cq->arm_state variable to track whether to issue cq handler */
        atomic_set(&cq->arm_state, 1);
-       spin_unlock_irqrestore(&cq->hwq.lock, flags);
 }
 
 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
index ca0a2ffa3509093a7953a80ea310f3f9314ace03..ade9f13c0fd1bb2e8cc04a8a1a85c9da1221db41 100644 (file)
@@ -389,6 +389,18 @@ struct bnxt_qplib_cq {
        struct list_head                sqf_head, rqf_head;
        atomic_t                        arm_state;
        spinlock_t                      compl_lock; /* synch CQ handlers */
+/* Locking Notes:
+ * QP can move to error state from modify_qp, async error event or error
+ * CQE as part of poll_cq. When QP is moved to error state, it gets added
+ * to two flush lists, one each for SQ and RQ.
+ * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
+ * flush_locks should be acquired when QP is moved to error. The control path
+ * operations (modify_qp and async error events) are synchronized with poll_cq
+ * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
+ * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
+ * of the same QP while manipulating the flush list.
+ */
+       spinlock_t                      flush_lock; /* QP flush management */
 };
 
 #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE        sizeof(struct xrrq_irrq)
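
The notes above reduce to a simple pattern, visible elsewhere in this series: the data path marks the QP and queues it for flushing under the flush_locks, while the control path additionally serializes against poll_cq with the upper-level cq_lock. A sketch using only calls introduced or kept by this patch:

    /* poll_cq sees an error CQE: */
    bnxt_qplib_mark_qp_error(qp);
    bnxt_qplib_add_flush_qp(qp);        /* takes scq/rcq flush_lock inside */

    /* modify_qp moves the QP to error: */
    flags = bnxt_re_lock_cqs(re_qp);    /* upper-level cq_lock first */
    bnxt_qplib_add_flush_qp(&re_qp->qplib_qp);
    bnxt_re_unlock_cqs(re_qp, flags);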
index 8329ec6a794696ddbc4892bf255ea22150cd18e7..80027a494730df22944bfbdc678ca36deadcec51 100644 (file)
@@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
                        err_event->res_err_state_reason);
                if (!qp)
                        break;
-               bnxt_qplib_acquire_cq_locks(qp, &flags);
                bnxt_qplib_mark_qp_error(qp);
-               bnxt_qplib_release_cq_locks(qp, &flags);
+               rcfw->aeq_handler(rcfw, qp_event, qp);
                break;
        default:
                /* Command Response */
@@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
        int rc;
 
        RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
-
+       /* Supply (log-base-2-of-host-page-size - base-page-shift)
+        * to bono to adjust the doorbell page sizes.
+        */
+       req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
+                                          RCFW_DBR_BASE_PAGE_SHIFT);
        /*
         * VFs need not setup the HW context area, PF
         * shall setup this area for VF. Skipping the
index 6bee6e3636ea400d5bb1be21a1b064af77475f37..c7cce2e4185e687d2572f3cedc875566a5b3e4ff 100644 (file)
@@ -49,6 +49,7 @@
 #define RCFW_COMM_SIZE                 0x104
 
 #define RCFW_DBR_PCI_BAR_REGION                2
+#define RCFW_DBR_BASE_PAGE_SHIFT       12
 
 #define RCFW_CMD_PREP(req, CMD, cmd_flags)                             \
        do {                                                            \
index 03057983341f78b251370888095a6c9492112040..ee98e5efef84652f4accdc05231dccbf935f029d 100644 (file)
@@ -139,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
        attr->max_pkey = le32_to_cpu(sb->max_pkeys);
 
        attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
-       attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
+       attr->l2_db_size = (sb->l2_db_space_size + 1) *
+                           (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
        attr->max_sgid = le32_to_cpu(sb->max_gid);
 
        bnxt_qplib_query_version(rcfw, attr->fw_ver);
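
Worked numbers for the two RCFW_DBR_BASE_PAGE_SHIFT uses above: the firmware sizes doorbell space in fixed 4K units, independent of the host page size, so both sides must stop assuming PAGE_SIZE.

    /* init:  req.log2_dbr_pg_size = PAGE_SHIFT - 12
     *          4K host pages:  12 - 12 = 0 -> ..._LOG2_DBR_PG_SIZE_PG_4K
     *          64K host pages: 16 - 12 = 4 -> ..._LOG2_DBR_PG_SIZE_PG_64K
     *
     * query: with sb->l2_db_space_size == 3 (illustrative),
     *          old: (3 + 1) * PAGE_SIZE = 256K on a 64K-page host (wrong)
     *          new: (3 + 1) * (1 << 12) = 16K on any host
     */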
index 2d7ea096a247478392acf58b4a234112ea35f54a..3e5a4f760d0eb6332032f824bf3a9367d1bbd07a 100644 (file)
@@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw {
        #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M               (0x3UL << 4)
        #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M               (0x4UL << 4)
        #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G               (0x5UL << 4)
-       __le16 reserved16;
+       /* This value is (log-base-2-of-DBR-page-size - 12).
+        * 0 for 4KB. HW supported values are enumerated below.
+        */
+       __le16  log2_dbr_pg_size;
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK        0xfUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT         0
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K       0x0UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K       0x1UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K      0x2UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K      0x3UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K      0x4UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K     0x5UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K     0x6UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K     0x7UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M       0x8UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M       0x9UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M       0xaUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M       0xbUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M      0xcUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M      0xdUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M      0xeUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M     0xfUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST                \
+                       CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
        __le64 qpc_page_dir;
        __le64 mrw_page_dir;
        __le64 srq_page_dir;
index 9a566ee3ceffeff4b76b90800f732727c1902ebd..82adc0d1d30ef39dfb716758164269a4a0702d52 100644 (file)
@@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
        wc->dlid_path_bits = 0;
 
        if (is_eth) {
+               wc->slid = 0;
                wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
                memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
                memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
@@ -851,7 +852,6 @@ repoll:
                        }
                }
 
-               wc->slid           = be16_to_cpu(cqe->rlid);
                g_mlpath_rqpn      = be32_to_cpu(cqe->g_mlpath_rqpn);
                wc->src_qp         = g_mlpath_rqpn & 0xffffff;
                wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
@@ -860,6 +860,7 @@ repoll:
                wc->wc_flags      |= mlx4_ib_ipoib_csum_ok(cqe->status,
                                        cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
                if (is_eth) {
+                       wc->slid = 0;
                        wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
                        if (be32_to_cpu(cqe->vlan_my_qpn) &
                                        MLX4_CQE_CVLAN_PRESENT_MASK) {
@@ -871,6 +872,7 @@ repoll:
                        memcpy(wc->smac, cqe->smac, ETH_ALEN);
                        wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
                } else {
+                       wc->slid = be16_to_cpu(cqe->rlid);
                        wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
                        wc->vlan_id = 0xffff;
                }
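Both hunks above enforce the same invariant: source LIDs exist only on the
InfiniBand link layer, so RoCE (Ethernet) completions must report slid == 0
rather than whatever the CQE's rlid field holds. The rule in isolation, as a
standalone sketch (helper name hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    /* What the per-branch assignments above boil down to. */
    static uint16_t completion_slid(bool is_eth, uint16_t rlid)
    {
            return is_eth ? 0 : rlid;       /* no LIDs on Ethernet/RoCE */
    }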
index 8d2ee9322f2e04448cfbdbf72112432f8554c963..5a0e4fc4785aa0164fa9d25c7a11a8bbda8f1da5 100644 (file)
@@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
                        gid_tbl[i].version = 2;
                        if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
                                gid_tbl[i].type = 1;
-                       else
-                               memset(&gid_tbl[i].gid, 0, 12);
                }
        }
 
@@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device,
                if (!gids) {
                        ret = -ENOMEM;
                } else {
-                       for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
-                               memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+                       for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
+                               memcpy(&gids[i].gid,
+                                      &port_gid_table->gids[i].gid,
+                                      sizeof(union ib_gid));
+                               gids[i].gid_type =
+                                   port_gid_table->gids[i].gid_type;
+                       }
                }
        }
        spin_unlock_bh(&iboe->lock);
index 5b974fb97611bc57b6dd2110e1df487e9a001d42..15457c9569a778aae2a6d057f52e0672b651a115 100644 (file)
@@ -226,7 +226,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
                break;
        }
-       wc->slid           = be16_to_cpu(cqe->slid);
        wc->src_qp         = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
        wc->dlid_path_bits = cqe->ml_path;
        g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
@@ -241,10 +240,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
        }
 
        if (ll != IB_LINK_LAYER_ETHERNET) {
+               wc->slid = be16_to_cpu(cqe->slid);
                wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
                return;
        }
 
+       wc->slid = 0;
        vlan_present = cqe->l4_l3_hdr_type & 0x1;
        roce_packet_type   = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
        if (vlan_present) {
@@ -1177,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
        if (ucmd.reserved0 || ucmd.reserved1)
                return -EINVAL;
 
-       umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+       /* check multiplication overflow */
+       if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+               return -EINVAL;
+
+       umem = ib_umem_get(context, ucmd.buf_addr,
+                          (size_t)ucmd.cqe_size * entries,
                           IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(umem)) {
                err = PTR_ERR(umem);
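The new guard is the standard division form of a multiplication overflow check:
a user-controlled cqe_size must not be able to wrap entries * cqe_size to a
small value before it reaches ib_umem_get(). The same pattern, standalone:

    #include <stddef.h>
    #include <stdint.h>

    /* Returns 1 if n * size cannot be represented in a size_t. */
    static int mul_overflows(size_t n, size_t size)
    {
            return size != 0 && n > SIZE_MAX / size;
    }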
index 4236c80868200822d30cfc3d8adea4c97ce9afc2..033b6af90de9eeacffba3a4f9ca7293d557eb8b5 100644 (file)
@@ -245,12 +245,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
        struct mlx5_ib_multiport_info *mpi;
        struct mlx5_ib_port *port;
 
+       if (!mlx5_core_mp_enabled(ibdev->mdev) ||
+           ll != IB_LINK_LAYER_ETHERNET) {
+               if (native_port_num)
+                       *native_port_num = ib_port_num;
+               return ibdev->mdev;
+       }
+
        if (native_port_num)
                *native_port_num = 1;
 
-       if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
-               return ibdev->mdev;
-
        port = &ibdev->port[ib_port_num - 1];
        if (!port)
                return NULL;
@@ -3263,7 +3267,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
        struct mlx5_ib_dev *ibdev;
        struct ib_event ibev;
        bool fatal = false;
-       u8 port = 0;
+       u8 port = (u8)work->param;
 
        if (mlx5_core_is_mp_slave(work->dev)) {
                ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
@@ -3283,8 +3287,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
        case MLX5_DEV_EVENT_PORT_INITIALIZED:
-               port = (u8)work->param;
-
                /* In RoCE, port up/down events are handled in
                 * mlx5_netdev_event().
                 */
@@ -3298,24 +3300,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 
        case MLX5_DEV_EVENT_LID_CHANGE:
                ibev.event = IB_EVENT_LID_CHANGE;
-               port = (u8)work->param;
                break;
 
        case MLX5_DEV_EVENT_PKEY_CHANGE:
                ibev.event = IB_EVENT_PKEY_CHANGE;
-               port = (u8)work->param;
-
                schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
                break;
 
        case MLX5_DEV_EVENT_GUID_CHANGE:
                ibev.event = IB_EVENT_GID_CHANGE;
-               port = (u8)work->param;
                break;
 
        case MLX5_DEV_EVENT_CLIENT_REREG:
                ibev.event = IB_EVENT_CLIENT_REREGISTER;
-               port = (u8)work->param;
                break;
        case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
                schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3327,7 +3324,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
        ibev.device           = &ibdev->ib_dev;
        ibev.element.port_num = port;
 
-       if (port < 1 || port > ibdev->num_ports) {
+       if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
                mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
                goto out;
        }
index 556e015678de26809f45d74ac6357ecf9dcbf501..1961c6a454372550528c817a78cec24232de2f3c 100644 (file)
@@ -1816,7 +1816,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 
        mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
        mr->ibmr.length = 0;
-       mr->ndescs = sg_nents;
 
        for_each_sg(sgl, sg, sg_nents, i) {
                if (unlikely(i >= mr->max_descs))
@@ -1828,6 +1827,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 
                sg_offset = 0;
        }
+       mr->ndescs = i;
 
        if (sg_offset_p)
                *sg_offset_p = sg_offset;
index 39d24bf694a864788f2d3dc806c3019bb4b12b8b..36197fbac63acef7c5557c74e5b0351f23d9a7c6 100644 (file)
@@ -1584,6 +1584,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
        u32 uidx = MLX5_IB_DEFAULT_UIDX;
        struct mlx5_ib_create_qp ucmd;
        struct mlx5_ib_qp_base *base;
+       int mlx5_st;
        void *qpc;
        u32 *in;
        int err;
@@ -1592,6 +1593,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);
 
+       mlx5_st = to_mlx5_st(init_attr->qp_type);
+       if (mlx5_st < 0)
+               return -EINVAL;
+
        if (init_attr->rwq_ind_tbl) {
                if (!udata)
                        return -ENOSYS;
@@ -1753,7 +1758,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
-       MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+       MLX5_SET(qpc, qpc, st, mlx5_st);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 
        if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
@@ -3095,8 +3100,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                goto out;
 
        if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
-           !optab[mlx5_cur][mlx5_new])
+           !optab[mlx5_cur][mlx5_new]) {
+               err = -EINVAL;
                goto out;
+       }
 
        op = optab[mlx5_cur][mlx5_new];
        optpar = ib_mask_to_mlx5_opt(attr_mask);
index 478b7317b80ab49fa281cb80621efb29ccde5e9a..26dc374787f74843fd0326ca87b37a312e422c3d 100644 (file)
@@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev,
                }
                return -EINVAL;
        }
-       neigh = dst_neigh_lookup(dst, &dst_in);
-
+       neigh = dst_neigh_lookup(dst, &fl6.daddr);
        if (neigh) {
                rcu_read_lock();
                if (neigh->nud_state & NUD_VALID) {
@@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
        qp = idr_find(&dev->qpidr, conn_param->qpn);
 
-       laddr = (struct sockaddr_in *)&cm_id->local_addr;
-       raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-       laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
-       raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+       laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+       raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+       laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+       raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+       DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
+                ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
+                ntohs(raddr->sin_port));
 
        DP_DEBUG(dev, QEDR_MSG_IWARP,
                 "Connect source address: %pISpc, remote address: %pISpc\n",
@@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
        int rc;
        int i;
 
-       laddr = (struct sockaddr_in *)&cm_id->local_addr;
-       laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+       laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+       laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
 
        DP_DEBUG(dev, QEDR_MSG_IWARP,
                 "Create Listener address: %pISpc\n", &cm_id->local_addr);
index 53f00dbf313f757941d32451ae23e62305f9cf53..875b17272d65289d2cfa826f3635683831e19886 100644 (file)
@@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
        switch (wr->opcode) {
        case IB_WR_SEND_WITH_IMM:
+               if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+                       rc = -EINVAL;
+                       *bad_wr = wr;
+                       break;
+               }
                wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
                swqe = (struct rdma_sq_send_wqe_1st *)wqe;
                swqe->wqe_size = 2;
@@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                break;
 
        case IB_WR_RDMA_WRITE_WITH_IMM:
+               if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+                       rc = -EINVAL;
+                       *bad_wr = wr;
+                       break;
+               }
                wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
                rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
 
@@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 {
        struct qedr_dev *dev = get_qedr_dev(ibcq->device);
        struct qedr_cq *cq = get_qedr_cq(ibcq);
-       union rdma_cqe *cqe = cq->latest_cqe;
+       union rdma_cqe *cqe;
        u32 old_cons, new_cons;
        unsigned long flags;
        int update = 0;
@@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
                return qedr_gsi_poll_cq(ibcq, num_entries, wc);
 
        spin_lock_irqsave(&cq->cq_lock, flags);
+       cqe = cq->latest_cqe;
        old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
        while (num_entries && is_valid_cqe(cq, cqe)) {
                struct qedr_qp *qp;
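The poll_cq change is a read-under-lock fix: latest_cqe can be replaced by a
concurrent resize, so the snapshot must happen only after cq_lock is held. The
general shape as a standalone pthread sketch (types hypothetical):

    #include <pthread.h>

    struct cq_state {
            pthread_mutex_t lock;
            void *latest_cqe;       /* replaced by a concurrent resize path */
    };

    static void *snapshot_latest_cqe(struct cq_state *cq)
    {
            void *cqe;

            pthread_mutex_lock(&cq->lock);
            cqe = cq->latest_cqe;   /* stable only while the lock is held */
            pthread_mutex_unlock(&cq->lock);
            return cqe;
    }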
index 4d1d8dfb2d2a4d1de7d6ee59e2b8d802a68827e5..f2273143b3cb2384109ba47bf3518adf31337ad8 100644 (file)
@@ -963,6 +963,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
        uint32_t rtime = cpu_to_le32(get_seconds());
        struct uuid_entry *u;
        char buf[BDEVNAME_SIZE];
+       struct cached_dev *exist_dc, *t;
 
        bdevname(dc->bdev, buf);
 
@@ -987,6 +988,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
                return -EINVAL;
        }
 
+       /* Check whether already attached */
+       list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
+               if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
+                       pr_err("Tried to attach %s but duplicate UUID already attached",
+                               buf);
+
+                       return -EINVAL;
+               }
+       }
+
        u = uuid_find(c, dc->sb.uuid);
 
        if (u &&
@@ -1204,7 +1215,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 
        return;
 err:
-       pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+       pr_notice("error %s: %s", bdevname(bdev, name), err);
        bcache_device_stop(&dc->disk);
 }
 
@@ -1883,6 +1894,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
        const char *err = NULL; /* must be set for any error case */
        int ret = 0;
 
+       bdevname(bdev, name);
+
        memcpy(&ca->sb, sb, sizeof(struct cache_sb));
        ca->bdev = bdev;
        ca->bdev->bd_holder = ca;
@@ -1891,11 +1904,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
        bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
        get_page(sb_page);
 
-       if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+       if (blk_queue_discard(bdev_get_queue(bdev)))
                ca->discard = CACHE_DISCARD(&ca->sb);
 
        ret = cache_alloc(ca);
        if (ret != 0) {
+               blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
                if (ret == -ENOMEM)
                        err = "cache_alloc(): -ENOMEM";
                else
@@ -1918,14 +1932,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
                goto out;
        }
 
-       pr_info("registered cache device %s", bdevname(bdev, name));
+       pr_info("registered cache device %s", name);
 
 out:
        kobject_put(&ca->kobj);
 
 err:
        if (err)
-               pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+               pr_notice("error %s: %s", name, err);
 
        return ret;
 }
@@ -2014,6 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
        if (err)
                goto err_close;
 
+       err = "failed to register device";
        if (SB_IS_BDEV(sb)) {
                struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
                if (!dc)
@@ -2028,7 +2043,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
                        goto err_close;
 
                if (register_cache(sb, sb_page, bdev, ca) != 0)
-                       goto err_close;
+                       goto err;
        }
 out:
        if (sb_page)
@@ -2041,7 +2056,7 @@ out:
 err_close:
        blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 err:
-       pr_info("error opening %s: %s", path, err);
+       pr_info("error %s: %s", path, err);
        ret = -EINVAL;
        goto out;
 }
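The duplicate-attach check added above compares raw 16-byte superblock UUIDs,
which is all that is needed since bcache stores them as opaque bytes. The
comparison in isolation (standalone sketch):

    #include <string.h>

    /* Two cached devices are duplicates iff their superblock UUIDs match. */
    static int same_bcache_uuid(const unsigned char a[16], const unsigned char b[16])
    {
            return memcmp(a, b, 16) == 0;
    }

The register_cache() hunks in the same file follow a related ownership rule:
once the helper fails after taking over the block device, it releases it with
blkdev_put() itself, and the caller now jumps past its own err_close put,
avoiding a double release.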
index 414c9af54ded2fde89531cedf3da430c53fcac9d..aa2032fa80d49eebaccf937a4a9a5182d91dadc9 100644 (file)
@@ -386,9 +386,6 @@ static void __cache_size_refresh(void)
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
                               enum data_mode *data_mode)
 {
-       unsigned noio_flag;
-       void *ptr;
-
        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
                *data_mode = DATA_MODE_SLAB;
                return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
         * all allocations done by this process (including pagetables) are done
         * as if GFP_NOIO was specified.
         */
+       if (gfp_mask & __GFP_NORETRY) {
+               unsigned noio_flag = memalloc_noio_save();
+               void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 
-       if (gfp_mask & __GFP_NORETRY)
-               noio_flag = memalloc_noio_save();
-
-       ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
-
-       if (gfp_mask & __GFP_NORETRY)
                memalloc_noio_restore(noio_flag);
+               return ptr;
+       }
 
-       return ptr;
+       return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 }
 
 /*
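The restructuring above scopes the NOIO window to exactly the __GFP_NORETRY
vmalloc: __vmalloc() cannot pass GFP flags down to the page-table allocations
it performs internally, so NOIO has to be imposed process-wide around the call.
Keeping the save and restore in one branch also keeps noio_flag's lifetime
obvious to the reader (and to the compiler). The pattern in isolation (kernel
context assumed):

    unsigned noio_flag = memalloc_noio_save();  /* allocations now behave as GFP_NOIO */
    void *ptr = __vmalloc(size, gfp_mask, PAGE_KERNEL);

    memalloc_noio_restore(noio_flag);           /* lift the restriction immediately */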
index 7d3e572072f51f865d07a79ae5a0ac974bd98e4a..3fde9e9faddd049d9de9092fd3fadb7378ff9e69 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/time.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
+#include <scsi/scsi_device.h>
 #include <scsi/scsi_dh.h>
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
@@ -211,25 +212,13 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
                else
                        m->queue_mode = DM_TYPE_REQUEST_BASED;
 
-       } else if (m->queue_mode == DM_TYPE_BIO_BASED ||
-                  m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+       } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
                INIT_WORK(&m->process_queued_bios, process_queued_bios);
-
-               if (m->queue_mode == DM_TYPE_BIO_BASED) {
-                       /*
-                        * bio-based doesn't support any direct scsi_dh management;
-                        * it just discovers if a scsi_dh is attached.
-                        */
-                       set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
-               }
-       }
-
-       if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
-               set_bit(MPATHF_QUEUE_IO, &m->flags);
-               atomic_set(&m->pg_init_in_progress, 0);
-               atomic_set(&m->pg_init_count, 0);
-               m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
-               init_waitqueue_head(&m->pg_init_wait);
+               /*
+                * bio-based doesn't support any direct scsi_dh management;
+                * it just discovers if a scsi_dh is attached.
+                */
+               set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
        }
 
        dm_table_set_type(ti->table, m->queue_mode);
@@ -337,14 +326,12 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
 {
        m->current_pg = pg;
 
-       if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
-               return;
-
        /* Must we initialise the PG first, and queue I/O till it's ready? */
        if (m->hw_handler_name) {
                set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
                set_bit(MPATHF_QUEUE_IO, &m->flags);
        } else {
+               /* FIXME: not needed if no scsi_dh is attached */
                clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
                clear_bit(MPATHF_QUEUE_IO, &m->flags);
        }
@@ -385,8 +372,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
        unsigned bypassed = 1;
 
        if (!atomic_read(&m->nr_valid_paths)) {
-               if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
-                       clear_bit(MPATHF_QUEUE_IO, &m->flags);
+               clear_bit(MPATHF_QUEUE_IO, &m->flags);
                goto failed;
        }
 
@@ -599,7 +585,7 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
        return pgpath;
 }
 
-static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio)
+static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
 {
        struct pgpath *pgpath;
        unsigned long flags;
@@ -634,8 +620,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 {
        struct pgpath *pgpath;
 
-       if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
-               pgpath = __map_bio_nvme(m, bio);
+       if (!m->hw_handler_name)
+               pgpath = __map_bio_fast(m, bio);
        else
                pgpath = __map_bio(m, bio);
 
@@ -675,8 +661,7 @@ static void process_queued_io_list(struct multipath *m)
 {
        if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
                dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
-       else if (m->queue_mode == DM_TYPE_BIO_BASED ||
-                m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+       else if (m->queue_mode == DM_TYPE_BIO_BASED)
                queue_work(kmultipathd, &m->process_queued_bios);
 }
 
@@ -838,6 +823,16 @@ retain:
                         */
                        kfree(m->hw_handler_name);
                        m->hw_handler_name = attached_handler_name;
+
+                       /*
+                        * Init fields that are only used when a scsi_dh is attached
+                        */
+                       if (!test_and_set_bit(MPATHF_QUEUE_IO, &m->flags)) {
+                               atomic_set(&m->pg_init_in_progress, 0);
+                               atomic_set(&m->pg_init_count, 0);
+                               m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+                               init_waitqueue_head(&m->pg_init_wait);
+                       }
                }
        }
 
@@ -873,6 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
        int r;
        struct pgpath *p;
        struct multipath *m = ti->private;
+       struct scsi_device *sdev;
 
        /* we need at least a path arg */
        if (as->argc < 1) {
@@ -891,7 +887,9 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
                goto bad;
        }
 
-       if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+       sdev = scsi_device_from_queue(bdev_get_queue(p->path.dev->bdev));
+       if (sdev) {
+               put_device(&sdev->sdev_gendev);
                INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
                r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
                if (r) {
@@ -1001,8 +999,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
        if (!hw_argc)
                return 0;
 
-       if (m->queue_mode == DM_TYPE_BIO_BASED ||
-           m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+       if (m->queue_mode == DM_TYPE_BIO_BASED) {
                dm_consume_args(as, hw_argc);
                DMERR("bio-based multipath doesn't allow hardware handler args");
                return 0;
@@ -1091,8 +1088,6 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
 
                        if (!strcasecmp(queue_mode_name, "bio"))
                                m->queue_mode = DM_TYPE_BIO_BASED;
-                       else if (!strcasecmp(queue_mode_name, "nvme"))
-                               m->queue_mode = DM_TYPE_NVME_BIO_BASED;
                        else if (!strcasecmp(queue_mode_name, "rq"))
                                m->queue_mode = DM_TYPE_REQUEST_BASED;
                        else if (!strcasecmp(queue_mode_name, "mq"))
@@ -1193,7 +1188,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
        ti->num_discard_bios = 1;
        ti->num_write_same_bios = 1;
        ti->num_write_zeroes_bios = 1;
-       if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+       if (m->queue_mode == DM_TYPE_BIO_BASED)
                ti->per_io_data_size = multipath_per_bio_data_size();
        else
                ti->per_io_data_size = sizeof(struct dm_mpath_io);
@@ -1730,9 +1725,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
                        case DM_TYPE_BIO_BASED:
                                DMEMIT("queue_mode bio ");
                                break;
-                       case DM_TYPE_NVME_BIO_BASED:
-                               DMEMIT("queue_mode nvme ");
-                               break;
                        case DM_TYPE_MQ_REQUEST_BASED:
                                DMEMIT("queue_mode mq ");
                                break;
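The multipath hunks above retire the explicit "nvme" queue mode; whether a path
needs scsi_dh handling is now decided at parse time by asking the block layer
if the queue belongs to a SCSI device. The detection idiom in isolation (kernel
context assumed, error handling elided):

    struct scsi_device *sdev = scsi_device_from_queue(bdev_get_queue(bdev));

    if (sdev) {
            put_device(&sdev->sdev_gendev); /* drop the reference the lookup took */
            /* SCSI path: set up scsi_dh and pg_init state */
    } else {
            /* non-SCSI path (e.g. NVMe): fast bio-based mapping, no scsi_dh */
    }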
index 7ef469e902c620126b95f69d899e528ae114b3bc..c1d1034ff7b75eb740d40920615c0ae10a308c43 100644 (file)
@@ -3408,9 +3408,10 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
        } else {
-               if (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
-                   test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
-                   test_bit(MD_RECOVERY_RUNNING, &recovery))
+               if (!test_bit(MD_RECOVERY_INTR, &recovery) &&
+                   (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
+                    test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+                    test_bit(MD_RECOVERY_RUNNING, &recovery)))
                        r = mddev->curr_resync_completed;
                else
                        r = mddev->recovery_cp;
index 5fe7ec356c333c940edb41e61f0e5844de0aea72..7eb3e2a3c07d5a607669d36aa0a462bfd175d9b3 100644 (file)
@@ -942,17 +942,12 @@ static int dm_table_determine_type(struct dm_table *t)
 
        if (t->type != DM_TYPE_NONE) {
                /* target already set the table's type */
-               if (t->type == DM_TYPE_BIO_BASED)
-                       return 0;
-               else if (t->type == DM_TYPE_NVME_BIO_BASED) {
-                       if (!dm_table_does_not_support_partial_completion(t)) {
-                               DMERR("nvme bio-based is only possible with devices"
-                                     " that don't support partial completion");
-                               return -EINVAL;
-                       }
-                       /* Fallthru, also verify all devices are blk-mq */
+               if (t->type == DM_TYPE_BIO_BASED) {
+                       /* possibly upgrade to a variant of bio-based */
+                       goto verify_bio_based;
                }
                BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
+               BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
                goto verify_rq_based;
        }
 
@@ -985,6 +980,7 @@ static int dm_table_determine_type(struct dm_table *t)
        }
 
        if (bio_based) {
+verify_bio_based:
                /* We must use this table as bio-based */
                t->type = DM_TYPE_BIO_BASED;
                if (dm_table_supports_dax(t) ||
@@ -1755,7 +1751,7 @@ static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev
        char b[BDEVNAME_SIZE];
 
        /* For now, NVMe devices are the only devices of this class */
-       return (strncmp(bdevname(dev->bdev, b), "nvme", 3) == 0);
+       return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
 }
 
 static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
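The one-character change above fixes a classic strncmp() length bug: with a
length of 3 only "nvm" is compared, so any "nvm*" device name passed the NVMe
check. A quick standalone demonstration:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
            assert(strncmp("nvm0n1", "nvme", 3) == 0);   /* false positive with 3 */
            assert(strncmp("nvm0n1", "nvme", 4) != 0);   /* rejected with 4 */
            assert(strncmp("nvme0n1", "nvme", 4) == 0);  /* intended match */
            return 0;
    }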
index 68136806d365821f63ace7675ce21ba2bf10ed8d..45328d8b2859640d04a01b6b7b14cd9990fa2b7e 100644 (file)
@@ -458,9 +458,11 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
        return dm_get_geometry(md, geo);
 }
 
-static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
-                                 struct block_device **bdev,
-                                 fmode_t *mode)
+static char *_dm_claim_ptr = "I belong to device-mapper";
+
+static int dm_get_bdev_for_ioctl(struct mapped_device *md,
+                                struct block_device **bdev,
+                                fmode_t *mode)
 {
        struct dm_target *tgt;
        struct dm_table *map;
@@ -490,6 +492,10 @@ retry:
                goto out;
 
        bdgrab(*bdev);
+       r = blkdev_get(*bdev, *mode, _dm_claim_ptr);
+       if (r < 0)
+               goto out;
+
        dm_put_live_table(md, srcu_idx);
        return r;
 
@@ -508,7 +514,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
        struct mapped_device *md = bdev->bd_disk->private_data;
        int r;
 
-       r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+       r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
        if (r < 0)
                return r;
 
@@ -528,7 +534,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 
        r =  __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 out:
-       bdput(bdev);
+       blkdev_put(bdev, mode);
        return r;
 }
 
@@ -708,14 +714,13 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
 static int open_table_device(struct table_device *td, dev_t dev,
                             struct mapped_device *md)
 {
-       static char *_claim_ptr = "I belong to device-mapper";
        struct block_device *bdev;
 
        int r;
 
        BUG_ON(td->dm_dev.bdev);
 
-       bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
+       bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
 
@@ -3011,7 +3016,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
        fmode_t mode;
        int r;
 
-       r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+       r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
        if (r < 0)
                return r;
 
@@ -3021,7 +3026,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
        else
                r = -EOPNOTSUPP;
 
-       bdput(bdev);
+       blkdev_put(bdev, mode);
        return r;
 }
 
@@ -3032,7 +3037,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
        fmode_t mode;
        int r;
 
-       r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+       r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
        if (r < 0)
                return r;
 
@@ -3042,7 +3047,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
        else
                r = -EOPNOTSUPP;
 
-       bdput(bdev);
+       blkdev_put(bdev, mode);
        return r;
 }
 
@@ -3054,7 +3059,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
        fmode_t mode;
        int r;
 
-       r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+       r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
        if (r < 0)
                return r;
 
@@ -3064,7 +3069,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
        else
                r = -EOPNOTSUPP;
 
-       bdput(bdev);
+       blkdev_put(bdev, mode);
        return r;
 }
 
@@ -3075,7 +3080,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
        fmode_t mode;
        int r;
 
-       r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+       r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
        if (r < 0)
                return r;
 
@@ -3085,7 +3090,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
        else
                r = -EOPNOTSUPP;
 
-       bdput(bdev);
+       blkdev_put(bdev, mode);
        return r;
 }
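The rename from dm_grab_bdev_for_ioctl() to dm_get_bdev_for_ioctl() tracks a
real semantic change: the helper now takes a blkdev_get() claim on the device,
so every caller's bdput() has to become a blkdev_put() with the same mode. The
pairing in isolation (kernel context assumed):

    r = blkdev_get(bdev, mode, _dm_claim_ptr);  /* claim the device */
    if (r < 0)
            return r;
    /* ... issue the ioctl or persistent-reservation command ... */
    blkdev_put(bdev, mode);                     /* balances blkdev_get(), not bdput() */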
 
index 337462e1569fee2628fc6126c29c3acc9cc904ae..038509e5d031f44e99fee275c59b7b08421c9f29 100644 (file)
@@ -102,10 +102,32 @@ static long afu_ioctl_attach(struct ocxl_context *ctx,
        return rc;
 }
 
+static long afu_ioctl_get_metadata(struct ocxl_context *ctx,
+               struct ocxl_ioctl_metadata __user *uarg)
+{
+       struct ocxl_ioctl_metadata arg;
+
+       memset(&arg, 0, sizeof(arg));
+
+       arg.version = 0;
+
+       arg.afu_version_major = ctx->afu->config.version_major;
+       arg.afu_version_minor = ctx->afu->config.version_minor;
+       arg.pasid = ctx->pasid;
+       arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride;
+       arg.global_mmio_size = ctx->afu->config.global_mmio_size;
+
+       if (copy_to_user(uarg, &arg, sizeof(arg)))
+               return -EFAULT;
+
+       return 0;
+}
+
 #define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" :                        \
                        x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" :       \
                        x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" :         \
                        x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" :     \
+                       x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" : \
                        "UNKNOWN")
 
 static long afu_ioctl(struct file *file, unsigned int cmd,
@@ -159,6 +181,11 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
                                        irq_fd.eventfd);
                break;
 
+       case OCXL_IOCTL_GET_METADATA:
+               rc = afu_ioctl_get_metadata(ctx,
+                               (struct ocxl_ioctl_metadata __user *) args);
+               break;
+
        default:
                rc = -EINVAL;
        }
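From userspace the new ioctl is a single call; a minimal sketch, assuming an
already-open AFU file descriptor and taking the struct and ioctl names from the
uapi header (include path assumed):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <misc/ocxl.h>          /* uapi header providing the struct and ioctl */

    static void print_afu_metadata(int afu_fd)
    {
            struct ocxl_ioctl_metadata md;

            if (ioctl(afu_fd, OCXL_IOCTL_GET_METADATA, &md) == 0)
                    printf("AFU v%u.%u, PASID %u, pp mmio %llu bytes\n",
                           (unsigned)md.afu_version_major,
                           (unsigned)md.afu_version_minor,
                           (unsigned)md.pasid,
                           (unsigned long long)md.pp_mmio_size);
    }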
index 21d29f7936f6c5d1e26c6e0d3f10644fd0f096c8..d39b0b7011b2d9cf194180813a5a30d9fe226b6d 100644 (file)
@@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
                trigger_cmd_completions(dev);
        }
 
-       mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+       mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
        mlx5_core_err(dev, "end\n");
 
 unlock:
index 817e5e2766da36b7312e6992952b073ae2e0111a..7aeca5db791613f345f733513f2558d4de14e2ae 100644 (file)
@@ -3033,7 +3033,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
                        ns->disk->disk_name);
 
        nvme_mpath_add_disk(ns->head);
-       nvme_mpath_add_disk_links(ns);
        return;
  out_unlink_ns:
        mutex_lock(&ctrl->subsys->lock);
@@ -3053,7 +3052,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                return;
 
        if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-               nvme_mpath_remove_disk_links(ns);
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvme_ns_id_attr_group);
                if (ns->ndev)
index a1c58e35075e9e180cae240d6951c807bb7a4a82..8f0f34d06d46965168e4472ea2f9f5b5daca6a20 100644 (file)
@@ -650,6 +650,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -EINVAL;
                                goto out;
                        }
+                       if (opts->discovery_nqn) {
+                               pr_debug("Ignoring nr_io_queues value for discovery controller\n");
+                               break;
+                       }
+
                        opts->nr_io_queues = min_t(unsigned int,
                                        num_online_cpus(), token);
                        break;
index 7f51f8414b97238e647ef37f13942755e4b83a16..1dc1387b71342e67bb0f6848104e2ee4ab901661 100644 (file)
@@ -1206,7 +1206,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
                                sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
 
        assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
-       assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
+       assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
        /* Linux supports only Dynamic controllers */
        assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
        uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
@@ -1321,7 +1321,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                                sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
        conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
        conn_rqst->connect_cmd.qid  = cpu_to_be16(queue->qnum);
-       conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
+       conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
 
        lsop->queue = queue;
        lsreq->rqstaddr = conn_rqst;
@@ -2481,11 +2481,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
                goto out_free_tag_set;
        }
 
-       ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+       ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_cleanup_blk_queue;
 
-       ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+       ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_delete_hw_queues;
 
@@ -2532,11 +2532,11 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
        if (ret)
                goto out_free_io_queues;
 
-       ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+       ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_free_io_queues;
 
-       ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+       ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_delete_hw_queues;
 
@@ -2632,13 +2632,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        nvme_fc_init_queue(ctrl, 0);
 
        ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
-                               NVME_AQ_BLK_MQ_DEPTH);
+                               NVME_AQ_DEPTH);
        if (ret)
                goto out_free_queue;
 
        ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
-                               NVME_AQ_BLK_MQ_DEPTH,
-                               (NVME_AQ_BLK_MQ_DEPTH / 4));
+                               NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
        if (ret)
                goto out_delete_hw_queue;
 
@@ -2666,7 +2665,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        }
 
        ctrl->ctrl.sqsize =
-               min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);
+               min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
 
        ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
        if (ret)
@@ -2699,6 +2698,14 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
                opts->queue_size = ctrl->ctrl.maxcmd;
        }
 
+       if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
+               /* warn if sqsize is lower than queue_size */
+               dev_warn(ctrl->ctrl.device,
+                       "queue_size %zu > ctrl sqsize %u, clamping down\n",
+                       opts->queue_size, ctrl->ctrl.sqsize + 1);
+               opts->queue_size = ctrl->ctrl.sqsize + 1;
+       }
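All of the qsize - 1 / sqsize + 1 churn above follows from one convention:
NVMe's SQSIZE is a zero-based field, while the fabrics queue_size option is
one-based. Kept at the wire boundary, the conversion looks like this
(illustrative values, not driver code):

    unsigned int queue_size = 128;              /* 1-based: what the user configures  */
    unsigned int sqsize     = queue_size - 1;   /* 0-based: what the Connect carries  */
    unsigned int entries    = sqsize + 1;       /* back to 1-based when sizing queues */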