Merge tag 'acpi-4.15-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 8 Dec 2017 18:05:53 +0000 (10:05 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 8 Dec 2017 18:05:53 +0000 (10:05 -0800)
Pull ACPI fix from Rafael Wysocki:
 "This fixes an out of bounds warning from KASAN in the ACPI CPPC
  driver"

* tag 'acpi-4.15-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI / CPPC: Fix KASAN global out of bounds warning

220 files changed:
Documentation/core-api/genericirq.rst
Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
Documentation/driver-api/dmaengine/client.rst
Documentation/driver-api/pci.rst
Documentation/scsi/scsi_mid_low_api.txt
MAINTAINERS
arch/m68k/configs/stmark2_defconfig
arch/m68k/kernel/vmlinux-nommu.lds
arch/m68k/kernel/vmlinux-std.lds
arch/m68k/kernel/vmlinux-sun3.lds
arch/mips/include/asm/Kbuild
arch/mips/include/asm/serial.h [new file with mode: 0644]
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/segment.h
arch/x86/include/asm/tlbflush.h
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/process.c
arch/x86/kernel/smpboot.c
arch/x86/mm/extable.c
arch/x86/mm/fault.c
arch/x86/pci/broadcom_bus.c
arch/x86/platform/uv/uv_nmi.c
arch/x86/power/cpu.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/xen-asm_64.S
drivers/Makefile
drivers/android/binder.c
drivers/base/Kconfig
drivers/base/isa.c
drivers/base/power/runtime.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/esrt.c
drivers/firmware/efi/runtime-map.c
drivers/firmware/google/vpd.c
drivers/firmware/qemu_fw_cfg.c
drivers/gpio/gpio-74x164.c
drivers/gpio/gpio-davinci.c
drivers/gpio/gpio-pca953x.c
drivers/hv/channel.c
drivers/hv/channel_mgmt.c
drivers/iio/adc/cpcap-adc.c
drivers/iio/adc/meson_saradc.c
drivers/iio/health/max30102.c
drivers/iio/industrialio-core.c
drivers/iio/proximity/sx9500.c
drivers/infiniband/Kconfig
drivers/infiniband/core/cma.c
drivers/infiniband/core/device.c
drivers/infiniband/core/security.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hns/hns_roce_alloc.c
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hem.c
drivers/infiniband/hw/hns/hns_roce_hem.h
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/i40iw/i40iw_d.h
drivers/iommu/intel-iommu.c
drivers/net/can/flexcan.c
drivers/net/can/peak_canfd/peak_pciefd_main.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/mcba_usb.c
drivers/net/dsa/bcm_sf2_cfp.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/phy/micrel.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp.c
drivers/net/tap.c
drivers/net/tun.c
drivers/pinctrl/intel/pinctrl-denverton.c
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/pinctrl-gemini.c
drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commsup.c
drivers/scsi/aacraid/linit.c
drivers/scsi/aacraid/rx.c
drivers/scsi/aacraid/src.c
drivers/scsi/scsi_lib.c
drivers/scsi/ufs/ufshcd.c
drivers/staging/ccree/ssi_hash.c
drivers/staging/comedi/drivers/ni_atmio.c
drivers/staging/octeon-usb/octeon-hcd.c
drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
drivers/tty/serdev/serdev-ttyport.c
drivers/tty/serial/8250/8250_early.c
drivers/tty/serial/8250/8250_pci.c
drivers/usb/common/ulpi.c
drivers/usb/core/config.c
drivers/usb/core/devio.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/legacy/Kconfig
drivers/usb/gadget/udc/bdc/bdc_core.c
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/ehci-dbg.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/serial/option.c
drivers/usb/serial/usb_debug.c
drivers/usb/storage/uas-detect.h
drivers/usb/typec/Kconfig
drivers/usb/typec/ucsi/Kconfig
drivers/usb/usbip/vhci_hcd.c
drivers/vhost/net.c
drivers/virtio/virtio.c
drivers/virtio/virtio_balloon.c
fs/proc/base.c
include/linux/debugfs.h
include/linux/dma-mapping.h
include/linux/hyperv.h
include/linux/iio/timer/stm32-lptim-trigger.h
include/linux/irqdesc.h
include/linux/serdev.h
include/linux/skbuff.h
include/linux/sysfs.h
include/net/sctp/structs.h
include/net/tc_act/tc_sample.h
include/net/tcp.h
include/scsi/libsas.h
include/trace/events/xdp.h
include/uapi/linux/usb/ch9.h
kernel/bpf/core.c
kernel/bpf/offload.c
kernel/cpu.c
kernel/debug/kdb/kdb_io.c
kernel/events/core.c
kernel/irq/matrix.c
kernel/locking/lockdep.c
kernel/printk/printk.c
kernel/sched/fair.c
kernel/sched/wait.c
kernel/trace/bpf_trace.c
net/dccp/minisocks.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/rxrpc/af_rxrpc.c
net/rxrpc/call_event.c
net/rxrpc/conn_event.c
net/rxrpc/conn_object.c
net/rxrpc/input.c
net/rxrpc/sendmsg.c
net/sched/act_sample.c
net/sctp/chunk.c
net/sctp/outqueue.c
net/tipc/udp_media.c
samples/bpf/bpf_load.c
scripts/kernel-doc
sound/core/pcm.c
sound/core/seq/seq_timer.c
sound/pci/hda/patch_realtek.c
sound/usb/mixer.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm_perf.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/bpf/bpftool/Documentation/Makefile
tools/bpf/bpftool/Makefile
tools/bpf/bpftool/main.c
tools/bpf/bpftool/main.h
tools/hv/hv_kvp_daemon.c
tools/include/uapi/asm-generic/mman.h
tools/include/uapi/drm/drm.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/kcmp.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/prctl.h
tools/objtool/Makefile
tools/objtool/orc_dump.c
tools/perf/bench/numa.c
tools/perf/builtin-help.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
tools/perf/tests/shell/trace+probe_vfs_getname.sh
tools/perf/tests/task-exit.c
tools/perf/trace/beauty/mmap.c
tools/perf/util/annotate.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/intel-pt-decoder/inat.h
tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
tools/perf/util/machine.c
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/pmu.c
tools/testing/selftests/bpf/test_verifier_log.c
tools/usb/usbip/libsrc/vhci_driver.c

diff --git a/Documentation/core-api/genericirq.rst b/Documentation/core-api/genericirq.rst
index 0054bd48be849035146239cb6efc0d7dba0c0a62..4da67b65cecfa68e536efe7a6905cc0757df0417 100644
@@ -225,9 +225,9 @@ interrupts.
 
 The following control flow is implemented (simplified excerpt)::
 
-    :c:func:`desc->irq_data.chip->irq_mask_ack`;
+    desc->irq_data.chip->irq_mask_ack();
     handle_irq_event(desc->action);
-    :c:func:`desc->irq_data.chip->irq_unmask`;
+    desc->irq_data.chip->irq_unmask();
 
 
 Default Fast EOI IRQ flow handler
@@ -239,7 +239,7 @@ which only need an EOI at the end of the handler.
 The following control flow is implemented (simplified excerpt)::
 
     handle_irq_event(desc->action);
-    :c:func:`desc->irq_data.chip->irq_eoi`;
+    desc->irq_data.chip->irq_eoi();
 
 
 Default Edge IRQ flow handler
@@ -251,15 +251,15 @@ interrupts.
 The following control flow is implemented (simplified excerpt)::
 
     if (desc->status & running) {
-        :c:func:`desc->irq_data.chip->irq_mask_ack`;
+        desc->irq_data.chip->irq_mask_ack();
         desc->status |= pending | masked;
         return;
     }
-    :c:func:`desc->irq_data.chip->irq_ack`;
+    desc->irq_data.chip->irq_ack();
     desc->status |= running;
     do {
         if (desc->status & masked)
-            :c:func:`desc->irq_data.chip->irq_unmask`;
+            desc->irq_data.chip->irq_unmask();
         desc->status &= ~pending;
         handle_irq_event(desc->action);
     } while (status & pending);
@@ -293,10 +293,10 @@ simplified version without locking.
 The following control flow is implemented (simplified excerpt)::
 
     if (desc->irq_data.chip->irq_ack)
-        :c:func:`desc->irq_data.chip->irq_ack`;
+        desc->irq_data.chip->irq_ack();
     handle_irq_event(desc->action);
     if (desc->irq_data.chip->irq_eoi)
-            :c:func:`desc->irq_data.chip->irq_eoi`;
+        desc->irq_data.chip->irq_eoi();
 
 
 EOI Edge IRQ flow handler
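The genericirq.rst hunks above only replace stray :c:func: markup with plain pseudo-code; the documented control flow itself is unchanged. For orientation, a driver selects one of these flow handlers when it wires up its irq_chip. A minimal sketch, assuming a hypothetical chip and virq (none of this is in the patch):

    #include <linux/irq.h>

    static void example_wire_up_irq(unsigned int virq, struct irq_chip *chip)
    {
            /*
             * Level-triggered line: the core masks+acks before and
             * unmasks after handle_irq_event(), as excerpted above.
             */
            irq_set_chip_and_handler(virq, chip, handle_level_irq);
    }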
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
index 7f57271df2bc96b41168d337c911511ccb726e33..0d0158728f897bd9fbeaa41e884955445ca1f062 100644
@@ -27,7 +27,7 @@ Required properties:
        ti,tca6424
        ti,tca9539
        ti,tca9554
-       onsemi,pca9654
+       onnn,pca9654
        exar,xra1202
 
 Optional properties:
diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst
index 6245c99af8c1157176fd1d22681bc221917d3d7f..fbbb2831f29f8c7f50675238289478c98f472606 100644
@@ -185,7 +185,7 @@ The details of these operations are:
       void dma_async_issue_pending(struct dma_chan *chan);
 
 Further APIs:
-------------
+-------------
 
 1. Terminate APIs
 
diff --git a/Documentation/driver-api/pci.rst b/Documentation/driver-api/pci.rst
index 01a6c8b7d3a7b59dcee9a11d828968e464af9c1c..ca85e5e78b2c439b3279fbe797b67eeaea84b141 100644
@@ -25,9 +25,6 @@ PCI Support Library
 .. kernel-doc:: drivers/pci/irq.c
    :export:
 
-.. kernel-doc:: drivers/pci/htirq.c
-   :export:
-
 .. kernel-doc:: drivers/pci/probe.c
    :export:
 
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index 6338400eed73d7e2f8b90807186e15d54cd16cde..2c31d9ee6776ea7fef125d8fd00e7f2ef8614e36 100644
@@ -319,12 +319,12 @@ struct Scsi_Host:
         instance. If the reference count reaches 0 then the given instance
         is freed
 
-The Scsi_device structure has had reference counting infrastructure added.
-This effectively spreads the ownership of struct Scsi_device instances
+The scsi_device structure has had reference counting infrastructure added.
+This effectively spreads the ownership of struct scsi_device instances
 across the various SCSI layers which use them. Previously such instances
 were exclusively owned by the mid level. See the access functions declared
 towards the end of include/scsi/scsi_device.h . If an LLD wants to keep
-a copy of a pointer to a Scsi_device instance it should use scsi_device_get()
+a copy of a pointer to a scsi_device instance it should use scsi_device_get()
 to bump its reference count. When it is finished with the pointer it can
 use scsi_device_put() to decrement its reference count (and potentially
 delete it).
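Since the paragraph above is what tells LLD authors how to hold on to a scsi_device, here is a minimal sketch of that pattern (hypothetical helper, not part of this patch):

    #include <scsi/scsi_device.h>

    static struct scsi_device *example_stash_sdev(struct scsi_device *sdev)
    {
            /* A non-zero return means the device is already being torn down. */
            if (scsi_device_get(sdev))
                    return NULL;
            return sdev;    /* drop with scsi_device_put(sdev) when finished */
    }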
diff --git a/MAINTAINERS b/MAINTAINERS
index d4fdcb12616c7a10c2a05407ca3586b208ed11a4..85773bf90b0a7a81ae2dabffdbc63400d8dff27d 100644
@@ -554,13 +554,13 @@ S:        Orphan
 F:     Documentation/filesystems/affs.txt
 F:     fs/affs/
 
-AFS FILESYSTEM & AF_RXRPC SOCKET DOMAIN
+AFS FILESYSTEM
 M:     David Howells <dhowells@redhat.com>
 L:     linux-afs@lists.infradead.org
 S:     Supported
 F:     fs/afs/
-F:     include/net/af_rxrpc.h
-F:     net/rxrpc/af_rxrpc.c
+F:     include/trace/events/afs.h
+F:     Documentation/filesystems/afs.txt
 W:     https://www.infradead.org/~dhowells/kafs/
 
 AGPGART DRIVER
@@ -859,7 +859,8 @@ F:  kernel/configs/android*
 ANDROID DRIVERS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:     Arve Hjønnevåg <arve@android.com>
-M:     Riley Andrews <riandrews@android.com>
+M:     Todd Kjos <tkjos@android.com>
+M:     Martijn Coenen <maco@android.com>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 L:     devel@driverdev.osuosl.org
 S:     Supported
@@ -7766,6 +7767,7 @@ F:        security/keys/
 
 KGDB / KDB /debug_core
 M:     Jason Wessel <jason.wessel@windriver.com>
+M:     Daniel Thompson <daniel.thompson@linaro.org>
 W:     http://kgdb.wiki.kernel.org/
 L:     kgdb-bugreport@lists.sourceforge.net
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git
@@ -11776,6 +11778,18 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-deve
 S:     Maintained
 F:     drivers/net/wireless/realtek/rtl8xxxu/
 
+RXRPC SOCKETS (AF_RXRPC)
+M:     David Howells <dhowells@redhat.com>
+L:     linux-afs@lists.infradead.org
+S:     Supported
+F:     net/rxrpc/
+F:     include/keys/rxrpc-type.h
+F:     include/net/af_rxrpc.h
+F:     include/trace/events/rxrpc.h
+F:     include/uapi/linux/rxrpc.h
+F:     Documentation/networking/rxrpc.txt
+W:     https://www.infradead.org/~dhowells/kafs/
+
 S3 SAVAGE FRAMEBUFFER DRIVER
 M:     Antonino Daplas <adaplas@gmail.com>
 L:     linux-fbdev@vger.kernel.org
@@ -13647,10 +13661,8 @@ F:     drivers/net/wireless/ti/
 F:     include/linux/wl12xx.h
 
 TILE ARCHITECTURE
-M:     Chris Metcalf <cmetcalf@mellanox.com>
 W:     http://www.mellanox.com/repository/solutions/tile-scm/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git
-S:     Supported
+S:     Orphan
 F:     arch/tile/
 F:     drivers/char/tile-srom.c
 F:     drivers/edac/tile_edac.c
diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig
index 55e55dbc2fb66410f757f635a7cc46a7c28b1287..3d07b1de7eb0aa807d9aa637b3b0cec07e4ca1e2 100644
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../uClinux-dist/romfs"
 # CONFIG_RD_BZIP2 is not set
 # CONFIG_RD_LZMA is not set
 # CONFIG_RD_XZ is not set
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
index 3aa571a513b5dfa1f48bf2e17c120289f1a0d246..cf6edda389719535e25d505dfdc31d885995cffb 100644
@@ -45,6 +45,8 @@ SECTIONS {
        .text : {
                HEAD_TEXT
                TEXT_TEXT
+               IRQENTRY_TEXT
+               SOFTIRQENTRY_TEXT
                SCHED_TEXT
                CPUIDLE_TEXT
                LOCK_TEXT
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 89172b8974b95444f4def01ce4104b664391eaea..625a5785804faf8d706442150e6cdcac0ee117c3 100644
@@ -16,6 +16,8 @@ SECTIONS
   .text : {
        HEAD_TEXT
        TEXT_TEXT
+       IRQENTRY_TEXT
+       SOFTIRQENTRY_TEXT
        SCHED_TEXT
        CPUIDLE_TEXT
        LOCK_TEXT
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index 293990efc9173288d38313e22c81e2c93aea0909..9868270b0984487c5a83100514cd88e8ddd743cd 100644
@@ -16,6 +16,8 @@ SECTIONS
   .text : {
        HEAD_TEXT
        TEXT_TEXT
+       IRQENTRY_TEXT
+       SOFTIRQENTRY_TEXT
        SCHED_TEXT
        CPUIDLE_TEXT
        LOCK_TEXT
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 7c8aab23bce8da59f727ff8725250a8f0d385b12..b1f66699677dbaa31931e84f702c88ad0d2feb8f 100644
@@ -16,7 +16,6 @@ generic-y += qrwlock.h
 generic-y += qspinlock.h
 generic-y += sections.h
 generic-y += segment.h
-generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
 generic-y += user.h
diff --git a/arch/mips/include/asm/serial.h b/arch/mips/include/asm/serial.h
new file mode 100644
index 0000000..1d830c6
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 MIPS Tech, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef __ASM__SERIAL_H
+#define __ASM__SERIAL_H
+
+#ifdef CONFIG_MIPS_GENERIC
+/*
+ * Generic kernels cannot know a correct value for all platforms at
+ * compile time. Set it to 0 to prevent 8250_early using it
+ */
+#define BASE_BAUD 0
+#else
+#include <asm-generic/serial.h>
+#endif
+
+#endif /* __ASM__SERIAL_H */
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 11b13c4b43d55f8d6c8b239f478ecb302d4cfd07..f19856d95c60919c92d1679e0037d9339c4c2a65 100644
@@ -324,5 +324,5 @@ notrace time_t __vdso_time(time_t *t)
                *t = result;
        return result;
 }
-int time(time_t *t)
+time_t time(time_t *t)
        __attribute__((weak, alias("__vdso_time")));
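The return type matters here because time() is only a weak alias for __vdso_time(), and __vdso_time() returns time_t; declaring the alias as returning int leaves the two prototypes inconsistent, which newer compilers flag for function aliases.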
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index c0b0e9e8aa66eb645eba71784e80aa93b0f0df79..800104c8a3edfee7f4f52a33b8451a51ee0ed90a 100644
 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
 #define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF             (13*32+ 1) /* Instructions Retired Count */
+#define X86_FEATURE_XSAVEERPTR         (13*32+ 2) /* Always save/restore FP error pointers */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index b20f9d623f9c639fd212c5e313bd5e450747b1eb..8f09012b92e779d7aabf4ad663b8eb10b2379c37 100644
  */
 #define EARLY_IDT_HANDLER_SIZE 9
 
+/*
+ * xen_early_idt_handler_array is for Xen pv guests: for each entry in
+ * early_idt_handler_array it contains a prequel in the form of
+ * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to
+ * max 8 bytes.
+ */
+#define XEN_EARLY_IDT_HANDLER_SIZE 8
+
 #ifndef __ASSEMBLY__
 
 extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 extern void early_ignore_irq(void);
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
+#endif
+
 /*
  * Load a segment. Fall back on loading the zero segment if something goes
  * wrong.  This variant assumes that loading zero fully clears the segment.
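The "max 8 bytes" in the new comment is easy to check: pop %rcx encodes in 1 byte, pop %r11 needs a REX prefix and takes 2 bytes, and a rel32 jmp takes 5 bytes, so each stub is at most 8 bytes. That is where XEN_EARLY_IDT_HANDLER_SIZE comes from, and the xen-asm_64.S hunk further down pads each stub to that stride with 0xcc fill.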
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 509046cfa5ce893357366348468a5c5ff8e86a09..877b5c1a1b1247116e20e7272dbade77e1874fc4 100644
@@ -173,40 +173,43 @@ static inline void cr4_init_shadow(void)
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
 }
 
+static inline void __cr4_set(unsigned long cr4)
+{
+       lockdep_assert_irqs_disabled();
+       this_cpu_write(cpu_tlbstate.cr4, cr4);
+       __write_cr4(cr4);
+}
+
 /* Set in this cpu's CR4. */
 static inline void cr4_set_bits(unsigned long mask)
 {
-       unsigned long cr4;
+       unsigned long cr4, flags;
 
+       local_irq_save(flags);
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       if ((cr4 | mask) != cr4) {
-               cr4 |= mask;
-               this_cpu_write(cpu_tlbstate.cr4, cr4);
-               __write_cr4(cr4);
-       }
+       if ((cr4 | mask) != cr4)
+               __cr4_set(cr4 | mask);
+       local_irq_restore(flags);
 }
 
 /* Clear in this cpu's CR4. */
 static inline void cr4_clear_bits(unsigned long mask)
 {
-       unsigned long cr4;
+       unsigned long cr4, flags;
 
+       local_irq_save(flags);
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       if ((cr4 & ~mask) != cr4) {
-               cr4 &= ~mask;
-               this_cpu_write(cpu_tlbstate.cr4, cr4);
-               __write_cr4(cr4);
-       }
+       if ((cr4 & ~mask) != cr4)
+               __cr4_set(cr4 & ~mask);
+       local_irq_restore(flags);
 }
 
-static inline void cr4_toggle_bits(unsigned long mask)
+static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
 {
        unsigned long cr4;
 
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       cr4 ^= mask;
-       this_cpu_write(cpu_tlbstate.cr4, cr4);
-       __write_cr4(cr4);
+       __cr4_set(cr4 ^ mask);
 }
 
 /* Read the CR4 shadow. */
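The reasoning behind the reshuffle above: the per-CPU cpu_tlbstate.cr4 shadow and the real CR4 register are updated as a read-modify-write pair, so an interrupt landing in between could leave them out of sync. cr4_set_bits() and cr4_clear_bits() therefore disable interrupts around the update themselves, while the rename to cr4_toggle_bits_irqsoff() records that its callers (see the __switch_to_xtra() hunk in process.c below) already run with interrupts off.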
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 6a823a25eaff03787660bd1f92e587362259e54d..750449152b04b4feed3c98ddc772db91f003f818 100644
@@ -542,8 +542,8 @@ error:
 }
 
 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
-void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
-                          struct irq_data *irqd, int ind)
+static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
+                                 struct irq_data *irqd, int ind)
 {
        unsigned int cpu, vector, prev_cpu, prev_vector;
        struct apic_chip_data *apicd;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index d58184b7cd4438144e2d0ac3f4744d19ff4ffb31..bcb75dc97d44075d2eecb3137b91f934072352b0 100644
@@ -804,8 +804,11 @@ static void init_amd(struct cpuinfo_x86 *c)
        case 0x17: init_amd_zn(c); break;
        }
 
-       /* Enable workaround for FXSAVE leak */
-       if (c->x86 >= 6)
+       /*
+        * Enable workaround for FXSAVE leak on CPUs
+        * without a XSaveErPtr feature
+        */
+       if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
                set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 
        cpu_detect_cache_sizes(c);
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index c6daec4bdba5b180e45c5f78019fcba7b2880428..330b8462d426faad0dccdc480f34eec34cd8b92f 100644
@@ -470,6 +470,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
 #define F14H_MPB_MAX_SIZE 1824
 #define F15H_MPB_MAX_SIZE 4096
 #define F16H_MPB_MAX_SIZE 3458
+#define F17H_MPB_MAX_SIZE 3200
 
        switch (family) {
        case 0x14:
@@ -481,6 +482,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
        case 0x16:
                max_size = F16H_MPB_MAX_SIZE;
                break;
+       case 0x17:
+               max_size = F17H_MPB_MAX_SIZE;
+               break;
        default:
                max_size = F1XH_MPB_MAX_SIZE;
                break;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 97fb3e5737f5d0b5d50f8d9232726923c2692e65..bb988a24db927d758f9120d45f90d1c160628790 100644
@@ -299,7 +299,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
        }
 
        if ((tifp ^ tifn) & _TIF_NOTSC)
-               cr4_toggle_bits(X86_CR4_TSD);
+               cr4_toggle_bits_irqsoff(X86_CR4_TSD);
 
        if ((tifp ^ tifn) & _TIF_NOCPUID)
                set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 3d01df7d7cf60cdbe1342fe84006405712394663..05a97d5fe29840e387a66aa00f5041d8c749ad17 100644
@@ -237,7 +237,7 @@ static void notrace start_secondary(void *unused)
        load_cr3(swapper_pg_dir);
        __flush_tlb_all();
 #endif
-
+       load_current_idt();
        cpu_init();
        x86_cpuinit.early_percpu_clock_init();
        preempt_disable();
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 3321b446b66cdb99f16fe145bf9bad50f9d1fd01..9fe656c42aa5b16560e139cebba247ca52756c80 100644
@@ -1,6 +1,7 @@
 #include <linux/extable.h>
 #include <linux/uaccess.h>
 #include <linux/sched/debug.h>
+#include <xen/xen.h>
 
 #include <asm/fpu/internal.h>
 #include <asm/traps.h>
@@ -82,7 +83,7 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup,
 
        return true;
 }
-EXPORT_SYMBOL_GPL(ex_handler_refcount);
+EXPORT_SYMBOL(ex_handler_refcount);
 
 /*
  * Handler for when we fail to restore a task's FPU state.  We should never get
@@ -212,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
         * Old CPUs leave the high bits of CS on the stack
         * undefined.  I'm not sure which CPUs do this, but at least
         * the 486 DX works this way.
+        * Xen pv domains are not using the default __KERNEL_CS.
         */
-       if (regs->cs != __KERNEL_CS)
+       if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
                goto fail;
 
        /*
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 78ca9a8ee4548a270045e81841ef6380ed6d260a..febf6980e6535572f998cf2fa0ee63d296bdc6f1 100644
@@ -701,7 +701,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
        else
                printk(KERN_CONT "paging request");
 
-       printk(KERN_CONT " at %p\n", (void *) address);
+       printk(KERN_CONT " at %px\n", (void *) address);
        printk(KERN_ALERT "IP: %pS\n", (void *)regs->ip);
 
        dump_pagetable(address);
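The %p to %px change above is intentional: as of 4.15, plain %p prints a hashed pointer value, while %px prints the raw address and is reserved for places, like this oops output, where the real address is actually wanted. A throwaway illustration (hypothetical function, not in the patch):

    #include <linux/printk.h>

    static void example_dump_ptr(const void *ptr)
    {
            /* %p is hashed; %px deliberately exposes the real address. */
            pr_alert("hashed: %p  raw: %px\n", ptr, ptr);
    }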
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index bb461cfd01abc78cdc45c6e69f013128e04ccdb4..526536c81ddc41d395fd971d909a3b687e46d989 100644
@@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void)
         * We should get host bridge information from ACPI unless the BIOS
         * doesn't support it.
         */
-       if (acpi_os_get_root_pointer())
+       if (!acpi_disabled && acpi_os_get_root_pointer())
                return 0;
 #endif
 
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index c34bd8233f7c81ddff649e970f79802b9ae7ef27..5f64f30873e257757091b88f8e263711d8db548f 100644
@@ -905,7 +905,7 @@ static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 /*
  * UV NMI handler
  */
-int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
+static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 {
        struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
        int cpu = smp_processor_id();
@@ -1013,7 +1013,7 @@ void uv_nmi_init(void)
 }
 
 /* Setup HUB NMI info */
-void __init uv_nmi_setup_common(bool hubbed)
+static void __init uv_nmi_setup_common(bool hubbed)
 {
        int size = sizeof(void *) * (1 << NODES_SHIFT);
        int cpu;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 84fcfde53f8f3f5bb4b85efc20ab106c419dcc11..5191de14f4df9aea4e452925cda5b9845d8d1bf4 100644
@@ -226,8 +226,20 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        load_idt((const struct desc_ptr *)&ctxt->idt_limit);
 #endif
 
+#ifdef CONFIG_X86_64
        /*
-        * segment registers
+        * We need GSBASE restored before percpu access can work.
+        * percpu access can happen in exception handlers or in complicated
+        * helpers like load_gs_index().
+        */
+       wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+#endif
+
+       fix_processor_context();
+
+       /*
+        * Restore segment registers.  This happens after restoring the GDT
+        * and LDT, which happen in fix_processor_context().
         */
 #ifdef CONFIG_X86_32
        loadsegment(es, ctxt->es);
@@ -248,13 +260,14 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        load_gs_index(ctxt->gs);
        asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
 
+       /*
+        * Restore FSBASE and user GSBASE after reloading the respective
+        * segment selectors.
+        */
        wrmsrl(MSR_FS_BASE, ctxt->fs_base);
-       wrmsrl(MSR_GS_BASE, ctxt->gs_base);
        wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
 #endif
 
-       fix_processor_context();
-
        do_fpu_end();
        tsc_verify_tsc_adjust(true);
        x86_platform.restore_sched_clock_state();
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 5b2b3f3f653112fbe00484f5dcae0de02543df3c..f2414c6c5e7c455b43fc45773fbd1264cf86c24e 100644
@@ -622,7 +622,7 @@ static struct trap_array_entry trap_array[] = {
        { simd_coprocessor_error,      xen_simd_coprocessor_error,      false },
 };
 
-static bool get_trap_addr(void **addr, unsigned int ist)
+static bool __ref get_trap_addr(void **addr, unsigned int ist)
 {
        unsigned int nr;
        bool ist_okay = false;
@@ -644,6 +644,14 @@ static bool get_trap_addr(void **addr, unsigned int ist)
                }
        }
 
+       if (nr == ARRAY_SIZE(trap_array) &&
+           *addr >= (void *)early_idt_handler_array[0] &&
+           *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
+               nr = (*addr - (void *)early_idt_handler_array[0]) /
+                    EARLY_IDT_HANDLER_SIZE;
+               *addr = (void *)xen_early_idt_handler_array[nr];
+       }
+
        if (WARN_ON(ist != 0 && !ist_okay))
                return false;
 
@@ -1262,6 +1270,21 @@ asmlinkage __visible void __init xen_start_kernel(void)
        xen_setup_gdt(0);
 
        xen_init_irq_ops();
+
+       /* Let's presume PV guests always boot on vCPU with id 0. */
+       per_cpu(xen_vcpu_id, 0) = 0;
+
+       /*
+        * Setup xen_vcpu early because idt_setup_early_handler needs it for
+        * local_irq_disable(), irqs_disabled().
+        *
+        * Don't do the full vcpu_info placement stuff until we have
+        * the cpu_possible_mask and a non-dummy shared_info.
+        */
+       xen_vcpu_info_reset(0);
+
+       idt_setup_early_handler();
+
        xen_init_capabilities();
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -1295,18 +1318,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
         */
        acpi_numa = -1;
 #endif
-       /* Let's presume PV guests always boot on vCPU with id 0. */
-       per_cpu(xen_vcpu_id, 0) = 0;
-
-       /*
-        * Setup xen_vcpu early because start_kernel needs it for
-        * local_irq_disable(), irqs_disabled().
-        *
-        * Don't do the full vcpu_info placement stuff until we have
-        * the cpu_possible_mask and a non-dummy shared_info.
-        */
-       xen_vcpu_info_reset(0);
-
        WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
 
        local_irq_disable();
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 8a10c9a9e2b50651b2c8dd322956298402e79e7d..417b339e5c8e1aadedd20231c9be82ac93dbe728 100644
@@ -15,6 +15,7 @@
 
 #include <xen/interface/xen.h>
 
+#include <linux/init.h>
 #include <linux/linkage.h>
 
 .macro xen_pv_trap name
@@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat
 #endif
 xen_pv_trap hypervisor_callback
 
+       __INIT
+ENTRY(xen_early_idt_handler_array)
+       i = 0
+       .rept NUM_EXCEPTION_VECTORS
+       pop %rcx
+       pop %r11
+       jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
+       i = i + 1
+       .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+       .endr
+END(xen_early_idt_handler_array)
+       __FINIT
+
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
  * Xen64 iret frame:
diff --git a/drivers/Makefile b/drivers/Makefile
index 1d034b6804310a1d740b4e8aa40c649922748d77..e06f7f633f73ff285c64e19b89041d17e2be23ce 100644
@@ -105,6 +105,7 @@ obj-$(CONFIG_TC)            += tc/
 obj-$(CONFIG_UWB)              += uwb/
 obj-$(CONFIG_USB_PHY)          += usb/
 obj-$(CONFIG_USB)              += usb/
+obj-$(CONFIG_USB_SUPPORT)      += usb/
 obj-$(CONFIG_PCI)              += usb/
 obj-$(CONFIG_USB_GADGET)       += usb/
 obj-$(CONFIG_OF)               += usb/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a73596a4f804c817bfd277d3ff51eb42ca969bae..bccec9de05330b2fe6822369e5c7a409e8759e95 100644
@@ -1947,6 +1947,26 @@ static void binder_send_failed_reply(struct binder_transaction *t,
        }
 }
 
+/**
+ * binder_cleanup_transaction() - cleans up undelivered transaction
+ * @t:         transaction that needs to be cleaned up
+ * @reason:    reason the transaction wasn't delivered
+ * @error_code:        error to return to caller (if synchronous call)
+ */
+static void binder_cleanup_transaction(struct binder_transaction *t,
+                                      const char *reason,
+                                      uint32_t error_code)
+{
+       if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
+               binder_send_failed_reply(t, error_code);
+       } else {
+               binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+                       "undelivered transaction %d, %s\n",
+                       t->debug_id, reason);
+               binder_free_transaction(t);
+       }
+}
+
 /**
  * binder_validate_object() - checks for a valid metadata object in a buffer.
  * @buffer:    binder_buffer that we're parsing.
@@ -4015,12 +4035,20 @@ retry:
                if (put_user(cmd, (uint32_t __user *)ptr)) {
                        if (t_from)
                                binder_thread_dec_tmpref(t_from);
+
+                       binder_cleanup_transaction(t, "put_user failed",
+                                                  BR_FAILED_REPLY);
+
                        return -EFAULT;
                }
                ptr += sizeof(uint32_t);
                if (copy_to_user(ptr, &tr, sizeof(tr))) {
                        if (t_from)
                                binder_thread_dec_tmpref(t_from);
+
+                       binder_cleanup_transaction(t, "copy_to_user failed",
+                                                  BR_FAILED_REPLY);
+
                        return -EFAULT;
                }
                ptr += sizeof(tr);
@@ -4090,15 +4118,9 @@ static void binder_release_work(struct binder_proc *proc,
                        struct binder_transaction *t;
 
                        t = container_of(w, struct binder_transaction, work);
-                       if (t->buffer->target_node &&
-                           !(t->flags & TF_ONE_WAY)) {
-                               binder_send_failed_reply(t, BR_DEAD_REPLY);
-                       } else {
-                               binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
-                                       "undelivered transaction %d\n",
-                                       t->debug_id);
-                               binder_free_transaction(t);
-                       }
+
+                       binder_cleanup_transaction(t, "process died.",
+                                                  BR_DEAD_REPLY);
                } break;
                case BINDER_WORK_RETURN_ERROR: {
                        struct binder_error *e = container_of(
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 2f6614c9a229ab213f8a140a3e0bcc272dcc1bfd..bdc87907d6a1b297276e9c4acbb89175f69d0d08 100644
@@ -91,22 +91,23 @@ config FIRMWARE_IN_KERNEL
        depends on FW_LOADER
        default y
        help
-         The kernel source tree includes a number of firmware 'blobs'
-         that are used by various drivers. The recommended way to
-         use these is to run "make firmware_install", which, after
-         converting ihex files to binary, copies all of the needed
-         binary files in firmware/ to /lib/firmware/ on your system so
-         that they can be loaded by userspace helpers on request.
+         Various drivers in the kernel source tree may require firmware,
+         which is generally available in your distribution's linux-firmware
+         package.
+
+         The linux-firmware package should install firmware into
+         /lib/firmware/ on your system, so they can be loaded by userspace
+         helpers on request.
 
          Enabling this option will build each required firmware blob
-         into the kernel directly, where request_firmware() will find
-         them without having to call out to userspace. This may be
-         useful if your root file system requires a device that uses
-         such firmware and do not wish to use an initrd.
+         specified by EXTRA_FIRMWARE into the kernel directly, where
+         request_firmware() will find them without having to call out to
+         userspace. This may be useful if your root file system requires a
+         device that uses such firmware and you do not wish to use an
+         initrd.
 
          This single option controls the inclusion of firmware for
-         every driver that uses request_firmware() and ships its
-         firmware in the kernel source tree, which avoids a
+         every driver that uses request_firmware(), which avoids a
          proliferation of 'Include firmware for xxx device' options.
 
          Say 'N' and let firmware be loaded from userspace.
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index cd6ccdcf9df0c5ef2a37fabb2b4b7bbfa3c88755..372d10af26009dfae317affd625c756a2700fef1 100644
@@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->probe)
+       if (isa_driver && isa_driver->probe)
                return isa_driver->probe(dev, to_isa_dev(dev)->id);
 
        return 0;
@@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->remove)
+       if (isa_driver && isa_driver->remove)
                return isa_driver->remove(dev, to_isa_dev(dev)->id);
 
        return 0;
@@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->shutdown)
+       if (isa_driver && isa_driver->shutdown)
                isa_driver->shutdown(dev, to_isa_dev(dev)->id);
 }
 
@@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->suspend)
+       if (isa_driver && isa_driver->suspend)
                return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
 
        return 0;
@@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->resume)
+       if (isa_driver && isa_driver->resume)
                return isa_driver->resume(dev, to_isa_dev(dev)->id);
 
        return 0;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 027d159ac3810e9b5b190345c1bee60dbbc13790..6e89b51ea3d92b40eba2059bfdb47703916454d9 100644
@@ -276,7 +276,8 @@ static int rpm_get_suppliers(struct device *dev)
                        continue;
 
                retval = pm_runtime_get_sync(link->supplier);
-               if (retval < 0) {
+               /* Ignore suppliers with disabled runtime PM. */
+               if (retval < 0 && retval != -EACCES) {
                        pm_runtime_put_noidle(link->supplier);
                        return retval;
                }
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index f70febf680c392b37217ce5e6f8c8c4301234869..557a47829d03f2b14b0c3b664e1044e7e2cb86bc 100644
@@ -109,6 +109,8 @@ struct kobject *efi_kobj;
 /*
  * Let's not leave out systab information that snuck into
  * the efivars driver
+ * Note, do not add more fields in systab sysfs file as it breaks sysfs
+ * one value per file rule!
  */
 static ssize_t systab_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
@@ -143,8 +145,7 @@ static ssize_t systab_show(struct kobject *kobj,
        return str - buf;
 }
 
-static struct kobj_attribute efi_attr_systab =
-                       __ATTR(systab, 0400, systab_show, NULL);
+static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
 
 #define EFI_FIELD(var) efi.var
 
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index bd7ed3c1148a7ccd5032e367e3af0ba66af18d20..c47e0c6ec00f858c0b9960f605974b4f2e4b1294 100644
@@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = {
 };
 
 /* Generic ESRT Entry ("ESRE") support. */
-static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
+static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
 {
        char *str = buf;
 
@@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
        return str - buf;
 }
 
-static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400,
-       esre_fw_class_show, NULL);
+static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
 
 #define esre_attr_decl(name, size, fmt) \
-static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \
+static ssize_t name##_show(struct esre_entry *entry, char *buf) \
 { \
        return sprintf(buf, fmt "\n", \
                       le##size##_to_cpu(entry->esre.esre1->name)); \
 } \
 \
-static struct esre_attribute esre_##name = __ATTR(name, 0400, \
-       esre_##name##_show, NULL)
+static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
 
 esre_attr_decl(fw_type, 32, "%u");
 esre_attr_decl(fw_version, 32, "%u");
@@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
 
 /* support for displaying ESRT fields at the top level */
 #define esrt_attr_decl(name, size, fmt) \
-static ssize_t esrt_##name##_show(struct kobject *kobj, \
+static ssize_t name##_show(struct kobject *kobj, \
                                  struct kobj_attribute *attr, char *buf)\
 { \
        return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
 } \
 \
-static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \
-       esrt_##name##_show, NULL)
+static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
 
 esrt_attr_decl(fw_resource_count, 32, "%u");
 esrt_attr_decl(fw_resource_count_max, 32, "%u");
@@ -431,7 +428,7 @@ err_remove_group:
 err_remove_esrt:
        kobject_put(esrt_kobj);
 err:
-       kfree(esrt);
+       memunmap(esrt);
        esrt = NULL;
        return error;
 }
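The __ATTR_RO_MODE() conversions above (and in runtime-map.c just below) rely on the name##_show naming convention, which is why the show callbacks are renamed in the same hunks. The macro comes from include/linux/sysfs.h, which this merge also touches per the file list; it expands to roughly the following (a sketch, not the authoritative definition):

    #define __ATTR_RO_MODE(_name, _mode) {                                  \
            .attr   = { .name = __stringify(_name),                         \
                        .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },          \
            .show   = _name##_show,                                         \
    }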
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 8e64b77aeac95e43c0e0571694f42bbe6c8ba73f..f377609ff141bca733bf498babc25f9d215aefad 100644
@@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
        return map_attr->show(entry, buf);
 }
 
-static struct map_attribute map_type_attr = __ATTR_RO(type);
-static struct map_attribute map_phys_addr_attr   = __ATTR_RO(phys_addr);
-static struct map_attribute map_virt_addr_attr  = __ATTR_RO(virt_addr);
-static struct map_attribute map_num_pages_attr  = __ATTR_RO(num_pages);
-static struct map_attribute map_attribute_attr  = __ATTR_RO(attribute);
+static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
+static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
+static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
+static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
+static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
 
 /*
  * These are default attributes that are added for every memmap entry.
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 35e553b3b19051b45985991b9b66dc19366fc41e..e4b40f2b46274a0871d1cb881732e8358a324112 100644
@@ -295,38 +295,60 @@ static int vpd_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       return vpd_sections_init(entry.cbmem_addr);
+       vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
+       if (!vpd_kobj)
+               return -ENOMEM;
+
+       ret = vpd_sections_init(entry.cbmem_addr);
+       if (ret) {
+               kobject_put(vpd_kobj);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int vpd_remove(struct platform_device *pdev)
+{
+       vpd_section_destroy(&ro_vpd);
+       vpd_section_destroy(&rw_vpd);
+
+       kobject_put(vpd_kobj);
+
+       return 0;
 }
 
 static struct platform_driver vpd_driver = {
        .probe = vpd_probe,
+       .remove = vpd_remove,
        .driver = {
                .name = "vpd",
        },
 };
 
+static struct platform_device *vpd_pdev;
+
 static int __init vpd_platform_init(void)
 {
-       struct platform_device *pdev;
-
-       pdev = platform_device_register_simple("vpd", -1, NULL, 0);
-       if (IS_ERR(pdev))
-               return PTR_ERR(pdev);
+       int ret;
 
-       vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
-       if (!vpd_kobj)
-               return -ENOMEM;
+       ret = platform_driver_register(&vpd_driver);
+       if (ret)
+               return ret;
 
-       platform_driver_register(&vpd_driver);
+       vpd_pdev = platform_device_register_simple("vpd", -1, NULL, 0);
+       if (IS_ERR(vpd_pdev)) {
+               platform_driver_unregister(&vpd_driver);
+               return PTR_ERR(vpd_pdev);
+       }
 
        return 0;
 }
 
 static void __exit vpd_platform_exit(void)
 {
-       vpd_section_destroy(&ro_vpd);
-       vpd_section_destroy(&rw_vpd);
-       kobject_put(vpd_kobj);
+       platform_device_unregister(vpd_pdev);
+       platform_driver_unregister(&vpd_driver);
 }
 
 module_init(vpd_platform_init);
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 5cfe39f7a45f080f56f36eea6259ec4c1b1df8b6..deb483064f53c3e680d34b655360faac04853c3f 100644
@@ -582,9 +582,10 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev)
 {
        pr_debug("fw_cfg: unloading.\n");
        fw_cfg_sysfs_cache_cleanup();
+       sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+       fw_cfg_io_cleanup();
        fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
        fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
-       fw_cfg_io_cleanup();
        return 0;
 }
 
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 6b535ec858cc35330baa773782dd51bcccd4da24..15a1f4b348c41b2915755dba173d71704a862768 100644
@@ -23,6 +23,7 @@
 struct gen_74x164_chip {
        struct gpio_chip        gpio_chip;
        struct mutex            lock;
+       struct gpio_desc        *gpiod_oe;
        u32                     registers;
        /*
         * Since the registers are chained, every byte sent will make
@@ -31,8 +32,7 @@ struct gen_74x164_chip {
         * register at the end of the transfer. So, to have a logical
         * numbering, store the bytes in reverse order.
         */
-       u8                      buffer[0];
-       struct gpio_desc        *gpiod_oe;
+       u8                      buffer[];
 };
 
 static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
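The field reordering above is not cosmetic: buffer[] is a flexible array member and must be the last field in the struct, so with the old zero-length u8 buffer[0] declared before gpiod_oe, writes into the shift-register buffer landed on top of the gpiod_oe pointer. A hypothetical illustration of the rule (not the driver's real struct):

    struct example_chip {
            struct gpio_desc        *gpiod_oe;      /* fixed-size members first */
            u8                      buffer[];       /* flexible array member last */
    };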
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index f75d8443ecaff631d07e8b474e2bdf769357adb0..e4b3d7db68c95a2d87b9766e54f688fe2dd13f36 100644
@@ -383,7 +383,7 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
        u32 mask;
 
        d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
-       g = (struct davinci_gpio_regs __iomem *)d->regs;
+       g = (struct davinci_gpio_regs __iomem *)d->regs[0];
        mask = __gpio_mask(data->irq - d->base_irq);
 
        if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index babb7bd2ba59b60aa723fa10606c186862a12af3..a0a5f9730aa77b92ea5bc520c22f64b386edb583 100644
@@ -947,7 +947,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
        { .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
        { .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
 
-       { .compatible = "onsemi,pca9654", .data = OF_953X( 8, PCA_INT), },
+       { .compatible = "onnn,pca9654", .data = OF_953X( 8, PCA_INT), },
 
        { .compatible = "exar,xra1202", .data = OF_953X( 8, 0), },
        { }
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 19f0cf37e0ed0748be1c0c5cd8cf482cb3384eec..ba0a092ae085d64e309ec9c5b19a5d80d6372a93 100644
@@ -659,22 +659,28 @@ void vmbus_close(struct vmbus_channel *channel)
                 */
                return;
        }
-       mutex_lock(&vmbus_connection.channel_mutex);
        /*
         * Close all the sub-channels first and then close the
         * primary channel.
         */
        list_for_each_safe(cur, tmp, &channel->sc_list) {
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
-               vmbus_close_internal(cur_channel);
                if (cur_channel->rescind) {
+                       wait_for_completion(&cur_channel->rescind_event);
+                       mutex_lock(&vmbus_connection.channel_mutex);
+                       vmbus_close_internal(cur_channel);
                        hv_process_channel_removal(
                                           cur_channel->offermsg.child_relid);
+               } else {
+                       mutex_lock(&vmbus_connection.channel_mutex);
+                       vmbus_close_internal(cur_channel);
                }
+               mutex_unlock(&vmbus_connection.channel_mutex);
        }
        /*
         * Now close the primary.
         */
+       mutex_lock(&vmbus_connection.channel_mutex);
        vmbus_close_internal(channel);
        mutex_unlock(&vmbus_connection.channel_mutex);
 }
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index ec5454f3f4a698219fe42e1899537c44f8397a45..c21020b69114b18648ff83562aa97ab95b3b65b3 100644
@@ -333,6 +333,7 @@ static struct vmbus_channel *alloc_channel(void)
                return NULL;
 
        spin_lock_init(&channel->lock);
+       init_completion(&channel->rescind_event);
 
        INIT_LIST_HEAD(&channel->sc_list);
        INIT_LIST_HEAD(&channel->percpu_list);
@@ -898,6 +899,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
        /*
         * Now wait for offer handling to complete.
         */
+       vmbus_rescind_cleanup(channel);
        while (READ_ONCE(channel->probe_done) == false) {
                /*
                 * We wait here until any channel offer is currently
@@ -913,7 +915,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
        if (channel->device_obj) {
                if (channel->chn_rescind_callback) {
                        channel->chn_rescind_callback(channel);
-                       vmbus_rescind_cleanup(channel);
                        return;
                }
                /*
@@ -922,7 +923,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                 */
                dev = get_device(&channel->device_obj->device);
                if (dev) {
-                       vmbus_rescind_cleanup(channel);
                        vmbus_device_unregister(channel->device_obj);
                        put_device(dev);
                }
@@ -936,13 +936,14 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                 * 2. Then close the primary channel.
                 */
                mutex_lock(&vmbus_connection.channel_mutex);
-               vmbus_rescind_cleanup(channel);
                if (channel->state == CHANNEL_OPEN_STATE) {
                        /*
                         * The channel is currently not open;
                         * it is safe for us to cleanup the channel.
                         */
                        hv_process_channel_removal(rescind->child_relid);
+               } else {
+                       complete(&channel->rescind_event);
                }
                mutex_unlock(&vmbus_connection.channel_mutex);
        }
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index 3576ec73ec232a9b33538377bf9a924e743e26bf..9ad60421d360539db31636b65baf42fe4bf67824 100644
@@ -1011,7 +1011,7 @@ static int cpcap_adc_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, indio_dev);
 
        ddata->irq = platform_get_irq_byname(pdev, "adcdone");
-       if (!ddata->irq)
+       if (ddata->irq < 0)
                return -ENODEV;
 
        error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL,
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 9c6932ffc0afdbc66b3dd1cb7032adf84fb8720e..36047147ce7c727003a0f08df34880302ebd2e08 100644
@@ -221,8 +221,10 @@ enum meson_sar_adc_chan7_mux_sel {
 
 struct meson_sar_adc_data {
        bool                                    has_bl30_integration;
+       u32                                     bandgap_reg;
        unsigned int                            resolution;
        const char                              *name;
+       const struct regmap_config              *regmap_config;
 };
 
 struct meson_sar_adc_priv {
@@ -242,13 +244,20 @@ struct meson_sar_adc_priv {
        int                                     calibscale;
 };
 
-static const struct regmap_config meson_sar_adc_regmap_config = {
+static const struct regmap_config meson_sar_adc_regmap_config_gxbb = {
        .reg_bits = 8,
        .val_bits = 32,
        .reg_stride = 4,
        .max_register = MESON_SAR_ADC_REG13,
 };
 
+static const struct regmap_config meson_sar_adc_regmap_config_meson8 = {
+       .reg_bits = 8,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .max_register = MESON_SAR_ADC_DELTA_10,
+};
+
 static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev)
 {
        struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
@@ -600,7 +609,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
        init.num_parents = 1;
 
        priv->clk_gate.reg = base + MESON_SAR_ADC_REG3;
-       priv->clk_gate.bit_idx = fls(MESON_SAR_ADC_REG3_CLK_EN);
+       priv->clk_gate.bit_idx = __ffs(MESON_SAR_ADC_REG3_CLK_EN);
        priv->clk_gate.hw.init = &init;
 
        priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw);
@@ -685,6 +694,20 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
        return 0;
 }
 
+static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off)
+{
+       struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+       u32 enable_mask;
+
+       if (priv->data->bandgap_reg == MESON_SAR_ADC_REG11)
+               enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN;
+       else
+               enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN;
+
+       regmap_update_bits(priv->regmap, priv->data->bandgap_reg, enable_mask,
+                          on_off ? enable_mask : 0);
+}
+
 static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
 {
        struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
@@ -717,9 +740,9 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
        regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1);
        regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
                           MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
-       regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
-                          MESON_SAR_ADC_REG11_BANDGAP_EN,
-                          MESON_SAR_ADC_REG11_BANDGAP_EN);
+
+       meson_sar_adc_set_bandgap(indio_dev, true);
+
        regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
                           MESON_SAR_ADC_REG3_ADC_EN,
                           MESON_SAR_ADC_REG3_ADC_EN);
@@ -739,8 +762,7 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
 err_adc_clk:
        regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
                           MESON_SAR_ADC_REG3_ADC_EN, 0);
-       regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
-                          MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
+       meson_sar_adc_set_bandgap(indio_dev, false);
        clk_disable_unprepare(priv->sana_clk);
 err_sana_clk:
        clk_disable_unprepare(priv->core_clk);
@@ -765,8 +787,8 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev)
 
        regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
                           MESON_SAR_ADC_REG3_ADC_EN, 0);
-       regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
-                          MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
+
+       meson_sar_adc_set_bandgap(indio_dev, false);
 
        clk_disable_unprepare(priv->sana_clk);
        clk_disable_unprepare(priv->core_clk);
@@ -844,30 +866,40 @@ static const struct iio_info meson_sar_adc_iio_info = {
 
 static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
        .has_bl30_integration = false,
+       .bandgap_reg = MESON_SAR_ADC_DELTA_10,
+       .regmap_config = &meson_sar_adc_regmap_config_meson8,
        .resolution = 10,
        .name = "meson-meson8-saradc",
 };
 
 static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
        .has_bl30_integration = false,
+       .bandgap_reg = MESON_SAR_ADC_DELTA_10,
+       .regmap_config = &meson_sar_adc_regmap_config_meson8,
        .resolution = 10,
        .name = "meson-meson8b-saradc",
 };
 
 static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
        .has_bl30_integration = true,
+       .bandgap_reg = MESON_SAR_ADC_REG11,
+       .regmap_config = &meson_sar_adc_regmap_config_gxbb,
        .resolution = 10,
        .name = "meson-gxbb-saradc",
 };
 
 static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
        .has_bl30_integration = true,
+       .bandgap_reg = MESON_SAR_ADC_REG11,
+       .regmap_config = &meson_sar_adc_regmap_config_gxbb,
        .resolution = 12,
        .name = "meson-gxl-saradc",
 };
 
 static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
        .has_bl30_integration = true,
+       .bandgap_reg = MESON_SAR_ADC_REG11,
+       .regmap_config = &meson_sar_adc_regmap_config_gxbb,
        .resolution = 12,
        .name = "meson-gxm-saradc",
 };
@@ -945,7 +977,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
                return ret;
 
        priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
-                                            &meson_sar_adc_regmap_config);
+                                            priv->data->regmap_config);
        if (IS_ERR(priv->regmap))
                return PTR_ERR(priv->regmap);
 
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 203ffb9cad6a2a3a98ac1eaf71951039303099ea..147a8c14235f3f8b39302c8f78beec811f6871dd 100644
@@ -371,7 +371,7 @@ static int max30102_read_raw(struct iio_dev *indio_dev,
                mutex_unlock(&indio_dev->mlock);
                break;
        case IIO_CHAN_INFO_SCALE:
-               *val = 1;  /* 0.0625 */
+               *val = 1000;  /* 62.5 */
                *val2 = 16;
                ret = IIO_VAL_FRACTIONAL;
                break;
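For IIO_VAL_FRACTIONAL the reported scale is val divided by val2, so the old pair (1, 16) read back as 1/16 = 0.0625, while the corrected (1000, 16) yields 1000/16 = 62.5, matching the updated comment.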
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 9c4cfd19b7398677a8b1ac3c0fee5985160880e0..2f0998ebeed214dc0f062664595e1d4362d27e3e 100644
@@ -631,7 +631,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
  * iio_format_value() - Formats a IIO value into its string representation
  * @buf:       The buffer to which the formatted value gets written
  *             which is assumed to be big enough (i.e. PAGE_SIZE).
- * @type:      One of the IIO_VAL_... constants. This decides how the val
+ * @type:      One of the IIO_VAL_* constants. This decides how the val
  *             and val2 parameters are formatted.
  * @size:      Number of IIO value entries contained in vals
  * @vals:      Pointer to the values, exact meaning depends on the
@@ -639,7 +639,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
  *
  * Return: 0 by default, a negative number on failure or the
  *        total number of characters written for a type that belongs
- *        to the IIO_VAL_... constant.
+ *        to the IIO_VAL_* constant.
  */
 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
 {
index 53c5d653e7809560ea7393f249e4d15a7e22bce0..df23dbcc030aea5b2d6e0ddde7237b4d906bf365 100644 (file)
@@ -869,6 +869,7 @@ static int sx9500_init_device(struct iio_dev *indio_dev)
 static void sx9500_gpio_probe(struct i2c_client *client,
                              struct sx9500_data *data)
 {
+       struct gpio_desc *gpiod_int;
        struct device *dev;
 
        if (!client)
@@ -876,6 +877,14 @@ static void sx9500_gpio_probe(struct i2c_client *client,
 
        dev = &client->dev;
 
+       if (client->irq <= 0) {
+               gpiod_int = devm_gpiod_get(dev, SX9500_GPIO_INT, GPIOD_IN);
+               if (IS_ERR(gpiod_int))
+                       dev_err(dev, "gpio get irq failed\n");
+               else
+                       client->irq = gpiod_to_irq(gpiod_int);
+       }
+
        data->gpiod_rst = devm_gpiod_get(dev, SX9500_GPIO_RESET, GPIOD_OUT_HIGH);
        if (IS_ERR(data->gpiod_rst)) {
                dev_warn(dev, "gpio get reset pin failed\n");
index 98ac46ed7214f574fbe13d5f617b9f2b0836bc40..cbf186522016f97f3024ac84bdac4330ddf1a26f 100644 (file)
@@ -1,6 +1,6 @@
 menuconfig INFINIBAND
        tristate "InfiniBand support"
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && HAS_DMA
        depends on NET
        depends on INET
        depends on m || IPV6 != m
index 1fdb473b5df7be38c09ecd49121d516b4893ef13..f6983357145de1adeedd2d8a1bb0401a848b34c0 100644 (file)
@@ -801,6 +801,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net,
        INIT_LIST_HEAD(&id_priv->mc_list);
        get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
        id_priv->id.route.addr.dev_addr.net = get_net(net);
+       id_priv->seq_num &= 0x00ffffff;
 
        return &id_priv->id;
 }
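rdma_create_id() now masks the random sequence number down to its low 24 bits, presumably because only 24 bits are meaningful to the CM protocol. The effect of the & 0x00ffffff in isolation:

#include <stdio.h>

int main(void)
{
        unsigned int seq_num = 0xdeadbeef;  /* pretend get_random_bytes() result */

        seq_num &= 0x00ffffff;              /* keep only the low 24 bits */
        printf("0x%08x\n", seq_num);        /* prints 0x00adbeef */
        return 0;
}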
index 84fc32a2c8b3e8fc4dfed5be20cbea0921ee737e..5e1be4949d5fa8d4cfa4d802fa34a5ff71d4701b 100644 (file)
@@ -1253,5 +1253,5 @@ static void __exit ib_core_cleanup(void)
 
 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
 
-module_init(ib_core_init);
+subsys_initcall(ib_core_init);
 module_exit(ib_core_cleanup);
index 23278ed5be4517fd42d8bba6eb07ccd485e5f7f6..a337386652b07381ec1f858ec1279fcba7128a8e 100644 (file)
@@ -417,8 +417,17 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec)
 
 int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
 {
+       u8 i = rdma_start_port(dev);
+       bool is_ib = false;
        int ret;
 
+       while (i <= rdma_end_port(dev) && !is_ib)
+               is_ib = rdma_protocol_ib(dev, i++);
+
+       /* If this isn't an IB device don't create the security context */
+       if (!is_ib)
+               return 0;
+
        qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
        if (!qp->qp_sec)
                return -ENOMEM;
@@ -441,6 +450,10 @@ EXPORT_SYMBOL(ib_create_qp_security);
 
 void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
 {
+       /* Return if not IB */
+       if (!sec)
+               return;
+
        mutex_lock(&sec->mutex);
 
        /* Remove the QP from the lists so it won't get added to
@@ -470,6 +483,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
        int ret;
        int i;
 
+       /* Return if not IB */
+       if (!sec)
+               return;
+
        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete.
@@ -505,6 +522,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
 {
        int i;
 
+       /* Return if not IB */
+       if (!sec)
+               return;
+
        /* If a concurrent cache update is occurring we must
         * wait until this QP security structure is processed
         * in the QP to error flow before destroying it because
@@ -557,7 +578,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
 {
        int ret = 0;
        struct ib_ports_pkeys *tmp_pps;
-       struct ib_ports_pkeys *new_pps;
+       struct ib_ports_pkeys *new_pps = NULL;
        struct ib_qp *real_qp = qp->real_qp;
        bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
                           real_qp->qp_type == IB_QPT_GSI ||
@@ -565,18 +586,27 @@ int ib_security_modify_qp(struct ib_qp *qp,
        bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
                           (qp_attr_mask & IB_QP_ALT_PATH));
 
+       WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
+                  rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
+                  !real_qp->qp_sec),
+                  "%s: QP security is not initialized for IB QP: %d\n",
+                  __func__, real_qp->qp_num);
+
        /* The port/pkey settings are maintained only for the real QP. Open
         * handles on the real QP will be in the shared_qp_list. When
         * enforcing security on the real QP all the shared QPs will be
         * checked as well.
         */
 
-       if (pps_change && !special_qp) {
+       if (pps_change && !special_qp && real_qp->qp_sec) {
                mutex_lock(&real_qp->qp_sec->mutex);
                new_pps = get_new_pps(real_qp,
                                      qp_attr,
                                      qp_attr_mask);
-
+               if (!new_pps) {
+                       mutex_unlock(&real_qp->qp_sec->mutex);
+                       return -ENOMEM;
+               }
                /* Add this QP to the lists for the new port
                 * and pkey settings before checking for permission
                 * in case there is a concurrent cache update
@@ -600,7 +630,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
                                                 qp_attr_mask,
                                                 udata);
 
-       if (pps_change && !special_qp) {
+       if (new_pps) {
                /* Clean up the lists and free the appropriate
                 * ports_pkeys structure.
                 */
@@ -631,6 +661,9 @@ int ib_security_pkey_access(struct ib_device *dev,
        u16 pkey;
        int ret;
 
+       if (!rdma_protocol_ib(dev, port_num))
+               return 0;
+
        ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
        if (ret)
                return ret;
@@ -665,6 +698,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 {
        int ret;
 
+       if (!rdma_protocol_ib(agent->device, agent->port_num))
+               return 0;
+
        ret = security_ib_alloc_security(&agent->security);
        if (ret)
                return ret;
@@ -690,6 +726,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 
 void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
 {
+       if (!rdma_protocol_ib(agent->device, agent->port_num))
+               return;
+
        security_ib_free_security(agent->security);
        if (agent->lsm_nb_reg)
                unregister_lsm_notifier(&agent->lsm_nb);
@@ -697,6 +736,9 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
 
 int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
 {
+       if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
+               return 0;
+
        if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
                return -EACCES;
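All of the security.c hunks follow one pattern: scan the device's ports once, and if none of them is an IB port, skip the QP/MAD security machinery and tolerate a NULL qp_sec later. A small user-space model of that scan loop, with start_port()/end_port()/protocol_ib() as hypothetical stand-ins for the RDMA core helpers:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for rdma_start_port()/rdma_end_port()/rdma_protocol_ib(). */
static int start_port(void) { return 1; }
static int end_port(void)   { return 2; }
static bool protocol_ib(int port) { return port == 2; }

int main(void)
{
        int i = start_port();
        bool is_ib = false;

        /* Same loop shape as ib_create_qp_security(): stop as soon as one
         * IB port is found. */
        while (i <= end_port() && !is_ib)
                is_ib = protocol_ib(i++);

        puts(is_ib ? "create QP security context"
                   : "skip security setup (not an IB device)");
        return 0;
}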
 
index fd01a760259fa1887d2f233a2fcbb3ee581e6f5f..af5f7936f7e5ed9eac26598a76fa31e5f5155046 100644 (file)
@@ -814,7 +814,7 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        struct hfi1_16b_header *hdr = &opa_hdr->opah;
        struct ib_other_headers *ohdr;
-       u32 bth0, bth1;
+       u32 bth0, bth1 = 0;
        u16 len, pkey;
        u8 becn = !!is_fecn;
        u8 l4 = OPA_16B_L4_IB_LOCAL;
index 3e4c5253ab5c23d5cd9d7b8789f38fef205b02f2..a40ec939ece58236cfcbbf285fb2a389b23ccadc 100644 (file)
@@ -162,14 +162,10 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 {
        int i;
        struct device *dev = hr_dev->dev;
-       u32 bits_per_long = BITS_PER_LONG;
 
        if (buf->nbufs == 1) {
                dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
        } else {
-               if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
-                       vunmap(buf->direct.buf);
-
                for (i = 0; i < buf->nbufs; ++i)
                        if (buf->page_list[i].buf)
                                dma_free_coherent(dev, 1 << buf->page_shift,
@@ -185,9 +181,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 {
        int i = 0;
        dma_addr_t t;
-       struct page **pages;
        struct device *dev = hr_dev->dev;
-       u32 bits_per_long = BITS_PER_LONG;
        u32 page_size = 1 << page_shift;
        u32 order;
 
@@ -236,23 +230,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
                        buf->page_list[i].map = t;
                        memset(buf->page_list[i].buf, 0, page_size);
                }
-               if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
-                       pages = kmalloc_array(buf->nbufs, sizeof(*pages),
-                                             GFP_KERNEL);
-                       if (!pages)
-                               goto err_free;
-
-                       for (i = 0; i < buf->nbufs; ++i)
-                               pages[i] = virt_to_page(buf->page_list[i].buf);
-
-                       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
-                                              PAGE_KERNEL);
-                       kfree(pages);
-                       if (!buf->direct.buf)
-                               goto err_free;
-               } else {
-                       buf->direct.buf = NULL;
-               }
        }
 
        return 0;
index 01d3d695cbba1f2926929f0b59b47691cb08dd18..b154ce40cded846676feba3cabfdbd5240ad0b5a 100644 (file)
@@ -726,11 +726,9 @@ static inline struct hns_roce_qp
 
 static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
 {
-       u32 bits_per_long_val = BITS_PER_LONG;
        u32 page_size = 1 << buf->page_shift;
 
-       if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) ||
-           buf->nbufs == 1)
+       if (buf->nbufs == 1)
                return (char *)(buf->direct.buf) + offset;
        else
                return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
index 8b733a66fae5f27da8c3b183ba8e4b0ad60d018d..0eeabfbee192efed31c46d948ee6db264d1085fd 100644 (file)
@@ -224,6 +224,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
                        sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg = 0;
+                       memset(chunk->buf, 0, sizeof(chunk->buf));
                        list_add_tail(&chunk->list, &hem->chunk_list);
                }
 
@@ -240,8 +241,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
                if (!buf)
                        goto fail;
 
-               sg_set_buf(mem, buf, PAGE_SIZE << order);
-               WARN_ON(mem->offset);
+               chunk->buf[chunk->npages] = buf;
                sg_dma_len(mem) = PAGE_SIZE << order;
 
                ++chunk->npages;
@@ -267,8 +267,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
        list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i)
                        dma_free_coherent(hr_dev->dev,
-                                  chunk->mem[i].length,
-                                  lowmem_page_address(sg_page(&chunk->mem[i])),
+                                  sg_dma_len(&chunk->mem[i]),
+                                  chunk->buf[i],
                                   sg_dma_address(&chunk->mem[i]));
                kfree(chunk);
        }
@@ -722,11 +722,12 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
        struct hns_roce_hem_chunk *chunk;
        struct hns_roce_hem_mhop mhop;
        struct hns_roce_hem *hem;
-       struct page *page = NULL;
+       void *addr = NULL;
        unsigned long mhop_obj = obj;
        unsigned long obj_per_chunk;
        unsigned long idx_offset;
        int offset, dma_offset;
+       int length;
        int i, j;
        u32 hem_idx = 0;
 
@@ -763,25 +764,25 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 
        list_for_each_entry(chunk, &hem->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
+                       length = sg_dma_len(&chunk->mem[i]);
                        if (dma_handle && dma_offset >= 0) {
-                               if (sg_dma_len(&chunk->mem[i]) >
-                                   (u32)dma_offset)
+                               if (length > (u32)dma_offset)
                                        *dma_handle = sg_dma_address(
                                                &chunk->mem[i]) + dma_offset;
-                               dma_offset -= sg_dma_len(&chunk->mem[i]);
+                               dma_offset -= length;
                        }
 
-                       if (chunk->mem[i].length > (u32)offset) {
-                               page = sg_page(&chunk->mem[i]);
+                       if (length > (u32)offset) {
+                               addr = chunk->buf[i] + offset;
                                goto out;
                        }
-                       offset -= chunk->mem[i].length;
+                       offset -= length;
                }
        }
 
 out:
        mutex_unlock(&table->mutex);
-       return page ? lowmem_page_address(page) + offset : NULL;
+       return addr;
 }
 EXPORT_SYMBOL_GPL(hns_roce_table_find);
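hns_roce_table_find() now returns chunk->buf[i] + offset directly instead of going through lowmem_page_address(), but the offset bookkeeping is unchanged: subtract each buffer's DMA length until the remainder lands inside one of them. A stand-alone illustration of that walk with made-up buffer lengths:

#include <stdio.h>

int main(void)
{
        /* Illustrative per-buffer lengths and a target offset. */
        unsigned int length[] = { 4096, 8192, 4096 };
        int offset = 9000;

        for (unsigned int i = 0; i < sizeof(length) / sizeof(length[0]); i++) {
                if (length[i] > (unsigned int)offset) {
                        /* In the driver: addr = chunk->buf[i] + offset */
                        printf("hit buffer %u at local offset %d\n", i, offset);
                        return 0;
                }
                offset -= length[i];
        }
        printf("offset is past the end of the table\n");
        return 0;
}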
 
index db66db12075e2b42151fbc24d148615e0841b8f9..e8850d59e7804caa45dd5e2cd77b140c7bfd7047 100644 (file)
@@ -78,6 +78,7 @@ struct hns_roce_hem_chunk {
        int                      npages;
        int                      nsg;
        struct scatterlist       mem[HNS_ROCE_HEM_CHUNK_LEN];
+       void                     *buf[HNS_ROCE_HEM_CHUNK_LEN];
 };
 
 struct hns_roce_hem {
index 8f719c00467b833e15a507e522ac05c7857c5940..8e18445714a96db307d6e4cdda09117333c567ae 100644 (file)
@@ -1126,9 +1126,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 {
        struct hns_roce_v2_mpt_entry *mpt_entry;
        struct scatterlist *sg;
+       u64 page_addr;
        u64 *pages;
+       int i, j;
+       int len;
        int entry;
-       int i;
 
        mpt_entry = mb_buf;
        memset(mpt_entry, 0, sizeof(*mpt_entry));
@@ -1186,14 +1188,20 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 
        i = 0;
        for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
-               pages[i] = ((u64)sg_dma_address(sg)) >> 6;
-
-               /* Record the first 2 entry directly to MTPT table */
-               if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
-                       break;
-               i++;
+               len = sg_dma_len(sg) >> PAGE_SHIFT;
+               for (j = 0; j < len; ++j) {
+                       page_addr = sg_dma_address(sg) +
+                                   (j << mr->umem->page_shift);
+                       pages[i] = page_addr >> 6;
+
+                       /* Record the first 2 entry directly to MTPT table */
+                       if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+                               goto found;
+                       i++;
+               }
        }
 
+found:
        mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
        roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
                       V2_MPT_BYTE_56_PA0_H_S,
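The rewritten MTPT loop records one page address per page rather than one per scatterlist element, since a DMA segment may span several pages. A user-space sketch of that inner iteration with made-up segment addresses, a 4 KiB page and a stand-in for HNS_ROCE_V2_MAX_INNER_MTPT_NUM:

#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_MAX_INNER  2     /* stands in for HNS_ROCE_V2_MAX_INNER_MTPT_NUM */

int main(void)
{
        /* Two illustrative DMA segments: 16 KiB and 8 KiB. */
        unsigned long long seg_addr[] = { 0x10000, 0x40000 };
        unsigned int seg_len[]        = { 16384, 8192 };
        unsigned long long pages[SKETCH_MAX_INNER];
        int i = 0;

        for (int s = 0; s < 2; s++) {
                int npages = seg_len[s] >> SKETCH_PAGE_SHIFT;

                for (int j = 0; j < npages; j++) {
                        unsigned long long addr = seg_addr[s] +
                                ((unsigned long long)j << SKETCH_PAGE_SHIFT);

                        pages[i] = addr >> 6;   /* same >> 6 as the driver */
                        if (i >= SKETCH_MAX_INNER - 1)
                                goto found;
                        i++;
                }
        }
found:
        for (int j = 0; j <= i; j++)
                printf("pages[%d] = 0x%llx\n", j, pages[j]);
        return 0;
}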
index 493d6ef3d2d57e4f1e020fd680ab4fc009f8cb7e..77870f9e173684d91f99a5a0627d0490b14342e9 100644 (file)
@@ -1043,7 +1043,7 @@ negotiate_done:
  * i40iw_schedule_cm_timer
  * @@cm_node: connection's node
  * @sqbuf: buffer to send
- * @type: if it es send ot close
+ * @type: if it is send or close
  * @send_retrans: if rexmits to be done
  * @close_when_complete: is cm_node to be removed
  *
@@ -1067,7 +1067,8 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
 
        new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
        if (!new_send) {
-               i40iw_free_sqbuf(vsi, (void *)sqbuf);
+               if (type != I40IW_TIMER_TYPE_CLOSE)
+                       i40iw_free_sqbuf(vsi, (void *)sqbuf);
                return -ENOMEM;
        }
        new_send->retrycount = I40IW_DEFAULT_RETRYS;
@@ -1082,7 +1083,6 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
                new_send->timetosend += (HZ / 10);
                if (cm_node->close_entry) {
                        kfree(new_send);
-                       i40iw_free_sqbuf(vsi, (void *)sqbuf);
                        i40iw_pr_err("already close entry\n");
                        return -EINVAL;
                }
@@ -2947,8 +2947,6 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
                        loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
                        cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
                        loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
-                       loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
-                       i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
                }
                return cm_node;
        }
@@ -3689,11 +3687,16 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        cm_id->add_ref(cm_id);
        i40iw_add_ref(&iwqp->ibqp);
 
-       i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
-
        attr.qp_state = IB_QPS_RTS;
        cm_node->qhash_set = false;
        i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+       cm_node->accelerated = 1;
+       status =
+               i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+       if (status)
+               i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - ESTABLISHED\n");
+
        if (cm_node->loopbackpartner) {
                cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
 
@@ -3704,7 +3707,6 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
        }
 
-       cm_node->accelerated = 1;
        if (cm_node->accept_pend) {
                atomic_dec(&cm_node->listener->pend_accepts_cnt);
                cm_node->accept_pend = 0;
@@ -3864,6 +3866,12 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                        goto err;
        }
 
+       if (cm_node->loopbackpartner) {
+               cm_node->loopbackpartner->state = I40IW_CM_STATE_MPAREQ_RCVD;
+               i40iw_create_event(cm_node->loopbackpartner,
+                                  I40IW_CM_EVENT_MPA_REQ);
+       }
+
        i40iw_debug(cm_node->dev,
                    I40IW_DEBUG_CM,
                    "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
@@ -4044,9 +4052,6 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
        dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
        if (iwqp->page)
                kunmap(iwqp->page);
-       status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
-       if (status)
-               i40iw_pr_err("send cm event\n");
 
        memset(&attr, 0, sizeof(attr));
        attr.qp_state = IB_QPS_RTS;
@@ -4054,6 +4059,10 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
        i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
 
        cm_node->accelerated = 1;
+       status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+                                    0);
+       if (status)
+               i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - CONNECT_REPLY\n");
 
        return;
 
index d88c6cf47cf275565ea98aba66f9638387db702c..da9821a10e0dfe8e3cb2fcede8f34b92f8a8f41e 100644 (file)
@@ -513,7 +513,7 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
 
        ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
                                          &cqp->sdbuf,
-                                         128,
+                                         I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
                                          I40IW_SD_BUF_ALIGNMENT);
 
        if (ret_code)
@@ -596,14 +596,15 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
 }
 
 /**
- * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
- * @cqp: struct for cqp hw
- * @wqe_idx: we index of cqp ring
+ * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
+ * @cqp: pointer to CQP structure
+ * @scratch: private data for CQP WQE
+ * @wqe_idx: WQE index for next WQE on CQP SQ
  */
-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
+                                              u64 scratch, u32 *wqe_idx)
 {
        u64 *wqe = NULL;
-       u32     wqe_idx;
        enum i40iw_status_code ret_code;
 
        if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
@@ -616,20 +617,32 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
                            cqp->sq_ring.size);
                return NULL;
        }
-       I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+       I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
        cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
        if (ret_code)
                return NULL;
-       if (!wqe_idx)
+       if (!*wqe_idx)
                cqp->polarity = !cqp->polarity;
 
-       wqe = cqp->sq_base[wqe_idx].elem;
-       cqp->scratch_array[wqe_idx] = scratch;
+       wqe = cqp->sq_base[*wqe_idx].elem;
+       cqp->scratch_array[*wqe_idx] = scratch;
        I40IW_CQP_INIT_WQE(wqe);
 
        return wqe;
 }
 
+/**
+ * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: private data for CQP WQE
+ */
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+{
+       u32 wqe_idx;
+
+       return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+}
+
 /**
  * i40iw_sc_cqp_destroy - destroy cqp during close
  * @cqp: struct for cqp hw
@@ -3587,8 +3600,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
        u64 *wqe;
        int mem_entries, wqe_entries;
        struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
+       u64 offset;
+       u32 wqe_idx;
 
-       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
 
@@ -3601,8 +3616,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
                 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
 
        if (mem_entries) {
-               memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
-               data = sdbuf->pa;
+               offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
+               memcpy((char *)sdbuf->va + offset, &info->entry[3],
+                      mem_entries << 4);
+               data = (u64)sdbuf->pa + offset;
        } else {
                data = 0;
        }
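The CQP fix sizes the SD scratch buffer as I40IW_UPDATE_SD_BUF_SIZE bytes per SQ entry and indexes it by the WQE slot, so outstanding SD updates no longer overwrite one shared 128-byte area. The offset arithmetic in isolation, with an illustrative queue depth:

#include <stdio.h>

#define UPDATE_SD_BUF_SIZE 128  /* mirrors I40IW_UPDATE_SD_BUF_SIZE */

int main(void)
{
        unsigned int sq_size = 4;       /* illustrative CQP SQ depth */

        for (unsigned int wqe_idx = 0; wqe_idx < sq_size; wqe_idx++)
                printf("WQE %u -> sdbuf offset %u\n",
                       wqe_idx, wqe_idx * UPDATE_SD_BUF_SIZE);
        return 0;
}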
index 65ec39e3746b42fe351e3c8efaf1fc4e9960f3fa..029083cb81d53d1809c3ea0967fa95a2230d3345 100644 (file)
 #define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
 
 #define I40IWQPC_ARPIDX_SHIFT 48
-#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT)
+#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT)
 
 #define I40IWQPC_FLOWLABEL_SHIFT 0
 #define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)
@@ -1526,7 +1526,7 @@ enum i40iw_alignment {
        I40IW_AEQ_ALIGNMENT =           0x100,
        I40IW_CEQ_ALIGNMENT =           0x100,
        I40IW_CQ0_ALIGNMENT =           0x100,
-       I40IW_SD_BUF_ALIGNMENT =        0x100
+       I40IW_SD_BUF_ALIGNMENT =        0x80
 };
 
 #define I40IW_WQE_SIZE_64      64
@@ -1534,6 +1534,8 @@ enum i40iw_alignment {
 #define I40IW_QP_WQE_MIN_SIZE  32
 #define I40IW_QP_WQE_MAX_SIZE  128
 
+#define I40IW_UPDATE_SD_BUF_SIZE 128
+
 #define I40IW_CQE_QTYPE_RQ 0
 #define I40IW_CQE_QTYPE_SQ 1
 
index a0babdbf71460dda5dc156bfe7b7b9313e0da679..4a2de34895ec3177eb07082afe46a8921fa9f958 100644 (file)
@@ -2250,10 +2250,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                uint64_t tmp;
 
                if (!sg_res) {
+                       unsigned int pgoff = sg->offset & ~PAGE_MASK;
+
                        sg_res = aligned_nrpages(sg->offset, sg->length);
-                       sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+                       sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
                        sg->dma_length = sg->length;
-                       pteval = page_to_phys(sg_page(sg)) | prot;
+                       pteval = (sg_phys(sg) - pgoff) | prot;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }
 
@@ -3787,7 +3789,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
        for_each_sg(sglist, sg, nelems, i) {
                BUG_ON(!sg_page(sg));
-               sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+               sg->dma_address = sg_phys(sg);
                sg->dma_length = sg->length;
        }
        return nelems;
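The intel-iommu change copes with scatterlist entries whose offset exceeds a page: sg_phys() already includes sg->offset, so the PTE base is brought back to a page boundary by subtracting only the within-page part. The arithmetic, with made-up numbers:

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL
#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))

int main(void)
{
        unsigned long page_phys = 0x100000;  /* physical address of sg_page() */
        unsigned long sg_offset = 5000;      /* sg->offset, larger than a page */

        unsigned long phys  = page_phys + sg_offset;         /* what sg_phys() yields */
        unsigned long pgoff = sg_offset & ~SKETCH_PAGE_MASK; /* offset within its page */

        /* The PTE must point at a page boundary; the DMA address keeps pgoff. */
        printf("pte base 0x%lx, in-page offset %lu\n", phys - pgoff, pgoff);
        return 0;
}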
index a13a4896a8bddad19ae48f8c58bbaf2f3c8dce84..0626dcfd1f3d83ceaad91968cffd65370189cfac 100644 (file)
  * Below is some version info we got:
  *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
  *                                Filter? connected?  Passive detection  ception in MB
- *   MX25  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX25  FlexCAN2  03.00.00.00     no        no        no       no        no
  *   MX28  FlexCAN2  03.00.04.00    yes       yes        no       no        no
- *   MX35  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX35  FlexCAN2  03.00.00.00     no        no        no       no        no
  *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no
  *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes
- *   VF610 FlexCAN3  ?               no       yes         ?      yes       yes?
+ *   VF610 FlexCAN3  ?               no       yes        no      yes       yes?
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
@@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
 
 static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
        .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
-               FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+               FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
+               FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
 static const struct can_bittiming_const flexcan_bittiming_const = {
index b4efd711f824ccd1c832af8817e09bf2e00b2b5c..788c3464a3b0e95aaa101591750b9de493a34a18 100644 (file)
@@ -825,7 +825,10 @@ err_release_regions:
 err_disable_pci:
        pci_disable_device(pdev);
 
-       return err;
+       /* pci_xxx_config_word() returns positive PCIBIOS_xxx error codes, while
+        * the probe() function must return a negative errno in case of failure
+        * (err is unchanged if already negative). */
+       return pcibios_err_to_errno(err);
 }
 
 /* free the board structure object, as well as its resources: */
index 131026fbc2d77cbc3ccb5903daa10f8920f8ae17..5adc95c922eef2d9f968a2dea3bac7c2dd3bfda2 100644 (file)
@@ -717,7 +717,10 @@ failure_release_regions:
 failure_disable_pci:
        pci_disable_device(pdev);
 
-       return err;
+       /* pci_xxx_config_word() returns positive PCIBIOS_xxx error codes, while
+        * the probe() function must return a negative errno in case of failure
+        * (err is unchanged if already negative). */
+       return pcibios_err_to_errno(err);
 }
 
 static void peak_pci_remove(struct pci_dev *pdev)
index 4d4941469cfc06bfff3aeafaa0c3562b63702730..db6ea936dc3fc3fca00c939b2db6a938cf5011dc 100644 (file)
@@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
                mbx_mask = hecc_read(priv, HECC_CANMIM);
                mbx_mask |= HECC_TX_MBOX_MASK;
                hecc_write(priv, HECC_CANMIM, mbx_mask);
+       } else {
+               /* repoll is done only if the whole budget is used */
+               num_pkts = quota;
        }
 
        return num_pkts;
index 9b18d96ef52633ab34bb5ff39f4f643023dc308a..f95945915d209df8353645c6390702d3e67d694d 100644 (file)
@@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
                        }
 
                        if (pos + tmp->len > actual_len) {
-                               dev_err(dev->udev->dev.parent,
-                                       "Format error\n");
+                               dev_err_ratelimited(dev->udev->dev.parent,
+                                                   "Format error\n");
                                break;
                        }
 
@@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
        if (err) {
                netdev_err(netdev, "Error transmitting URB\n");
                usb_unanchor_urb(urb);
+               kfree(buf);
                usb_free_urb(urb);
                return err;
        }
@@ -1333,7 +1334,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
                goto resubmit_urb;
        }
 
-       while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+       while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
                msg = urb->transfer_buffer + pos;
 
                /* The Kvaser firmware can only read and write messages that
@@ -1352,7 +1353,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
                }
 
                if (pos + msg->len > urb->actual_length) {
-                       dev_err(dev->udev->dev.parent, "Format error\n");
+                       dev_err_ratelimited(dev->udev->dev.parent,
+                                           "Format error\n");
                        break;
                }
 
@@ -1768,6 +1770,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 
                usb_unanchor_urb(urb);
+               kfree(buf);
 
                stats->tx_dropped++;
 
index 7f0272558befe9ecdeaf110d2b95f8754cea8939..ef417dcddbf74a59dffff3b82fbff55c13277174 100644 (file)
@@ -592,6 +592,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
                break;
 
        case -ENOENT:
+       case -EPIPE:
        case -ESHUTDOWN:
                return;
 
@@ -862,7 +863,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
                goto cleanup_unregister_candev;
        }
 
-       dev_info(&intf->dev, "Microchip CAN BUS analizer connected\n");
+       dev_info(&intf->dev, "Microchip CAN BUS Analyzer connected\n");
 
        return 0;
 
index b721a2009b5030f440bed9eab8ed4f7003ae25a4..23b45da784cb601a7abf84b212717aee7dc64403 100644 (file)
@@ -625,7 +625,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
        bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
                                slice_num, false);
        bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
-                               slice_num, true);
+                               SLICE_NUM_MASK, true);
 
        /* Insert into TCAM now because we need to insert a second rule */
        bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -699,7 +699,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
        /* Insert into Action and policer RAMs now, set chain ID to
         * the one we are chained to
         */
-       ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
+       ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
                                      queue_num, true);
        if (ret)
                goto out_err;
index c5c38d4b7d1ccd04044f972777c5d3844e755a02..28f5e94274ee68f7d62cba41c1db2cdba5cf4d32 100644 (file)
@@ -3368,6 +3368,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        u16 cp_ring_id, len = 0;
        struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
        u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
+       struct hwrm_short_input short_input = {0};
 
        req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
        memset(resp, 0, PAGE_SIZE);
@@ -3376,7 +3377,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 
        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
-               struct hwrm_short_input short_input = {0};
 
                memcpy(short_cmd_req, req, msg_len);
                memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
@@ -8263,8 +8263,9 @@ static void bnxt_shutdown(struct pci_dev *pdev)
        if (netif_running(dev))
                dev_close(dev);
 
+       bnxt_ulp_shutdown(bp);
+
        if (system_state == SYSTEM_POWER_OFF) {
-               bnxt_ulp_shutdown(bp);
                bnxt_clear_int_mode(bp);
                pci_wake_from_d3(pdev, bp->wol);
                pci_set_power_state(pdev, PCI_D3hot);
index d5031f436f8341ac98d7b1074f22bec9e107245d..3d201d7324bdc7b2c50377e5da5b3ab3acb8a423 100644 (file)
@@ -56,7 +56,6 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
 {
        int ifindex = tcf_mirred_ifindex(tc_act);
        struct net_device *dev;
-       u16 dst_fid;
 
        dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
        if (!dev) {
@@ -64,15 +63,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
                return -EINVAL;
        }
 
-       /* find the FID from dev */
-       dst_fid = bnxt_flow_get_dst_fid(bp, dev);
-       if (dst_fid == BNXT_FID_INVALID) {
-               netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
-               return -EINVAL;
-       }
-
        actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
-       actions->dst_fid = dst_fid;
        actions->dst_dev = dev;
        return 0;
 }
@@ -160,13 +151,17 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
        if (rc)
                return rc;
 
-       /* Tunnel encap/decap action must be accompanied by a redirect action */
-       if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP ||
-            actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) &&
-           !(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) {
-               netdev_info(bp->dev,
-                           "error: no redir action along with encap/decap");
-               return -EINVAL;
+       if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
+               if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
+                       /* dst_fid is PF's fid */
+                       actions->dst_fid = bp->pf.fw_fid;
+               } else {
+                       /* find the FID from dst_dev */
+                       actions->dst_fid =
+                               bnxt_flow_get_dst_fid(bp, actions->dst_dev);
+                       if (actions->dst_fid == BNXT_FID_INVALID)
+                               return -EINVAL;
+               }
        }
 
        return rc;
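The reworked bnxt_tc_parse_actions() defers the FID choice until a forward action is known: encapsulated traffic always targets the PF, everything else resolves the FID from the destination netdev. A small model of that decision; lookup_dst_fid() and the numeric FIDs are hypothetical:

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_FID_INVALID 0xffffu

/* Hypothetical stand-in for bnxt_flow_get_dst_fid(). */
static unsigned int lookup_dst_fid(const char *dst_dev)
{
        (void)dst_dev;
        return 0x10;
}

static int pick_dst_fid(bool fwd, bool tunnel_encap, unsigned int pf_fid,
                        unsigned int *dst_fid)
{
        if (!fwd)
                return 0;               /* no forward action, nothing to pick */
        if (tunnel_encap)
                *dst_fid = pf_fid;      /* encap traffic always goes via the PF */
        else
                *dst_fid = lookup_dst_fid("dst_dev");
        return *dst_fid == SKETCH_FID_INVALID ? -1 : 0;
}

int main(void)
{
        unsigned int fid = SKETCH_FID_INVALID;

        pick_dst_fid(true, true, 0x1, &fid);
        printf("encap forward -> dst_fid 0x%x\n", fid);
        pick_dst_fid(true, false, 0x1, &fid);
        printf("plain forward -> dst_fid 0x%x\n", fid);
        return 0;
}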
@@ -532,10 +527,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
        }
 
        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
-               enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR |
-                          CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR;
+               enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
                ether_addr_copy(req.dst_macaddr, l2_info->dmac);
-               ether_addr_copy(req.src_macaddr, l2_info->smac);
        }
        if (l2_info->num_vlans) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
@@ -901,10 +894,10 @@ static void bnxt_tc_put_decap_handle(struct bnxt *bp,
 
 static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
                                       struct ip_tunnel_key *tun_key,
-                                      struct bnxt_tc_l2_key *l2_info,
-                                      struct net_device *real_dst_dev)
+                                      struct bnxt_tc_l2_key *l2_info)
 {
 #ifdef CONFIG_INET
+       struct net_device *real_dst_dev = bp->dev;
        struct flowi4 flow = { {0} };
        struct net_device *dst_dev;
        struct neighbour *nbr;
@@ -1008,14 +1001,13 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
         */
        tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
        tun_key.tp_dst = flow->tun_key.tp_dst;
-       rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev);
+       rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
        if (rc)
                goto put_decap;
 
-       decap_key->ttl = tun_key.ttl;
        decap_l2_info = &decap_node->l2_info;
+       /* decap smac is wildcarded */
        ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
-       ether_addr_copy(decap_l2_info->smac, l2_info.dmac);
        if (l2_info.num_vlans) {
                decap_l2_info->num_vlans = l2_info.num_vlans;
                decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
@@ -1095,8 +1087,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
        if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
                goto done;
 
-       rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info,
-                                        flow->actions.dst_dev);
+       rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
        if (rc)
                goto put_encap;
 
@@ -1169,6 +1160,15 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
        return 0;
 }
 
+static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
+                               u16 src_fid)
+{
+       if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
+               flow->src_fid = bp->pf.fw_fid;
+       else
+               flow->src_fid = src_fid;
+}
+
 /* Add a new flow or replace an existing flow.
  * Notes on locking:
  * There are essentially two critical sections here.
@@ -1204,7 +1204,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
        rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
        if (rc)
                goto free_node;
-       flow->src_fid = src_fid;
+
+       bnxt_tc_set_src_fid(bp, flow, src_fid);
 
        if (!bnxt_tc_can_offload(bp, flow)) {
                rc = -ENOSPC;
index 6aa0eee88ea529963850828fc1ab46eb36d75095..a5eecd895a8253d753bea0fb273da0bf49005d13 100644 (file)
@@ -1113,7 +1113,7 @@ static int liquidio_watchdog(void *param)
                                dev_err(&oct->pci_dev->dev,
                                        "ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
                                        core);
-                                       err_msg_was_printed[core] = true;
+                               err_msg_was_printed[core] = true;
                        }
                }
 
index d83a78be98a2cb90f5cea6b07eb257e11ad9ebed..fed2b2f909fcf06b98441863ac92ff63a2fd01f7 100644 (file)
@@ -5598,7 +5598,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
        u32 txq_dma;
 
        /* Allocate memory for TX descriptors */
-       aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+       aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
                                MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
                                &aggr_txq->descs_dma, GFP_KERNEL);
        if (!aggr_txq->descs)
index e5ff734d4f9b2ff9b56799cc803fdafe4f80ea1c..9eb7f65d8000d28190da780587dba562aed72152 100644 (file)
@@ -808,8 +808,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
                         val, reg);
 
        if (gmac->variant->soc_has_internal_phy) {
-               if (of_property_read_bool(priv->plat->phy_node,
-                                         "allwinner,leds-active-low"))
+               if (of_property_read_bool(node, "allwinner,leds-active-low"))
                        reg |= H3_EPHY_LED_POL;
                else
                        reg &= ~H3_EPHY_LED_POL;
index f63c2ddced3c9a1e90f4425b28be95d202379a71..d7250539d0bd0c61c92fc9460c9e1197bb57ac8f 100644 (file)
@@ -2588,6 +2588,7 @@ static int stmmac_open(struct net_device *dev)
 
        priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
        priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+       priv->mss = 0;
 
        ret = alloc_dma_desc_resources(priv);
        if (ret < 0) {
index 11c1e7950fe58002b1b2b52e6af395dbfc7b6863..77cc4fbaeace4836419b2232913f8d78351e0148 100644 (file)
@@ -393,6 +393,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
                .flowi4_oif = dev->ifindex,
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC,
+               .flowi4_mark = skb->mark,
                .daddr = ip4h->daddr,
                .saddr = ip4h->saddr,
        };
index fdb43dd9b5cd424f4dde02f1257070ffe4b50fb1..ab4614113403455c1eee1c2ad69c7cebc6da5c9d 100644 (file)
@@ -496,16 +496,18 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
        return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
 }
 
+/* Center KSZ9031RNX FLP timing at 16ms. */
 static int ksz9031_center_flp_timing(struct phy_device *phydev)
 {
        int result;
 
-       /* Center KSZ9031RNX FLP timing at 16ms. */
        result = ksz9031_extended_write(phydev, OP_DATA, 0,
                                        MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
+       if (result)
+               return result;
+
        result = ksz9031_extended_write(phydev, OP_DATA, 0,
                                        MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
-
        if (result)
                return result;
 
index e3bbc70372d3ba73517514c1eda6a861d35d9524..5dc9668dde34fe6b810c48f2bcd63e8609caa74e 100644 (file)
@@ -773,6 +773,7 @@ void phylink_stop(struct phylink *pl)
                sfp_upstream_stop(pl->sfp_bus);
 
        set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
+       queue_work(system_power_efficient_wq, &pl->resolve);
        flush_work(&pl->resolve);
 }
 EXPORT_SYMBOL_GPL(phylink_stop);
index e381811e5f1143f35432e6624e80c00b13f0b56e..9dfc1c4c954f3230c7f6419ac2c59ad85b26c1c4 100644 (file)
@@ -351,12 +351,13 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
 {
        unsigned int los = sfp->state & SFP_F_LOS;
 
-       /* FIXME: what if neither SFP_OPTIONS_LOS_INVERTED nor
-        * SFP_OPTIONS_LOS_NORMAL are set?  For now, we assume
-        * the same as SFP_OPTIONS_LOS_NORMAL set.
+       /* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
+        * are set, we assume that no LOS signal is available.
         */
-       if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED)
+       if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
                los ^= SFP_F_LOS;
+       else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
+               los = 0;
 
        if (los)
                sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -364,6 +365,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
                sfp_sm_link_up(sfp);
 }
 
+static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
+{
+       return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
+               event == SFP_E_LOS_LOW) ||
+              (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
+               event == SFP_E_LOS_HIGH);
+}
+
+static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
+{
+       return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
+               event == SFP_E_LOS_HIGH) ||
+              (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
+               event == SFP_E_LOS_LOW);
+}
+
 static void sfp_sm_fault(struct sfp *sfp, bool warn)
 {
        if (sfp->sm_retries && !--sfp->sm_retries) {
@@ -470,6 +487,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
                return -EINVAL;
        }
 
+       /* If the module requires address swap mode, warn about it */
+       if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
+               dev_warn(sfp->dev,
+                        "module address swap to access page 0xA2 is not supported.\n");
+
        return sfp_module_insert(sfp->sfp_bus, &sfp->id);
 }
 
@@ -581,9 +603,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
        case SFP_S_WAIT_LOS:
                if (event == SFP_E_TX_FAULT)
                        sfp_sm_fault(sfp, true);
-               else if (event ==
-                        (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
-                         SFP_E_LOS_HIGH : SFP_E_LOS_LOW))
+               else if (sfp_los_event_inactive(sfp, event))
                        sfp_sm_link_up(sfp);
                break;
 
@@ -591,9 +611,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
                if (event == SFP_E_TX_FAULT) {
                        sfp_sm_link_down(sfp);
                        sfp_sm_fault(sfp, true);
-               } else if (event ==
-                          (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
-                           SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) {
+               } else if (sfp_los_event_active(sfp, event)) {
                        sfp_sm_link_down(sfp);
                        sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
                }
@@ -639,7 +657,8 @@ static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo)
 {
        /* locking... and check module is present */
 
-       if (sfp->id.ext.sff8472_compliance) {
+       if (sfp->id.ext.sff8472_compliance &&
+           !(sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)) {
                modinfo->type = ETH_MODULE_SFF_8472;
                modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
        } else {
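The LOS rework treats the module's options word as the big-endian field it is (hence the cpu_to_be16() comparisons) and adds a third case: when neither polarity bit is advertised, the LOS input is ignored. The resulting decision, modelled in plain C with illustrative bit values rather than the real SFF-8472 ones:

#include <stdio.h>

/* Illustrative bit values only; the real ones come from SFF-8472. */
#define OPT_LOS_INVERTED 0x0004u
#define OPT_LOS_NORMAL   0x0002u
#define F_LOS            0x0001u

static unsigned int effective_los(unsigned int options, unsigned int state)
{
        unsigned int los = state & F_LOS;

        if (options & OPT_LOS_INVERTED)
                los ^= F_LOS;           /* active-low LOS pin */
        else if (!(options & OPT_LOS_NORMAL))
                los = 0;                /* no usable LOS signal advertised */

        return los;
}

int main(void)
{
        printf("inverted, pin low  -> LOS %u\n", effective_los(OPT_LOS_INVERTED, 0));
        printf("normal, pin high   -> LOS %u\n", effective_los(OPT_LOS_NORMAL, F_LOS));
        printf("neither advertised -> LOS %u\n", effective_los(0, F_LOS));
        return 0;
}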
index e9489b88407ce1677385fe480592958b57d02c8d..0a886fda01291efb5a6beb0a2b5eb2123c1f05ab 100644 (file)
@@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q,
        DEFINE_WAIT(wait);
        ssize_t ret = 0;
 
-       if (!iov_iter_count(to))
+       if (!iov_iter_count(to)) {
+               if (skb)
+                       kfree_skb(skb);
                return 0;
+       }
 
        if (skb)
                goto put;
@@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m,
                       size_t total_len, int flags)
 {
        struct tap_queue *q = container_of(sock, struct tap_queue, sock);
+       struct sk_buff *skb = m->msg_control;
        int ret;
-       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
+               if (skb)
+                       kfree_skb(skb);
                return -EINVAL;
-       ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT,
-                         m->msg_control);
+       }
+       ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
index 95749006d687b971a49894c903fcc611bc25c375..4f4a842a1c9cb8ac3397b329854a0fc7bd2f6aa3 100644 (file)
@@ -1952,8 +1952,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 
        tun_debug(KERN_INFO, tun, "tun_do_read\n");
 
-       if (!iov_iter_count(to))
+       if (!iov_iter_count(to)) {
+               if (skb)
+                       kfree_skb(skb);
                return 0;
+       }
 
        if (!skb) {
                /* Read frames from ring */
@@ -2069,22 +2072,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 {
        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
        struct tun_struct *tun = tun_get(tfile);
+       struct sk_buff *skb = m->msg_control;
        int ret;
 
-       if (!tun)
-               return -EBADFD;
+       if (!tun) {
+               ret = -EBADFD;
+               goto out_free_skb;
+       }
 
        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
                ret = -EINVAL;
-               goto out;
+               goto out_put_tun;
        }
        if (flags & MSG_ERRQUEUE) {
                ret = sock_recv_errqueue(sock->sk, m, total_len,
                                         SOL_PACKET, TUN_TX_TIMESTAMP);
                goto out;
        }
-       ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT,
-                         m->msg_control);
+       ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
        if (ret > (ssize_t)total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2092,6 +2097,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 out:
        tun_put(tun);
        return ret;
+
+out_put_tun:
+       tun_put(tun);
+out_free_skb:
+       if (skb)
+               kfree_skb(skb);
+       return ret;
 }
 
 static int tun_peek_len(struct socket *sock)
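The tap and tun hunks enforce the same rule: an skb passed in via msg_control is owned by the receive path, so every early return must free it, not only the success path. A user-space caricature of that goto ladder, with free() standing in for kfree_skb() and simplified error handling:

#include <stdio.h>
#include <stdlib.h>

static int recvmsg_sketch(void *skb, unsigned int flags, int have_dev)
{
        int ret;

        if (!have_dev) {
                ret = -77;              /* -EBADFD in the driver */
                goto out_free_skb;
        }
        if (flags & ~0x3u) {
                ret = -22;              /* -EINVAL; previously the skb leaked here */
                goto out_free_skb;
        }

        free(skb);                      /* normal path consumes the skb */
        return 0;

out_free_skb:
        free(skb);
        return ret;
}

int main(void)
{
        printf("%d\n", recvmsg_sketch(malloc(16), 0xff, 1));  /* -22, no leak */
        return 0;
}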
index 4500880240f25745f410538fb1a53d42a5210cce..6572550cfe784104179f1fb946ee41e2a312b2ca 100644 (file)
@@ -207,7 +207,7 @@ static const unsigned int dnv_uart0_pins[] = { 60, 61, 64, 65 };
 static const unsigned int dnv_uart0_modes[] = { 2, 3, 1, 1 };
 static const unsigned int dnv_uart1_pins[] = { 94, 95, 96, 97 };
 static const unsigned int dnv_uart2_pins[] = { 60, 61, 62, 63 };
-static const unsigned int dnv_uart2_modes[] = { 1, 1, 2, 2 };
+static const unsigned int dnv_uart2_modes[] = { 1, 2, 2, 2 };
 static const unsigned int dnv_emmc_pins[] = {
        142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
 };
index d45af31b86b41ef6c85591a040fe2dd46c60de3e..bdb8d174efefb7ee897c60bab250a126a53af729 100644 (file)
@@ -408,12 +408,21 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
 {
        struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
        unsigned int reg = OUTPUT_EN;
-       unsigned int mask;
+       unsigned int mask, val, ret;
 
        armada_37xx_update_reg(&reg, offset);
        mask = BIT(offset);
 
-       return regmap_update_bits(info->regmap, reg, mask, mask);
+       ret = regmap_update_bits(info->regmap, reg, mask, mask);
+
+       if (ret)
+               return ret;
+
+       reg = OUTPUT_VAL;
+       val = value ? mask : 0;
+       regmap_update_bits(info->regmap, reg, mask, val);
+
+       return 0;
 }
 
 static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
index e9b83e291edf43e02e757625ffb4f8d251f6227e..c11b8f14d841e91268eb404b22bb017b83e05e8a 100644 (file)
@@ -2322,7 +2322,7 @@ static const struct gemini_pin_conf *gemini_get_pin_conf(struct gemini_pmx *pmx,
        int i;
 
        for (i = 0; i < pmx->nconfs; i++) {
-               retconf = &gemini_confs_3516[i];
+               retconf = &pmx->confs[i];
                if (retconf->pin == pin)
                        return retconf;
        }
index 4f2a726bbaeb234c1caf7454db0058da903305b9..f5f77432ce6f830677d09887a16b34e0af01b2a4 100644 (file)
@@ -428,7 +428,7 @@ static const struct sunxi_desc_pin a64_pins[] = {
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x2, "mmc0"),          /* D3 */
-                 SUNXI_FUNCTION(0x4, "uart0")),        /* RX */
+                 SUNXI_FUNCTION(0x3, "uart0")),        /* RX */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
index 97b48336f84a3fdd6a30e98a19e954d625d3cfee..a78d7b922ef47529ce0708201ccd7b60a876959f 100644 (file)
@@ -535,14 +535,16 @@ static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data_broken = {
        .pins = sun50i_h5_pins,
        .npins = ARRAY_SIZE(sun50i_h5_pins),
        .irq_banks = 2,
-       .irq_read_needs_mux = true
+       .irq_read_needs_mux = true,
+       .disable_strict_mode = true,
 };
 
 static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data = {
        .pins = sun50i_h5_pins,
        .npins = ARRAY_SIZE(sun50i_h5_pins),
        .irq_banks = 3,
-       .irq_read_needs_mux = true
+       .irq_read_needs_mux = true,
+       .disable_strict_mode = true,
 };
 
 static int sun50i_h5_pinctrl_probe(struct platform_device *pdev)
index 472ef0d91b9929c4628084a020374b68d3de3c86..5553c0eb0f41c420e422686bdacb6ea3c1c2c14a 100644 (file)
@@ -145,19 +145,19 @@ static const struct sunxi_desc_pin sun9i_a80_pins[] = {
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x3, "mcsi"),          /* MCLK */
-                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PB_EINT14 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)), /* PB_EINT14 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x3, "mcsi"),          /* SCK */
                  SUNXI_FUNCTION(0x4, "i2c4"),          /* SCK */
-                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PB_EINT15 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)), /* PB_EINT15 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x3, "mcsi"),          /* SDA */
                  SUNXI_FUNCTION(0x4, "i2c4"),          /* SDA */
-                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PB_EINT16 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)), /* PB_EINT16 */
 
        /* Hole */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
index 9cd569ef43ecfbaaf10680b3ac091267c1d1577f..15015a24f8ad750d2c107bf278442340cef801fd 100644 (file)
@@ -987,6 +987,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
 int qeth_set_features(struct net_device *, netdev_features_t);
 void qeth_recover_features(struct net_device *dev);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features);
 int qeth_vm_request_mac(struct qeth_card *card);
 int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
 
index 98a7f84540ab2c51e483b7a4d9368170640fef77..430e3214f7e26791af247d402734efb0ebb9cfc3 100644 (file)
 #include <linux/mii.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/skbuff.h>
+
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
 
@@ -6439,6 +6444,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(qeth_fix_features);
 
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features)
+{
+       /* GSO segmentation builds skbs with
+        *      a (small) linear part for the headers, and
+        *      page frags for the data.
+        * Compared to a linear skb, the header-only part consumes an
+        * additional buffer element. This reduces buffer utilization, and
+        * hurts throughput. So compress small segments into one element.
+        */
+       if (netif_needs_gso(skb, features)) {
+               /* match skb_segment(): */
+               unsigned int doffset = skb->data - skb_mac_header(skb);
+               unsigned int hsize = skb_shinfo(skb)->gso_size;
+               unsigned int hroom = skb_headroom(skb);
+
+               /* linearize only if resulting skb allocations are order-0: */
+               if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
+                       features &= ~NETIF_F_SG;
+       }
+
+       return vlan_features_check(skb, features);
+}
+EXPORT_SYMBOL_GPL(qeth_features_check);
+
 static int __init qeth_core_init(void)
 {
        int rc;
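
The qeth_features_check() hunk above only drops NETIF_F_SG when the per-segment head would still fit an order-0 allocation. A quick stand-alone sketch of that cutoff (PAGE_SIZE, the cache alignment and the SKB_MAX_HEAD(0) overhead below are assumed illustration values, not taken from this patch); it shows why a typical ~1500-byte MSS gets linearized while a jumbo-sized MSS keeps scatter/gather:

	#include <stdio.h>

	/* Assumed illustration values, not the kernel's real macros. */
	#define PAGE_SIZE_ASSUMED	4096u
	#define CACHE_BYTES_ASSUMED	64u
	#define ALIGN_UP(x, a)		(((x) + (a) - 1u) & ~((a) - 1u))
	/* Rough stand-in for SKB_MAX_HEAD(0): a page minus skb_shared_info overhead. */
	#define SKB_MAX_HEAD_ASSUMED	(PAGE_SIZE_ASSUMED - 320u)

	int main(void)
	{
		unsigned int hroom = 64, doffset = 66;	/* headroom + MAC..TCP headers */
		unsigned int mss[] = { 1400, 8900 };

		for (unsigned int i = 0; i < 2; i++) {
			unsigned int need = ALIGN_UP(hroom + doffset + mss[i],
						     CACHE_BYTES_ASSUMED);

			printf("mss %4u -> %4u byte head, %s NETIF_F_SG\n",
			       mss[i], need,
			       need <= SKB_MAX_HEAD_ASSUMED ? "drop" : "keep");
		}
		return 0;
	}
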
index 93d7e345d18043e18fe75c2b87bd4ff1bddfce32..5863ea170ff26447630ed22acd5174db23bf0ee2 100644 (file)
@@ -961,6 +961,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
        .ndo_stop               = qeth_l2_stop,
        .ndo_get_stats          = qeth_get_stats,
        .ndo_start_xmit         = qeth_l2_hard_start_xmit,
+       .ndo_features_check     = qeth_features_check,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l2_set_rx_mode,
        .ndo_do_ioctl           = qeth_do_ioctl,
@@ -1011,6 +1012,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
        if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
                card->dev->hw_features = NETIF_F_SG;
                card->dev->vlan_features = NETIF_F_SG;
+               card->dev->features |= NETIF_F_SG;
                /* OSA 3S and earlier has no RX/TX support */
                if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
                        card->dev->hw_features |= NETIF_F_IP_CSUM;
@@ -1029,8 +1031,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 
        card->info.broadcast_capable = 1;
        qeth_l2_request_initial_mac(card);
-       card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-                                 PAGE_SIZE;
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
        netif_carrier_off(card->dev);
index 0f8c12738b067d94184862777b3f2d7e2594a846..6a73894b0cb51d2896bea853f6a33113fed98c13 100644 (file)
@@ -1377,6 +1377,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 
                tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
                memcpy(tmp->mac, buf, sizeof(tmp->mac));
+               tmp->is_multicast = 1;
 
                ipm = qeth_l3_ip_from_hash(card, tmp);
                if (ipm) {
@@ -2918,6 +2919,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
        .ndo_stop               = qeth_l3_stop,
        .ndo_get_stats          = qeth_get_stats,
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
+       .ndo_features_check     = qeth_features_check,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l3_set_multicast_list,
        .ndo_do_ioctl           = qeth_do_ioctl,
@@ -2958,6 +2960,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                card->dev->vlan_features = NETIF_F_SG |
                                        NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
                                        NETIF_F_TSO;
+                               card->dev->features |= NETIF_F_SG;
                        }
                }
        } else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2985,8 +2988,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                NETIF_F_HW_VLAN_CTAG_RX |
                                NETIF_F_HW_VLAN_CTAG_FILTER;
        netif_keep_dst(card->dev);
-       card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-                                 PAGE_SIZE;
+       netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
+                                         PAGE_SIZE);
 
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
index 403a639574e5ea10c5c8500141204ebc513bd7c1..6e3d81969a77cc895580f79fa8e3aaa3b8bb4fee 100644 (file)
@@ -1673,6 +1673,7 @@ struct aac_dev
        struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
        u8                      adapter_shutdown;
        u32                     handle_pci_error;
+       bool                    init_reset;
 };
 
 #define aac_adapter_interrupt(dev) \
index 525a652dab48e9923af67ce6e884154855df308c..bec9f3193f607c0aa1ca5f728f5eac85de518d61 100644 (file)
@@ -467,35 +467,6 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
        return 0;
 }
 
-#ifdef CONFIG_EEH
-static inline int aac_check_eeh_failure(struct aac_dev *dev)
-{
-       /* Check for an EEH failure for the given
-        * device node. Function eeh_dev_check_failure()
-        * returns 0 if there has not been an EEH error
-        * otherwise returns a non-zero value.
-        *
-        * Need to be called before any PCI operation,
-        * i.e.,before aac_adapter_check_health()
-        */
-       struct eeh_dev *edev = pci_dev_to_eeh_dev(dev->pdev);
-
-       if (eeh_dev_check_failure(edev)) {
-               /* The EEH mechanisms will handle this
-                * error and reset the device if
-                * necessary.
-                */
-               return 1;
-       }
-       return 0;
-}
-#else
-static inline int aac_check_eeh_failure(struct aac_dev *dev)
-{
-       return 0;
-}
-#endif
-
 /*
  *     Define the highest level of host to adapter communication routines.
  *     These routines will support host to adapter FS communication. These
@@ -701,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                                        return -ETIMEDOUT;
                                }
 
-                               if (aac_check_eeh_failure(dev))
+                               if (unlikely(pci_channel_offline(dev->pdev)))
                                        return -EFAULT;
 
                                if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -801,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
-               if (aac_check_eeh_failure(dev))
+               if (unlikely(pci_channel_offline(dev->pdev)))
                        return -EFAULT;
 
                fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
@@ -1583,6 +1554,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
         * will ensure that i/o is quiesced and the card is flushed in that
         * case.
         */
+       aac_free_irq(aac);
        aac_fib_map_free(aac);
        dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
                          aac->comm_phys);
@@ -1590,7 +1562,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
        aac->comm_phys = 0;
        kfree(aac->queues);
        aac->queues = NULL;
-       aac_free_irq(aac);
        kfree(aac->fsa_dev);
        aac->fsa_dev = NULL;
 
index c9252b138c1fe0e21d217b0fb305cc45afc1545a..bdf127aaab41d814e2337d2944166a0498bf1a66 100644 (file)
@@ -1680,6 +1680,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        aac->cardtype = index;
        INIT_LIST_HEAD(&aac->entry);
 
+       if (aac_reset_devices || reset_devices)
+               aac->init_reset = true;
+
        aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
        if (!aac->fibs)
                goto out_free_host;
index 93ef7c37e568e0e2ca4a38d8a08dd1b987482626..6201666941717042e3c7a2a1cd5f459b41b3360c 100644 (file)
@@ -561,11 +561,16 @@ int _aac_rx_init(struct aac_dev *dev)
        dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
        dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
        dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
-       if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
-         !aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
-               /* Make sure the Hardware FIFO is empty */
-               while ((++restart < 512) &&
-                 (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
+
+       if (((status & 0x0c) != 0x0c) || dev->init_reset) {
+               dev->init_reset = false;
+               if (!aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) {
+                       /* Make sure the Hardware FIFO is empty */
+                       while ((++restart < 512) &&
+                              (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
+               }
+       }
+
        /*
         *      Check to see if the board panic'd while booting.
         */
index 0c9361c87ec8de8b853f6ccaa6132663a4b982bd..fde6b6aa86e38a1af487d94b1117f3ffc340c5a5 100644 (file)
@@ -868,9 +868,13 @@ int aac_src_init(struct aac_dev *dev)
        /* Failure to reset here is an option ... */
        dev->a_ops.adapter_sync_cmd = src_sync_cmd;
        dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
-       if ((aac_reset_devices || reset_devices) &&
-               !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
-               ++restart;
+
+       if (dev->init_reset) {
+               dev->init_reset = false;
+               if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
+                       ++restart;
+       }
+
        /*
         *      Check to see if the board panic'd while booting.
         */
@@ -1014,9 +1018,13 @@ int aac_srcv_init(struct aac_dev *dev)
        /* Failure to reset here is an option ... */
        dev->a_ops.adapter_sync_cmd = src_sync_cmd;
        dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
-       if ((aac_reset_devices || reset_devices) &&
-               !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
-               ++restart;
+
+       if (dev->init_reset) {
+               dev->init_reset = false;
+               if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
+                       ++restart;
+       }
+
        /*
         *      Check to see if flash update is running.
         *      Wait for the adapter to be up and running. Wait up to 5 minutes
index 1cbc497e00bd95ffff6ee2020f06531f8059846c..00742c50cd44ed6e452dc50b4eb9bf5bd3f36e80 100644 (file)
@@ -2148,11 +2148,13 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
                q->limits.cluster = 0;
 
        /*
-        * set a reasonable default alignment on word boundaries: the
-        * host and device may alter it using
-        * blk_queue_update_dma_alignment() later.
+        * Set a reasonable default alignment:  The larger of 4-byte (dword),
+        * which is a common minimum for HBAs, and the minimum DMA alignment,
+        * which is set by the platform.
+        *
+        * Devices that require a bigger alignment can increase it later.
         */
-       blk_queue_dma_alignment(q, 0x03);
+       blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
 }
 EXPORT_SYMBOL_GPL(__scsi_init_queue);
 
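
For reference, a user-space sketch of the new default above (the dma_get_cache_alignment() values are assumed samples, not from this patch): the mask handed to blk_queue_dma_alignment() is simply max(4, cache alignment) - 1, so the old 0x03 remains the floor and platforms with larger cache lines now get a correspondingly larger default:

	#include <stdio.h>

	/* Mirrors the new default: mask = max(4, dma_get_cache_alignment()) - 1. */
	static unsigned int default_dma_alignment_mask(unsigned int cache_align)
	{
		unsigned int a = cache_align > 4 ? cache_align : 4;

		return a - 1;
	}

	int main(void)
	{
		/* Assumed per-platform cache alignments, for illustration only. */
		unsigned int samples[] = { 1, 4, 64, 128 };

		for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("dma_get_cache_alignment() = %3u -> default mask 0x%02x\n",
			       samples[i], default_dma_alignment_mask(samples[i]));
		return 0;
	}
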
index 011c3369082c6f19772e227989cd7eb6b335179a..a355d989b414f9059abe8e04bad9bbf6d2459c34 100644 (file)
@@ -6559,12 +6559,15 @@ static int ufshcd_config_vreg(struct device *dev,
                struct ufs_vreg *vreg, bool on)
 {
        int ret = 0;
-       struct regulator *reg = vreg->reg;
-       const char *name = vreg->name;
+       struct regulator *reg;
+       const char *name;
        int min_uV, uA_load;
 
        BUG_ON(!vreg);
 
+       reg = vreg->reg;
+       name = vreg->name;
+
        if (regulator_count_voltages(reg) > 0) {
                min_uV = on ? vreg->min_uV : 0;
                ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
index d79090ed7f9c71ee5bcbed31e27a05a5da47bb83..1799d3f26a9e29858e1a4362fe4fc491609750dd 100644 (file)
@@ -1778,9 +1778,12 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
        }
        in += sizeof(u32);
 
-       rc = ssi_hash_init(state, ctx);
-       if (rc)
-               goto out;
+       /* call init() to allocate bufs if the user hasn't */
+       if (!state->digest_buff) {
+               rc = ssi_hash_init(state, ctx);
+               if (rc)
+                       goto out;
+       }
 
        dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
                                ctx->inter_digestsize, DMA_BIDIRECTIONAL);
index 2d62a8c5733241738f68bd87a53247f2059dbdf8..ae6ed96d7874c4ce34d7ce3574b9eded74dbb625 100644 (file)
@@ -361,3 +361,8 @@ static struct comedi_driver ni_atmio_driver = {
        .detach         = ni_atmio_detach,
 };
 module_comedi_driver(ni_atmio_driver);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
+
index 068aece25d37f17414a3767b79fc8510cdf6bb3e..cded30f145aa2423b13cafdf2e149c607cb355d2 100644 (file)
@@ -394,7 +394,7 @@ struct octeon_hcd {
                                result = -1;                                \
                                break;                                      \
                        } else                                              \
-                               cvmx_wait(100);                             \
+                               __delay(100);                               \
                }                                                           \
        } while (0);                                                        \
        result; })
@@ -774,7 +774,7 @@ retry:
        usbn_clk_ctl.s.hclk_rst = 1;
        cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
        /* 2e.  Wait 64 core-clock cycles for HCLK to stabilize */
-       cvmx_wait(64);
+       __delay(64);
        /*
         * 3. Program the power-on reset field in the USBN clock-control
         *    register:
@@ -795,7 +795,7 @@ retry:
        cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
                            usbn_usbp_ctl_status.u64);
        /* 6. Wait 10 cycles */
-       cvmx_wait(10);
+       __delay(10);
        /*
         * 7. Clear ATE_RESET field in the USBN clock-control register:
         *    USBN_USBP_CTL_STATUS[ATE_RESET] = 0
index c0664dc80bf24684184c83bad118d311d756b75c..446310775e9021bcc7e9f72cb94545e0b9c1b012 100644 (file)
@@ -1395,19 +1395,13 @@ static int rtw_wx_get_essid(struct net_device *dev,
        if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
            (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
                len = pcur_bss->Ssid.SsidLength;
-
-               wrqu->essid.length = len;
-
                memcpy(extra, pcur_bss->Ssid.Ssid, len);
-
-               wrqu->essid.flags = 1;
        } else {
-               ret = -1;
-               goto exit;
+               len = 0;
+               *extra = 0;
        }
-
-exit:
-
+       wrqu->essid.length = len;
+       wrqu->essid.flags = 1;
 
        return ret;
 }
index ce7ad0acee7aa784772c77339130a3d6289641cd..247788a16f0b62f74f86d0537d51ce0358bdb61d 100644 (file)
@@ -27,23 +27,41 @@ static int ttyport_receive_buf(struct tty_port *port, const unsigned char *cp,
 {
        struct serdev_controller *ctrl = port->client_data;
        struct serport *serport = serdev_controller_get_drvdata(ctrl);
+       int ret;
 
        if (!test_bit(SERPORT_ACTIVE, &serport->flags))
                return 0;
 
-       return serdev_controller_receive_buf(ctrl, cp, count);
+       ret = serdev_controller_receive_buf(ctrl, cp, count);
+
+       dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count,
+                               "receive_buf returns %d (count = %zu)\n",
+                               ret, count);
+       if (ret < 0)
+               return 0;
+       else if (ret > count)
+               return count;
+
+       return ret;
 }
 
 static void ttyport_write_wakeup(struct tty_port *port)
 {
        struct serdev_controller *ctrl = port->client_data;
        struct serport *serport = serdev_controller_get_drvdata(ctrl);
+       struct tty_struct *tty;
+
+       tty = tty_port_tty_get(port);
+       if (!tty)
+               return;
 
-       if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &port->tty->flags) &&
+       if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) &&
            test_bit(SERPORT_ACTIVE, &serport->flags))
                serdev_controller_write_wakeup(ctrl);
 
-       wake_up_interruptible_poll(&port->tty->write_wait, POLLOUT);
+       wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+
+       tty_kref_put(tty);
 }
 
 static const struct tty_port_client_operations client_ops = {
@@ -136,8 +154,10 @@ static void ttyport_close(struct serdev_controller *ctrl)
 
        clear_bit(SERPORT_ACTIVE, &serport->flags);
 
+       tty_lock(tty);
        if (tty->ops->close)
                tty->ops->close(tty, NULL);
+       tty_unlock(tty);
 
        tty_release_struct(tty, serport->tty_idx);
 }
index 362c25ff188a549f5d2e111a8c4ef99248a5b920..ae6a256524d8b618d4116fde8a66518e4e93cce4 100644 (file)
@@ -122,12 +122,14 @@ static void __init init_port(struct earlycon_device *device)
        serial8250_early_out(port, UART_FCR, 0);        /* no fifo */
        serial8250_early_out(port, UART_MCR, 0x3);      /* DTR + RTS */
 
-       divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud);
-       c = serial8250_early_in(port, UART_LCR);
-       serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB);
-       serial8250_early_out(port, UART_DLL, divisor & 0xff);
-       serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff);
-       serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB);
+       if (port->uartclk && device->baud) {
+               divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud);
+               c = serial8250_early_in(port, UART_LCR);
+               serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB);
+               serial8250_early_out(port, UART_DLL, divisor & 0xff);
+               serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff);
+               serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB);
+       }
 }
 
 int __init early_serial8250_setup(struct earlycon_device *device,
index b7e0e34166414fafa3c6e705cce6bd6e952f1c31..54adf8d563501ab844cea41edf0427fdb83a1a8e 100644 (file)
@@ -5135,6 +5135,9 @@ static const struct pci_device_id serial_pci_tbl[] = {
        { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
        { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
 
+       /* Amazon PCI serial device */
+       { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
+
        /*
         * These entries match devices with class COMMUNICATION_SERIAL,
         * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
index 8b351444cc40d015fefde2f40143a452232d32c9..9a2ab6751a23c504177fbac78cc7e95cdb21a1ae 100644 (file)
@@ -180,9 +180,9 @@ static int ulpi_of_register(struct ulpi *ulpi)
        /* Find a ulpi bus underneath the parent or the grandparent */
        parent = ulpi->dev.parent;
        if (parent->of_node)
-               np = of_find_node_by_name(parent->of_node, "ulpi");
+               np = of_get_child_by_name(parent->of_node, "ulpi");
        else if (parent->parent && parent->parent->of_node)
-               np = of_find_node_by_name(parent->parent->of_node, "ulpi");
+               np = of_get_child_by_name(parent->parent->of_node, "ulpi");
        if (!np)
                return 0;
 
index da8acd980fc68e1c67b302ab45a62580d99fb0eb..55b198ba629b33b4923247736be8fb743286c324 100644 (file)
@@ -905,14 +905,25 @@ void usb_release_bos_descriptor(struct usb_device *dev)
        }
 }
 
+static const __u8 bos_desc_len[256] = {
+       [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
+       [USB_CAP_TYPE_EXT]          = USB_DT_USB_EXT_CAP_SIZE,
+       [USB_SS_CAP_TYPE]           = USB_DT_USB_SS_CAP_SIZE,
+       [USB_SSP_CAP_TYPE]          = USB_DT_USB_SSP_CAP_SIZE(1),
+       [CONTAINER_ID_TYPE]         = USB_DT_USB_SS_CONTN_ID_SIZE,
+       [USB_PTM_CAP_TYPE]          = USB_DT_USB_PTM_ID_SIZE,
+};
+
 /* Get BOS descriptor set */
 int usb_get_bos_descriptor(struct usb_device *dev)
 {
        struct device *ddev = &dev->dev;
        struct usb_bos_descriptor *bos;
        struct usb_dev_cap_header *cap;
+       struct usb_ssp_cap_descriptor *ssp_cap;
        unsigned char *buffer;
-       int length, total_len, num, i;
+       int length, total_len, num, i, ssac;
+       __u8 cap_type;
        int ret;
 
        bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
@@ -965,7 +976,13 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                        dev->bos->desc->bNumDeviceCaps = i;
                        break;
                }
+               cap_type = cap->bDevCapabilityType;
                length = cap->bLength;
+               if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
+                       dev->bos->desc->bNumDeviceCaps = i;
+                       break;
+               }
+
                total_len -= length;
 
                if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
@@ -973,7 +990,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                        continue;
                }
 
-               switch (cap->bDevCapabilityType) {
+               switch (cap_type) {
                case USB_CAP_TYPE_WIRELESS_USB:
                        /* Wireless USB cap descriptor is handled by wusb */
                        break;
@@ -986,8 +1003,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                                (struct usb_ss_cap_descriptor *)buffer;
                        break;
                case USB_SSP_CAP_TYPE:
-                       dev->bos->ssp_cap =
-                               (struct usb_ssp_cap_descriptor *)buffer;
+                       ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
+                       ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
+                               USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
+                       if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
+                               dev->bos->ssp_cap = ssp_cap;
                        break;
                case CONTAINER_ID_TYPE:
                        dev->bos->ss_id =
index 705c573d0257e28b5142070caaca17e06e5a25fd..a3fad4ec9870d21e602fa80d8cf564fff5dd6d62 100644 (file)
@@ -1442,14 +1442,18 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        int number_of_packets = 0;
        unsigned int stream_id = 0;
        void *buf;
-
-       if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
-                               USBDEVFS_URB_SHORT_NOT_OK |
+       unsigned long mask =    USBDEVFS_URB_SHORT_NOT_OK |
                                USBDEVFS_URB_BULK_CONTINUATION |
                                USBDEVFS_URB_NO_FSBR |
                                USBDEVFS_URB_ZERO_PACKET |
-                               USBDEVFS_URB_NO_INTERRUPT))
-               return -EINVAL;
+                               USBDEVFS_URB_NO_INTERRUPT;
+       /* USBDEVFS_URB_ISO_ASAP is a special case */
+       if (uurb->type == USBDEVFS_URB_TYPE_ISO)
+               mask |= USBDEVFS_URB_ISO_ASAP;
+
+       if (uurb->flags & ~mask)
+               return -EINVAL;
+
        if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
                return -EINVAL;
        if (uurb->buffer_length > 0 && !uurb->buffer)
index 7ccdd3d4db84c9d4675084d4a575f458fd1e1d44..cf7bbcb9a63cc9acaa4dfe225bba01167ced2051 100644 (file)
@@ -4948,6 +4948,15 @@ loop:
                usb_put_dev(udev);
                if ((status == -ENOTCONN) || (status == -ENOTSUPP))
                        break;
+
+               /* When halfway through our retry count, power-cycle the port */
+               if (i == (SET_CONFIG_TRIES / 2) - 1) {
+                       dev_info(&port_dev->dev, "attempt power cycle\n");
+                       usb_hub_set_port_power(hdev, hub, port1, false);
+                       msleep(2 * hub_power_on_good_delay(hub));
+                       usb_hub_set_port_power(hdev, hub, port1, true);
+                       msleep(hub_power_on_good_delay(hub));
+               }
        }
        if (hub->hdev->parent ||
                        !hcd->driver->port_handed_over ||
index f1dbab6f798fdc75dbde8039b8d0c5339871fcde..a10b346b9777dba58abe8346cb4926e42d8bb7aa 100644 (file)
@@ -146,6 +146,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* appletouch */
        { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
+       { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
        { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
 
index 31cce7805eb2e93706468409a39f287fdebd5f5a..0a19a76645adee42100e99171570f10c3de3739f 100644 (file)
@@ -508,8 +508,8 @@ choice
          controller, and the relevant drivers for each function declared
          by the device.
 
-source "drivers/usb/gadget/legacy/Kconfig"
-
 endchoice
 
+source "drivers/usb/gadget/legacy/Kconfig"
+
 endif # USB_GADGET
index eec14e6ed20be0a43f414233fccd38efe51f7f10..77c7ecca816aa026677ec869e087f0dc747f55a7 100644 (file)
@@ -146,7 +146,6 @@ int config_ep_by_speed(struct usb_gadget *g,
                        struct usb_function *f,
                        struct usb_ep *_ep)
 {
-       struct usb_composite_dev        *cdev = get_gadget_data(g);
        struct usb_endpoint_descriptor *chosen_desc = NULL;
        struct usb_descriptor_header **speed_desc = NULL;
 
@@ -226,8 +225,12 @@ ep_found:
                        _ep->maxburst = comp_desc->bMaxBurst + 1;
                        break;
                default:
-                       if (comp_desc->bMaxBurst != 0)
+                       if (comp_desc->bMaxBurst != 0) {
+                               struct usb_composite_dev *cdev;
+
+                               cdev = get_gadget_data(g);
                                ERROR(cdev, "ep0 bMaxBurst must be 0\n");
+                       }
                        _ep->maxburst = 1;
                        break;
                }
index 97ea059a7aa471192710be177276c8b969619e2b..b6cf5ab5a0a135bb36e09de7b96e13645caa1abe 100644 (file)
@@ -1012,7 +1012,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                else
                        ret = ep->status;
                goto error_mutex;
-       } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
+       } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
                ret = -ENOMEM;
        } else {
                req->buf      = data;
@@ -2282,9 +2282,18 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
                int i;
 
                if (len < sizeof(*d) ||
-                   d->bFirstInterfaceNumber >= ffs->interfaces_count ||
-                   !d->Reserved1)
+                   d->bFirstInterfaceNumber >= ffs->interfaces_count)
                        return -EINVAL;
+               if (d->Reserved1 != 1) {
+                       /*
+                        * According to the spec, Reserved1 must be set to 1
+                        * but older kernels incorrectly rejected non-zero
+                        * values.  We fix it here to avoid returning EINVAL
+                        * in response to values we used to accept.
+                        */
+                       pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
+                       d->Reserved1 = 1;
+               }
                for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
                        if (d->Reserved2[i])
                                return -EINVAL;
index a12fb459dbd9f6b8fccb06ce4d5fd18084f0b38b..9570bbeced4f86e541a690dfe2112cff12a785c7 100644 (file)
 # both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
 #
 
+menuconfig USB_GADGET_LEGACY
+       bool "Legacy USB Gadget Support"
+       help
+          Legacy USB gadgets are USB gadgets that do not use the USB gadget
+          configfs interface.
+
+if USB_GADGET_LEGACY
+
 config USB_ZERO
        tristate "Gadget Zero (DEVELOPMENT)"
        select USB_LIBCOMPOSITE
@@ -490,3 +498,5 @@ config USB_G_WEBCAM
 
          Say "y" to link the driver statically, or "m" to build a
          dynamically linked module called "g_webcam".
+
+endif
index d39f070acbd705573b49a1b8df1b1f0077470940..01b44e15962378d02e260ee37872ded457bc763d 100644 (file)
@@ -642,7 +642,6 @@ static const struct of_device_id bdc_of_match[] = {
 static struct platform_driver bdc_driver = {
        .driver         = {
                .name   = BRCM_BDC_NAME,
-               .owner  = THIS_MODULE,
                .pm = &bdc_pm_ops,
                .of_match_table = bdc_of_match,
        },
index 61422d624ad090f1dff586d59a9a31e2ce2ea0fb..93eff7dec2f5e9d7d9449b1189f8e5d2c91728e6 100644 (file)
@@ -1069,8 +1069,12 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc)
 static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
                                            enum usb_device_speed speed)
 {
-       if (udc->gadget->ops->udc_set_speed)
-               udc->gadget->ops->udc_set_speed(udc->gadget, speed);
+       if (udc->gadget->ops->udc_set_speed) {
+               enum usb_device_speed s;
+
+               s = min(speed, udc->gadget->max_speed);
+               udc->gadget->ops->udc_set_speed(udc->gadget, s);
+       }
 }
 
 /**
index bc37f40baacf2b54fc2528c59f1b6d80ebae3a5b..6e87af2483679aac59f23e942dde9367a30d35e4 100644 (file)
 #define USB3_EP0_SS_MAX_PACKET_SIZE    512
 #define USB3_EP0_HSFS_MAX_PACKET_SIZE  64
 #define USB3_EP0_BUF_SIZE              8
-#define USB3_MAX_NUM_PIPES             30
+#define USB3_MAX_NUM_PIPES             6       /* This includes PIPE 0 */
 #define USB3_WAIT_US                   3
 #define USB3_DMA_NUM_SETTING_AREA      4
 /*
index 19f00424f53ed3b6246788dbec370e198a7b86a9..3ed75aaa09d9d37b786c7865af231274fa8d4542 100644 (file)
@@ -827,7 +827,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
                        default:                /* unknown */
                                break;
                        }
-                       temp = (cap >> 8) & 0xff;
+                       offset = (cap >> 8) & 0xff;
                }
        }
 #endif
index e1fba4688509df32d0aee83be2f53abbf65f37b5..15f7d422885f0c95d887d22eca4baf85a509ebf3 100644 (file)
@@ -934,6 +934,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
        if (!vdev)
                return;
 
+       if (vdev->real_port == 0 ||
+                       vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+               xhci_dbg(xhci, "Bad vdev->real_port.\n");
+               goto out;
+       }
+
        tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
        list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
                /* is this a hub device that added a tt_info to the tts list */
@@ -947,6 +953,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
                        }
                }
        }
+out:
        /* we are now at a leaf device */
        xhci_debugfs_remove_slot(xhci, slot_id);
        xhci_free_virt_device(xhci, slot_id);
index c239c688076cf924060eccdb9698b2a0a86ab75d..6eb87c6e4d2420a3b19a6c41c1b797fac5e642dc 100644 (file)
@@ -2477,12 +2477,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 */
                if (list_empty(&ep_ring->td_list)) {
                        /*
-                        * A stopped endpoint may generate an extra completion
-                        * event if the device was suspended.  Don't print
-                        * warnings.
+                        * Don't print warnings if this is due to a stopped
+                        * endpoint generating an extra completion event when
+                        * the device was suspended, or an event for the last
+                        * TRB of a short TD we already got a short event for.
+                        * The short TD is already removed from the TD list.
                         */
+
                        if (!(trb_comp_code == COMP_STOPPED ||
-                               trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
+                             trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+                             ep_ring->last_td_was_short)) {
                                xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
                                                TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
                                                ep_index);
index aaa7d901a06de68a7902bbba02707ec29d6bcbd5..3b3513874cfd1e75a5380ee208f02c1144919cd1 100644 (file)
@@ -238,6 +238,7 @@ static void option_instat_callback(struct urb *urb);
 /* These Quectel products use Quectel's vendor ID */
 #define QUECTEL_PRODUCT_EC21                   0x0121
 #define QUECTEL_PRODUCT_EC25                   0x0125
+#define QUECTEL_PRODUCT_BG96                   0x0296
 
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
@@ -1182,6 +1183,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
index ab5a2ac4993ab234f55ecdb0c4669277e6700f2a..aaf4813e4971eeb98ccae06aa37b7047404baac2 100644 (file)
@@ -31,12 +31,14 @@ static const struct usb_device_id id_table[] = {
 };
 
 static const struct usb_device_id dbc_id_table[] = {
+       { USB_DEVICE(0x1d6b, 0x0010) },
        { USB_DEVICE(0x1d6b, 0x0011) },
        { },
 };
 
 static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(0x0525, 0x127a) },
+       { USB_DEVICE(0x1d6b, 0x0010) },
        { USB_DEVICE(0x1d6b, 0x0011) },
        { },
 };
index 1fcd758a961f5e192082efef9fb69c8efe3bd962..3734a25e09e539f05f16e9f16b5dfb371d0e3799 100644 (file)
@@ -112,6 +112,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
                }
        }
 
+       /* All Seagate disk enclosures have broken ATA pass-through support */
+       if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
+               flags |= US_FL_NO_ATA_1X;
+
        usb_stor_adjust_quirks(udev, &flags);
 
        if (flags & US_FL_IGNORE_UAS) {
index 465d7da849c3415dc0d7fa25068db3ca65e37b9b..bcb2744c59772ce6453bbf280afae83b948163d9 100644 (file)
@@ -1,13 +1,53 @@
 
-menu "USB Power Delivery and Type-C drivers"
+menuconfig TYPEC
+       tristate "USB Type-C Support"
+       help
+         USB Type-C Specification defines a cable and connector for USB where
+         only one type of plug is supported on both ends, i.e. there will not
+         be a Type-A plug on one end of the cable and a Type-B plug on the other.
+         Determination of the host-to-device relationship happens through a
+         specific Configuration Channel (CC) which goes through the USB Type-C
+         cable. The Configuration Channel may also be used to detect optional
+         Accessory Modes - Analog Audio and Debug - and if USB Power Delivery
+         is supported, the Alternate Modes, where the connector is used for
+         something else than USB communication.
+
+         USB Power Delivery Specification defines a protocol that can be used
+         to negotiate the voltage and current levels with the connected
+         partners. USB Power Delivery allows higher voltages than the normal
+         5V, up to 20V, and current up to 5A over the cable. The USB Power
+         Delivery protocol is also used to negotiate the optional Alternate
+         Modes when they are supported. USB Power Delivery does not depend on
+         the USB Type-C connector; however, it is mostly used together with USB
+         Type-C connectors.
+
+         USB Type-C and USB Power Delivery Specifications define a set of state
+         machines that need to be implemented in either software or firmware.
+         Simple USB Type-C PHYs, for example USB Type-C Port Controller
+         Interface Specification compliant "Port Controllers" need the state
+         Interface Specification compliant "Port Controllers", need the state
+         Delivery controllers handle the state machines inside their firmware.
+         The USB Type-C and Power Delivery controllers usually function
+         autonomously, and do not necessarily require drivers.
+
+         Enable this configuration option if you have USB Type-C connectors on
+         your system and 1) you know your USB Type-C hardware requires OS
+         control (a driver) to function, or 2) if you need to be able to read
+         the status of the USB Type-C ports in your system, or 3) if you need
+         to be able to swap the power role (decide whether you are supplying or
+         consuming power over the cable) or data role (host or device) when
+         both roles are supported.
+
+         For more information, see the kernel documentation for USB Type-C
+         Connector Class API (Documentation/driver-api/usb/typec.rst)
+         <https://www.kernel.org/doc/html/latest/driver-api/usb/typec.html>
+         and ABI (Documentation/ABI/testing/sysfs-class-typec).
 
-config TYPEC
-       tristate
+if TYPEC
 
 config TYPEC_TCPM
        tristate "USB Type-C Port Controller Manager"
        depends on USB
-       select TYPEC
        help
          The Type-C Port Controller Manager provides a USB PD and USB Type-C
          state machine for use with Type-C Port Controllers.
@@ -22,7 +62,6 @@ config TYPEC_WCOVE
        depends on INTEL_SOC_PMIC
        depends on INTEL_PMC_IPC
        depends on BXT_WC_PMIC_OPREGION
-       select TYPEC
        help
          This driver adds support for USB Type-C detection on Intel Broxton
          platforms that have Intel Whiskey Cove PMIC. The driver can detect the
@@ -31,14 +70,13 @@ config TYPEC_WCOVE
          To compile this driver as module, choose M here: the module will be
          called typec_wcove
 
-endif
+endif # TYPEC_TCPM
 
 source "drivers/usb/typec/ucsi/Kconfig"
 
 config TYPEC_TPS6598X
        tristate "TI TPS6598x USB Power Delivery controller driver"
        depends on I2C
-       select TYPEC
        help
          Say Y or M here if your system has TI TPS65982 or TPS65983 USB Power
          Delivery controller.
@@ -46,4 +84,4 @@ config TYPEC_TPS6598X
          If you choose to build this driver as a dynamically linked module, the
          module will be called tps6598x.ko.
 
-endmenu
+endif # TYPEC
index d0c31cee472099598eb1f727cb4f9164dade2a84..e36d6c73c4a4184c6246b14ab27b8e0ecb393be8 100644 (file)
@@ -1,7 +1,6 @@
 config TYPEC_UCSI
        tristate "USB Type-C Connector System Software Interface driver"
        depends on !CPU_BIG_ENDIAN
-       select TYPEC
        help
          USB Type-C Connector System Software Interface (UCSI) is a
          specification for an interface that allows the operating system to
index 713e941709632f61664d5e97ed081163883835ac..6b3278c4b72a0d745a724b3ef93cce0dc3dccd7d 100644 (file)
@@ -1098,7 +1098,6 @@ static int hcd_name_to_id(const char *name)
 static int vhci_setup(struct usb_hcd *hcd)
 {
        struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
-       hcd->self.sg_tablesize = ~0;
        if (usb_hcd_is_primary_hcd(hcd)) {
                vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd);
                vhci->vhci_hcd_hs->vhci = vhci;
index 8d626d7c2e7e79db8d243278e805c96ad563bb3d..c7bdeb6556469efb93e2a6a7e742da3a37ad7e69 100644 (file)
@@ -778,16 +778,6 @@ static void handle_rx(struct vhost_net *net)
                /* On error, stop handling until the next kick. */
                if (unlikely(headcount < 0))
                        goto out;
-               if (nvq->rx_array)
-                       msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
-               /* On overrun, truncate and discard */
-               if (unlikely(headcount > UIO_MAXIOV)) {
-                       iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
-                       err = sock->ops->recvmsg(sock, &msg,
-                                                1, MSG_DONTWAIT | MSG_TRUNC);
-                       pr_debug("Discarded rx packet: len %zd\n", sock_len);
-                       continue;
-               }
                /* OK, now we need to know about added descriptors. */
                if (!headcount) {
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@@ -800,6 +790,16 @@ static void handle_rx(struct vhost_net *net)
                         * they refilled. */
                        goto out;
                }
+               if (nvq->rx_array)
+                       msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
+               /* On overrun, truncate and discard */
+               if (unlikely(headcount > UIO_MAXIOV)) {
+                       iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
+                       err = sock->ops->recvmsg(sock, &msg,
+                                                1, MSG_DONTWAIT | MSG_TRUNC);
+                       pr_debug("Discarded rx packet: len %zd\n", sock_len);
+                       continue;
+               }
                /* We don't need to be notified again. */
                iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
                fixup = msg.msg_iter;
index 48230a5e12f262b67d28d87adc713f462e8ec5fc..bf7ff3934d7fff5169e5252cd8fc0a29ea25a133 100644 (file)
@@ -333,6 +333,8 @@ int register_virtio_device(struct virtio_device *dev)
        /* device_register() causes the bus infrastructure to look for a
         * matching driver. */
        err = device_register(&dev->dev);
+       if (err)
+               ida_simple_remove(&virtio_index_ida, dev->index);
 out:
        if (err)
                virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
index 7960746f759788d545b9e85e384a56cbf99a7606..a1fb52cb3f0ab5c0f066d3d773a82c73665f54da 100644 (file)
@@ -174,13 +174,12 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
        while ((page = balloon_page_pop(&pages))) {
                balloon_page_enqueue(&vb->vb_dev_info, page);
 
-               vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
-
                set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
                vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
                if (!virtio_has_feature(vb->vdev,
                                        VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                        adjust_managed_page_count(page, -1);
+               vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
        }
 
        num_allocated_pages = vb->num_pfns;
index 28fa85276eec6679cd1ad7a0b408975cbea9204f..60316b52d6591459d4c25bc0d434c4bfea3d2fef 100644 (file)
@@ -2268,7 +2268,7 @@ static int show_timer(struct seq_file *m, void *v)
        notify = timer->it_sigev_notify;
 
        seq_printf(m, "ID: %d\n", timer->it_id);
-       seq_printf(m, "signal: %d/%p\n",
+       seq_printf(m, "signal: %d/%px\n",
                   timer->sigq->info.si_signo,
                   timer->sigq->info.si_value.sival_ptr);
        seq_printf(m, "notify: %s/%s.%d\n",
index f36ecc2a57128cdf5df582b0b7b00d125ffd7ca7..3b0ba54cc4d5b0ea9bc7a11d48b476b48e9e9e22 100644 (file)
@@ -216,6 +216,8 @@ static inline void debugfs_remove(struct dentry *dentry)
 static inline void debugfs_remove_recursive(struct dentry *dentry)
 { }
 
+const struct file_operations *debugfs_real_fops(const struct file *filp);
+
 static inline int debugfs_file_get(struct dentry *dentry)
 {
        return 0;
index e8f8e8fb244d649830dfc499163a1e8a7d0e476f..81ed9b2d84dcc78e1b2213e9a22efd0f4f384330 100644 (file)
@@ -704,7 +704,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
        return ret;
 }
 
-#ifdef CONFIG_HAS_DMA
 static inline int dma_get_cache_alignment(void)
 {
 #ifdef ARCH_DMA_MINALIGN
@@ -712,7 +711,6 @@ static inline int dma_get_cache_alignment(void)
 #endif
        return 1;
 }
-#endif
 
 /* flags for the coherent memory api */
 #define DMA_MEMORY_EXCLUSIVE           0x01
index f3e97c5f94c96bbaf4ef338f9bad6145f552b1be..6c9336626592b0a07e4216ea97a72a050e7146d4 100644 (file)
@@ -708,6 +708,7 @@ struct vmbus_channel {
        u8 monitor_bit;
 
        bool rescind; /* got rescind msg */
+       struct completion rescind_event;
 
        u32 ringbuffer_gpadlhandle;
 
index 34d59bfdce2d2b12b0f6d7b61c601bfc0e2cb518..464458d20b16501ef45633c09cb2b7705e9f3f84 100644 (file)
 #define LPTIM2_OUT     "lptim2_out"
 #define LPTIM3_OUT     "lptim3_out"
 
-#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
+#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
 bool is_stm32_lptim_trigger(struct iio_trigger *trig);
 #else
 static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig)
 {
+#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
+       pr_warn_once("stm32 lptim_trigger not linked in\n");
+#endif
        return false;
 }
 #endif
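
The hunk above switches the header stub from IS_ENABLED() to IS_REACHABLE(), so a built-in caller quietly gets the inline fallback (plus a one-time warning) when the trigger driver is built as a module it cannot link against. A hedged sketch of the same pattern using a hypothetical CONFIG_FOO_TRIGGER (kernel-header style, not compilable on its own):

	/*
	 * is_foo_trigger() is a real symbol only when it can actually be linked:
	 * built-in, or both caller and provider built as modules.
	 */
	#if IS_REACHABLE(CONFIG_FOO_TRIGGER)
	bool is_foo_trigger(struct iio_trigger *trig);
	#else
	static inline bool is_foo_trigger(struct iio_trigger *trig)
	{
	#if IS_ENABLED(CONFIG_FOO_TRIGGER)
		/* The driver exists but is a module while this caller is built-in. */
		pr_warn_once("foo trigger enabled but not reachable\n");
	#endif
		return false;
	}
	#endif
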
index dd418955962bc23a6e30af9c2edd3fed57118f69..39fb3700f7a92aae1a6c3d417f5effbfb93d6494 100644 (file)
@@ -230,7 +230,7 @@ irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
        data->chip = chip;
 }
 
-static inline int irq_balancing_disabled(unsigned int irq)
+static inline bool irq_balancing_disabled(unsigned int irq)
 {
        struct irq_desc *desc;
 
@@ -238,7 +238,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
        return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
 }
 
-static inline int irq_is_percpu(unsigned int irq)
+static inline bool irq_is_percpu(unsigned int irq)
 {
        struct irq_desc *desc;
 
@@ -246,7 +246,7 @@ static inline int irq_is_percpu(unsigned int irq)
        return desc->status_use_accessors & IRQ_PER_CPU;
 }
 
-static inline int irq_is_percpu_devid(unsigned int irq)
+static inline bool irq_is_percpu_devid(unsigned int irq)
 {
        struct irq_desc *desc;
 
index e69402d4a8aecbdc6fe4a40e8ad7e01957badbd6..d609e6dc5bad00bb54f9258c077dc3220cbcb938 100644 (file)
@@ -184,7 +184,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
        struct serdev_device *serdev = ctrl->serdev;
 
        if (!serdev || !serdev->ops->receive_buf)
-               return -EINVAL;
+               return 0;
 
        return serdev->ops->receive_buf(serdev, data, count);
 }
index bc486ef23f20f91ce3ed183e935399d1e4c55e18..a38c80e9f91efee011f22bc4e6755f3e4d85ff69 100644 (file)
@@ -1406,8 +1406,7 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
 }
 
 /*
- * If users == 1, we are the only owner and are can avoid redundant
- * atomic change.
+ * If users == 1, we are the only owner and can avoid redundant atomic changes.
  */
 
 /**
index e32dfe098e822f559810d785a685e05054974cc0..40839c02d28c04389218a72012f784efc8ca5ff8 100644 (file)
@@ -117,6 +117,12 @@ struct attribute_group {
        .show   = _name##_show,                                         \
 }
 
+#define __ATTR_RO_MODE(_name, _mode) {                                 \
+       .attr   = { .name = __stringify(_name),                         \
+                   .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },          \
+       .show   = _name##_show,                                         \
+}
+
 #define __ATTR_WO(_name) {                                             \
        .attr   = { .name = __stringify(_name), .mode = S_IWUSR },      \
        .store  = _name##_store,                                        \
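
A hedged usage sketch for the new __ATTR_RO_MODE() helper (the foo attribute and foo_show() below are hypothetical, not part of this series): the macro fills in .attr and wires .show to <name>_show, so a read-only attribute with a non-default mode no longer needs its struct open-coded:

	/* Hypothetical sysfs attribute readable by its owner only (mode 0400). */
	static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
				char *buf)
	{
		return sprintf(buf, "%d\n", 42);
	}

	static struct kobj_attribute foo_attr = __ATTR_RO_MODE(foo, 0400);
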
index 16f949eef52fdfd7c90fa15b44093334d1355aaf..2f8f93da5dc2660f4db37c04f8a434809b3120a1 100644 (file)
@@ -503,7 +503,8 @@ struct sctp_datamsg {
        /* Did the message fail to send? */
        int send_error;
        u8 send_failed:1,
-          can_delay;       /* should this message be Nagle delayed */
+          can_delay:1, /* should this message be Nagle delayed */
+          abandoned:1; /* should this message be abandoned */
 };
 
 struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
index 524cee4f4c817b3583385e9ae9f991503ab934fd..01dbfea3267277ad5efacef4b59b4a1c615de66b 100644 (file)
@@ -14,7 +14,6 @@ struct tcf_sample {
        struct psample_group __rcu *psample_group;
        u32 psample_group_num;
        struct list_head tcfm_list;
-       struct rcu_head rcu;
 };
 #define to_sample(a) ((struct tcf_sample *)a)
 
index 4e09398009c10a72478b43d3cffc24ba01612b91..6998707e81f343ef8d893c0b2ba16db541082230 100644 (file)
@@ -844,12 +844,11 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
 }
 #endif
 
-/* TCP_SKB_CB reference means this can not be used from early demux */
 static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
-           skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+           skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
                return true;
 #endif
        return false;
index 0f9cbf96c093d86ae926ae5380f82eb454fecd02..6df6fe0c21980b4eb686bc20ff10f0c2bf28280b 100644 (file)
@@ -159,11 +159,11 @@ struct expander_device {
 
 struct sata_device {
        unsigned int class;
-       struct smp_resp        rps_resp; /* report_phy_sata_resp */
        u8     port_no;        /* port number, if this is a PM (Port) */
 
        struct ata_port *ap;
        struct ata_host ata_host;
+       struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
        u8     fis[ATA_RESP_FIS_SIZE];
 };
 
index 4cd0f05d01134d1a1e2d5bd231407bfd7d92d250..8989a92c571a2d7036b74b233913b588e4e4248c 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/netdevice.h>
 #include <linux/filter.h>
 #include <linux/tracepoint.h>
+#include <linux/bpf.h>
 
 #define __XDP_ACT_MAP(FN)      \
        FN(ABORTED)             \
index 41a0a81b01e6bc4c4c5a65f6c36f4be9b01c160e..c4c79aa331bd123e9d340ec7283ec97e89699584 100644 (file)
@@ -880,6 +880,8 @@ struct usb_wireless_cap_descriptor {        /* Ultra Wide Band */
        __u8  bReserved;
 } __attribute__((packed));
 
+#define USB_DT_USB_WIRELESS_CAP_SIZE   11
+
 /* USB 2.0 Extension descriptor */
 #define        USB_CAP_TYPE_EXT                2
 
@@ -1072,6 +1074,7 @@ struct usb_ptm_cap_descriptor {
        __u8  bDevCapabilityType;
 } __attribute__((packed));
 
+#define USB_DT_USB_PTM_ID_SIZE         3
 /*
  * The size of the descriptor for the Sublink Speed Attribute Count
  * (SSAC) specified in bmAttributes[4:0].
index b9f8686a84cf1a5ee9d2b92d21579af11d8690aa..86b50aa26ee80adac9ba7ac52248c64cbea19b26 100644 (file)
@@ -1447,7 +1447,8 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
        rcu_read_lock();
        prog = rcu_dereference(progs)->progs;
        for (; *prog; prog++)
-               cnt++;
+               if (*prog != &dummy_bpf_prog.prog)
+                       cnt++;
        rcu_read_unlock();
        return cnt;
 }
index 68ec884440b75da08824249db74bb992f6d938ce..8455b89d1bbf698f86c44bd1e1846b4c7d4ba60f 100644 (file)
@@ -1,3 +1,18 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
 #include <linux/bpf.h>
 #include <linux/bpf_verifier.h>
 #include <linux/bug.h>
index 04892a82f6ac36c92324806b66a1c1855880c8f7..41376c3ac93b06c8163d6d764fc9a015bafdcdb4 100644 (file)
@@ -780,8 +780,8 @@ static int takedown_cpu(unsigned int cpu)
        BUG_ON(cpu_online(cpu));
 
        /*
-        * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
-        * runnable tasks from the cpu, there's only the idle task left now
+        * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
+        * all runnable tasks from the CPU; there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
@@ -1289,11 +1289,6 @@ static struct cpuhp_step cpuhp_bp_states[] = {
                .teardown.single        = NULL,
                .cant_stop              = true,
        },
-       [CPUHP_AP_SMPCFD_DYING] = {
-               .name                   = "smpcfd:dying",
-               .startup.single         = NULL,
-               .teardown.single        = smpcfd_dying_cpu,
-       },
        /*
         * Handled on the control processor until the plugged processor manages
         * this itself.
@@ -1335,6 +1330,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
                .startup.single         = NULL,
                .teardown.single        = rcutree_dying_cpu,
        },
+       [CPUHP_AP_SMPCFD_DYING] = {
+               .name                   = "smpcfd:dying",
+               .startup.single         = NULL,
+               .teardown.single        = smpcfd_dying_cpu,
+       },
        /* Entry state on starting. Interrupts enabled from here on. Transient
         * state for synchronization */
        [CPUHP_AP_ONLINE] = {
index e74be38245adf732f34c55c0a676004f59870ba4..ed5d34925ad0617a40aeed3774b0e393aec03e99 100644 (file)
@@ -350,7 +350,7 @@ poll_again:
                        }
                        kdb_printf("\n");
                        for (i = 0; i < count; i++) {
-                               if (kallsyms_symbol_next(p_tmp, i) < 0)
+                               if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
                                        break;
                                kdb_printf("%s ", p_tmp);
                                *(p_tmp + len) = '\0';
index 16beab4767e1e686e8ccd3642a82cbc4adde7f59..5961ef6dfd646f0943c667c660a74b2bf6196b46 100644 (file)
@@ -6639,6 +6639,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
        struct perf_namespaces_event *namespaces_event = data;
        struct perf_output_handle handle;
        struct perf_sample_data sample;
+       u16 header_size = namespaces_event->event_id.header.size;
        int ret;
 
        if (!perf_event_namespaces_match(event))
@@ -6649,7 +6650,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
        ret = perf_output_begin(&handle, event,
                                namespaces_event->event_id.header.size);
        if (ret)
-               return;
+               goto out;
 
        namespaces_event->event_id.pid = perf_event_pid(event,
                                                        namespaces_event->task);
@@ -6661,6 +6662,8 @@ static void perf_event_namespaces_output(struct perf_event *event,
        perf_event__output_id_sample(event, &handle, &sample);
 
        perf_output_end(&handle);
+out:
+       namespaces_event->event_id.header.size = header_size;
 }
 
 static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
index 7df2480005f863693f20d5450515d85085c225b6..0ba0dd8863a779f5b318ef07f03c2915ea57cd57 100644 (file)
@@ -384,7 +384,9 @@ unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
 {
        struct cpumap *cm = this_cpu_ptr(m->maps);
 
-       return (m->global_available - cpudown) ? cm->available : 0;
+       if (!cpudown)
+               return m->global_available;
+       return m->global_available - cm->available;
 }
 
 /**
index 9776da8db180d63c94f0a698bac844e91c24c0ce..670d8d7d8087ccabb8d9fe2c8397134d1559bcd8 100644 (file)
@@ -4790,7 +4790,8 @@ void lockdep_invariant_state(bool force)
         * Verify the former, enforce the latter.
         */
        WARN_ON_ONCE(!force && current->lockdep_depth);
-       invalidate_xhlock(&xhlock(current->xhlock_idx));
+       if (current->xhlocks)
+               invalidate_xhlock(&xhlock(current->xhlock_idx));
 }
 
 static int cross_lock(struct lockdep_map *lock)
index 5d81206a572d721e7d96b129f160a4e16d2e2f2e..b9006617710f591bc659e8f522c6d73baa2a04d5 100644 (file)
@@ -3141,9 +3141,6 @@ void dump_stack_print_info(const char *log_lvl)
 void show_regs_print_info(const char *log_lvl)
 {
        dump_stack_print_info(log_lvl);
-
-       printk("%stask: %p task.stack: %p\n",
-              log_lvl, current, task_stack_page(current));
 }
 
 #endif
index 4037e19bbca25939f0dd57b05f8fb25de8a90908..2fe3aa853e4dbacef363b70246390f94cc67932c 100644 (file)
@@ -3413,9 +3413,9 @@ void set_task_rq_fair(struct sched_entity *se,
  * _IFF_ we look at the pure running and runnable sums. Because they
  * represent the very same entity, just at different points in the hierarchy.
  *
- *
- * Per the above update_tg_cfs_util() is trivial (and still 'wrong') and
- * simply copies the running sum over.
+ * Per the above update_tg_cfs_util() is trivial and simply copies the running
+ * sum over (but still wrong, because the group entity and group rq do not have
+ * their PELT windows aligned).
  *
  * However, update_tg_cfs_runnable() is more complex. So we have:
  *
@@ -3424,11 +3424,11 @@ void set_task_rq_fair(struct sched_entity *se,
  * And since, like util, the runnable part should be directly transferable,
  * the following would _appear_ to be the straight forward approach:
  *
- *   grq->avg.load_avg = grq->load.weight * grq->avg.running_avg       (3)
+ *   grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg      (3)
  *
  * And per (1) we have:
  *
- *   ge->avg.running_avg == grq->avg.running_avg
+ *   ge->avg.runnable_avg == grq->avg.runnable_avg
  *
  * Which gives:
  *
@@ -3447,27 +3447,28 @@ void set_task_rq_fair(struct sched_entity *se,
  * to (shortly) return to us. This only works by keeping the weights as
  * integral part of the sum. We therefore cannot decompose as per (3).
  *
- * OK, so what then?
+ * Another reason this doesn't work is that runnable isn't a 0-sum entity.
+ * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
+ * rq itself is runnable anywhere between 2/3 and 1 depending on how the
+ * runnable sections of these tasks overlap (or not). If they were to perfectly
+ * align the rq as a whole would be runnable 2/3 of the time. If however we
+ * always have at least 1 runnable task, the rq as a whole is always runnable.
  *
+ * So we'll have to approximate.. :/
  *
- * Another way to look at things is:
+ * Given the constraint:
  *
- *   grq->avg.load_avg = \Sum se->avg.load_avg
+ *   ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
  *
- * Therefore, per (2):
+ * We can construct a rule that adds runnable to a rq by assuming minimal
+ * overlap.
  *
- *   grq->avg.load_avg = \Sum se->load.weight * se->avg.runnable_avg
+ * On removal, we'll assume each task is equally runnable; which yields:
  *
- * And the very thing we're propagating is a change in that sum (someone
- * joined/left). So we can easily know the runnable change, which would be, per
- * (2) the already tracked se->load_avg divided by the corresponding
- * se->weight.
+ *   grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
  *
- * Basically (4) but in differential form:
+ * XXX: only do this for the part of runnable > running ?
  *
- *   d(runnable_avg) += se->avg.load_avg / se->load.weight
- *                                                                (5)
- *   ge->avg.load_avg += ge->load.weight * d(runnable_avg)
  */
 
 static inline void
@@ -3479,6 +3480,14 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
        if (!delta)
                return;
 
+       /*
+        * The relation between sum and avg is:
+        *
+        *   LOAD_AVG_MAX - 1024 + sa->period_contrib
+        *
+        * however, the PELT windows are not aligned between grq and gse.
+        */
+
        /* Set new sched_entity's utilization */
        se->avg.util_avg = gcfs_rq->avg.util_avg;
        se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
@@ -3491,33 +3500,68 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 static inline void
 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-       long runnable_sum = gcfs_rq->prop_runnable_sum;
-       long runnable_load_avg, load_avg;
-       s64 runnable_load_sum, load_sum;
+       long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+       unsigned long runnable_load_avg, load_avg;
+       u64 runnable_load_sum, load_sum = 0;
+       s64 delta_sum;
 
        if (!runnable_sum)
                return;
 
        gcfs_rq->prop_runnable_sum = 0;
 
+       if (runnable_sum >= 0) {
+               /*
+                * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
+                * the CPU is saturated running == runnable.
+                */
+               runnable_sum += se->avg.load_sum;
+               runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX);
+       } else {
+               /*
+                * Estimate the new unweighted runnable_sum of the gcfs_rq by
+                * assuming all tasks are equally runnable.
+                */
+               if (scale_load_down(gcfs_rq->load.weight)) {
+                       load_sum = div_s64(gcfs_rq->avg.load_sum,
+                               scale_load_down(gcfs_rq->load.weight));
+               }
+
+               /* But make sure to not inflate se's runnable */
+               runnable_sum = min(se->avg.load_sum, load_sum);
+       }
+
+       /*
+        * runnable_sum can't be lower than running_sum.
+        * The running_sum is scaled with CPU capacity whereas the runnable_sum
+        * is not, so rescale running_sum first.
+        */
+       running_sum = se->avg.util_sum /
+               arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+       runnable_sum = max(runnable_sum, running_sum);
+
        load_sum = (s64)se_weight(se) * runnable_sum;
        load_avg = div_s64(load_sum, LOAD_AVG_MAX);
 
-       add_positive(&se->avg.load_sum, runnable_sum);
-       add_positive(&se->avg.load_avg, load_avg);
+       delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
+       delta_avg = load_avg - se->avg.load_avg;
 
-       add_positive(&cfs_rq->avg.load_avg, load_avg);
-       add_positive(&cfs_rq->avg.load_sum, load_sum);
+       se->avg.load_sum = runnable_sum;
+       se->avg.load_avg = load_avg;
+       add_positive(&cfs_rq->avg.load_avg, delta_avg);
+       add_positive(&cfs_rq->avg.load_sum, delta_sum);
 
        runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
        runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
+       delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
+       delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
 
-       add_positive(&se->avg.runnable_load_sum, runnable_sum);
-       add_positive(&se->avg.runnable_load_avg, runnable_load_avg);
+       se->avg.runnable_load_sum = runnable_sum;
+       se->avg.runnable_load_avg = runnable_load_avg;
 
        if (se->on_rq) {
-               add_positive(&cfs_rq->avg.runnable_load_avg, runnable_load_avg);
-               add_positive(&cfs_rq->avg.runnable_load_sum, runnable_load_sum);
+               add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
+               add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
        }
 }
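Summing up the fair.c change: the old differential rule (5) is replaced by the two estimates the rewritten comment describes. Added runnable is accumulated assuming minimal overlap and clipped at LOAD_AVG_MAX; removed runnable is estimated by treating every task on the group rq as equally runnable (weighted load_sum divided by the weight), capped by the entity's own load_sum; the result is then applied as deltas so the cfs_rq aggregates stay consistent with the overwritten se values. A stand-alone sketch of the two branches under those assumptions, with a toy signature rather than the kernel's sched_avg types (only the LOAD_AVG_MAX value is taken from PELT):

/* Toy model of the runnable_sum propagation described above; illustrative only. */
#include <stdio.h>

#define TOY_LOAD_AVG_MAX 47742L

static long toy_propagate_runnable(long se_load_sum, long grq_load_sum,
                                   long grq_weight, long delta)
{
        long runnable_sum;

        if (delta >= 0) {
                /* Addition: assume minimal overlap and clip at the PELT
                 * maximum; until the CPU saturates, running == runnable. */
                runnable_sum = se_load_sum + delta;
                if (runnable_sum > TOY_LOAD_AVG_MAX)
                        runnable_sum = TOY_LOAD_AVG_MAX;
        } else {
                /* Removal: assume all tasks were equally runnable ... */
                long estimate = grq_weight ? grq_load_sum / grq_weight : 0;

                /* ... but never inflate the entity's own runnable. */
                runnable_sum = se_load_sum < estimate ? se_load_sum : estimate;
        }
        return runnable_sum;
}

int main(void)
{
        /* Attach more runnable to an entity that is already partly runnable. */
        printf("%ld\n", toy_propagate_runnable(24000, 0, 0, 12000));
        /* Detach from a group rq of weight 2048 with weighted load_sum 81920000. */
        printf("%ld\n", toy_propagate_runnable(30000, 81920000L, 2048, -5000));
        return 0;
}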
 
index 98feab7933c76a0d178cd7da0115376641e7bbad..929ecb7d6b78a70f4ec4548a582f78c749d7d3ee 100644 (file)
@@ -27,7 +27,7 @@ void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
 
        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
-       __add_wait_queue_entry_tail(wq_head, wq_entry);
+       __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(add_wait_queue);
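The one-liner above matters because add_wait_queue() adds non-exclusive waiters, and a wake-up that honours exclusive semantics stops once it has woken its quota of exclusive entries; a non-exclusive waiter queued behind them could be skipped, whereas at the head it is always reached. A rough model of that scan, simplified to a wake-one pass over an array (toy types, not the kernel wait-queue API):

/* Toy model of a wake-one scan; illustrates ordering only. */
#include <stdbool.h>
#include <stdio.h>

struct toy_waiter { bool exclusive; const char *name; };

static void toy_wake(const struct toy_waiter *list, int n)
{
        for (int i = 0; i < n; i++) {
                printf("wake %s\n", list[i].name);
                if (list[i].exclusive)
                        break;          /* wake-one semantics stop here */
        }
}

int main(void)
{
        /* Non-exclusive waiter at the head is woken ... */
        const struct toy_waiter head_first[] = { { false, "poller" }, { true, "worker" } };
        /* ... whereas at the tail it would be skipped. */
        const struct toy_waiter tail_last[]  = { { true, "worker" }, { false, "poller" } };

        toy_wake(head_first, 2);
        toy_wake(tail_last, 2);
        return 0;
}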
index 27d1f4ffa3def946525b2d248757fac3620504e5..0ce99c379c3089a4857d082b64ede99feeea5282 100644 (file)
@@ -759,6 +759,8 @@ const struct bpf_prog_ops perf_event_prog_ops = {
 
 static DEFINE_MUTEX(bpf_event_mutex);
 
+#define BPF_TRACE_MAX_PROGS 64
+
 int perf_event_attach_bpf_prog(struct perf_event *event,
                               struct bpf_prog *prog)
 {
@@ -772,6 +774,12 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
                goto unlock;
 
        old_array = event->tp_event->prog_array;
+       if (old_array &&
+           bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
+               ret = -E2BIG;
+               goto unlock;
+       }
+
        ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
        if (ret < 0)
                goto unlock;
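The new check above simply refuses to grow the per-event program array past 64 entries before bpf_prog_array_copy() would allocate a bigger one. A minimal illustration of the guard, using a hypothetical helper rather than any kernel or libbpf interface:

/* Hypothetical sketch of the attach limit; illustrative only. */
#include <errno.h>
#include <stddef.h>

#define TOY_MAX_PROGS 64

static int toy_attach(size_t current_count)
{
        if (current_count >= TOY_MAX_PROGS)
                return -E2BIG;          /* same error the kernel now returns */
        /* ... copy the existing array and append the new program ... */
        return 0;
}

int main(void)
{
        return toy_attach(TOY_MAX_PROGS) == -E2BIG ? 0 : 1;
}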
index abd07a443219853b022bef41cb072e90ff8f07f0..178bb9833311f83205317b07fe64cb2e45a9f734 100644 (file)
@@ -57,10 +57,16 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
                if (state == DCCP_TIME_WAIT)
                        timeo = DCCP_TIMEWAIT_LEN;
 
+               /* tw_timer is pinned, so we need to make sure BH are disabled
+                * in the following section, otherwise the timer handler could run before
+                * we complete the initialization.
+                */
+               local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
                inet_twsk_put(tw);
+               local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
index c6bc0c4d19c624888b0d0b5a4246c7183edf63f5..77ea45da0fe9c746907a312989658af3ad3b198d 100644 (file)
@@ -1591,6 +1591,34 @@ int tcp_filter(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_filter);
 
+static void tcp_v4_restore_cb(struct sk_buff *skb)
+{
+       memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
+               sizeof(struct inet_skb_parm));
+}
+
+static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
+                          const struct tcphdr *th)
+{
+       /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
+        * barrier() makes sure compiler wont play fool^Waliasing games.
+        */
+       memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
+               sizeof(struct inet_skb_parm));
+       barrier();
+
+       TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+       TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+                                   skb->len - th->doff * 4);
+       TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
+       TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+       TCP_SKB_CB(skb)->tcp_tw_isn = 0;
+       TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
+       TCP_SKB_CB(skb)->sacked  = 0;
+       TCP_SKB_CB(skb)->has_rxtstamp =
+                       skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
+}
+
 /*
  *     From tcp_input.c
  */
@@ -1631,24 +1659,6 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
        th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
-       /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
-        * barrier() makes sure compiler wont play fool^Waliasing games.
-        */
-       memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
-               sizeof(struct inet_skb_parm));
-       barrier();
-
-       TCP_SKB_CB(skb)->seq = ntohl(th->seq);
-       TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
-                                   skb->len - th->doff * 4);
-       TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
-       TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
-       TCP_SKB_CB(skb)->tcp_tw_isn = 0;
-       TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
-       TCP_SKB_CB(skb)->sacked  = 0;
-       TCP_SKB_CB(skb)->has_rxtstamp =
-                       skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
-
 lookup:
        sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
                               th->dest, sdif, &refcounted);
@@ -1679,14 +1689,19 @@ process:
                sock_hold(sk);
                refcounted = true;
                nsk = NULL;
-               if (!tcp_filter(sk, skb))
+               if (!tcp_filter(sk, skb)) {
+                       th = (const struct tcphdr *)skb->data;
+                       iph = ip_hdr(skb);
+                       tcp_v4_fill_cb(skb, iph, th);
                        nsk = tcp_check_req(sk, skb, req, false);
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_and_relse;
                }
                if (nsk == sk) {
                        reqsk_put(req);
+                       tcp_v4_restore_cb(skb);
                } else if (tcp_child_process(sk, nsk, skb)) {
                        tcp_v4_send_reset(nsk, skb);
                        goto discard_and_relse;
@@ -1712,6 +1727,7 @@ process:
                goto discard_and_relse;
        th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
+       tcp_v4_fill_cb(skb, iph, th);
 
        skb->dev = NULL;
 
@@ -1742,6 +1758,8 @@ no_tcp_socket:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
 
+       tcp_v4_fill_cb(skb, iph, th);
+
        if (tcp_checksum_complete(skb)) {
 csum_error:
                __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
@@ -1768,6 +1786,8 @@ do_time_wait:
                goto discard_it;
        }
 
+       tcp_v4_fill_cb(skb, iph, th);
+
        if (tcp_checksum_complete(skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto csum_error;
@@ -1784,6 +1804,7 @@ do_time_wait:
                if (sk2) {
                        inet_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
+                       tcp_v4_restore_cb(skb);
                        refcounted = false;
                        goto process;
                }
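The net effect of this file's change is that the TCP control block is no longer laid over the IP control block as soon as the packet arrives: tcp_v4_fill_cb() now runs only once the skb is definitely going to be processed as TCP, and tcp_v4_restore_cb() puts the IP view back before the skb is looped around for another pass (for example from the time-wait path). A toy model of overlaying a control block in a shared buffer while preserving the lower layer's data follows; the structures are hypothetical, not the kernel's sk_buff.

/* Toy model of fill/restore over a shared cb[] area; illustrative only. */
#include <stdio.h>
#include <string.h>

struct ip_cb  { int ttl; };
struct tcp_cb { struct ip_cb saved_ip; unsigned int seq; };

struct toy_skb { _Alignas(struct tcp_cb) char cb[sizeof(struct tcp_cb)]; };

#define TOY_IPCB(skb)  ((struct ip_cb *)(skb)->cb)
#define TOY_TCPCB(skb) ((struct tcp_cb *)(skb)->cb)

static void toy_fill_cb(struct toy_skb *skb, unsigned int seq)
{
        /* Keep the IP control block inside the TCP view before the TCP
         * fields are written over the rest of the buffer. */
        memmove(&TOY_TCPCB(skb)->saved_ip, TOY_IPCB(skb), sizeof(struct ip_cb));
        TOY_TCPCB(skb)->seq = seq;
}

static void toy_restore_cb(struct toy_skb *skb)
{
        memmove(TOY_IPCB(skb), &TOY_TCPCB(skb)->saved_ip, sizeof(struct ip_cb));
}

int main(void)
{
        struct toy_skb skb = { { 0 } };

        TOY_IPCB(&skb)->ttl = 64;
        toy_fill_cb(&skb, 1234);        /* overlay the TCP view */
        toy_restore_cb(&skb);           /* IP view is intact again */
        printf("ttl = %d\n", TOY_IPCB(&skb)->ttl);
        return 0;
}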
index e36eff0403f4e80c4f7291a70614f40125652133..b079b619b60ca577d5ef20a5065fce87acecd96c 100644 (file)
@@ -310,10 +310,16 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;
 
+               /* tw_timer is pinned, so we need to make sure BH are disabled
+                * in the following section, otherwise the timer handler could run before
+                * we complete the initialization.
+                */
+               local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
                inet_twsk_put(tw);
+               local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
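In both this hunk and the DCCP one earlier the reasoning is the same: the timewait timer is pinned to the local CPU, so once it is armed a softirq could fire it before __inet_twsk_hashdance() has published the timewait socket; keeping bottom halves disabled across both steps closes that window. A purely illustrative sketch of the ordering, with stub functions standing in for the primitives named in the hunks (nothing here is a kernel API):

/* Illustrative stubs only; models the invariant, not the real primitives. */
#include <assert.h>
#include <stdbool.h>

static bool bh_disabled, timer_armed, tw_hashed;

static void stub_local_bh_disable(void) { bh_disabled = true; }
static void stub_local_bh_enable(void)  { bh_disabled = false; }

static void stub_inet_twsk_schedule(void)
{
        /* The timer is pinned to this CPU: only disabled BHs keep its
         * handler from running before the hashdance below. */
        assert(bh_disabled);
        timer_armed = true;
}

static void stub_inet_twsk_hashdance(void)
{
        tw_hashed = true;
}

int main(void)
{
        stub_local_bh_disable();
        stub_inet_twsk_schedule();
        stub_inet_twsk_hashdance();
        stub_local_bh_enable();
        return (timer_armed && tw_hashed) ? 0 : 1;
}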
index d60ddcb0bfe240d5351089ed43464683e68c1db8..d7dc23c1b2ca32fb554cccf1fbf50f736a7f6f4c 100644 (file)
@@ -1098,6 +1098,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
        ipip6_tunnel_link(sitn, t);
        t->parms.iph.ttl = p->iph.ttl;
        t->parms.iph.tos = p->iph.tos;
+       t->parms.iph.frag_off = p->iph.frag_off;
        if (t->parms.link != p->link || t->fwmark != fwmark) {
                t->parms.link = p->link;
                t->fwmark = fwmark;
index 6bb98c93edfe2ed2f16fe5229605f8108cfc7f9a..1f04ec0e4a7aa2c11b8ee27cbdd4067b5bcf32e5 100644 (file)
@@ -1454,7 +1454,6 @@ process:
                struct sock *nsk;
 
                sk = req->rsk_listener;
-               tcp_v6_fill_cb(skb, hdr, th);
                if (tcp_v6_inbound_md5_hash(sk, skb)) {
                        sk_drops_add(sk, skb);
                        reqsk_put(req);
@@ -1467,8 +1466,12 @@ process:
                sock_hold(sk);
                refcounted = true;
                nsk = NULL;
-               if (!tcp_filter(sk, skb))
+               if (!tcp_filter(sk, skb)) {
+                       th = (const struct tcphdr *)skb->data;
+                       hdr = ipv6_hdr(skb);
+                       tcp_v6_fill_cb(skb, hdr, th);
                        nsk = tcp_check_req(sk, skb, req, false);
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_and_relse;
@@ -1492,8 +1495,6 @@ process:
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
 
-       tcp_v6_fill_cb(skb, hdr, th);
-
        if (tcp_v6_inbound_md5_hash(sk, skb))
                goto discard_and_relse;
 
@@ -1501,6 +1502,7 @@ process:
                goto discard_and_relse;
        th = (const struct tcphdr *)skb->data;
        hdr = ipv6_hdr(skb);
+       tcp_v6_fill_cb(skb, hdr, th);
 
        skb->dev = NULL;
 
@@ -1590,7 +1592,6 @@ do_time_wait:
                tcp_v6_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
-               tcp_v6_restore_cb(skb);
                tcp_v6_send_reset(sk, skb);
                inet_twsk_deschedule_put(inet_twsk(sk));
                goto discard_it;
index 8f7cf4c042be2b9b4379968655bea594a2928546..dcd818fa837e0af91978d6f1128085f93eb80f15 100644 (file)
@@ -860,6 +860,7 @@ static void rxrpc_sock_destructor(struct sock *sk)
 static int rxrpc_release_sock(struct sock *sk)
 {
        struct rxrpc_sock *rx = rxrpc_sk(sk);
+       struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 
        _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
 
@@ -895,8 +896,8 @@ static int rxrpc_release_sock(struct sock *sk)
        rxrpc_release_calls_on_socket(rx);
        flush_workqueue(rxrpc_workqueue);
        rxrpc_purge_queue(&sk->sk_receive_queue);
-       rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper);
-       rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper);
+       rxrpc_queue_work(&rxnet->service_conn_reaper);
+       rxrpc_queue_work(&rxnet->client_conn_reaper);
 
        rxrpc_put_local(rx->local);
        rx->local = NULL;
index bda952ffe6a6eab394e39220a6fe6a6af19c8e08..ad2ab11031899fd0d1b398622deee579b6fa9f42 100644 (file)
@@ -123,7 +123,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                else
                        ack_at = expiry;
 
-               ack_at = jiffies + expiry;
+               ack_at += now;
                if (time_before(ack_at, call->ack_at)) {
                        WRITE_ONCE(call->ack_at, ack_at);
                        rxrpc_reduce_call_timer(call, ack_at, now,
@@ -426,7 +426,7 @@ recheck_state:
        next = call->expect_rx_by;
 
 #define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
-       
+
        set(call->expect_req_by);
        set(call->expect_term_by);
        set(call->ack_at);
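The substantive change in this file is `ack_at += now`: the preceding lines compute a relative delay (possibly clamped, or taken from the expiry), and the old `ack_at = jiffies + expiry` threw that value away; the fix only converts the relative delay into an absolute deadline. The recvmsg hunk further down applies the same conversion to resend_at. A minimal sketch of the pattern, using a toy helper rather than any kernel API:

/* Toy illustration: clamp a relative delay, then anchor it to the base time. */
#include <stdio.h>

static unsigned long toy_deadline(unsigned long now, unsigned long rel_delay,
                                  unsigned long min_delay)
{
        if (rel_delay < min_delay)
                rel_delay = min_delay;  /* keep the earlier computation ... */
        return now + rel_delay;         /* ... and only then make it absolute */
}

int main(void)
{
        printf("%lu\n", toy_deadline(100000UL, 0UL, 1UL));
        return 0;
}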
index 9e9a8db1bc9cd0f1afd3efd7e9e26c4d2890a7d3..4ca11be6be3cadcfda93eab7892292ca1ec127b5 100644 (file)
@@ -30,22 +30,18 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
        struct rxrpc_channel *chan;
        struct msghdr msg;
-       struct kvec iov;
+       struct kvec iov[3];
        struct {
                struct rxrpc_wire_header whdr;
                union {
-                       struct {
-                               __be32 code;
-                       } abort;
-                       struct {
-                               struct rxrpc_ackpacket ack;
-                               u8 padding[3];
-                               struct rxrpc_ackinfo info;
-                       };
+                       __be32 abort_code;
+                       struct rxrpc_ackpacket ack;
                };
        } __attribute__((packed)) pkt;
+       struct rxrpc_ackinfo ack_info;
        size_t len;
-       u32 serial, mtu, call_id;
+       int ioc;
+       u32 serial, mtu, call_id, padding;
 
        _enter("%d", conn->debug_id);
 
@@ -66,6 +62,13 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        msg.msg_controllen = 0;
        msg.msg_flags   = 0;
 
+       iov[0].iov_base = &pkt;
+       iov[0].iov_len  = sizeof(pkt.whdr);
+       iov[1].iov_base = &padding;
+       iov[1].iov_len  = 3;
+       iov[2].iov_base = &ack_info;
+       iov[2].iov_len  = sizeof(ack_info);
+
        pkt.whdr.epoch          = htonl(conn->proto.epoch);
        pkt.whdr.cid            = htonl(conn->proto.cid);
        pkt.whdr.callNumber     = htonl(call_id);
@@ -80,8 +83,10 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        len = sizeof(pkt.whdr);
        switch (chan->last_type) {
        case RXRPC_PACKET_TYPE_ABORT:
-               pkt.abort.code  = htonl(chan->last_abort);
-               len += sizeof(pkt.abort);
+               pkt.abort_code  = htonl(chan->last_abort);
+               iov[0].iov_len += sizeof(pkt.abort_code);
+               len += sizeof(pkt.abort_code);
+               ioc = 1;
                break;
 
        case RXRPC_PACKET_TYPE_ACK:
@@ -94,13 +99,19 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
                pkt.ack.serial          = htonl(skb ? sp->hdr.serial : 0);
                pkt.ack.reason          = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
                pkt.ack.nAcks           = 0;
-               pkt.info.rxMTU          = htonl(rxrpc_rx_mtu);
-               pkt.info.maxMTU         = htonl(mtu);
-               pkt.info.rwind          = htonl(rxrpc_rx_window_size);
-               pkt.info.jumbo_max      = htonl(rxrpc_rx_jumbo_max);
+               ack_info.rxMTU          = htonl(rxrpc_rx_mtu);
+               ack_info.maxMTU         = htonl(mtu);
+               ack_info.rwind          = htonl(rxrpc_rx_window_size);
+               ack_info.jumbo_max      = htonl(rxrpc_rx_jumbo_max);
                pkt.whdr.flags          |= RXRPC_SLOW_START_OK;
-               len += sizeof(pkt.ack) + sizeof(pkt.info);
+               padding                 = 0;
+               iov[0].iov_len += sizeof(pkt.ack);
+               len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
+               ioc = 3;
                break;
+
+       default:
+               return;
        }
 
        /* Resync with __rxrpc_disconnect_call() and check that the last call
@@ -110,9 +121,6 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        if (READ_ONCE(chan->last_call) != call_id)
                return;
 
-       iov.iov_base    = &pkt;
-       iov.iov_len     = len;
-
        serial = atomic_inc_return(&conn->serial);
        pkt.whdr.serial = htonl(serial);
 
@@ -127,7 +135,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
                break;
        }
 
-       kernel_sendmsg(conn->params.local->socket, &msg, &iov, 1, len);
+       kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
        _leave("");
        return;
 }
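Instead of relying on a packed on-stack struct to lay out the wire header, the ACK, three padding bytes and the ackinfo contiguously, the retransmit path now sends an explicit three-segment I/O vector (header plus abort code or ACK, padding, ackinfo) and passes the segment count to kernel_sendmsg(); an unhandled chan->last_type also returns early now. Roughly the same idea in user space with writev(), under hypothetical toy structures that are not the rxrpc wire format:

/* User-space analogue of a multi-segment send with explicit padding. */
#include <stdint.h>
#include <sys/uio.h>
#include <unistd.h>

struct toy_ack  { uint8_t reason; uint8_t n_acks; };
struct toy_info { uint32_t rx_mtu; uint32_t max_mtu; };

static ssize_t toy_send_ack(int fd, const struct toy_ack *ack,
                            const struct toy_info *info)
{
        static const uint8_t padding[3];        /* explicit, not implicit, padding */
        struct iovec iov[3] = {
                { .iov_base = (void *)ack,     .iov_len = sizeof(*ack)    },
                { .iov_base = (void *)padding, .iov_len = sizeof(padding) },
                { .iov_base = (void *)info,    .iov_len = sizeof(*info)   },
        };

        return writev(fd, iov, 3);      /* kernel_sendmsg() fills this role in-kernel */
}

int main(void)
{
        struct toy_ack ack = { .reason = 1, .n_acks = 0 };
        struct toy_info info = { .rx_mtu = 1444, .max_mtu = 1500 };

        return toy_send_ack(STDOUT_FILENO, &ack, &info) < 0;
}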
index 1aad04a32d5e203ab17928e2247275c8b01c954d..c628351eb9008da7059102f48dad7f605343de5b 100644 (file)
@@ -424,7 +424,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
        if (earliest != now + MAX_JIFFY_OFFSET) {
                _debug("reschedule reaper %ld", (long)earliest - (long)now);
                ASSERT(time_after(earliest, now));
-               rxrpc_set_service_reap_timer(rxnet, earliest);          
+               rxrpc_set_service_reap_timer(rxnet, earliest);
        }
 
        while (!list_empty(&graveyard)) {
index 23a5e61d8f79a01622c29de07061fcff94a14f3c..6fc61400337fb3e8a96658ed685efa9a8280f70e 100644 (file)
@@ -976,7 +976,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
                rxrpc_reduce_call_timer(call, expect_rx_by, now,
                                        rxrpc_timer_set_for_normal);
        }
-       
+
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_DATA:
                rxrpc_input_data(call, skb, skew);
@@ -1213,7 +1213,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
                                goto reupgrade;
                        conn->service_id = sp->hdr.serviceId;
                }
-               
+
                if (sp->hdr.callNumber == 0) {
                        /* Connection-level packet */
                        _debug("CONN %p {%d}", conn, conn->debug_id);
index a1c53ac066a10bda169b0222b6d6177066c6dca9..09f2a3e0522163e0e5ae900555c56b74ace26b7a 100644 (file)
@@ -233,7 +233,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                if (resend_at < 1)
                        resend_at = 1;
 
-               resend_at = now + rxrpc_resend_timeout;
+               resend_at += now;
                WRITE_ONCE(call->resend_at, resend_at);
                rxrpc_reduce_call_timer(call, resend_at, now,
                                        rxrpc_timer_set_for_send);
index 8b5abcd2f32faeaa2a283bcc8fb388201f7a86e2..9438969290a6147c16c971558aeef3d01d21dde5 100644 (file)
@@ -96,23 +96,16 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        return ret;
 }
 
-static void tcf_sample_cleanup_rcu(struct rcu_head *rcu)
+static void tcf_sample_cleanup(struct tc_action *a, int bind)
 {
-       struct tcf_sample *s = container_of(rcu, struct tcf_sample, rcu);
+       struct tcf_sample *s = to_sample(a);
        struct psample_group *psample_group;
 
-       psample_group = rcu_dereference_protected(s->psample_group, 1);
+       psample_group = rtnl_dereference(s->psample_group);
        RCU_INIT_POINTER(s->psample_group, NULL);
        psample_group_put(psample_group);
 }
 
-static void tcf_sample_cleanup(struct tc_action *a, int bind)
-{
-       struct tcf_sample *s = to_sample(a);
-
-       call_rcu(&s->rcu, tcf_sample_cleanup_rcu);
-}
-
 static bool tcf_sample_dev_ok_push(struct net_device *dev)
 {
        switch (dev->type) {
@@ -264,7 +257,6 @@ static int __init sample_init_module(void)
 
 static void __exit sample_cleanup_module(void)
 {
-       rcu_barrier();
        tcf_unregister_action(&act_sample_ops, &sample_net_ops);
 }
 
index 7b261afc47b9d709fdd780a93aaba874f35d79be..7f8baa48e7c2a834aea292106fd319c2489432a3 100644 (file)
@@ -53,6 +53,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
        msg->send_failed = 0;
        msg->send_error = 0;
        msg->can_delay = 1;
+       msg->abandoned = 0;
        msg->expires_at = 0;
        INIT_LIST_HEAD(&msg->chunks);
 }
@@ -304,6 +305,13 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
        if (!chunk->asoc->peer.prsctp_capable)
                return 0;
 
+       if (chunk->msg->abandoned)
+               return 1;
+
+       if (!chunk->has_tsn &&
+           !(chunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG))
+               return 0;
+
        if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
            time_after(jiffies, chunk->msg->expires_at)) {
                struct sctp_stream_out *streamout =
@@ -316,6 +324,7 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)