Merge branch 'akpm' (patches from Andrew)
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Mar 2018 01:48:43 +0000 (18:48 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Mar 2018 01:48:43 +0000 (18:48 -0700)
Merge misc fixes from Andrew Morton:
 "13 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, thp: do not cause memcg oom for thp
  mm/vmscan: wake up flushers for legacy cgroups too
  Revert "mm: page_alloc: skip over regions of invalid pfns where possible"
  mm/shmem: do not wait for lock_page() in shmem_unused_huge_shrink()
  mm/thp: do not wait for lock_page() in deferred_split_scan()
  mm/khugepaged.c: convert VM_BUG_ON() to collapse fail
  x86/mm: implement free pmd/pte page interfaces
  mm/vmalloc: add interfaces to free unmapped page table
  h8300: remove extraneous __BIG_ENDIAN definition
  hugetlbfs: check for pgoff value overflow
  lockdep: fix fs_reclaim warning
  MAINTAINERS: update Mark Fasheh's e-mail
  mm/mempolicy.c: avoid use uninitialized preferred_node

259 files changed:
Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
Documentation/devicetree/bindings/net/dsa/marvell.txt
Documentation/devicetree/bindings/net/renesas,ravb.txt
Documentation/networking/segmentation-offloads.txt
Makefile
arch/x86/mm/init_64.c
arch/x86/net/bpf_jit_comp.c
drivers/acpi/acpi_watchdog.c
drivers/acpi/battery.c
drivers/acpi/nfit/core.c
drivers/acpi/numa.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_bcm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/ast/ast_tables.h
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_hangcheck.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/dsi.c
drivers/gpu/drm/tegra/plane.c
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/ipu-v3/ipu-prg.c
drivers/mmc/core/block.c
drivers/mmc/core/card.h
drivers/mmc/core/quirks.h
drivers/mmc/host/dw_mmc-exynos.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/sdhci-acpi.c
drivers/net/can/cc770/cc770.c
drivers/net/can/cc770/cc770.h
drivers/net/can/ifi_canfd/ifi_canfd.c
drivers/net/can/m_can/m_can.c
drivers/net/can/peak_canfd/peak_canfd.c
drivers/net/can/peak_canfd/peak_pciefd_main.c
drivers/net/dsa/Makefile
drivers/net/dsa/b53/b53_common.c
drivers/net/ethernet/8390/Kconfig
drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.h
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
drivers/net/ethernet/aquantia/atlantic/ver.h
drivers/net/ethernet/arc/emac_rockchip.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/cortina/gemini.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fman/fman_dtsec.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/mac.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
drivers/net/ethernet/natsemi/Kconfig
drivers/net/ethernet/natsemi/Makefile
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/qualcomm/emac/emac-mac.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/socionext/sni_ave.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/phy/bcm-phy-lib.c
drivers/net/phy/marvell.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/realtek.c
drivers/net/ppp/ppp_generic.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/usbnet.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
drivers/net/wireless/intel/iwlwifi/fw/init.c
drivers/net/wireless/intel/iwlwifi/fw/runtime.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
drivers/nvdimm/blk.c
drivers/nvdimm/btt.c
drivers/nvdimm/pfn_devs.c
drivers/nvdimm/region_devs.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/soc/fsl/qbman/qman.c
drivers/vhost/net.c
drivers/watchdog/wdat_wdt.c
fs/sysfs/symlink.c
include/linux/cgroup-defs.h
include/linux/if_tun.h
include/linux/if_vlan.h
include/linux/net.h
include/linux/netfilter/x_tables.h
include/linux/phy.h
include/linux/rhashtable.h
include/linux/skbuff.h
include/linux/u64_stats_sync.h
include/net/ip.h
include/net/ip6_route.h
include/net/ip_fib.h
include/net/mac80211.h
include/net/route.h
include/net/sch_generic.h
include/net/sock.h
include/trace/events/mmc.h
include/uapi/linux/if_ether.h
kernel/bpf/syscall.c
kernel/fail_function.c
kernel/memremap.c
kernel/module.c
kernel/trace/bpf_trace.c
lib/rhashtable.c
lib/test_bpf.c
lib/test_rhashtable.c
net/8021q/vlan_core.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/icmp_socket.c
net/batman-adv/log.c
net/batman-adv/multicast.c
net/batman-adv/routing.c
net/bluetooth/smp.c
net/bridge/netfilter/ebt_among.c
net/bridge/netfilter/ebtables.c
net/core/dev.c
net/core/dev_ioctl.c
net/core/devlink.c
net/core/filter.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/dccp/proto.c
net/dsa/legacy.c
net/ieee802154/6lowpan/core.c
net/ipv4/inet_diag.c
net/ipv4/inet_fragment.c
net/ipv4/ip_sockglue.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_timer.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv4/xfrm4_policy.c
net/ipv6/datagram.c
net/ipv6/ip6_gre.c
net/ipv6/ndisc.c
net/ipv6/route.c
net/ipv6/seg6_iptunnel.c
net/ipv6/xfrm6_mode_tunnel.c
net/ipv6/xfrm6_policy.c
net/iucv/af_iucv.c
net/kcm/kcmsock.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/mac80211/debugfs.c
net/mac80211/mlme.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_set_hash.c
net/netfilter/x_tables.c
net/netfilter/xt_hashlimit.c
net/netfilter/xt_recent.c
net/netlink/genetlink.c
net/openvswitch/meter.c
net/sched/act_bpf.c
net/sched/act_csum.c
net/sched/act_ipt.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_sample.c
net/sched/act_simple.c
net/sched/act_skbmod.c
net/sched/act_tunnel_key.c
net/sched/act_vlan.c
net/sched/sch_generic.c
net/sched/sch_netem.c
net/sctp/input.c
net/sctp/inqueue.c
net/sctp/offload.c
net/smc/af_smc.c
net/smc/smc_close.c
net/socket.c
net/xfrm/xfrm_ipcomp.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_replay.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
tools/bpf/bpftool/common.c

index 6394ea9e3b9e5b45a8803dd5a6c7e98426a1b889..58b12e25bbb16d5ce85b7d11be530ec80dab71d0 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
@@ -16,6 +16,7 @@ Required properties:
 - ddc: phandle to the hdmi ddc node
 - phy: phandle to the hdmi phy node
 - samsung,syscon-phandle: phandle for system controller node for PMU.
+- #sound-dai-cells: should be 0.
 
 Required properties for Exynos 4210, 4212, 5420 and 5433:
 - clocks: list of clock IDs from SoC clock driver.
index 1d4d0f49c9d06eb66d9957fb0661cec35ddc7af9..8c033d48e2baf05efc326dafbe74b93ff98bae87 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -50,14 +50,15 @@ Example:
                        compatible = "marvell,mv88e6085";
                        reg = <0>;
                        reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
-               };
-               mdio {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       switch1phy0: switch1phy0@0 {
-                               reg = <0>;
-                               interrupt-parent = <&switch0>;
-                               interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+                       mdio {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               switch1phy0: switch1phy0@0 {
+                                       reg = <0>;
+                                       interrupt-parent = <&switch0>;
+                                       interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+                               };
                        };
                };
        };
@@ -74,23 +75,24 @@ Example:
                        compatible = "marvell,mv88e6390";
                        reg = <0>;
                        reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
-               };
-               mdio {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       switch1phy0: switch1phy0@0 {
-                               reg = <0>;
-                               interrupt-parent = <&switch0>;
-                               interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+                       mdio {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               switch1phy0: switch1phy0@0 {
+                                       reg = <0>;
+                                       interrupt-parent = <&switch0>;
+                                       interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+                               };
                        };
-               };
 
-               mdio1 {
-                       compatible = "marvell,mv88e6xxx-mdio-external";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       switch1phy9: switch1phy0@9 {
-                               reg = <9>;
+                       mdio1 {
+                               compatible = "marvell,mv88e6xxx-mdio-external";
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               switch1phy9: switch1phy0@9 {
+                                       reg = <9>;
+                               };
                        };
                };
        };
index 92fd4b2f17b24b5b89c08394e9bd3e3bc8d141e7..b4dc455eb1554e2df9fa8c6b3b3c0a8fd3a8cb9b 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -27,7 +27,11 @@ Required properties:
        SoC-specific version corresponding to the platform first followed by
        the generic version.
 
-- reg: offset and length of (1) the register block and (2) the stream buffer.
+- reg: Offset and length of (1) the register block and (2) the stream buffer.
+       The region for the register block is mandatory.
+       The region for the stream buffer is optional, as it is only present on
+       R-Car Gen2 and RZ/G1 SoCs, and on R-Car H3 (R8A7795), M3-W (R8A7796),
+       and M3-N (R8A77965).
 - interrupts: A list of interrupt-specifiers, one for each entry in
              interrupt-names.
              If interrupt-names is not present, an interrupt specifier
index d47480b61ac6d0611c0e1cbfe378c14941f1cfb5..aca542ec125c96bdc95411359fceffcaee9898a0 100644
--- a/Documentation/networking/segmentation-offloads.txt
+++ b/Documentation/networking/segmentation-offloads.txt
@@ -20,8 +20,8 @@ TCP Segmentation Offload
 
 TCP segmentation allows a device to segment a single frame into multiple
 frames with a data payload size specified in skb_shinfo()->gso_size.
-When TCP segmentation requested the bit for either SKB_GSO_TCP or
-SKB_GSO_TCP6 should be set in skb_shinfo()->gso_type and
+When TCP segmentation requested the bit for either SKB_GSO_TCPV4 or
+SKB_GSO_TCPV6 should be set in skb_shinfo()->gso_type and
 skb_shinfo()->gso_size should be set to a non-zero value.
 
 TCP segmentation is dependent on support for the use of partial checksum
@@ -153,8 +153,18 @@ To signal this, gso_size is set to the special value GSO_BY_FRAGS.
 
 Therefore, any code in the core networking stack must be aware of the
 possibility that gso_size will be GSO_BY_FRAGS and handle that case
-appropriately. (For size checks, the skb_gso_validate_*_len family of
-helpers do this automatically.)
+appropriately.
+
+There are some helpers to make this easier:
+
+ - skb_is_gso(skb) && skb_is_gso_sctp(skb) is the best way to see if
+   an skb is an SCTP GSO skb.
+
+ - For size checks, the skb_gso_validate_*_len family of helpers correctly
+   considers GSO_BY_FRAGS.
+
+ - For manipulating packets, skb_increase_gso_size and skb_decrease_gso_size
+   will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs.
 
 This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
 set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
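
An illustrative sketch (not part of the patch) of the kind of size check the updated text describes, modelled on the ip_exceeds_mtu()-style checks in the core stack: the skb_gso_validate_*_len helpers already account for GSO_BY_FRAGS, so callers go through them instead of reading gso_size directly. The function name and the mtu parameter are hypothetical.

#include <linux/skbuff.h>

/* Illustrative only: decide whether an skb is too large to send within @mtu. */
static bool skb_exceeds_limit(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* For GSO skbs (including SCTP GSO, where gso_size may be
	 * GSO_BY_FRAGS) let the helper validate the resulting segments.
	 */
	if (skb_is_gso(skb))
		return !skb_gso_validate_network_len(skb, mtu);

	return true;
}
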
index d65e2e229017502a97ed45a179c9a31203dfbd7a..486db374d1c1ada440cc8b023c3b5c3e91b41da0 100644
--- a/Makefile
+++ b/Makefile
@@ -826,6 +826,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS  += $(call cc-option,-fno-strict-overflow)
 
+# clang sets -fmerge-all-constants by default as optimization, but this
+# is non-conforming behavior for C and in fact breaks the kernel, so we
+# need to disable it here generally.
+KBUILD_CFLAGS  += $(call cc-option,-fno-merge-all-constants)
+
+# for gcc -fno-merge-all-constants disables everything, but it is fine
+# to have actual conforming behavior enabled.
+KBUILD_CFLAGS  += $(call cc-option,-fmerge-constants)
+
 # Make sure -fstack-check isn't enabled (like gentoo apparently did)
 KBUILD_CFLAGS  += $(call cc-option,-fno-stack-check,)
 
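A small user-space illustration (not kernel code, and not part of the patch) of why -fmerge-all-constants is the non-conforming behavior the new Makefile comment refers to: distinct objects must have distinct addresses, and code that distinguishes objects by address breaks once a compiler folds identical constants together.

#include <stdio.h>

/* Two distinct const objects: the C standard guarantees &a != &b.
 * With -fmerge-all-constants a compiler may fold them into one object,
 * silently breaking address-based comparisons.
 */
static const char a[] = "same bytes";
static const char b[] = "same bytes";

int main(void)
{
	printf("%s\n", (a == b) ? "merged (non-conforming)" : "distinct (conforming)");
	return 0;
}
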
index 8b72923f1d35c07c5ded42ae36873790da02d247..af11a2890235584a5f07cfe7f83a00ea71fc47f9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -800,17 +800,11 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 
 #define PAGE_INUSE 0xFD
 
-static void __meminit free_pagetable(struct page *page, int order,
-               struct vmem_altmap *altmap)
+static void __meminit free_pagetable(struct page *page, int order)
 {
        unsigned long magic;
        unsigned int nr_pages = 1 << order;
 
-       if (altmap) {
-               vmem_altmap_free(altmap, nr_pages);
-               return;
-       }
-
        /* bootmem page has reserved flag */
        if (PageReserved(page)) {
                __ClearPageReserved(page);
@@ -826,8 +820,16 @@ static void __meminit free_pagetable(struct page *page, int order,
                free_pages((unsigned long)page_address(page), order);
 }
 
-static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
+static void __meminit free_hugepage_table(struct page *page,
                struct vmem_altmap *altmap)
+{
+       if (altmap)
+               vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
+       else
+               free_pagetable(page, get_order(PMD_SIZE));
+}
+
+static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
 {
        pte_t *pte;
        int i;
@@ -839,14 +841,13 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
        }
 
        /* free a pte talbe */
-       free_pagetable(pmd_page(*pmd), 0, altmap);
+       free_pagetable(pmd_page(*pmd), 0);
        spin_lock(&init_mm.page_table_lock);
        pmd_clear(pmd);
        spin_unlock(&init_mm.page_table_lock);
 }
 
-static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
-               struct vmem_altmap *altmap)
+static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 {
        pmd_t *pmd;
        int i;
@@ -858,14 +859,13 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
        }
 
        /* free a pmd talbe */
-       free_pagetable(pud_page(*pud), 0, altmap);
+       free_pagetable(pud_page(*pud), 0);
        spin_lock(&init_mm.page_table_lock);
        pud_clear(pud);
        spin_unlock(&init_mm.page_table_lock);
 }
 
-static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
-               struct vmem_altmap *altmap)
+static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
 {
        pud_t *pud;
        int i;
@@ -877,7 +877,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
        }
 
        /* free a pud talbe */
-       free_pagetable(p4d_page(*p4d), 0, altmap);
+       free_pagetable(p4d_page(*p4d), 0);
        spin_lock(&init_mm.page_table_lock);
        p4d_clear(p4d);
        spin_unlock(&init_mm.page_table_lock);
@@ -885,7 +885,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
 
 static void __meminit
 remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
-                struct vmem_altmap *altmap, bool direct)
+                bool direct)
 {
        unsigned long next, pages = 0;
        pte_t *pte;
@@ -916,7 +916,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                         * freed when offlining, or simplely not in use.
                         */
                        if (!direct)
-                               free_pagetable(pte_page(*pte), 0, altmap);
+                               free_pagetable(pte_page(*pte), 0);
 
                        spin_lock(&init_mm.page_table_lock);
                        pte_clear(&init_mm, addr, pte);
@@ -939,7 +939,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 
                        page_addr = page_address(pte_page(*pte));
                        if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
-                               free_pagetable(pte_page(*pte), 0, altmap);
+                               free_pagetable(pte_page(*pte), 0);
 
                                spin_lock(&init_mm.page_table_lock);
                                pte_clear(&init_mm, addr, pte);
@@ -974,9 +974,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
                            IS_ALIGNED(next, PMD_SIZE)) {
                                if (!direct)
-                                       free_pagetable(pmd_page(*pmd),
-                                                      get_order(PMD_SIZE),
-                                                      altmap);
+                                       free_hugepage_table(pmd_page(*pmd),
+                                                           altmap);
 
                                spin_lock(&init_mm.page_table_lock);
                                pmd_clear(pmd);
@@ -989,9 +988,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                                page_addr = page_address(pmd_page(*pmd));
                                if (!memchr_inv(page_addr, PAGE_INUSE,
                                                PMD_SIZE)) {
-                                       free_pagetable(pmd_page(*pmd),
-                                                      get_order(PMD_SIZE),
-                                                      altmap);
+                                       free_hugepage_table(pmd_page(*pmd),
+                                                           altmap);
 
                                        spin_lock(&init_mm.page_table_lock);
                                        pmd_clear(pmd);
@@ -1003,8 +1001,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                }
 
                pte_base = (pte_t *)pmd_page_vaddr(*pmd);
-               remove_pte_table(pte_base, addr, next, altmap, direct);
-               free_pte_table(pte_base, pmd, altmap);
+               remove_pte_table(pte_base, addr, next, direct);
+               free_pte_table(pte_base, pmd);
        }
 
        /* Call free_pmd_table() in remove_pud_table(). */
@@ -1033,8 +1031,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                            IS_ALIGNED(next, PUD_SIZE)) {
                                if (!direct)
                                        free_pagetable(pud_page(*pud),
-                                                      get_order(PUD_SIZE),
-                                                      altmap);
+                                                      get_order(PUD_SIZE));
 
                                spin_lock(&init_mm.page_table_lock);
                                pud_clear(pud);
@@ -1048,8 +1045,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                                if (!memchr_inv(page_addr, PAGE_INUSE,
                                                PUD_SIZE)) {
                                        free_pagetable(pud_page(*pud),
-                                                      get_order(PUD_SIZE),
-                                                      altmap);
+                                                      get_order(PUD_SIZE));
 
                                        spin_lock(&init_mm.page_table_lock);
                                        pud_clear(pud);
@@ -1062,7 +1058,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 
                pmd_base = pmd_offset(pud, 0);
                remove_pmd_table(pmd_base, addr, next, direct, altmap);
-               free_pmd_table(pmd_base, pud, altmap);
+               free_pmd_table(pmd_base, pud);
        }
 
        if (direct)
@@ -1094,7 +1090,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
                 * to adapt for boot-time switching between 4 and 5 level page tables.
                 */
                if (CONFIG_PGTABLE_LEVELS == 5)
-                       free_pud_table(pud_base, p4d, altmap);
+                       free_pud_table(pud_base, p4d);
        }
 
        if (direct)
index 45e4eb5bcbb2ab4894b12d6a948b19eb25250af4..ce5b2ebd57015db862192c598a5886776c30faac 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1188,7 +1188,7 @@ skip_init_addrs:
         * may converge on the last pass. In such case do one more
         * pass to emit the final image
         */
-       for (pass = 0; pass < 10 || image; pass++) {
+       for (pass = 0; pass < 20 || image; pass++) {
                proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
                if (proglen <= 0) {
                        image = NULL;
@@ -1215,6 +1215,7 @@ skip_init_addrs:
                        }
                }
                oldproglen = proglen;
+               cond_resched();
        }
 
        if (bpf_jit_enable > 1)
index 11b113f8e36741aeb00e921ee64a5ae871f8d55f..ebb626ffb5fa2d38c853ddcbbe7227aa09c7f7dd 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -74,10 +74,10 @@ void __init acpi_watchdog_init(void)
                res.start = gas->address;
                if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
                        res.flags = IORESOURCE_MEM;
-                       res.end = res.start + ALIGN(gas->access_width, 4);
+                       res.end = res.start + ALIGN(gas->access_width, 4) - 1;
                } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
                        res.flags = IORESOURCE_IO;
-                       res.end = res.start + gas->access_width;
+                       res.end = res.start + gas->access_width - 1;
                } else {
                        pr_warn("Unsupported address space: %u\n",
                                gas->space_id);
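
The hunk above accounts for kernel resource ranges being inclusive: a region of N bytes starting at start ends at start + N - 1, so that resource_size() (end - start + 1) reports N. A sketch with made-up values:

#include <linux/ioport.h>

/* Illustrative values only: a 4-byte MMIO region. */
static struct resource demo = DEFINE_RES_MEM(0xfed40000, 4);
/* demo.start == 0xfed40000, demo.end == 0xfed40003,
 * resource_size(&demo) == 4 -- the old code set .end one byte too far.
 */
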
index 7128488a3a728ff54f00fc1085c45afb5152f089..f2eb6c37ea0aa9aed03f562883634add68078f55 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -70,7 +70,6 @@ static async_cookie_t async_cookie;
 static bool battery_driver_registered;
 static int battery_bix_broken_package;
 static int battery_notification_delay_ms;
-static int battery_full_discharging;
 static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -215,12 +214,9 @@ static int acpi_battery_get_property(struct power_supply *psy,
                return -ENODEV;
        switch (psp) {
        case POWER_SUPPLY_PROP_STATUS:
-               if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) {
-                       if (battery_full_discharging && battery->rate_now == 0)
-                               val->intval = POWER_SUPPLY_STATUS_FULL;
-                       else
-                               val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
-               } else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
+               if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
+                       val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+               else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
                        val->intval = POWER_SUPPLY_STATUS_CHARGING;
                else if (acpi_battery_is_charged(battery))
                        val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -1170,12 +1166,6 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
        return 0;
 }
 
-static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
-{
-       battery_full_discharging = 1;
-       return 0;
-}
-
 static const struct dmi_system_id bat_dmi_table[] __initconst = {
        {
                .callback = battery_bix_broken_package_quirk,
@@ -1193,38 +1183,6 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
                },
        },
-       {
-               .callback = battery_full_discharging_quirk,
-               .ident = "ASUS GL502VSK",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
-               },
-       },
-       {
-               .callback = battery_full_discharging_quirk,
-               .ident = "ASUS UX305LA",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
-               },
-       },
-       {
-               .callback = battery_full_discharging_quirk,
-               .ident = "ASUS UX360UA",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "UX360UA"),
-               },
-       },
-       {
-               .callback = battery_full_discharging_quirk,
-               .ident = "ASUS UX410UAK",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "UX410UAK"),
-               },
-       },
        {},
 };
 
index bbe48ad20886c8530fe525ffe9f35725d1df1ddc..eb09ef55c38a2779c046241c337ea7be3cf75b79 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2675,10 +2675,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
        else
                ndr_desc->numa_node = NUMA_NO_NODE;
 
-       if(acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
+       /*
+        * Persistence domain bits are hierarchical, if
+        * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
+        * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
+        */
+       if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
                set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
-
-       if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
+       else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
                set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
 
        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
index 8ccaae3550d284be2070f7e06dd6794c5bc8c5b3..85167603b9c94318bcef7c260de689c13e4e4545 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm)
  */
 int acpi_map_pxm_to_online_node(int pxm)
 {
-       int node, n, dist, min_dist;
+       int node, min_node;
 
        node = acpi_map_pxm_to_node(pxm);
 
        if (node == NUMA_NO_NODE)
                node = 0;
 
+       min_node = node;
        if (!node_online(node)) {
-               min_dist = INT_MAX;
+               int min_dist = INT_MAX, dist, n;
+
                for_each_online_node(n) {
                        dist = node_distance(node, n);
                        if (dist < min_dist) {
                                min_dist = dist;
-                               node = n;
+                               min_node = n;
                        }
                }
        }
 
-       return node;
+       return min_node;
 }
 EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
 
index 60bf04b8f1034c1d24996e1aa3b8264fa4ef7d74..366a49c7c08f22c00a22e9ecd6b202b166c1e83e 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -231,7 +231,6 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
@@ -264,6 +263,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
        /* QCA ROME chipset */
+       { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
@@ -386,10 +386,10 @@ static const struct usb_device_id blacklist_table[] = {
  */
 static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
        {
-               /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+               /* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
                },
        },
        {}
index 6314dfb02969a9191940e8dcf8b0d0fc987453c1..40b9fb247010169d047f27b1532ef20e69ecab68 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -244,7 +244,9 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
 
        bt_dev_dbg(bdev, "Host wake IRQ");
 
-       pm_request_resume(bdev->dev);
+       pm_runtime_get(bdev->dev);
+       pm_runtime_mark_last_busy(bdev->dev);
+       pm_runtime_put_autosuspend(bdev->dev);
 
        return IRQ_HANDLED;
 }
@@ -301,7 +303,7 @@ static const struct bcm_set_sleep_mode default_sleep_params = {
        .usb_auto_sleep = 0,
        .usb_resume_timeout = 0,
        .break_to_host = 0,
-       .pulsed_host_wake = 0,
+       .pulsed_host_wake = 1,
 };
 
 static int bcm_setup_sleep(struct hci_uart *hu)
@@ -586,8 +588,11 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
        } else if (!bcm->rx_skb) {
                /* Delay auto-suspend when receiving completed packet */
                mutex_lock(&bcm_device_lock);
-               if (bcm->dev && bcm_device_exists(bcm->dev))
-                       pm_request_resume(bcm->dev->dev);
+               if (bcm->dev && bcm_device_exists(bcm->dev)) {
+                       pm_runtime_get(bcm->dev->dev);
+                       pm_runtime_mark_last_busy(bcm->dev->dev);
+                       pm_runtime_put_autosuspend(bcm->dev->dev);
+               }
                mutex_unlock(&bcm_device_lock);
        }
 
index af1b879a9ee9bf38b30e3394715d764dc55aaee0..66cb10cdc7c3e4410eb2b130b8b4d02c1732cdd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2063,9 +2063,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
        DRM_INFO("amdgpu: finishing device.\n");
        adev->shutdown = true;
-       if (adev->mode_info.mode_config_initialized)
-               drm_crtc_force_disable_all(adev->ddev);
-
+       if (adev->mode_info.mode_config_initialized){
+               if (!amdgpu_device_has_dc_support(adev))
+                       drm_crtc_force_disable_all(adev->ddev);
+               else
+                       drm_atomic_helper_shutdown(adev->ddev);
+       }
        amdgpu_ib_pool_fini(adev);
        amdgpu_fence_driver_fini(adev);
        amdgpu_fbdev_fini(adev);
index c345e645f1d72c763f027faaa36be21da75aedbc..63c67346d316ac26a53a6f52e34feb8857dc9550 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3134,8 +3134,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
 
        switch (aplane->base.type) {
        case DRM_PLANE_TYPE_PRIMARY:
-               aplane->base.format_default = true;
-
                res = drm_universal_plane_init(
                                dm->adev->ddev,
                                &aplane->base,
@@ -4794,6 +4792,9 @@ static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
                        return -EDEADLK;
 
                crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+               if (IS_ERR(crtc_state))
+                       return PTR_ERR(crtc_state);
+
                if (crtc->primary == plane && crtc_state->active) {
                        if (!plane_state->fb)
                                return -EINVAL;
index 9bd142f65f9baa9b9881dcde18c6ec9d8e527416..e1acc10e35a2fd6a4215c3d3179ac50b27bda6ce 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -109,7 +109,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
                struct cea_sad *sad = &sads[i];
 
                edid_caps->audio_modes[i].format_code = sad->format;
-               edid_caps->audio_modes[i].channel_count = sad->channels;
+               edid_caps->audio_modes[i].channel_count = sad->channels + 1;
                edid_caps->audio_modes[i].sample_rate = sad->freq;
                edid_caps->audio_modes[i].sample_size = sad->byte2;
        }
index a993279a8f2d85181cf4193d99b0916a977339ae..f11f17fe08f98196fe6f105f5bb86860810b1d0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -496,6 +496,9 @@ struct dce_hwseq_registers {
        HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
        HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
        HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
+       HWS_SF(, D2VGA_CONTROL, D2VGA_MODE_ENABLE, mask_sh),\
+       HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
+       HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
        HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
        HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
        HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
@@ -591,7 +594,10 @@ struct dce_hwseq_registers {
        type DENTIST_DISPCLK_WDIVIDER; \
        type VGA_TEST_ENABLE; \
        type VGA_TEST_RENDER_START; \
-       type D1VGA_MODE_ENABLE;
+       type D1VGA_MODE_ENABLE; \
+       type D2VGA_MODE_ENABLE; \
+       type D3VGA_MODE_ENABLE; \
+       type D4VGA_MODE_ENABLE;
 
 struct dce_hwseq_shift {
        HWSEQ_REG_FIELD_LIST(uint8_t)
index 3931412ab6d32e139a9653a12fe5c2d6de8c9337..87093894ea9e73f8e4a4b1f46787b6a30a2bcb5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -128,23 +128,22 @@ static void set_truncation(
                return;
        }
        /* on other format-to do */
-       if (params->flags.TRUNCATE_ENABLED == 0 ||
-                       params->flags.TRUNCATE_DEPTH == 2)
+       if (params->flags.TRUNCATE_ENABLED == 0)
                return;
        /*Set truncation depth and Enable truncation*/
        REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
                                FMT_TRUNCATE_EN, 1,
                                FMT_TRUNCATE_DEPTH,
-                               params->flags.TRUNCATE_MODE,
+                               params->flags.TRUNCATE_DEPTH,
                                FMT_TRUNCATE_MODE,
-                               params->flags.TRUNCATE_DEPTH);
+                               params->flags.TRUNCATE_MODE);
 }
 
 
 /**
  *     set_spatial_dither
  *     1) set spatial dithering mode: pattern of seed
- *     2) set spatical dithering depth: 0 for 18bpp or 1 for 24bpp
+ *     2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
  *     3) set random seed
  *     4) set random mode
  *             lfsr is reset every frame or not reset
index 072e4485e85e8f4c473e96dedb4b6665f3941206..dc1e010725c13d7f56bde96c1047626ef25745c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -238,14 +238,24 @@ static void enable_power_gating_plane(
 static void disable_vga(
        struct dce_hwseq *hws)
 {
-       unsigned int in_vga_mode = 0;
-
-       REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga_mode);
-
-       if (in_vga_mode == 0)
+       unsigned int in_vga1_mode = 0;
+       unsigned int in_vga2_mode = 0;
+       unsigned int in_vga3_mode = 0;
+       unsigned int in_vga4_mode = 0;
+
+       REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
+       REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
+       REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
+       REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
+
+       if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
+                       in_vga3_mode == 0 && in_vga4_mode == 0)
                return;
 
        REG_WRITE(D1VGA_CONTROL, 0);
+       REG_WRITE(D2VGA_CONTROL, 0);
+       REG_WRITE(D3VGA_CONTROL, 0);
+       REG_WRITE(D4VGA_CONTROL, 0);
 
        /* HW Engineer's Notes:
         *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
index 5f4c2e833a650dd6be2e6afb5e9835cf7e434e17..d665dd5af5dd80f2348dd1290c41ecb4756ab9d7 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -97,7 +97,7 @@ static const struct ast_vbios_dclk_info dclk_table[] = {
        {0x67, 0x22, 0x00},                     /* 0E: VCLK157_5        */
        {0x6A, 0x22, 0x00},                     /* 0F: VCLK162          */
        {0x4d, 0x4c, 0x80},                     /* 10: VCLK154          */
-       {0xa7, 0x78, 0x80},                     /* 11: VCLK83.5         */
+       {0x68, 0x6f, 0x80},                     /* 11: VCLK83.5         */
        {0x28, 0x49, 0x80},                     /* 12: VCLK106.5        */
        {0x37, 0x49, 0x80},                     /* 13: VCLK146.25       */
        {0x1f, 0x45, 0x80},                     /* 14: VCLK148.5        */
@@ -127,7 +127,7 @@ static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {
        {0x67, 0x22, 0x00},                     /* 0E: VCLK157_5        */
        {0x6A, 0x22, 0x00},                     /* 0F: VCLK162          */
        {0x4d, 0x4c, 0x80},                     /* 10: VCLK154          */
-       {0xa7, 0x78, 0x80},                     /* 11: VCLK83.5         */
+       {0x68, 0x6f, 0x80},                     /* 11: VCLK83.5         */
        {0x28, 0x49, 0x80},                     /* 12: VCLK106.5        */
        {0x37, 0x49, 0x80},                     /* 13: VCLK146.25       */
        {0x1f, 0x45, 0x80},                     /* 14: VCLK148.5        */
index c0530a1af5e39da421b87ba14d1b5f31f1a864f6..2dc5e8bed17214f187fdbe93e28b72cceeb6b376 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -461,6 +461,12 @@ int drm_mode_getfb(struct drm_device *dev,
        if (!fb)
                return -ENOENT;
 
+       /* Multi-planar framebuffers need getfb2. */
+       if (fb->format->num_planes > 1) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        r->height = fb->height;
        r->width = fb->width;
        r->depth = fb->format->depth;
@@ -484,6 +490,7 @@ int drm_mode_getfb(struct drm_device *dev,
                ret = -ENODEV;
        }
 
+out:
        drm_framebuffer_put(fb);
 
        return ret;
index f51645a08dcaf489e0668af616fe39d421822a38..6aff9d096e13d08addb7b0bdf143aa9e73c2223b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2175,8 +2175,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
                intel_prepare_dp_ddi_buffers(encoder, crtc_state);
 
        intel_ddi_init_dp_buf_reg(encoder);
-       if (!is_mst)
-               intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+       intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
                intel_dp_stop_link_train(intel_dp);
@@ -2274,14 +2273,12 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
        struct intel_dp *intel_dp = &dig_port->dp;
-       bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST);
 
        /*
         * Power down sink before disabling the port, otherwise we end
         * up getting interrupts from the sink on detecting link loss.
         */
-       if (!is_mst)
-               intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+       intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 
        intel_disable_ddi_buf(encoder);
 
index 348a4f7ffb674b435bdb77089f9c229abe171ab5..53747318f4a7162fe79f2217108ad889e138b3e7 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -246,7 +246,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
         */
        tmp = I915_READ_CTL(engine);
        if (tmp & RING_WAIT) {
-               i915_handle_error(dev_priv, 0,
+               i915_handle_error(dev_priv, BIT(engine->id),
                                  "Kicking stuck wait on %s",
                                  engine->name);
                I915_WRITE_CTL(engine, tmp);
@@ -258,7 +258,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
                default:
                        return ENGINE_DEAD;
                case 1:
-                       i915_handle_error(dev_priv, 0,
+                       i915_handle_error(dev_priv, ALL_ENGINES,
                                          "Kicking stuck semaphore on %s",
                                          engine->name);
                        I915_WRITE_CTL(engine, tmp);
index 9a9961802f5c39ce7270217903c550b2f01ed92d..e83af0f2be869a105036bf49c4f1b612acab83ee 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -225,7 +225,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
                                  struct drm_crtc_state *old_crtc_state)
 {
        drm_crtc_vblank_on(crtc);
+}
 
+static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
+                                 struct drm_crtc_state *old_crtc_state)
+{
        spin_lock_irq(&crtc->dev->event_lock);
        if (crtc->state->event) {
                WARN_ON(drm_crtc_vblank_get(crtc));
@@ -293,6 +297,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
        .mode_set_nofb = ipu_crtc_mode_set_nofb,
        .atomic_check = ipu_crtc_atomic_check,
        .atomic_begin = ipu_crtc_atomic_begin,
+       .atomic_flush = ipu_crtc_atomic_flush,
        .atomic_disable = ipu_crtc_atomic_disable,
        .atomic_enable = ipu_crtc_atomic_enable,
 };
index 57ed56d8623fcb67133f1fe79f390ad45c38c257..d9113faaa62f56400e5e974a5005edc928d0fae3 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -22,6 +22,7 @@
 #include <drm/drm_plane_helper.h>
 
 #include "video/imx-ipu-v3.h"
+#include "imx-drm.h"
 #include "ipuv3-plane.h"
 
 struct ipu_plane_state {
@@ -272,7 +273,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
        kfree(ipu_plane);
 }
 
-void ipu_plane_state_reset(struct drm_plane *plane)
+static void ipu_plane_state_reset(struct drm_plane *plane)
 {
        struct ipu_plane_state *ipu_state;
 
@@ -292,7 +293,8 @@ void ipu_plane_state_reset(struct drm_plane *plane)
        plane->state = &ipu_state->base;
 }
 
-struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)
+static struct drm_plane_state *
+ipu_plane_duplicate_state(struct drm_plane *plane)
 {
        struct ipu_plane_state *state;
 
@@ -306,8 +308,8 @@ struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)
        return &state->base;
 }
 
-void ipu_plane_destroy_state(struct drm_plane *plane,
-                            struct drm_plane_state *state)
+static void ipu_plane_destroy_state(struct drm_plane *plane,
+                                   struct drm_plane_state *state)
 {
        struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
 
index 2e2ca3c6b47d374fc6650d15bfab5d7934740a2f..df9469a8fdb1689ec2a5f8b4c3e1a5995b4e4e82 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
                /* don't do anything if sink is not display port, i.e.,
                 * passive dp->(dvi|hdmi) adaptor
                 */
-               if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-                       int saved_dpms = connector->dpms;
-                       /* Only turn off the display if it's physically disconnected */
-                       if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
-                               drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-                       } else if (radeon_dp_needs_link_train(radeon_connector)) {
-                               /* Don't try to start link training before we
-                                * have the dpcd */
-                               if (!radeon_dp_getdpcd(radeon_connector))
-                                       return;
-
-                               /* set it to OFF so that drm_helper_connector_dpms()
-                                * won't return immediately since the current state
-                                * is ON at this point.
-                                */
-                               connector->dpms = DRM_MODE_DPMS_OFF;
-                               drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-                       }
-                       connector->dpms = saved_dpms;
+               if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+                   radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+                   radeon_dp_needs_link_train(radeon_connector)) {
+                       /* Don't start link training before we have the DPCD */
+                       if (!radeon_dp_getdpcd(radeon_connector))
+                               return;
+
+                       /* Turn the connector off and back on immediately, which
+                        * will trigger link training
+                        */
+                       drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+                       drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
                }
        }
 }
index 4570da0227b4e49523b7f9c697fce54777dacd7e..d9a71f361b1440fd6a84c4dcbd9f295c7e873a39 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -111,7 +111,7 @@ static int sun4i_drv_bind(struct device *dev)
        /* drm_vblank_init calls kcalloc, which can fail */
        ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (ret)
-               goto free_mem_region;
+               goto cleanup_mode_config;
 
        drm->irq_enabled = true;
 
@@ -139,7 +139,6 @@ finish_poll:
        sun4i_framebuffer_free(drm);
 cleanup_mode_config:
        drm_mode_config_cleanup(drm);
-free_mem_region:
        of_reserved_mem_device_release(dev);
 free_drm:
        drm_dev_unref(drm);
index 500b6fb3e0284d2fdfc71265a64f0d5b51fe4f99..fa4bcd092eaf20f9f04faaaf49ca9339f134f385 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -538,7 +538,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
                                             &sun4i_hdmi_regmap_config);
        if (IS_ERR(hdmi->regmap)) {
                dev_err(dev, "Couldn't create HDMI encoder regmap\n");
-               return PTR_ERR(hdmi->regmap);
+               ret = PTR_ERR(hdmi->regmap);
+               goto err_disable_mod_clk;
        }
 
        ret = sun4i_tmds_create(hdmi);
@@ -551,7 +552,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
                hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc");
                if (IS_ERR(hdmi->ddc_parent_clk)) {
                        dev_err(dev, "Couldn't get the HDMI DDC clock\n");
-                       return PTR_ERR(hdmi->ddc_parent_clk);
+                       ret = PTR_ERR(hdmi->ddc_parent_clk);
+                       goto err_disable_mod_clk;
                }
        } else {
                hdmi->ddc_parent_clk = hdmi->tmds_clk;
index 2de586b7c98b58ea4d03470f720744840d3a026f..a818ca4916051ade239efa0f4789d5c3cab36165 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -103,6 +103,7 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
 
        if (enabled) {
                clk_prepare_enable(clk);
+               clk_rate_exclusive_get(clk);
        } else {
                clk_rate_exclusive_put(clk);
                clk_disable_unprepare(clk);
@@ -262,7 +263,7 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
                                        const struct drm_display_mode *mode)
 {
        /* Configure the dot clock */
-       clk_set_rate_exclusive(tcon->dclk, mode->crtc_clock * 1000);
+       clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
 
        /* Set the resolution */
        regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
@@ -423,7 +424,7 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
        WARN_ON(!tcon->quirks->has_channel_1);
 
        /* Configure the dot clock */
-       clk_set_rate_exclusive(tcon->sclk1, mode->crtc_clock * 1000);
+       clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
 
        /* Adjust clock delay */
        clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
index b8403ed48285288c277d224e253285caebed3adb..fbffe1948b3bb2d5311ecbee000ee879fbd6ce28 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1903,8 +1903,12 @@ cleanup:
        if (!IS_ERR(primary))
                drm_plane_cleanup(primary);
 
-       if (group && tegra->domain) {
-               iommu_detach_group(tegra->domain, group);
+       if (group && dc->domain) {
+               if (group == tegra->group) {
+                       iommu_detach_group(dc->domain, group);
+                       tegra->group = NULL;
+               }
+
                dc->domain = NULL;
        }
 
@@ -1913,8 +1917,10 @@ cleanup:
 
 static int tegra_dc_exit(struct host1x_client *client)
 {
+       struct drm_device *drm = dev_get_drvdata(client->parent);
        struct iommu_group *group = iommu_group_get(client->dev);
        struct tegra_dc *dc = host1x_client_to_dc(client);
+       struct tegra_drm *tegra = drm->dev_private;
        int err;
 
        devm_free_irq(dc->dev, dc->irq, dc);
@@ -1926,7 +1932,11 @@ static int tegra_dc_exit(struct host1x_client *client)
        }
 
        if (group && dc->domain) {
-               iommu_detach_group(dc->domain, group);
+               if (group == tegra->group) {
+                       iommu_detach_group(dc->domain, group);
+                       tegra->group = NULL;
+               }
+
                dc->domain = NULL;
        }
 
index d50bddb2e4474e456b8105d6b90b47488ff21263..7fcf4a24284088ba798296016797032f70e5fa96 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -250,6 +250,7 @@ static void tegra_drm_unload(struct drm_device *drm)
 
        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_exit(drm);
+       drm_atomic_helper_shutdown(drm);
        drm_mode_config_cleanup(drm);
 
        err = host1x_device_exit(device);
index 4d2ed966f9e3248a074ab25dcdfb6156e9c93200..87c5d89bc9baf3cd09012f5a2385510b645ebe1b 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1072,7 +1072,6 @@ static int tegra_dsi_exit(struct host1x_client *client)
        struct tegra_dsi *dsi = host1x_client_to_dsi(client);
 
        tegra_output_exit(&dsi->output);
-       regulator_disable(dsi->vdd);
 
        return 0;
 }
index 36a06a99369821aed88bbf9701550848b420493e..94dac79ac3c9641b84aa1af53093e104e0661ca4 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -297,6 +297,10 @@ int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha)
        case WIN_COLOR_DEPTH_B8G8R8X8:
                *alpha = WIN_COLOR_DEPTH_B8G8R8A8;
                return 0;
+
+       case WIN_COLOR_DEPTH_B5G6R5:
+               *alpha = opaque;
+               return 0;
        }
 
        return -EINVAL;
@@ -330,9 +334,6 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra,
        unsigned int zpos[2];
        unsigned int i;
 
-       for (i = 0; i < 3; i++)
-               state->dependent[i] = false;
-
        for (i = 0; i < 2; i++)
                zpos[i] = 0;
 
@@ -346,6 +347,8 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra,
 
                index = tegra_plane_get_overlap_index(tegra, p);
 
+               state->dependent[index] = false;
+
                /*
                 * If any of the other planes is on top of this plane and uses
                 * a format with an alpha component, mark this plane as being
index b5b335c9b2bbe504fdddf47246820e1e64199d18..2ebdc6d5a76e60a33d6a271ff158258a61b7908c 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -159,10 +159,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
        unsigned long start = vma->vm_start;
        unsigned long size = vma->vm_end - vma->vm_start;
-       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+       unsigned long offset;
        unsigned long page, pos;
 
-       if (offset + size > info->fix.smem_len)
+       if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+               return -EINVAL;
+
+       offset = vma->vm_pgoff << PAGE_SHIFT;
+
+       if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
                return -EINVAL;
 
        pos = (unsigned long)info->fix.smem_start + offset;
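
The hunk above replaces a sum-based bounds test with one that cannot wrap. A sketch of the same pattern with hypothetical 32-bit values showing what the old check missed (the helper name is illustrative):

#include <linux/errno.h>

/*
 * With offset = 0xfffff000 and size = 0x2000 on a 32-bit unsigned long,
 * "offset + size" wraps to 0x1000, so the old "offset + size > smem_len"
 * test passed even though the range is far outside the framebuffer.
 * Comparing each term separately keeps every value in range:
 */
static int check_mmap_range(unsigned long offset, unsigned long size,
			    unsigned long smem_len)
{
	if (offset > smem_len || size > smem_len - offset)
		return -EINVAL;
	return 0;
}
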
index 184340d486c377d38a2f21bb66cc56385a52d37b..86d25f18aa992745e9c4ebb3c89e2bd3199212b9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1337,6 +1337,19 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
  */
 void vmw_svga_disable(struct vmw_private *dev_priv)
 {
+       /*
+        * Disabling SVGA will turn off device modesetting capabilities, so
+        * notify KMS about that so it doesn't cache atomic state that isn't
+        * valid anymore, for example crtcs turned on.
+        * Strictly we'd want to do this under the SVGA lock (or an SVGA
+        * mutex), but vmw_kms_lost_device() takes the reservation sem and
+        * we'd end up with a lock order reversal, so the call is made without
+        * SVGA locking. Thus, a master may actually perform a new modeset
+        * just after we call vmw_kms_lost_device() and race with
+        * vmw_svga_disable(), but that should at worst cause atomic KMS state
+        * to be inconsistent with the device, causing modesetting problems.
+        */
+       vmw_kms_lost_device(dev_priv->dev);
        ttm_write_lock(&dev_priv->reservation_sem, false);
        spin_lock(&dev_priv->svga_lock);
        if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
index d08753e8fd94f077b3815f2d64f5ac63d0cdd7f2..9116fe8baebcab24575a0b6dc329578ac4db0b40 100644 (file)
@@ -938,6 +938,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
+void vmw_kms_lost_device(struct drm_device *dev);
 
 int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
index ead61015cd79ceb0a2602615c72cddf7020da85d..3c824fd7cbf36d64e72e758bdb7c5b5e3b270dd6 100644 (file)
@@ -31,7 +31,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_rect.h>
 
-
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
@@ -2517,9 +2516,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
  * Helper to be used if an error forces the caller to undo the actions of
  * vmw_kms_helper_resource_prepare.
  */
-void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
 {
-       vmw_kms_helper_buffer_revert(res->backup);
+       struct vmw_resource *res = ctx->res;
+
+       vmw_kms_helper_buffer_revert(ctx->buf);
+       vmw_dmabuf_unreference(&ctx->buf);
        vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
@@ -2536,10 +2538,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res)
  * interrupted by a signal.
  */
 int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-                                   bool interruptible)
+                                   bool interruptible,
+                                   struct vmw_validation_ctx *ctx)
 {
        int ret = 0;
 
+       ctx->buf = NULL;
+       ctx->res = res;
+
        if (interruptible)
                ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
        else
@@ -2558,6 +2564,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
                                                    res->dev_priv->has_mob);
                if (ret)
                        goto out_unreserve;
+
+               ctx->buf = vmw_dmabuf_reference(res->backup);
        }
        ret = vmw_resource_validate(res);
        if (ret)
@@ -2565,7 +2573,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
        return 0;
 
 out_revert:
-       vmw_kms_helper_buffer_revert(res->backup);
+       vmw_kms_helper_buffer_revert(ctx->buf);
 out_unreserve:
        vmw_resource_unreserve(res, false, NULL, 0);
 out_unlock:
@@ -2581,11 +2589,13 @@ out_unlock:
  * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
  * ref-counted fence pointer is returned here.
  */
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
-                            struct vmw_fence_obj **out_fence)
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
+                                   struct vmw_fence_obj **out_fence)
 {
-       if (res->backup || out_fence)
-               vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+       struct vmw_resource *res = ctx->res;
+
+       if (ctx->buf || out_fence)
+               vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
                                             out_fence, NULL);
 
        vmw_resource_unreserve(res, false, NULL, 0);
@@ -2851,3 +2861,14 @@ int vmw_kms_set_config(struct drm_mode_set *set,
 
        return drm_atomic_helper_set_config(set, ctx);
 }
+
+
+/**
+ * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
+ *
+ * @dev: Pointer to the drm device
+ */
+void vmw_kms_lost_device(struct drm_device *dev)
+{
+       drm_atomic_helper_shutdown(dev);
+}
index cd9da2dd79af1a062d4aaa3d6b6ded468bd4207e..3d2ca280eaa72ee1a5f8b216bfe7b54467e3d842 100644 (file)
@@ -240,6 +240,11 @@ struct vmw_display_unit {
        int set_gui_y;
 };
 
+struct vmw_validation_ctx {
+       struct vmw_resource *res;
+       struct vmw_dma_buffer *buf;
+};
+
 #define vmw_crtc_to_du(x) \
        container_of(x, struct vmw_display_unit, crtc)
 #define vmw_connector_to_du(x) \
@@ -296,9 +301,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
                                  struct drm_vmw_fence_rep __user *
                                  user_fence_rep);
 int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-                                   bool interruptible);
-void vmw_kms_helper_resource_revert(struct vmw_resource *res);
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+                                   bool interruptible,
+                                   struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
                                    struct vmw_fence_obj **out_fence);
 int vmw_kms_readback(struct vmw_private *dev_priv,
                     struct drm_file *file_priv,
@@ -439,5 +445,4 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 
 int vmw_kms_set_config(struct drm_mode_set *set,
                       struct drm_modeset_acquire_ctx *ctx);
-
 #endif
index 63a4cd794b73a12821ea8adbfb72e997367c5193..3ec9eae831b8f15295a6da11a9a267e3e2b4fcb8 100644 (file)
@@ -909,12 +909,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
        struct vmw_framebuffer_surface *vfbs =
                container_of(framebuffer, typeof(*vfbs), base);
        struct vmw_kms_sou_surface_dirty sdirty;
+       struct vmw_validation_ctx ctx;
        int ret;
 
        if (!srf)
                srf = &vfbs->surface->res;
 
-       ret = vmw_kms_helper_resource_prepare(srf, true);
+       ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
        if (ret)
                return ret;
 
@@ -933,7 +934,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   dest_x, dest_y, num_clips, inc,
                                   &sdirty.base);
-       vmw_kms_helper_resource_finish(srf, out_fence);
+       vmw_kms_helper_resource_finish(&ctx, out_fence);
 
        return ret;
 }
index b68d74888ab1100be82f8a2a9fdc3234a4e04293..6b969e5dea2a862b392153822649217783ac661b 100644 (file)
@@ -980,12 +980,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
        struct vmw_framebuffer_surface *vfbs =
                container_of(framebuffer, typeof(*vfbs), base);
        struct vmw_stdu_dirty sdirty;
+       struct vmw_validation_ctx ctx;
        int ret;
 
        if (!srf)
                srf = &vfbs->surface->res;
 
-       ret = vmw_kms_helper_resource_prepare(srf, true);
+       ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
        if (ret)
                return ret;
 
@@ -1008,7 +1009,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
                                   dest_x, dest_y, num_clips, inc,
                                   &sdirty.base);
 out_finish:
-       vmw_kms_helper_resource_finish(srf, out_fence);
+       vmw_kms_helper_resource_finish(&ctx, out_fence);
 
        return ret;
 }
index 97b99500153d3e1477e40e3c0c4d5958c37a9c71..83f9dd934a5dc37ecb95f57b91d19af0c08f9e51 100644 (file)
@@ -250,10 +250,14 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)
 {
        int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
        struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
-       struct ipu_prg_channel *chan = &prg->chan[prg_chan];
+       struct ipu_prg_channel *chan;
        u32 val;
 
-       if (!chan->enabled || prg_chan < 0)
+       if (prg_chan < 0)
+               return;
+
+       chan = &prg->chan[prg_chan];
+       if (!chan->enabled)
                return;
 
        pm_runtime_get_sync(prg->dev);
@@ -280,13 +284,15 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
 {
        int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
        struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
-       struct ipu_prg_channel *chan = &prg->chan[prg_chan];
+       struct ipu_prg_channel *chan;
        u32 val;
        int ret;
 
        if (prg_chan < 0)
                return prg_chan;
 
+       chan = &prg->chan[prg_chan];
+
        if (chan->enabled) {
                ipu_pre_update(prg->pres[chan->used_pre], *eba);
                return 0;
index 20135a5de748846ec9101b5b88860d6da3c3850e..2cfb963d9f379ed5a29c6d357430f7dd3a1a4a79 100644 (file)
@@ -72,6 +72,7 @@ MODULE_ALIAS("mmc:block");
 #define MMC_BLK_TIMEOUT_MS  (10 * 1000)
 #define MMC_SANITIZE_REQ_TIMEOUT 240000
 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
 
 #define mmc_req_rel_wr(req)    ((req->cmd_flags & REQ_FUA) && \
                                  (rq_data_dir(req) == WRITE))
@@ -586,6 +587,24 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
                return data.error;
        }
 
+       /*
+        * Make sure the cache of the PARTITION_CONFIG register and
+        * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
+        * changed it successfully.
+        */
+       if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
+           (cmd.opcode == MMC_SWITCH)) {
+               struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
+               u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);
+
+               /*
+                * Update cache so the next mmc_blk_part_switch call operates
+                * on up-to-date data.
+                */
+               card->ext_csd.part_config = value;
+               main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
+       }
+
        /*
         * According to the SD specs, some commands require a delay after
         * issuing the command.
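
For reference, the cache update added above decodes the EXT_CSD byte index and value from an MMC_SWITCH argument (bits 23:16 and 15:8). A small illustrative program assuming the same bit layout; the sample argument below is made up:

#include <stdint.h>
#include <stdio.h>

/* Same field layout as the MMC_EXTRACT_*_FROM_ARG macros in the hunk above. */
#define EXTRACT_INDEX(x) (((x) & 0x00FF0000u) >> 16)
#define EXTRACT_VALUE(x) (((x) & 0x0000FF00u) >> 8)

int main(void)
{
	/* Hypothetical SWITCH argument: write 0x48 to EXT_CSD byte 179
	 * (PARTITION_CONFIG), the case the new cache update handles.
	 */
	uint32_t arg = (179u << 16) | (0x48u << 8);

	printf("index=%u value=0x%02x\n",
	       (unsigned int)EXTRACT_INDEX(arg),
	       (unsigned int)EXTRACT_VALUE(arg));
	return 0;
}
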
index 79a5b985ccf5ee8fe5ba06b5aec717f36799794e..9c821eedd1566750ce044507bd986b5cad9a2ec0 100644 (file)
@@ -82,6 +82,7 @@ struct mmc_fixup {
 #define CID_MANFID_APACER       0x27
 #define CID_MANFID_KINGSTON     0x70
 #define CID_MANFID_HYNIX       0x90
+#define CID_MANFID_NUMONYX     0xFE
 
 #define END_FIXUP { NULL }
 
index 75d317623852dc9f55586e41a176311a48144e1d..5153577754f02861ceab4689813441f9ac4ea443 100644 (file)
@@ -109,6 +109,12 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = {
         */
        MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
                              0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
+       /*
+        * Certain Micron (Numonyx) eMMC 4.5 cards might get broken when the
+        * HPI feature is used, so disable HPI for such buggy cards.
+        */
+       MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX,
+                             0x014e, add_quirk, MMC_QUIRK_BROKEN_HPI, 6),
 
        END_FIXUP
 };
index fa41d9422d57e04e9b12ef097a74d52418bdd87b..a84aa3f1ae8547c4cdbf24cb05ef7e32dca8d94a 100644 (file)
@@ -165,9 +165,15 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
 static int dw_mci_exynos_runtime_resume(struct device *dev)
 {
        struct dw_mci *host = dev_get_drvdata(dev);
+       int ret;
+
+       ret = dw_mci_runtime_resume(dev);
+       if (ret)
+               return ret;
 
        dw_mci_exynos_config_smu(host);
-       return dw_mci_runtime_resume(dev);
+
+       return ret;
 }
 
 /**
index d9b4acefed31e6303aa614577f6b455afa3c04ba..06d47414d0c19796beb70f130d192b3a4c3e592a 100644 (file)
@@ -413,7 +413,9 @@ static inline void dw_mci_set_cto(struct dw_mci *host)
        cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
        if (cto_div == 0)
                cto_div = 1;
-       cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz);
+
+       cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
+                                 host->bus_hz);
 
        /* add a bit spare time */
        cto_ms += 10;
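
The DIV_ROUND_UP_ULL conversion above (and the matching drto change further down) matters because the intermediate product can exceed 32 bits before the division. A quick standalone illustration with hypothetical clock counts and bus frequency:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical worst-case timeout: large clock count and divider. */
	uint32_t msec_per_sec = 1000, clks = 0xffffff, div = 510, bus_hz = 50000000;

	uint32_t wrapped = msec_per_sec * clks * div;            /* 32-bit multiply wraps */
	uint64_t widened = (uint64_t)msec_per_sec * clks * div;  /* widened first, as in the fix */

	printf("32-bit product: %u\n", wrapped);
	printf("64-bit product: %llu, timeout %llu ms\n",
	       (unsigned long long)widened,
	       (unsigned long long)((widened + bus_hz - 1) / bus_hz));
	return 0;
}
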
@@ -562,6 +564,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
                                        (sizeof(struct idmac_desc_64addr) *
                                                        (i + 1))) >> 32;
                        /* Initialize reserved and buffer size fields to "0" */
+                       p->des0 = 0;
                        p->des1 = 0;
                        p->des2 = 0;
                        p->des3 = 0;
@@ -584,6 +587,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
                     i++, p++) {
                        p->des3 = cpu_to_le32(host->sg_dma +
                                        (sizeof(struct idmac_desc) * (i + 1)));
+                       p->des0 = 0;
                        p->des1 = 0;
                }
 
@@ -1799,8 +1803,8 @@ static bool dw_mci_reset(struct dw_mci *host)
        }
 
        if (host->use_dma == TRANS_MODE_IDMAC)
-               /* It is also recommended that we reset and reprogram idmac */
-               dw_mci_idmac_reset(host);
+               /* It is also required that we reinit idmac */
+               dw_mci_idmac_init(host);
 
        ret = true;
 
@@ -1948,8 +1952,9 @@ static void dw_mci_set_drto(struct dw_mci *host)
        drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
        if (drto_div == 0)
                drto_div = 1;
-       drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div,
-                              host->bus_hz);
+
+       drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
+                                  host->bus_hz);
 
        /* add a bit spare time */
        drto_ms += 10;
index 4065da58789d2d091cd918b8cd30c518aac331d7..32321bd596d880027358db10e9eb5f5b45957c1d 100644 (file)
@@ -680,7 +680,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
        host->hw_name   = "ACPI";
        host->ops       = &sdhci_acpi_ops_dflt;
        host->irq       = platform_get_irq(pdev, 0);
-       if (host->irq <= 0) {
+       if (host->irq < 0) {
                err = -EINVAL;
                goto err_free;
        }
index 1e37313054f3950ee30e6c6fccad874d9262013a..6da69af103e60d9e26ed30815fc946be56aee224 100644 (file)
@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev,
        return 0;
 }
 
-static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static void cc770_tx(struct net_device *dev, int mo)
 {
        struct cc770_priv *priv = netdev_priv(dev);
-       struct net_device_stats *stats = &dev->stats;
-       struct can_frame *cf = (struct can_frame *)skb->data;
-       unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+       struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;
        u8 dlc, rtr;
        u32 id;
        int i;
 
-       if (can_dropped_invalid_skb(dev, skb))
-               return NETDEV_TX_OK;
-
-       if ((cc770_read_reg(priv,
-                           msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
-               netdev_err(dev, "TX register is still occupied!\n");
-               return NETDEV_TX_BUSY;
-       }
-
-       netif_stop_queue(dev);
-
        dlc = cf->can_dlc;
        id = cf->can_id;
-       if (cf->can_id & CAN_RTR_FLAG)
-               rtr = 0;
-       else
-               rtr = MSGCFG_DIR;
+       rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
+
+       cc770_write_reg(priv, msgobj[mo].ctrl0,
+                       MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
        cc770_write_reg(priv, msgobj[mo].ctrl1,
                        RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
-       cc770_write_reg(priv, msgobj[mo].ctrl0,
-                       MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
+
        if (id & CAN_EFF_FLAG) {
                id &= CAN_EFF_MASK;
                cc770_write_reg(priv, msgobj[mo].config,
@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
        for (i = 0; i < dlc; i++)
                cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
 
-       /* Store echo skb before starting the transfer */
-       can_put_echo_skb(skb, dev, 0);
-
        cc770_write_reg(priv, msgobj[mo].ctrl1,
-                       RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+                       RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+       cc770_write_reg(priv, msgobj[mo].ctrl0,
+                       MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC);
+}
 
-       stats->tx_bytes += dlc;
+static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       unsigned int mo = obj2msgobj(CC770_OBJ_TX);
 
+       if (can_dropped_invalid_skb(dev, skb))
+               return NETDEV_TX_OK;
 
-       /*
-        * HM: We had some cases of repeated IRQs so make sure the
-        * INT is acknowledged I know it's already further up, but
-        * doing again fixed the issue
-        */
-       cc770_write_reg(priv, msgobj[mo].ctrl0,
-                       MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+       netif_stop_queue(dev);
+
+       if ((cc770_read_reg(priv,
+                           msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+               netdev_err(dev, "TX register is still occupied!\n");
+               return NETDEV_TX_BUSY;
+       }
+
+       priv->tx_skb = skb;
+       cc770_tx(dev, mo);
 
        return NETDEV_TX_OK;
 }
@@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
        struct cc770_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        unsigned int mo = obj2msgobj(o);
+       struct can_frame *cf;
+       u8 ctrl1;
+
+       ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
 
-       /* Nothing more to send, switch off interrupts */
        cc770_write_reg(priv, msgobj[mo].ctrl0,
                        MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
-       /*
-        * We had some cases of repeated IRQ so make sure the
-        * INT is acknowledged
+       cc770_write_reg(priv, msgobj[mo].ctrl1,
+                       RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES);
+
+       if (unlikely(!priv->tx_skb)) {
+               netdev_err(dev, "missing tx skb in tx interrupt\n");
+               return;
+       }
+
+       if (unlikely(ctrl1 & MSGLST_SET)) {
+               stats->rx_over_errors++;
+               stats->rx_errors++;
+       }
+
+       /* When the CC770 is sending an RTR message and it receives a regular
+        * message that matches the id of the RTR message, it will overwrite the
+        * outgoing message in the TX register. When this happens we must
+        * process the received message and try to transmit the outgoing skb
+        * again.
         */
-       cc770_write_reg(priv, msgobj[mo].ctrl0,
-                       MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+       if (unlikely(ctrl1 & NEWDAT_SET)) {
+               cc770_rx(dev, mo, ctrl1);
+               cc770_tx(dev, mo);
+               return;
+       }
 
+       cf = (struct can_frame *)priv->tx_skb->data;
+       stats->tx_bytes += cf->can_dlc;
        stats->tx_packets++;
+
+       can_put_echo_skb(priv->tx_skb, dev, 0);
        can_get_echo_skb(dev, 0);
+       priv->tx_skb = NULL;
+
        netif_wake_queue(dev);
 }
 
@@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv)
        priv->can.do_set_bittiming = cc770_set_bittiming;
        priv->can.do_set_mode = cc770_set_mode;
        priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+       priv->tx_skb = NULL;
 
        memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
 
index a1739db98d911f006f82a44682f0be1b8694a01c..95752e1d128397260968ee100b0b3035923547c9 100644 (file)
@@ -193,6 +193,8 @@ struct cc770_priv {
        u8 cpu_interface;       /* CPU interface register */
        u8 clkout;              /* Clock out register */
        u8 bus_config;          /* Bus configuration register */
+
+       struct sk_buff *tx_skb;
 };
 
 struct net_device *alloc_cc770dev(int sizeof_priv);
index 2772d05ff11caafbdf074aebccd32b415feae0c4..fedd927ba6ed998fe75260d5baa8c5e1bb274c4f 100644 (file)
@@ -30,6 +30,7 @@
 #define IFI_CANFD_STCMD_ERROR_ACTIVE           BIT(2)
 #define IFI_CANFD_STCMD_ERROR_PASSIVE          BIT(3)
 #define IFI_CANFD_STCMD_BUSOFF                 BIT(4)
+#define IFI_CANFD_STCMD_ERROR_WARNING          BIT(5)
 #define IFI_CANFD_STCMD_BUSMONITOR             BIT(16)
 #define IFI_CANFD_STCMD_LOOPBACK               BIT(18)
 #define IFI_CANFD_STCMD_DISABLE_CANFD          BIT(24)
 #define IFI_CANFD_TXSTCMD_OVERFLOW             BIT(13)
 
 #define IFI_CANFD_INTERRUPT                    0xc
+#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF       BIT(0)
 #define IFI_CANFD_INTERRUPT_ERROR_WARNING      BIT(1)
+#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG    BIT(2)
+#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC  BIT(3)
 #define IFI_CANFD_INTERRUPT_ERROR_COUNTER      BIT(10)
 #define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY       BIT(16)
 #define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE      BIT(22)
 #define IFI_CANFD_INTERRUPT_SET_IRQ            ((u32)BIT(31))
 
 #define IFI_CANFD_IRQMASK                      0x10
+#define IFI_CANFD_IRQMASK_ERROR_BUSOFF         BIT(0)
+#define IFI_CANFD_IRQMASK_ERROR_WARNING                BIT(1)
+#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG      BIT(2)
+#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC    BIT(3)
 #define IFI_CANFD_IRQMASK_SET_ERR              BIT(7)
 #define IFI_CANFD_IRQMASK_SET_TS               BIT(15)
 #define IFI_CANFD_IRQMASK_TXFIFO_EMPTY         BIT(16)
 #define IFI_CANFD_SYSCLOCK                     0x50
 
 #define IFI_CANFD_VER                          0x54
+#define IFI_CANFD_VER_REV_MASK                 0xff
+#define IFI_CANFD_VER_REV_MIN_SUPPORTED                0x15
 
 #define IFI_CANFD_IP_ID                                0x58
 #define IFI_CANFD_IP_ID_VALUE                  0xD073CAFD
@@ -220,7 +230,10 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)
 
        if (enable) {
                enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
-                       IFI_CANFD_IRQMASK_RXFIFO_NEMPTY;
+                       IFI_CANFD_IRQMASK_RXFIFO_NEMPTY |
+                       IFI_CANFD_IRQMASK_ERROR_STATE_CHG |
+                       IFI_CANFD_IRQMASK_ERROR_WARNING |
+                       IFI_CANFD_IRQMASK_ERROR_BUSOFF;
                if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
                        enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
        }
@@ -361,12 +374,13 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev)
        return 1;
 }
 
-static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr)
+static int ifi_canfd_handle_lec_err(struct net_device *ndev)
 {
        struct ifi_canfd_priv *priv = netdev_priv(ndev);
        struct net_device_stats *stats = &ndev->stats;
        struct can_frame *cf;
        struct sk_buff *skb;
+       u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
        const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
                            IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
                            IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
@@ -449,6 +463,11 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
 
        switch (new_state) {
        case CAN_STATE_ERROR_ACTIVE:
+               /* error active state */
+               priv->can.can_stats.error_warning++;
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+               break;
+       case CAN_STATE_ERROR_WARNING:
                /* error warning state */
                priv->can.can_stats.error_warning++;
                priv->can.state = CAN_STATE_ERROR_WARNING;
@@ -477,7 +496,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
        ifi_canfd_get_berr_counter(ndev, &bec);
 
        switch (new_state) {
-       case CAN_STATE_ERROR_ACTIVE:
+       case CAN_STATE_ERROR_WARNING:
                /* error warning state */
                cf->can_id |= CAN_ERR_CRTL;
                cf->data[1] = (bec.txerr > bec.rxerr) ?
@@ -510,22 +529,21 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
        return 1;
 }
 
-static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd)
+static int ifi_canfd_handle_state_errors(struct net_device *ndev)
 {
        struct ifi_canfd_priv *priv = netdev_priv(ndev);
+       u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
        int work_done = 0;
-       u32 isr;
 
-       /*
-        * The ErrWarn condition is a little special, since the bit is
-        * located in the INTERRUPT register instead of STCMD register.
-        */
-       isr = readl(priv->base + IFI_CANFD_INTERRUPT);
-       if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) &&
+       if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) &&
+           (priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
+               netdev_dbg(ndev, "Error, entered active state\n");
+               work_done += ifi_canfd_handle_state_change(ndev,
+                                               CAN_STATE_ERROR_ACTIVE);
+       }
+
+       if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) &&
            (priv->can.state != CAN_STATE_ERROR_WARNING)) {
-               /* Clear the interrupt */
-               writel(IFI_CANFD_INTERRUPT_ERROR_WARNING,
-                      priv->base + IFI_CANFD_INTERRUPT);
                netdev_dbg(ndev, "Error, entered warning state\n");
                work_done += ifi_canfd_handle_state_change(ndev,
                                                CAN_STATE_ERROR_WARNING);
@@ -552,18 +570,11 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
 {
        struct net_device *ndev = napi->dev;
        struct ifi_canfd_priv *priv = netdev_priv(ndev);
-       const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE |
-                                    IFI_CANFD_STCMD_BUSOFF;
-       int work_done = 0;
-
-       u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
        u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
-       u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
+       int work_done = 0;
 
        /* Handle bus state changes */
-       if ((stcmd & stcmd_state_mask) ||
-           ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0))
-               work_done += ifi_canfd_handle_state_errors(ndev, stcmd);
+       work_done += ifi_canfd_handle_state_errors(ndev);
 
        /* Handle lost messages on RX */
        if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
@@ -571,7 +582,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
 
        /* Handle lec errors on the bus */
        if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
-               work_done += ifi_canfd_handle_lec_err(ndev, errctr);
+               work_done += ifi_canfd_handle_lec_err(ndev);
 
        /* Handle normal messages on RX */
        if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
@@ -592,12 +603,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
        struct net_device_stats *stats = &ndev->stats;
        const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
                                IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
+                               IFI_CANFD_INTERRUPT_ERROR_COUNTER |
+                               IFI_CANFD_INTERRUPT_ERROR_STATE_CHG |
                                IFI_CANFD_INTERRUPT_ERROR_WARNING |
-                               IFI_CANFD_INTERRUPT_ERROR_COUNTER;
+                               IFI_CANFD_INTERRUPT_ERROR_BUSOFF;
        const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
                                IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
-       const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ |
-                                        IFI_CANFD_INTERRUPT_ERROR_WARNING));
+       const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ);
        u32 isr;
 
        isr = readl(priv->base + IFI_CANFD_INTERRUPT);
@@ -933,7 +945,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
        struct resource *res;
        void __iomem *addr;
        int irq, ret;
-       u32 id;
+       u32 id, rev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        addr = devm_ioremap_resource(dev, res);
@@ -947,6 +959,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK;
+       if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) {
+               dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n",
+                       rev, IFI_CANFD_VER_REV_MIN_SUPPORTED);
+               return -EINVAL;
+       }
+
        ndev = alloc_candev(sizeof(*priv), 1);
        if (!ndev)
                return -ENOMEM;
index 2594f7779c6f147d71fdda29f2eab22b0c9e1955..b397a33f3d32b5e3c28398a660c736d45a74179d 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/iopoll.h>
 #include <linux/can/dev.h>
+#include <linux/pinctrl/consumer.h>
 
 /* napi related */
 #define M_CAN_NAPI_WEIGHT      64
@@ -253,7 +254,7 @@ enum m_can_mram_cfg {
 
 /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
 #define RXFC_FWM_SHIFT 24
-#define RXFC_FWM_MASK  (0x7f < RXFC_FWM_SHIFT)
+#define RXFC_FWM_MASK  (0x7f << RXFC_FWM_SHIFT)
 #define RXFC_FS_SHIFT  16
 #define RXFC_FS_MASK   (0x7f << RXFC_FS_SHIFT)
 
@@ -1700,6 +1701,8 @@ static __maybe_unused int m_can_suspend(struct device *dev)
                m_can_clk_stop(priv);
        }
 
+       pinctrl_pm_select_sleep_state(dev);
+
        priv->can.state = CAN_STATE_SLEEPING;
 
        return 0;
@@ -1710,6 +1713,8 @@ static __maybe_unused int m_can_resume(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct m_can_priv *priv = netdev_priv(ndev);
 
+       pinctrl_pm_select_default_state(dev);
+
        m_can_init_ram(priv);
 
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
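
As an aside on the RXFC_FWM_MASK hunk above: the old definition used '<' where a shift was intended, so it evaluated to a boolean instead of a bit mask. A tiny standalone illustration; the 24-bit shift mirrors RXFC_FWM_SHIFT:

#include <stdio.h>

int main(void)
{
	/* '<' is a comparison: 0x7f < 24 is false, so the old "mask" was 0. */
	unsigned int broken = (0x7f < 24);
	/* '<<' builds the intended watermark field mask in bits 30:24. */
	unsigned int fixed = (0x7f << 24);

	printf("broken=0x%x fixed=0x%08x\n", broken, fixed);
	return 0;
}
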
index 55513411a82e68e11d6b1ca30e90ea4337a0f2ee..ed8561d4a90f4b5e25683a5483f0d98248d7dca2 100644 (file)
@@ -262,7 +262,6 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
 
                spin_lock_irqsave(&priv->echo_lock, flags);
                can_get_echo_skb(priv->ndev, msg->client);
-               spin_unlock_irqrestore(&priv->echo_lock, flags);
 
                /* count bytes of the echo instead of skb */
                stats->tx_bytes += cf_len;
@@ -271,6 +270,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
                /* restart tx queue (a slot is free) */
                netif_wake_queue(priv->ndev);
 
+               spin_unlock_irqrestore(&priv->echo_lock, flags);
                return 0;
        }
 
@@ -333,7 +333,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
 
        /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */
        if (pucan_status_is_rx_barrier(msg)) {
-               unsigned long flags;
 
                if (priv->enable_tx_path) {
                        int err = priv->enable_tx_path(priv);
@@ -342,16 +341,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
                                return err;
                }
 
-               /* restart network queue only if echo skb array is free */
-               spin_lock_irqsave(&priv->echo_lock, flags);
-
-               if (!priv->can.echo_skb[priv->echo_idx]) {
-                       spin_unlock_irqrestore(&priv->echo_lock, flags);
-
-                       netif_wake_queue(ndev);
-               } else {
-                       spin_unlock_irqrestore(&priv->echo_lock, flags);
-               }
+               /* start network queue (echo_skb array is empty) */
+               netif_start_queue(ndev);
 
                return 0;
        }
@@ -726,11 +717,6 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
         */
        should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);
 
-       spin_unlock_irqrestore(&priv->echo_lock, flags);
-
-       /* write the skb on the interface */
-       priv->write_tx_msg(priv, msg);
-
        /* stop network tx queue if not enough room to save one more msg too */
        if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
                should_stop_tx_queue |= (room_left <
@@ -742,6 +728,11 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
        if (should_stop_tx_queue)
                netif_stop_queue(ndev);
 
+       spin_unlock_irqrestore(&priv->echo_lock, flags);
+
+       /* write the skb on the interface */
+       priv->write_tx_msg(priv, msg);
+
        return NETDEV_TX_OK;
 }
 
index 788c3464a3b0e95aaa101591750b9de493a34a18..3c51a884db87bc90e71d5df8d5b0a91eadf69cdb 100644 (file)
@@ -349,8 +349,12 @@ static irqreturn_t pciefd_irq_handler(int irq, void *arg)
                priv->tx_pages_free++;
                spin_unlock_irqrestore(&priv->tx_lock, flags);
 
-               /* wake producer up */
-               netif_wake_queue(priv->ucan.ndev);
+               /* wake producer up (only if enough room in echo_skb array) */
+               spin_lock_irqsave(&priv->ucan.echo_lock, flags);
+               if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx])
+                       netif_wake_queue(priv->ucan.ndev);
+
+               spin_unlock_irqrestore(&priv->ucan.echo_lock, flags);
        }
 
        /* re-enable Rx DMA transfer for this CAN */
index d040aeb45172662320bf63c75b094cde35b9b294..15c2a831edf192b2678901c9a4c6fce7e9df62cd 100644 (file)
@@ -1,7 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_NET_DSA_BCM_SF2)  += bcm-sf2.o
 bcm-sf2-objs                   := bcm_sf2.o bcm_sf2_cfp.o
-obj-$(CONFIG_NET_DSA_LOOP)     += dsa_loop.o dsa_loop_bdinfo.o
+obj-$(CONFIG_NET_DSA_LOOP)     += dsa_loop.o
+ifdef CONFIG_NET_DSA_LOOP
+obj-$(CONFIG_FIXED_PHY)                += dsa_loop_bdinfo.o
+endif
 obj-$(CONFIG_NET_DSA_MT7530)   += mt7530.o
 obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
 obj-$(CONFIG_NET_DSA_QCA8K)    += qca8k.o
index db830a1141d99774f6e21037a63840518dfdab25..63e02a54d5379202592a9dcdbbb3e0e095996173 100644 (file)
@@ -814,8 +814,8 @@ void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
        unsigned int i;
 
        for (i = 0; i < mib_size; i++)
-               memcpy(data + i * ETH_GSTRING_LEN,
-                      mibs[i].name, ETH_GSTRING_LEN);
+               strlcpy(data + i * ETH_GSTRING_LEN,
+                       mibs[i].name, ETH_GSTRING_LEN);
 }
 EXPORT_SYMBOL(b53_get_strings);
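
For context on the b53_get_strings() change: copying a full ETH_GSTRING_LEN block with memcpy() reads past the end of MIB name literals shorter than the buffer, while a length-bounded copy only touches the bytes that exist. A hedged userspace sketch using a stand-in for the kernel's strlcpy(); names and sizes below are illustrative:

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32  /* stands in for ETH_GSTRING_LEN */

/* Minimal stand-in for the kernel's strlcpy(): copies at most size-1 bytes
 * of src and always NUL-terminates dst, returning strlen(src).
 */
static size_t ex_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t copy = len < size - 1 ? len : size - 1;

		memcpy(dst, src, copy);
		dst[copy] = '\0';
	}
	return len;
}

int main(void)
{
	char buf[GSTRING_LEN];
	const char *name = "TxOctets";   /* much shorter than GSTRING_LEN */

	/* memcpy(buf, name, GSTRING_LEN) would read past the 9-byte literal;
	 * the bounded copy stops at the terminating NUL.
	 */
	ex_strlcpy(buf, name, sizeof(buf));
	printf("%s\n", buf);
	return 0;
}
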
 
index 29c3075bfb052f1dbc7e788bf093bd0b8a94f152..fdc673484addcf09a8edd0f69f1c9d167816619f 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 config NET_VENDOR_8390
-       bool "National Semi-conductor 8390 devices"
+       bool "National Semiconductor 8390 devices"
        default y
        depends on NET_VENDOR_NATSEMI
        ---help---
index 0b49f1aeebd3dd98d6e0491aa1cc3e46a996a5a9..fc7383106946ca6461f62ea305be0f03bb59c227 100644 (file)
@@ -36,6 +36,8 @@
 #define AQ_CFG_TX_FRAME_MAX  (16U * 1024U)
 #define AQ_CFG_RX_FRAME_MAX  (4U * 1024U)
 
+#define AQ_CFG_TX_CLEAN_BUDGET 256U
+
 /* LRO */
 #define AQ_CFG_IS_LRO_DEF           1U
 
index ebbaf63eaf475123a0d67b7eef8cc1ed42e348e6..c96a92118b8b85272e7c3551dc5de31da3bf8852 100644 (file)
@@ -247,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
        self->ndev->hw_features |= aq_hw_caps->hw_features;
        self->ndev->features = aq_hw_caps->hw_features;
        self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
+       self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
        self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
        self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
 
@@ -937,3 +939,23 @@ err_exit:
 out:
        return err;
 }
+
+void aq_nic_shutdown(struct aq_nic_s *self)
+{
+       int err = 0;
+
+       if (!self->ndev)
+               return;
+
+       rtnl_lock();
+
+       netif_device_detach(self->ndev);
+
+       err = aq_nic_stop(self);
+       if (err < 0)
+               goto err_exit;
+       aq_nic_deinit(self);
+
+err_exit:
+       rtnl_unlock();
+}
\ No newline at end of file
index d16b0f1a95aa485753f90afda57ad0edf86081c1..219b550d16650bd6b205fb6e10855627a0fd277b 100644 (file)
@@ -118,5 +118,6 @@ struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
 u32 aq_nic_get_fw_version(struct aq_nic_s *self);
 int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
 int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
+void aq_nic_shutdown(struct aq_nic_s *self);
 
 #endif /* AQ_NIC_H */
index 87c4308b52a7cc7666a88984712d24198214a741..ecc6306f940f5d9f975d9cd422114f0be05c3435 100644 (file)
@@ -323,6 +323,20 @@ static void aq_pci_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
+static void aq_pci_shutdown(struct pci_dev *pdev)
+{
+       struct aq_nic_s *self = pci_get_drvdata(pdev);
+
+       aq_nic_shutdown(self);
+
+       pci_disable_device(pdev);
+
+       if (system_state == SYSTEM_POWER_OFF) {
+               pci_wake_from_d3(pdev, false);
+               pci_set_power_state(pdev, PCI_D3hot);
+       }
+}
+
 static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
 {
        struct aq_nic_s *self = pci_get_drvdata(pdev);
@@ -345,6 +359,7 @@ static struct pci_driver aq_pci_ops = {
        .remove = aq_pci_remove,
        .suspend = aq_pci_suspend,
        .resume = aq_pci_resume,
+       .shutdown = aq_pci_shutdown,
 };
 
 module_pci_driver(aq_pci_ops);
index 0be6a11370bb3e233370c0dd377c8558310ab0f5..b5f1f62e8e253785436fa7cd9119a8467edf4fd4 100644 (file)
@@ -136,11 +136,12 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
                netif_stop_subqueue(ndev, ring->idx);
 }
 
-void aq_ring_tx_clean(struct aq_ring_s *self)
+bool aq_ring_tx_clean(struct aq_ring_s *self)
 {
        struct device *dev = aq_nic_get_dev(self->aq_nic);
+       unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET;
 
-       for (; self->sw_head != self->hw_head;
+       for (; self->sw_head != self->hw_head && budget--;
                self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
                struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
 
@@ -167,6 +168,8 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
                buff->pa = 0U;
                buff->eop_index = 0xffffU;
        }
+
+       return !!budget;
 }
 
 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
index 965fae0fb6e0ddee8165a91097aeb148cb1308ab..ac1329f4051d7f3681e18f0e886fe9ec58accc88 100644 (file)
@@ -153,7 +153,7 @@ void aq_ring_free(struct aq_ring_s *self);
 void aq_ring_update_queue_state(struct aq_ring_s *ring);
 void aq_ring_queue_wake(struct aq_ring_s *ring);
 void aq_ring_queue_stop(struct aq_ring_s *ring);
-void aq_ring_tx_clean(struct aq_ring_s *self);
+bool aq_ring_tx_clean(struct aq_ring_s *self);
 int aq_ring_rx_clean(struct aq_ring_s *self,
                     struct napi_struct *napi,
                     int *work_done,
index f890b8a5a8623ef20a4c3ca016b4dbe2ad16f475..d335c334fa561ed2ae1a8dad45fcd9af822ee0a7 100644 (file)
@@ -35,12 +35,12 @@ struct aq_vec_s {
 static int aq_vec_poll(struct napi_struct *napi, int budget)
 {
        struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
+       unsigned int sw_tail_old = 0U;
        struct aq_ring_s *ring = NULL;
+       bool was_tx_cleaned = true;
+       unsigned int i = 0U;
        int work_done = 0;
        int err = 0;
-       unsigned int i = 0U;
-       unsigned int sw_tail_old = 0U;
-       bool was_tx_cleaned = false;
 
        if (!self) {
                err = -EINVAL;
@@ -57,9 +57,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
 
                        if (ring[AQ_VEC_TX_ID].sw_head !=
                            ring[AQ_VEC_TX_ID].hw_head) {
-                               aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
+                               was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
                                aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
-                               was_tx_cleaned = true;
                        }
 
                        err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
@@ -90,7 +89,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
                        }
                }
 
-               if (was_tx_cleaned)
+               if (!was_tx_cleaned)
                        work_done = budget;
 
                if (work_done < budget) {
index 967f0fd07fcf2d5c377476b6feabba9e9b4f1eac..d3b847ec7465cc6a87f345d53b777662c969140b 100644 (file)
 
 #define HW_ATL_UCP_0X370_REG    0x0370U
 
+#define HW_ATL_MIF_CMD          0x0200U
+#define HW_ATL_MIF_ADDR         0x0208U
+#define HW_ATL_MIF_VAL          0x020CU
+
 #define HW_ATL_FW_SM_RAM        0x2U
 #define HW_ATL_MPI_FW_VERSION  0x18
 #define HW_ATL_MPI_CONTROL_ADR  0x0368U
@@ -79,16 +83,15 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
 
 static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
 {
+       u32 gsr, val;
        int k = 0;
-       u32 gsr;
 
        aq_hw_write_reg(self, 0x404, 0x40e1);
        AQ_HW_SLEEP(50);
 
        /* Cleanup SPI */
-       aq_hw_write_reg(self, 0x534, 0xA0);
-       aq_hw_write_reg(self, 0x100, 0x9F);
-       aq_hw_write_reg(self, 0x100, 0x809F);
+       val = aq_hw_read_reg(self, 0x53C);
+       aq_hw_write_reg(self, 0x53C, val | 0x10);
 
        gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
        aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
@@ -97,7 +100,14 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
        aq_hw_write_reg(self, 0x404, 0x80e0);
        aq_hw_write_reg(self, 0x32a8, 0x0);
        aq_hw_write_reg(self, 0x520, 0x1);
+
+       /* Reset SPI again because of a possibly interrupted SPI burst */
+       val = aq_hw_read_reg(self, 0x53C);
+       aq_hw_write_reg(self, 0x53C, val | 0x10);
        AQ_HW_SLEEP(10);
+       /* Clear SPI reset state */
+       aq_hw_write_reg(self, 0x53C, val & ~0x10);
+
        aq_hw_write_reg(self, 0x404, 0x180e0);
 
        for (k = 0; k < 1000; k++) {
@@ -141,13 +151,15 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
                aq_pr_err("FW kickstart failed\n");
                return -EIO;
        }
+       /* Old FW requires fixed delay after init */
+       AQ_HW_SLEEP(15);
 
        return 0;
 }
 
 static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
 {
-       u32 gsr, rbl_status;
+       u32 gsr, val, rbl_status;
        int k;
 
        aq_hw_write_reg(self, 0x404, 0x40e1);
@@ -157,6 +169,10 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
        /* Alter RBL status */
        aq_hw_write_reg(self, 0x388, 0xDEAD);
 
+       /* Cleanup SPI */
+       val = aq_hw_read_reg(self, 0x53C);
+       aq_hw_write_reg(self, 0x53C, val | 0x10);
+
        /* Global software reset*/
        hw_atl_rx_rx_reg_res_dis_set(self, 0U);
        hw_atl_tx_tx_reg_res_dis_set(self, 0U);
@@ -204,6 +220,8 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
                aq_pr_err("FW kickstart failed\n");
                return -EIO;
        }
+       /* Old FW requires fixed delay after init */
+       AQ_HW_SLEEP(15);
 
        return 0;
 }
@@ -255,18 +273,22 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
                }
        }
 
-       aq_hw_write_reg(self, 0x00000208U, a);
-
-       for (++cnt; --cnt;) {
-               u32 i = 0U;
+       aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a);
 
-               aq_hw_write_reg(self, 0x00000200U, 0x00008000U);
+       for (++cnt; --cnt && !err;) {
+               aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);
 
-               for (i = 1024U;
-                       (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) {
-               }
+               if (IS_CHIP_FEATURE(REVISION_B1))
+                       AQ_HW_WAIT_FOR(a != aq_hw_read_reg(self,
+                                                          HW_ATL_MIF_ADDR),
+                                      1, 1000U);
+               else
+                       AQ_HW_WAIT_FOR(!(0x100 & aq_hw_read_reg(self,
+                                                          HW_ATL_MIF_CMD)),
+                                      1, 1000U);
 
-               *(p++) = aq_hw_read_reg(self, 0x0000020CU);
+               *(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
+               a += 4;
        }
 
        hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
@@ -662,14 +684,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
        u32 val = hw_atl_reg_glb_mif_id_get(self);
        u32 mif_rev = val & 0xFFU;
 
-       if ((3U & mif_rev) == 1U) {
-               chip_features |=
-                       HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
+       if ((0xFU & mif_rev) == 1U) {
+               chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
                        HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
                        HAL_ATLANTIC_UTILS_CHIP_MIPS;
-       } else if ((3U & mif_rev) == 2U) {
-               chip_features |=
-                       HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
+       } else if ((0xFU & mif_rev) == 2U) {
+               chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
+                       HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
+                       HAL_ATLANTIC_UTILS_CHIP_MIPS |
+                       HAL_ATLANTIC_UTILS_CHIP_TPO2 |
+                       HAL_ATLANTIC_UTILS_CHIP_RPF2;
+       } else if ((0xFU & mif_rev) == 0xAU) {
+               chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |
                        HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
                        HAL_ATLANTIC_UTILS_CHIP_MIPS |
                        HAL_ATLANTIC_UTILS_CHIP_TPO2 |
index 2c690947910a3927f559efd63df20d99b0e8010b..cd8f18f39c611f8f709f71c7a1c23da8332a3fa4 100644 (file)
@@ -161,6 +161,7 @@ struct __packed hw_aq_atl_utils_mbox {
 #define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ       0x00000010U
 #define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0  0x01000000U
 #define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0  0x02000000U
+#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1  0x04000000U
 
 #define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
        self->chip_features)
index 5265b937677bcada0c38e7b41ee3b744299cbece..a445de6837a6c8bff1c250d4702612f4795b2477 100644 (file)
@@ -13,7 +13,7 @@
 #define NIC_MAJOR_DRIVER_VERSION           2
 #define NIC_MINOR_DRIVER_VERSION           0
 #define NIC_BUILD_DRIVER_VERSION           2
-#define NIC_REVISION_DRIVER_VERSION        0
+#define NIC_REVISION_DRIVER_VERSION        1
 
 #define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
 
index 16f9bee992fedfab2069a2324c38fd4a5f142c93..0f65768026072ae7ded390fef283269f180f6e24 100644 (file)
@@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
        /* Optional regulator for PHY */
        priv->regulator = devm_regulator_get_optional(dev, "phy");
        if (IS_ERR(priv->regulator)) {
-               if (PTR_ERR(priv->regulator) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
+               if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
+                       err = -EPROBE_DEFER;
+                       goto out_clk_disable;
+               }
                dev_err(dev, "no regulator found\n");
                priv->regulator = NULL;
        }
index f15a8fc6dfc97419f8e1492dd1717b9d2e562b84..3fc549b88c43b082bd22023f99ec13fbf1d78525 100644 (file)
@@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
 static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                                             struct bcm_sysport_tx_ring *ring)
 {
-       unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
        unsigned int pkts_compl = 0, bytes_compl = 0;
        struct net_device *ndev = priv->netdev;
+       unsigned int txbds_processed = 0;
        struct bcm_sysport_cb *cb;
+       unsigned int txbds_ready;
+       unsigned int c_index;
        u32 hw_ind;
 
        /* Clear status before servicing to reduce spurious interrupts */
@@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
        /* Compute how many descriptors have been processed since last call */
        hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
        c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
-       ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
-
-       last_c_index = ring->c_index;
-       num_tx_cbs = ring->size;
-
-       c_index &= (num_tx_cbs - 1);
-
-       if (c_index >= last_c_index)
-               last_tx_cn = c_index - last_c_index;
-       else
-               last_tx_cn = num_tx_cbs - last_c_index + c_index;
+       txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
 
        netif_dbg(priv, tx_done, ndev,
-                 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
-                 ring->index, c_index, last_tx_cn, last_c_index);
+                 "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+                 ring->index, ring->c_index, c_index, txbds_ready);
 
-       while (last_tx_cn-- > 0) {
-               cb = ring->cbs + last_c_index;
+       while (txbds_processed < txbds_ready) {
+               cb = &ring->cbs[ring->clean_index];
                bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
 
                ring->desc_count++;
-               last_c_index++;
-               last_c_index &= (num_tx_cbs - 1);
+               txbds_processed++;
+
+               if (likely(ring->clean_index < ring->size - 1))
+                       ring->clean_index++;
+               else
+                       ring->clean_index = 0;
        }
 
        u64_stats_update_begin(&priv->syncp);
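
For reference, the txbds_ready computation above relies on masked unsigned subtraction, which stays correct when the hardware consumer index wraps past the mask. A small illustration with hypothetical index values; the mask stands in for RING_CONS_INDEX_MASK:

#include <stdint.h>
#include <stdio.h>

#define CONS_INDEX_MASK 0xffff  /* stands in for RING_CONS_INDEX_MASK */

int main(void)
{
	/* Illustrative values: the hardware consumer index has wrapped past
	 * the mask while the driver's cached index has not.
	 */
	uint32_t hw_c_index = 0x0005;
	uint32_t sw_c_index = 0xfffb;

	/* Masked subtraction yields the number of completed descriptors even
	 * across the wrap: (0x0005 - 0xfffb) & 0xffff == 10.
	 */
	uint32_t ready = (hw_c_index - sw_c_index) & CONS_INDEX_MASK;

	printf("txbds_ready = %u\n", ready);
	return 0;
}
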
@@ -1394,6 +1390,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
        netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
        ring->index = index;
        ring->size = size;
+       ring->clean_index = 0;
        ring->alloc_size = ring->size;
        ring->desc_cpu = p;
        ring->desc_count = ring->size;
index f5a984c1c986535f3421bafd9c851ec995ccf3b0..19c91c76e32763f399ebc8d67c7a0b647da44572 100644 (file)
@@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring {
        unsigned int    desc_count;     /* Number of descriptors */
        unsigned int    curr_desc;      /* Current descriptor */
        unsigned int    c_index;        /* Last consumer index */
-       unsigned int    p_index;        /* Current producer index */
+       unsigned int    clean_index;    /* Current clean index */
        struct bcm_sysport_cb *cbs;     /* Transmit control blocks */
        struct dma_desc *desc_cpu;      /* CPU view of the descriptor */
        struct bcm_sysport_priv *priv;  /* private context backpointer */
index 74fc9af4aadb4358a53858fa93e6b185637a618e..b8388e93520a1a45b6d20b7369f0e6c109c22f36 100644 (file)
@@ -13913,7 +13913,7 @@ static void bnx2x_register_phc(struct bnx2x *bp)
        bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
        if (IS_ERR(bp->ptp_clock)) {
                bp->ptp_clock = NULL;
-               BNX2X_ERR("PTP clock registeration failed\n");
+               BNX2X_ERR("PTP clock registration failed\n");
        }
 }
 
index 1500243b988650625c5deeaf5ac9759e2670b514..c7e5e6f09647d5d798e50db2beb1ee7b1e28f277 100644 (file)
@@ -1439,7 +1439,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
            (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                u16 vlan_proto = tpa_info->metadata >>
                        RX_CMP_FLAGS2_METADATA_TPID_SFT;
-               u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
+               u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
 
                __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
        }
@@ -1623,7 +1623,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
             cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
            (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
-               u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
+               u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
                u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
 
                __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
@@ -3847,6 +3847,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
        struct hwrm_vnic_tpa_cfg_input req = {0};
 
+       if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+               return 0;
+
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
 
        if (tpa_flags) {
@@ -4558,18 +4561,17 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
        return rc;
 }
 
-static int
-bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-                          int ring_grps, int cp_rings, int vnics)
+static void
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
+                            int tx_rings, int rx_rings, int ring_grps,
+                            int cp_rings, int vnics)
 {
-       struct hwrm_func_cfg_input req = {0};
        u32 enables = 0;
-       int rc;
 
-       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-       req.fid = cpu_to_le16(0xffff);
+       bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
+       req->fid = cpu_to_le16(0xffff);
        enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
-       req.num_tx_rings = cpu_to_le16(tx_rings);
+       req->num_tx_rings = cpu_to_le16(tx_rings);
        if (bp->flags & BNXT_FLAG_NEW_RM) {
                enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
                enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
@@ -4578,16 +4580,53 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
                           FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
                enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
 
-               req.num_rx_rings = cpu_to_le16(rx_rings);
-               req.num_hw_ring_grps = cpu_to_le16(ring_grps);
-               req.num_cmpl_rings = cpu_to_le16(cp_rings);
-               req.num_stat_ctxs = req.num_cmpl_rings;
-               req.num_vnics = cpu_to_le16(vnics);
+               req->num_rx_rings = cpu_to_le16(rx_rings);
+               req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+               req->num_cmpl_rings = cpu_to_le16(cp_rings);
+               req->num_stat_ctxs = req->num_cmpl_rings;
+               req->num_vnics = cpu_to_le16(vnics);
        }
-       if (!enables)
+       req->enables = cpu_to_le32(enables);
+}
+
+static void
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
+                            struct hwrm_func_vf_cfg_input *req, int tx_rings,
+                            int rx_rings, int ring_grps, int cp_rings,
+                            int vnics)
+{
+       u32 enables = 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
+       enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+       enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+       enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+                             FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+       enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+       enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+       req->num_tx_rings = cpu_to_le16(tx_rings);
+       req->num_rx_rings = cpu_to_le16(rx_rings);
+       req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+       req->num_cmpl_rings = cpu_to_le16(cp_rings);
+       req->num_stat_ctxs = req->num_cmpl_rings;
+       req->num_vnics = cpu_to_le16(vnics);
+
+       req->enables = cpu_to_le32(enables);
+}
+
+static int
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+                          int ring_grps, int cp_rings, int vnics)
+{
+       struct hwrm_func_cfg_input req = {0};
+       int rc;
+
+       __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+                                    cp_rings, vnics);
+       if (!req.enables)
                return 0;
 
-       req.enables = cpu_to_le32(enables);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                return -ENOMEM;
@@ -4604,7 +4643,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
                           int ring_grps, int cp_rings, int vnics)
 {
        struct hwrm_func_vf_cfg_input req = {0};
-       u32 enables = 0;
        int rc;
 
        if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
@@ -4612,22 +4650,8 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
                return 0;
        }
 
-       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
-       enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
-       enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
-       enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-                             FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
-       enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
-       enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
-
-       req.num_tx_rings = cpu_to_le16(tx_rings);
-       req.num_rx_rings = cpu_to_le16(rx_rings);
-       req.num_hw_ring_grps = cpu_to_le16(ring_grps);
-       req.num_cmpl_rings = cpu_to_le16(cp_rings);
-       req.num_stat_ctxs = req.num_cmpl_rings;
-       req.num_vnics = cpu_to_le16(vnics);
-
-       req.enables = cpu_to_le32(enables);
+       __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+                                    cp_rings, vnics);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                return -ENOMEM;
@@ -4743,39 +4767,25 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
 }
 
 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-                                   int ring_grps, int cp_rings)
+                                   int ring_grps, int cp_rings, int vnics)
 {
        struct hwrm_func_vf_cfg_input req = {0};
-       u32 flags, enables;
+       u32 flags;
        int rc;
 
        if (!(bp->flags & BNXT_FLAG_NEW_RM))
                return 0;
 
-       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+       __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+                                    cp_rings, vnics);
        flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
                FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
                FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
                FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
                FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
                FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
-       enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
-                 FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
-                 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-                 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
-                 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
-                 FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
 
        req.flags = cpu_to_le32(flags);
-       req.enables = cpu_to_le32(enables);
-       req.num_tx_rings = cpu_to_le16(tx_rings);
-       req.num_rx_rings = cpu_to_le16(rx_rings);
-       req.num_cmpl_rings = cpu_to_le16(cp_rings);
-       req.num_hw_ring_grps = cpu_to_le16(ring_grps);
-       req.num_stat_ctxs = cpu_to_le16(cp_rings);
-       req.num_vnics = cpu_to_le16(1);
-       if (bp->flags & BNXT_FLAG_RFS)
-               req.num_vnics = cpu_to_le16(rx_rings + 1);
        rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                return -ENOMEM;
@@ -4783,38 +4793,23 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 }
 
 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-                                   int ring_grps, int cp_rings)
+                                   int ring_grps, int cp_rings, int vnics)
 {
        struct hwrm_func_cfg_input req = {0};
-       u32 flags, enables;
+       u32 flags;
        int rc;
 
-       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-       req.fid = cpu_to_le16(0xffff);
+       __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+                                    cp_rings, vnics);
        flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
-       enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
-       req.num_tx_rings = cpu_to_le16(tx_rings);
-       if (bp->flags & BNXT_FLAG_NEW_RM) {
+       if (bp->flags & BNXT_FLAG_NEW_RM)
                flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
                         FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
                         FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
                         FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
                         FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
-               enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
-                          FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-                          FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
-                          FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
-                          FUNC_CFG_REQ_ENABLES_NUM_VNICS;
-               req.num_rx_rings = cpu_to_le16(rx_rings);
-               req.num_cmpl_rings = cpu_to_le16(cp_rings);
-               req.num_hw_ring_grps = cpu_to_le16(ring_grps);
-               req.num_stat_ctxs = cpu_to_le16(cp_rings);
-               req.num_vnics = cpu_to_le16(1);
-               if (bp->flags & BNXT_FLAG_RFS)
-                       req.num_vnics = cpu_to_le16(rx_rings + 1);
-       }
+
        req.flags = cpu_to_le32(flags);
-       req.enables = cpu_to_le32(enables);
        rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                return -ENOMEM;
@@ -4822,17 +4817,17 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 }
 
 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-                                int ring_grps, int cp_rings)
+                                int ring_grps, int cp_rings, int vnics)
 {
        if (bp->hwrm_spec_code < 0x10801)
                return 0;
 
        if (BNXT_PF(bp))
                return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
-                                               ring_grps, cp_rings);
+                                               ring_grps, cp_rings, vnics);
 
        return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
-                                       cp_rings);
+                                       cp_rings, vnics);
 }
 
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
@@ -5865,7 +5860,6 @@ static int bnxt_init_msix(struct bnxt *bp)
                if (rc)
                        goto msix_setup_exit;
 
-               bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
                bp->cp_nr_rings = (min == 1) ?
                                  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
                                  bp->tx_nr_rings + bp->rx_nr_rings;
@@ -5897,7 +5891,6 @@ static int bnxt_init_inta(struct bnxt *bp)
        bp->rx_nr_rings = 1;
        bp->tx_nr_rings = 1;
        bp->cp_nr_rings = 1;
-       bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
        bp->flags |= BNXT_FLAG_SHARED_RINGS;
        bp->irq_tbl[0].vector = bp->pdev->irq;
        return 0;
@@ -7531,7 +7524,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
        int max_rx, max_tx, tx_sets = 1;
        int tx_rings_needed;
        int rx_rings = rx;
-       int cp, rc;
+       int cp, vnics, rc;
 
        if (tcs)
                tx_sets = tcs;
@@ -7547,10 +7540,15 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
        if (max_tx < tx_rings_needed)
                return -ENOMEM;
 
+       vnics = 1;
+       if (bp->flags & BNXT_FLAG_RFS)
+               vnics += rx_rings;
+
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                rx_rings <<= 1;
        cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
-       return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
+       return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
+                                    vnics);
 }
 
 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -8437,13 +8435,20 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
                return 0;
 
        bnxt_hwrm_func_qcaps(bp);
-       __bnxt_close_nic(bp, true, false);
+
+       if (netif_running(bp->dev))
+               __bnxt_close_nic(bp, true, false);
+
        bnxt_clear_int_mode(bp);
        rc = bnxt_init_int_mode(bp);
-       if (rc)
-               dev_close(bp->dev);
-       else
-               rc = bnxt_open_nic(bp, true, false);
+
+       if (netif_running(bp->dev)) {
+               if (rc)
+                       dev_close(bp->dev);
+               else
+                       rc = bnxt_open_nic(bp, true, false);
+       }
+
        return rc;
 }
 
@@ -8664,6 +8669,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc)
                goto init_err_pci_clean;
 
+       /* No TC has been set yet and rings may have been trimmed due to
+        * limited MSIX, so we re-initialize the TX rings per TC.
+        */
+       bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+
        bnxt_get_wol_settings(bp);
        if (bp->flags & BNXT_FLAG_WOL_CAP)
                device_set_wakeup_enable(&pdev->dev, bp->wol);
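The per-TC assignment dropped from bnxt_init_msix() and bnxt_init_inta() in the earlier hunks reappears here in bnxt_init_one(), after any MSI-X trimming has happened, as the new comment explains. A rough sketch of the bookkeeping with assumed numbers (the later scaling by the TC count is paraphrased, not quoted from this patch):

	/* Probe time: MSI-X trimmed the request, leaving e.g. 6 TX rings */
	bp->tx_nr_rings = 6;
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;	/* 6 rings for the single implicit TC */

	/* If two TCs are configured later, the driver derives the new total from
	 * the per-TC value, roughly: tx_nr_rings = tx_nr_rings_per_tc * 2 = 12
	 */
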
index 1989c470172cba7ac56e8c030f847cd0c5a32531..5e3d62189cab8e05b5c65f2cae0e11fa6e3cf459 100644 (file)
@@ -189,6 +189,7 @@ struct rx_cmp_ext {
        #define RX_CMP_FLAGS2_T_L4_CS_CALC                      (0x1 << 3)
        #define RX_CMP_FLAGS2_META_FORMAT_VLAN                  (0x1 << 4)
        __le32 rx_cmp_meta_data;
+       #define RX_CMP_FLAGS2_METADATA_TCI_MASK                 0xffff
        #define RX_CMP_FLAGS2_METADATA_VID_MASK                 0xfff
        #define RX_CMP_FLAGS2_METADATA_TPID_MASK                0xffff0000
         #define RX_CMP_FLAGS2_METADATA_TPID_SFT                 16
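The new TCI mask widens what the RX path can hand to __vlan_hwaccel_put_tag(): the old VID mask keeps only the 12-bit VLAN ID, while 0xffff also preserves the PCP and DEI bits. A minimal sketch of how the metadata word decomposes under these defines (the helper name and its __le32 parameter are illustrative, not part of the patch):

	static void bnxt_sketch_vlan_from_meta(struct sk_buff *skb, __le32 meta_le)
	{
		u32 meta = le32_to_cpu(meta_le);
		u16 vtag = meta & RX_CMP_FLAGS2_METADATA_TCI_MASK;	/* PCP + DEI + VID */
		u16 vlan_proto = (meta & RX_CMP_FLAGS2_METADATA_TPID_MASK) >>
				 RX_CMP_FLAGS2_METADATA_TPID_SFT;	/* e.g. 0x8100 or 0x88a8 */

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}
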
index fbe6e208e17b9ad190d1ee017c68075d0af70bb9..65c2cee357669a7a7b5784b3c7b9a3805095304f 100644 (file)
@@ -349,6 +349,9 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
        if (rc)
                netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
                            __func__, flow_handle, rc);
+
+       if (rc)
+               rc = -EIO;
        return rc;
 }
 
@@ -484,13 +487,15 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
        req.action_flags = cpu_to_le16(action_flags);
 
        mutex_lock(&bp->hwrm_cmd_lock);
-
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                *flow_handle = resp->flow_handle;
-
        mutex_unlock(&bp->hwrm_cmd_lock);
 
+       if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
+               rc = -ENOSPC;
+       else if (rc)
+               rc = -EIO;
        return rc;
 }
 
@@ -561,6 +566,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
        mutex_unlock(&bp->hwrm_cmd_lock);
 
+       if (rc)
+               rc = -EIO;
        return rc;
 }
 
@@ -576,6 +583,9 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+
+       if (rc)
+               rc = -EIO;
        return rc;
 }
 
@@ -624,6 +634,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
        mutex_unlock(&bp->hwrm_cmd_lock);
 
+       if (rc)
+               rc = -EIO;
        return rc;
 }
 
@@ -639,6 +651,9 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+
+       if (rc)
+               rc = -EIO;
        return rc;
 }
 
@@ -1269,11 +1284,8 @@ static int bnxt_tc_del_flow(struct bnxt *bp,
        flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
                                           &tc_flow_cmd->cookie,
                                           tc_info->flow_ht_params);
-       if (!flow_node) {
-               netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
-                           tc_flow_cmd->cookie);
+       if (!flow_node)
                return -EINVAL;
-       }
 
        return __bnxt_tc_del_flow(bp, flow_node);
 }
@@ -1290,11 +1302,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
        flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
                                           &tc_flow_cmd->cookie,
                                           tc_info->flow_ht_params);
-       if (!flow_node) {
-               netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
-                           tc_flow_cmd->cookie);
+       if (!flow_node)
                return -1;
-       }
 
        flow = &flow_node->flow;
        curr_stats = &flow->stats;
@@ -1344,8 +1353,10 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
        } else {
                netdev_info(bp->dev, "error rc=%d", rc);
        }
-
        mutex_unlock(&bp->hwrm_cmd_lock);
+
+       if (rc)
+               rc = -EIO;
        return rc;
 }
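The bnxt_tc.c hunks above all apply the same pattern: raw firmware (HWRM) status codes are translated into proper errno values before they propagate to the TC/flower core. A minimal sketch of the mapping that each call site applies by hand (the helper name is illustrative, not part of the patch):

	static int bnxt_tc_hwrm_to_errno(int hwrm_rc)
	{
		if (hwrm_rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
			return -ENOSPC;		/* no room for another flow */
		return hwrm_rc ? -EIO : 0;	/* any other firmware failure */
	}
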
 
index c1841db1b500fa49f823c79e56cb3bc05f3f9199..f2593978ae75fb195f462a957d443eeddbbddb46 100644 (file)
@@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
 
                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 
-               usleep_range(10, 20);
+               udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }
 
index 7b452e85de2ad18a5613ac41687a9eaeab872f9f..61022b5f6743e7e52376bbb44b1cca64d7a64277 100644 (file)
@@ -4970,7 +4970,6 @@ static void cxgb4_mgmt_setup(struct net_device *dev)
        /* Initialize the device structure. */
        dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
        dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
-       dev->needs_free_netdev = true;
 }
 
 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
@@ -5181,6 +5180,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter->name = pci_name(pdev);
        adapter->mbox = func;
        adapter->pf = func;
+       adapter->params.chip = chip;
+       adapter->adap_idx = adap_idx;
        adapter->msg_enable = DFLT_MSG_ENABLE;
        adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
                                    (sizeof(struct mbox_cmd) *
index 5eb999af2c40004fc028ecd67901c492916480c5..bd3f6e4d134138424ec5d627e6ce1937a7529079 100644 (file)
@@ -540,6 +540,7 @@ static int gmac_setup_txqs(struct net_device *netdev)
 
        if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
                dev_warn(geth->dev, "TX queue base it not aligned\n");
+               kfree(skb_tab);
                return -ENOMEM;
        }
 
index 7caa8da484217e074a9b7938a220a292b536667a..e4ec32a9ca1526daf07b151d69b56090bc218562 100644 (file)
@@ -2008,7 +2008,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
        }
 
        if (unlikely(err < 0)) {
-               percpu_stats->tx_errors++;
                percpu_stats->tx_fifo_errors++;
                return err;
        }
@@ -2278,7 +2277,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
        vaddr = phys_to_virt(addr);
        prefetch(vaddr + qm_fd_get_offset(fd));
 
-       fd_format = qm_fd_get_format(fd);
        /* The only FD types that we may receive are contig and S/G */
        WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
 
@@ -2311,8 +2309,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 
        skb_len = skb->len;
 
-       if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+       if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
+               percpu_stats->rx_dropped++;
                return qman_cb_dqrr_consume;
+       }
 
        percpu_stats->rx_packets++;
        percpu_stats->rx_bytes += skb_len;
@@ -2860,7 +2860,7 @@ static int dpaa_remove(struct platform_device *pdev)
        struct device *dev;
        int err;
 
-       dev = &pdev->dev;
+       dev = pdev->dev.parent;
        net_dev = dev_get_drvdata(dev);
 
        priv = netdev_priv(net_dev);
index 7a7f3a42b2aa1ee12e467d9295b8422a835bdd12..d4604bc8eb5b04742534100c4c285065bda2021e 100644 (file)
@@ -3600,6 +3600,8 @@ fec_drv_remove(struct platform_device *pdev)
        fec_enet_mii_remove(fep);
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
+       pm_runtime_put(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
        of_node_put(fep->phy_node);
index ea43b497414986c55d07ce9b175082202f951044..7af31ddd093f8520a276a092ffd3481528177999 100644 (file)
@@ -1100,7 +1100,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
        set_bucket(dtsec->regs, bucket, true);
 
        /* Create element to be added to the driver hash table */
-       hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+       hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
        if (!hash_entry)
                return -ENOMEM;
        hash_entry->addr = addr;
index 86944bc3b273fd97232a60e68f25046e68042882..74bd260ca02a887869a507f8746dfc928522d4be 100644 (file)
@@ -666,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
 
 static int hns_gmac_get_sset_count(int stringset)
 {
-       if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+       if (stringset == ETH_SS_STATS)
                return ARRAY_SIZE(g_gmac_stats_string);
 
        return 0;
index b62816c1574eb840f74a334b904f9fd993733116..93e71e27401b4da815e899753dc7be1a83ff3f14 100644 (file)
@@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
 
 int hns_ppe_get_sset_count(int stringset)
 {
-       if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+       if (stringset == ETH_SS_STATS)
                return ETH_PPE_STATIC_NUM;
        return 0;
 }
index 6f3570cfb501604bea3f22d73374dd8dc28d756f..e2e28532e4dc2d03cf15330c621f8fb49469e382 100644 (file)
@@ -876,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
  */
 int hns_rcb_get_ring_sset_count(int stringset)
 {
-       if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+       if (stringset == ETH_SS_STATS)
                return HNS_RING_STATIC_REG_NUM;
 
        return 0;
index 7ea7f8a4aa2a9456f2d71cceccae9eff2b83421a..2e14a3ae1d8be0f9841a5c53f456c4d2e4f4d270 100644 (file)
@@ -993,8 +993,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
                        cnt--;
 
                return cnt;
-       } else {
+       } else if (stringset == ETH_SS_STATS) {
                return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
+       } else {
+               return -EOPNOTSUPP;
        }
 }
 
index afb7ebe20b2438e9500f5dff2b1126ccde9c4670..824fd44e25f0d694a20bed7733dfebfaff9d0195 100644 (file)
 #define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
 #define E1000_ICR_RXO           0x00000040 /* Receiver Overrun */
 #define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_MDAC          0x00000200 /* MDIO Access Complete */
+#define E1000_ICR_SRPD          0x00010000 /* Small Receive Packet Detected */
+#define E1000_ICR_ACK           0x00020000 /* Receive ACK Frame Detected */
+#define E1000_ICR_MNG           0x00040000 /* Manageability Event Detected */
 #define E1000_ICR_ECCER         0x00400000 /* Uncorrectable ECC Error */
 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_INT_ASSERTED 0x80000000
 #define E1000_ICR_RXQ1          0x00200000 /* Rx Queue 1 Interrupt */
 #define E1000_ICR_TXQ0          0x00400000 /* Tx Queue 0 Interrupt */
 #define E1000_ICR_TXQ1          0x00800000 /* Tx Queue 1 Interrupt */
-#define E1000_ICR_OTHER         0x01000000 /* Other Interrupts */
+#define E1000_ICR_OTHER         0x01000000 /* Other Interrupt */
 
 /* PBA ECC Register */
 #define E1000_PBA_ECC_COUNTER_MASK  0xFFF00000 /* ECC counter mask */
        E1000_IMS_RXSEQ  |    \
        E1000_IMS_LSC)
 
+/* These are all of the events related to the OTHER interrupt.
+ */
+#define IMS_OTHER_MASK ( \
+       E1000_IMS_LSC  | \
+       E1000_IMS_RXO  | \
+       E1000_IMS_MDAC | \
+       E1000_IMS_SRPD | \
+       E1000_IMS_ACK  | \
+       E1000_IMS_MNG)
+
 /* Interrupt Mask Set */
 #define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
 #define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
 #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
 #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
+#define E1000_IMS_RXO       E1000_ICR_RXO       /* Receiver Overrun */
 #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
+#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO Access Complete */
+#define E1000_IMS_SRPD      E1000_ICR_SRPD      /* Small Receive Packet */
+#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive ACK Frame Detected */
+#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability Event */
 #define E1000_IMS_ECCER     E1000_ICR_ECCER     /* Uncorrectable ECC Error */
 #define E1000_IMS_RXQ0      E1000_ICR_RXQ0      /* Rx Queue 0 Interrupt */
 #define E1000_IMS_RXQ1      E1000_ICR_RXQ1      /* Rx Queue 1 Interrupt */
index 31277d3bb7dc1241032695d2d9424779654f4f5f..1dddfb7b2de6c988c9686e82c49d8081f91d5f31 100644 (file)
@@ -1367,9 +1367,6 @@ out:
  *  Checks to see of the link status of the hardware has changed.  If a
  *  change in link status has been detected, then we read the PHY registers
  *  to get the current speed/duplex if link exists.
- *
- *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
- *  up).
  **/
 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 {
@@ -1385,7 +1382,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
         * Change or Rx Sequence Error interrupt.
         */
        if (!mac->get_link_status)
-               return 1;
+               return 0;
+       mac->get_link_status = false;
 
        /* First we want to see if the MII Status Register reports
         * link.  If so, then we want to get the current speed/duplex
@@ -1393,12 +1391,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
         */
        ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
        if (ret_val)
-               return ret_val;
+               goto out;
 
        if (hw->mac.type == e1000_pchlan) {
                ret_val = e1000_k1_gig_workaround_hv(hw, link);
                if (ret_val)
-                       return ret_val;
+                       goto out;
        }
 
        /* When connected at 10Mbps half-duplex, some parts are excessively
@@ -1431,7 +1429,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 
                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
-                       return ret_val;
+                       goto out;
 
                if (hw->mac.type == e1000_pch2lan)
                        emi_addr = I82579_RX_CONFIG;
@@ -1453,7 +1451,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                hw->phy.ops.release(hw);
 
                if (ret_val)
-                       return ret_val;
+                       goto out;
 
                if (hw->mac.type >= e1000_pch_spt) {
                        u16 data;
@@ -1462,14 +1460,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                        if (speed == SPEED_1000) {
                                ret_val = hw->phy.ops.acquire(hw);
                                if (ret_val)
-                                       return ret_val;
+                                       goto out;
 
                                ret_val = e1e_rphy_locked(hw,
                                                          PHY_REG(776, 20),
                                                          &data);
                                if (ret_val) {
                                        hw->phy.ops.release(hw);
-                                       return ret_val;
+                                       goto out;
                                }
 
                                ptr_gap = (data & (0x3FF << 2)) >> 2;
@@ -1483,18 +1481,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                                }
                                hw->phy.ops.release(hw);
                                if (ret_val)
-                                       return ret_val;
+                                       goto out;
                        } else {
                                ret_val = hw->phy.ops.acquire(hw);
                                if (ret_val)
-                                       return ret_val;
+                                       goto out;
 
                                ret_val = e1e_wphy_locked(hw,
                                                          PHY_REG(776, 20),
                                                          0xC023);
                                hw->phy.ops.release(hw);
                                if (ret_val)
-                                       return ret_val;
+                                       goto out;
 
                        }
                }
@@ -1521,7 +1519,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
                ret_val = e1000_k1_workaround_lpt_lp(hw, link);
                if (ret_val)
-                       return ret_val;
+                       goto out;
        }
        if (hw->mac.type >= e1000_pch_lpt) {
                /* Set platform power management values for
@@ -1529,7 +1527,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                 */
                ret_val = e1000_platform_pm_pch_lpt(hw, link);
                if (ret_val)
-                       return ret_val;
+                       goto out;
        }
 
        /* Clear link partner's EEE ability */
@@ -1552,9 +1550,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
        }
 
        if (!link)
-               return 0;       /* No link detected */
-
-       mac->get_link_status = false;
+               goto out;
 
        switch (hw->mac.type) {
        case e1000_pch2lan:
@@ -1616,12 +1612,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
         * different link partner.
         */
        ret_val = e1000e_config_fc_after_link_up(hw);
-       if (ret_val) {
+       if (ret_val)
                e_dbg("Error configuring flow control\n");
-               return ret_val;
-       }
 
-       return 1;
+       return ret_val;
+
+out:
+       mac->get_link_status = true;
+       return ret_val;
 }
 
 static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
index f457c5703d0c45d4c9f661395acca1a9814de686..5bdc3a2d4fd70aed476c8c0f17c180570b9eafb2 100644 (file)
@@ -410,9 +410,6 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
  *  Checks to see of the link status of the hardware has changed.  If a
  *  change in link status has been detected, then we read the PHY registers
  *  to get the current speed/duplex if link exists.
- *
- *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
- *  up).
  **/
 s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 {
@@ -426,20 +423,16 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
         * Change or Rx Sequence Error interrupt.
         */
        if (!mac->get_link_status)
-               return 1;
+               return 0;
+       mac->get_link_status = false;
 
        /* First we want to see if the MII Status Register reports
         * link.  If so, then we want to get the current speed/duplex
         * of the PHY.
         */
        ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
-       if (ret_val)
-               return ret_val;
-
-       if (!link)
-               return 0;       /* No link detected */
-
-       mac->get_link_status = false;
+       if (ret_val || !link)
+               goto out;
 
        /* Check if there was DownShift, must be checked
         * immediately after link-up
@@ -464,12 +457,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
         * different link partner.
         */
        ret_val = e1000e_config_fc_after_link_up(hw);
-       if (ret_val) {
+       if (ret_val)
                e_dbg("Error configuring flow control\n");
-               return ret_val;
-       }
 
-       return 1;
+       return ret_val;
+
+out:
+       mac->get_link_status = true;
+       return ret_val;
 }
 
 /**
index 1298b69f990b40628ef1fbb353e6d9f1bfafdd76..dc853b0863aff1b4f66043a1fe70bca6112d5aeb 100644 (file)
@@ -1914,30 +1914,20 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
        struct net_device *netdev = data;
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       u32 icr;
-       bool enable = true;
-
-       icr = er32(ICR);
-       if (icr & E1000_ICR_RXO) {
-               ew32(ICR, E1000_ICR_RXO);
-               enable = false;
-               /* napi poll will re-enable Other, make sure it runs */
-               if (napi_schedule_prep(&adapter->napi)) {
-                       adapter->total_rx_bytes = 0;
-                       adapter->total_rx_packets = 0;
-                       __napi_schedule(&adapter->napi);
-               }
-       }
+       u32 icr = er32(ICR);
+
+       if (icr & adapter->eiac_mask)
+               ew32(ICS, (icr & adapter->eiac_mask));
+
        if (icr & E1000_ICR_LSC) {
-               ew32(ICR, E1000_ICR_LSC);
                hw->mac.get_link_status = true;
                /* guard against interrupt when we're going down */
                if (!test_bit(__E1000_DOWN, &adapter->state))
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       if (enable && !test_bit(__E1000_DOWN, &adapter->state))
-               ew32(IMS, E1000_IMS_OTHER);
+       if (!test_bit(__E1000_DOWN, &adapter->state))
+               ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);
 
        return IRQ_HANDLED;
 }
@@ -2040,7 +2030,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
                       hw->hw_addr + E1000_EITR_82574(vector));
        else
                writel(1, hw->hw_addr + E1000_EITR_82574(vector));
-       adapter->eiac_mask |= E1000_IMS_OTHER;
 
        /* Cause Tx interrupts on every write back */
        ivar |= BIT(31);
@@ -2265,7 +2254,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
 
        if (adapter->msix_entries) {
                ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
-               ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC);
+               ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER |
+                    IMS_OTHER_MASK);
        } else if (hw->mac.type >= e1000_pch_lpt) {
                ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
        } else {
@@ -2333,8 +2323,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
 {
        struct pci_dev *pdev = adapter->pdev;
 
-       ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
-                                       GFP_KERNEL);
+       ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
+                                        GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;
 
@@ -2707,8 +2697,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
                napi_complete_done(napi, work_done);
                if (!test_bit(__E1000_DOWN, &adapter->state)) {
                        if (adapter->msix_entries)
-                               ew32(IMS, adapter->rx_ring->ims_val |
-                                    E1000_IMS_OTHER);
+                               ew32(IMS, adapter->rx_ring->ims_val);
                        else
                                e1000_irq_enable(adapter);
                }
@@ -5101,7 +5090,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
        case e1000_media_type_copper:
                if (hw->mac.get_link_status) {
                        ret_val = hw->mac.ops.check_for_link(hw);
-                       link_active = ret_val > 0;
+                       link_active = !hw->mac.get_link_status;
                } else {
                        link_active = true;
                }
index b698fb481b2ecb4d7f79da60775b063f7da58dfc..996dc099cd584c23167e4100b11701ba3025809a 100644 (file)
@@ -443,6 +443,17 @@ int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
 }
 EXPORT_SYMBOL(mlxsw_afa_block_jump);
 
+int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
+{
+       if (block->finished)
+               return -EINVAL;
+       mlxsw_afa_set_goto_set(block->cur_set,
+                              MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
+       block->finished = true;
+       return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_terminate);
+
 static struct mlxsw_afa_fwd_entry *
 mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
 {
index 43132293475ce9ce916eac0b49b8a9c822ffb214..b91f2b0829b04a417c5799ed2d353163f81db844 100644 (file)
@@ -65,6 +65,7 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
 u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
+int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
 int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
index c7e941aecc2a906861a9ab8ef19ad7071a60f1e6..bf400c75fcc8bc82253fe5237a476655ce323bf4 100644 (file)
@@ -655,13 +655,17 @@ static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
 }
 
 static struct mlxsw_sp_span_inspected_port *
-mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
-                                   struct mlxsw_sp_span_entry *span_entry)
+mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
+                                   enum mlxsw_sp_span_type type,
+                                   struct mlxsw_sp_port *port,
+                                   bool bind)
 {
        struct mlxsw_sp_span_inspected_port *p;
 
        list_for_each_entry(p, &span_entry->bound_ports_list, list)
-               if (port->local_port == p->local_port)
+               if (type == p->type &&
+                   port->local_port == p->local_port &&
+                   bind == p->bound)
                        return p;
        return NULL;
 }
@@ -691,8 +695,22 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
        struct mlxsw_sp_span_inspected_port *inspected_port;
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        char sbib_pl[MLXSW_REG_SBIB_LEN];
+       int i;
        int err;
 
+       /* A given (source port, direction) can only be bound to one analyzer,
+        * so if a binding is requested, check for conflicts.
+        */
+       if (bind)
+               for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+                       struct mlxsw_sp_span_entry *curr =
+                               &mlxsw_sp->span.entries[i];
+
+                       if (mlxsw_sp_span_entry_bound_port_find(curr, type,
+                                                               port, bind))
+                               return -EEXIST;
+               }
+
        /* if it is an egress SPAN, bind a shared buffer to it */
        if (type == MLXSW_SP_SPAN_EGRESS) {
                u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
@@ -720,6 +738,7 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
        }
        inspected_port->local_port = port->local_port;
        inspected_port->type = type;
+       inspected_port->bound = bind;
        list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
 
        return 0;
@@ -746,7 +765,8 @@ mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        char sbib_pl[MLXSW_REG_SBIB_LEN];
 
-       inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
+       inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
+                                                            port, bind);
        if (!inspected_port)
                return;
 
index 4ec1ca3c96c8c5dfc6eb24d2e90e56e415c21243..92064db2ae442136141b01e64832b1487acbe43d 100644 (file)
@@ -120,6 +120,9 @@ struct mlxsw_sp_span_inspected_port {
        struct list_head list;
        enum mlxsw_sp_span_type type;
        u8 local_port;
+
+       /* Whether this is a directly bound mirror (port-to-port) or an ACL. */
+       bool bound;
 };
 
 struct mlxsw_sp_span_entry {
@@ -553,6 +556,7 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
 int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
                                u16 group_id);
+int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
index 0897a5435cc2e205957d2158d184b3bc465a71ac..92d90ed7207e622ee70943e33d01f698b5d679f4 100644 (file)
@@ -528,6 +528,11 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
        return mlxsw_afa_block_jump(rulei->act_block, group_id);
 }
 
+int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
+{
+       return mlxsw_afa_block_terminate(rulei->act_block);
+}
+
 int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
 {
        return mlxsw_afa_block_append_drop(rulei->act_block);
index 93728c694e6df9985cadfccf72ab6f3a9f52f8da..0a9adc5962fb72b8dbeb3b61b3e0a28093982197 100644 (file)
@@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
 
 static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
        MLXSW_SP_CPU_PORT_SB_CM,
+       MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+       MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+       MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+       MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+       MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
        MLXSW_SP_CPU_PORT_SB_CM,
-       MLXSW_SP_CPU_PORT_SB_CM,
-       MLXSW_SP_CPU_PORT_SB_CM,
-       MLXSW_SP_CPU_PORT_SB_CM,
-       MLXSW_SP_CPU_PORT_SB_CM,
-       MLXSW_SP_CPU_PORT_SB_CM,
-       MLXSW_SP_SB_CM(10000, 0, 0),
+       MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
index 6ce00e28d4eac8043abb8a77c86a0aabe30531b3..89dbf569dff50c0db7d97d3b4e80e8bd7cf494d6 100644 (file)
@@ -65,7 +65,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_ok(a)) {
-                       err = mlxsw_sp_acl_rulei_act_continue(rulei);
+                       err = mlxsw_sp_acl_rulei_act_terminate(rulei);
                        if (err)
                                return err;
                } else if (is_tcf_gact_shot(a)) {
index a10ef50e4f12c3c949f0f2120c71d3562d8a0559..017fb23225897983b0440e1bf67db2ab0ae31fc1 100644 (file)
@@ -1,16 +1,16 @@
 #
-# National Semi-conductor device configuration
+# National Semiconductor device configuration
 #
 
 config NET_VENDOR_NATSEMI
-       bool "National Semi-conductor devices"
+       bool "National Semiconductor devices"
        default y
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
 
          Note that the answer to this question doesn't directly affect the
          kernel: saying N will just cause the configurator to skip all
-         the questions about National Semi-conductor devices. If you say Y,
+         the questions about National Semiconductor devices. If you say Y,
          you will be asked for your specific card in the following questions.
 
 if NET_VENDOR_NATSEMI
index cc664977596e2426de92453285d028bebd868ddf..a759aa09ef5960b979b951e00c4497fbb4bf2276 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 #
-# Makefile for the National Semi-conductor Sonic devices.
+# Makefile for the National Semiconductor Sonic devices.
 #
 
 obj-$(CONFIG_MACSONIC) += macsonic.o
index 6f546e869d8d69fd17c7eaeeec579d1134d3364d..00f41c145d4d01674d146fe1eda41a346b3cc5a1 100644 (file)
@@ -2480,7 +2480,10 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
        if (rc)
                return rc;
 
-       /* Free Task CXT */
+       /* Free Task CXT (intentionally RoCE, as the task-id is shared between
+        * RoCE and iWARP)
+        */
+       proto = PROTOCOLID_ROCE;
        rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
                                    qed_cxt_get_proto_tid_count(p_hwfn, proto));
        if (rc)
index ca4a81dc1ace685f4bb1cda85dc0e85746b4583a..d5d02be7294741a2adc46ac97f91f07d5ffea491 100644 (file)
@@ -1703,6 +1703,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
        iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
 
        if (eth_type == ETH_P_IP) {
+               if (iph->protocol != IPPROTO_TCP) {
+                       DP_NOTICE(p_hwfn,
+                                 "Unexpected ip protocol on ll2 %x\n",
+                                 iph->protocol);
+                       return -EINVAL;
+               }
+
                cm_info->local_ip[0] = ntohl(iph->daddr);
                cm_info->remote_ip[0] = ntohl(iph->saddr);
                cm_info->ip_version = TCP_IPV4;
@@ -1711,6 +1718,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
                *payload_len = ntohs(iph->tot_len) - ip_hlen;
        } else if (eth_type == ETH_P_IPV6) {
                ip6h = (struct ipv6hdr *)iph;
+
+               if (ip6h->nexthdr != IPPROTO_TCP) {
+                       DP_NOTICE(p_hwfn,
+                                 "Unexpected ip protocol on ll2 %x\n",
+                                 ip6h->nexthdr);
+                       return -EINVAL;
+               }
+
                for (i = 0; i < 4; i++) {
                        cm_info->local_ip[i] =
                            ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
@@ -1928,8 +1943,8 @@ qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
                /* Missing lower byte is now available */
                mpa_len = fpdu->fpdu_length | *mpa_data;
                fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
-               fpdu->mpa_frag_len = fpdu->fpdu_length;
                /* one byte of hdr */
+               fpdu->mpa_frag_len = 1;
                fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
                DP_VERBOSE(p_hwfn,
                           QED_MSG_RDMA,
index 5d040b873137d0917637b6df42eddc4e0e031595..a411f9c702a16ae6963aa5c7eda112cc5a72404d 100644 (file)
@@ -379,6 +379,7 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn)
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
 
        qed_rdma_free_reserved_lkey(p_hwfn);
+       qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
        qed_rdma_resc_free(p_hwfn);
 }
 
index 2db70eabddfec1edfe38b524233c3baaa9f883f5..a01e7d6e5442f079e9006811b82b4feb02dc23bc 100644 (file)
@@ -288,7 +288,7 @@ int __init qede_init(void)
        }
 
        /* Must register notifier before pci ops, since we might miss
-        * interface rename after pci probe and netdev registeration.
+        * interface rename after pci probe and netdev registration.
         */
        ret = register_netdevice_notifier(&qede_netdev_notifier);
        if (ret) {
@@ -988,7 +988,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
        if (rc)
                goto err3;
 
-       /* Prepare the lock prior to the registeration of the netdev,
+       /* Prepare the lock prior to the registration of the netdev,
         * as once it's registered we might reach flows requiring it
         * [it's even possible to reach a flow needing it directly
         * from there, although it's unlikely].
@@ -2067,8 +2067,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
        link_params.link_up = true;
        edev->ops->common->set_link(edev->cdev, &link_params);
 
-       qede_rdma_dev_event_open(edev);
-
        edev->state = QEDE_STATE_OPEN;
 
        DP_INFO(edev, "Ending successfully qede load\n");
@@ -2169,12 +2167,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
                        DP_NOTICE(edev, "Link is up\n");
                        netif_tx_start_all_queues(edev->ndev);
                        netif_carrier_on(edev->ndev);
+                       qede_rdma_dev_event_open(edev);
                }
        } else {
                if (netif_carrier_ok(edev->ndev)) {
                        DP_NOTICE(edev, "Link is down\n");
                        netif_tx_disable(edev->ndev);
                        netif_carrier_off(edev->ndev);
+                       qede_rdma_dev_event_close(edev);
                }
        }
 }
index 9b2280badaf77666ceab5cf0409f484ed08719b8..02adb513f4756cb58c423936213bdcb4158d1dfa 100644 (file)
@@ -485,7 +485,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
        ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
        if (IS_ERR(ptp->clock)) {
                rc = -EINVAL;
-               DP_ERR(edev, "PTP clock registeration failed\n");
+               DP_ERR(edev, "PTP clock registration failed\n");
                goto err2;
        }
 
index 9cbb27263742bf0506684bd2e76d517037217475..d5a32b7c7dc5a4d97c89ba9d33ca769e51c00daf 100644 (file)
@@ -1194,9 +1194,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
        while (tx_q->tpd.consume_idx != hw_consume_idx) {
                tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
                if (tpbuf->dma_addr) {
-                       dma_unmap_single(adpt->netdev->dev.parent,
-                                        tpbuf->dma_addr, tpbuf->length,
-                                        DMA_TO_DEVICE);
+                       dma_unmap_page(adpt->netdev->dev.parent,
+                                      tpbuf->dma_addr, tpbuf->length,
+                                      DMA_TO_DEVICE);
                        tpbuf->dma_addr = 0;
                }
 
@@ -1353,9 +1353,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
 
                tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
                tpbuf->length = mapped_len;
-               tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
-                                                skb->data, tpbuf->length,
-                                                DMA_TO_DEVICE);
+               tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+                                              virt_to_page(skb->data),
+                                              offset_in_page(skb->data),
+                                              tpbuf->length,
+                                              DMA_TO_DEVICE);
                ret = dma_mapping_error(adpt->netdev->dev.parent,
                                        tpbuf->dma_addr);
                if (ret)
@@ -1371,9 +1373,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
        if (mapped_len < len) {
                tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
                tpbuf->length = len - mapped_len;
-               tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
-                                                skb->data + mapped_len,
-                                                tpbuf->length, DMA_TO_DEVICE);
+               tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+                                              virt_to_page(skb->data +
+                                                           mapped_len),
+                                              offset_in_page(skb->data +
+                                                             mapped_len),
+                                              tpbuf->length, DMA_TO_DEVICE);
                ret = dma_mapping_error(adpt->netdev->dev.parent,
                                        tpbuf->dma_addr);
                if (ret)
index 012fb66eed8dd618d63fbeaad184accb0c08fc39..f0afb88d7bc2b02de3dc1054ec2ec5803f452a35 100644 (file)
@@ -2335,14 +2335,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
        pdata = netdev_priv(dev);
        BUG_ON(!pdata);
        BUG_ON(!pdata->ioaddr);
-       WARN_ON(dev->phydev);
 
        SMSC_TRACE(pdata, ifdown, "Stopping driver");
 
+       unregister_netdev(dev);
+
        mdiobus_unregister(pdata->mii_bus);
        mdiobus_free(pdata->mii_bus);
 
-       unregister_netdev(dev);
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           "smsc911x-memory");
        if (!res)
index 111e7ca9df5600c0ecd29bca11cf25a7f3e7e616..f5c5984afefb988c6c0ecd75234473f7084a2c32 100644 (file)
@@ -1295,7 +1295,7 @@ static int ave_open(struct net_device *ndev)
        val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
        writel(val, priv->base + AVE_IIRQC);
 
-       val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX;
+       val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;
        ave_irq_restore(ndev, val);
 
        napi_enable(&priv->napi_rx);
index 63d3d6b215f3096da6da95fe489bf1172c831a28..a94f50442613e9f77cec6aff24fbf19a5a33756b 100644 (file)
@@ -312,7 +312,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
        dev->ethtool_ops = &vnet_ethtool_ops;
        dev->watchdog_timeo = VNET_TX_TIMEOUT;
 
-       dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
+       dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO |
                           NETIF_F_HW_CSUM | NETIF_F_SG;
        dev->features = dev->hw_features;
 
index 1b1b78fdc1384975856fe6a3d8368fe868dfbc23..b2b30c9df037700728a51f2eb1ae007f8368ba3e 100644 (file)
@@ -1014,7 +1014,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
                /* set speed_in input in case RMII mode is used in 100Mbps */
                if (phy->speed == 100)
                        mac_control |= BIT(15);
-               else if (phy->speed == 10)
+               /* in band mode only works in 10Mbps RGMII mode */
+               else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
                        mac_control |= BIT(18); /* In Band mode */
 
                if (priv->rx_pause)
index 0db3bd1ea06f5a71eb6fc67ba00abb066c7f3414..32861036c3fcc9ec9e34938e0f916198307b419c 100644 (file)
@@ -173,6 +173,7 @@ struct rndis_device {
        struct list_head req_list;
 
        struct work_struct mcast_work;
+       u32 filter;
 
        bool link_state;        /* 0 - link up, 1 - link down */
 
@@ -211,7 +212,6 @@ void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
 
 void rndis_set_subchannel(struct work_struct *w);
-bool rndis_filter_opened(const struct netvsc_device *nvdev);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
index 0265d703eb030515dacab7a83e84c169f89250dd..7472172823f3edffb0dfd481dc67f03c12a6a6b9 100644 (file)
@@ -90,6 +90,11 @@ static void free_netvsc_device(struct rcu_head *head)
                = container_of(head, struct netvsc_device, rcu);
        int i;
 
+       kfree(nvdev->extension);
+       vfree(nvdev->recv_buf);
+       vfree(nvdev->send_buf);
+       kfree(nvdev->send_section_map);
+
        for (i = 0; i < VRSS_CHANNEL_MAX; i++)
                vfree(nvdev->chan_table[i].mrc.slots);
 
@@ -211,12 +216,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
                net_device->recv_buf_gpadl_handle = 0;
        }
 
-       if (net_device->recv_buf) {
-               /* Free up the receive buffer */
-               vfree(net_device->recv_buf);
-               net_device->recv_buf = NULL;
-       }
-
        if (net_device->send_buf_gpadl_handle) {
                ret = vmbus_teardown_gpadl(device->channel,
                                           net_device->send_buf_gpadl_handle);
@@ -231,12 +230,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
                }
                net_device->send_buf_gpadl_handle = 0;
        }
-       if (net_device->send_buf) {
-               /* Free up the send buffer */
-               vfree(net_device->send_buf);
-               net_device->send_buf = NULL;
-       }
-       kfree(net_device->send_section_map);
 }
 
 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
@@ -562,26 +555,29 @@ void netvsc_device_remove(struct hv_device *device)
                = rtnl_dereference(net_device_ctx->nvdev);
        int i;
 
-       cancel_work_sync(&net_device->subchan_work);
-
        netvsc_revoke_buf(device, net_device);
 
        RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
 
+       /* And disassociate NAPI context from device */
+       for (i = 0; i < net_device->num_chn; i++)
+               netif_napi_del(&net_device->chan_table[i].napi);
+
        /*
         * At this point, no one should be accessing net_device
         * except in here
         */
        netdev_dbg(ndev, "net device safe to remove\n");
 
+       /* older versions require that buffer be revoked before close */
+       if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4)
+               netvsc_teardown_gpadl(device, net_device);
+
        /* Now, we can close the channel safely */
        vmbus_close(device->channel);
 
-       netvsc_teardown_gpadl(device, net_device);
-
-       /* And dissassociate NAPI context from device */
-       for (i = 0; i < net_device->num_chn; i++)
-               netif_napi_del(&net_device->chan_table[i].napi);
+       if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4)
+               netvsc_teardown_gpadl(device, net_device);
 
        /* Release all resources */
        free_netvsc_device_rcu(net_device);
@@ -645,14 +641,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
        queue_sends =
                atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
 
-       if (net_device->destroy && queue_sends == 0)
-               wake_up(&net_device->wait_drain);
+       if (unlikely(net_device->destroy)) {
+               if (queue_sends == 0)
+                       wake_up(&net_device->wait_drain);
+       } else {
+               struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
-       if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
-           (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
-            queue_sends < 1)) {
-               netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
-               ndev_ctx->eth_stats.wake_queue++;
+               if (netif_tx_queue_stopped(txq) &&
+                   (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
+                    queue_sends < 1)) {
+                       netif_tx_wake_queue(txq);
+                       ndev_ctx->eth_stats.wake_queue++;
+               }
        }
 }
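The rewritten completion handler only touches the transmit queue when the device is not being torn down, and wakes the queue once the ring has enough free space again. A hedged sketch of that shape; the high-water threshold and the ring-space figure are passed in as placeholders for the driver's own helpers.

    #include <linux/netdevice.h>

    #define DEMO_HIWATER_PERCENT 20         /* illustrative threshold */

    static void demo_tx_complete(struct net_device *ndev, u16 q_idx,
                                 bool destroying, int queue_sends,
                                 unsigned int ring_avail_percent)
    {
            struct netdev_queue *txq;

            if (destroying)
                    return;         /* the drain path handles its own wakeup */

            txq = netdev_get_tx_queue(ndev, q_idx);
            if (netif_tx_queue_stopped(txq) &&
                (ring_avail_percent > DEMO_HIWATER_PERCENT || queue_sends < 1))
                    netif_tx_wake_queue(txq);
    }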
 
index cdb78eefab671496d5b6c406c34b25e95278d14b..f28c85d212cee1ecfe2cf5f70717d4091acc5197 100644 (file)
 
 #include "hyperv_net.h"
 
-#define RING_SIZE_MIN          64
+#define RING_SIZE_MIN  64
+#define RETRY_US_LO    5000
+#define RETRY_US_HI    10000
+#define RETRY_MAX      2000    /* >10 sec */
 
 #define LINKCHANGE_INT (2 * HZ)
 #define VF_TAKEOVER_INT (HZ / 10)
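RETRY_US_LO/RETRY_US_HI bound each sleep to 5-10 ms and RETRY_MAX caps the loop at 2000 iterations, so the drain wait gives up after roughly 10-20 seconds instead of using the earlier doubling backoff. A small sketch of this bounded-poll idiom, with the completion test supplied by the caller:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define DEMO_RETRY_US_LO 5000
    #define DEMO_RETRY_US_HI 10000
    #define DEMO_RETRY_MAX   2000           /* roughly 10-20 s in total */

    /* poll @done until it reports true or the retry budget runs out */
    static int demo_wait_until(bool (*done)(void *ctx), void *ctx)
    {
            unsigned int retry = 0;

            for (;;) {
                    if (done(ctx))
                            return 0;
                    if (++retry > DEMO_RETRY_MAX)
                            return -ETIMEDOUT;
                    usleep_range(DEMO_RETRY_US_LO, DEMO_RETRY_US_HI);
            }
    }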
@@ -89,15 +92,20 @@ static void netvsc_change_rx_flags(struct net_device *net, int change)
 static void netvsc_set_rx_mode(struct net_device *net)
 {
        struct net_device_context *ndev_ctx = netdev_priv(net);
-       struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
-       struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
+       struct net_device *vf_netdev;
+       struct netvsc_device *nvdev;
 
+       rcu_read_lock();
+       vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
        if (vf_netdev) {
                dev_uc_sync(vf_netdev, net);
                dev_mc_sync(vf_netdev, net);
        }
 
-       rndis_filter_update(nvdev);
+       nvdev = rcu_dereference(ndev_ctx->nvdev);
+       if (nvdev)
+               rndis_filter_update(nvdev);
+       rcu_read_unlock();
 }
 
 static int netvsc_open(struct net_device *net)
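netvsc_set_rx_mode() no longer assumes the RTNL is held, so the VF and netvsc pointers are now read under rcu_read_lock() with rcu_dereference() and NULL-checked before use. A minimal sketch of that read-side pattern with an illustrative structure:

    #include <linux/rcupdate.h>

    struct demo_state {
            int mode;
    };

    struct demo_ctx {
            struct demo_state __rcu *state; /* published with rcu_assign_pointer() */
    };

    static void demo_read_state(struct demo_ctx *ctx)
    {
            struct demo_state *st;

            rcu_read_lock();
            st = rcu_dereference(ctx->state);
            if (st) {
                    /* st may only be used inside the read-side critical section */
                    (void)st->mode;
            }
            rcu_read_unlock();
    }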
@@ -118,10 +126,8 @@ static int netvsc_open(struct net_device *net)
        }
 
        rdev = nvdev->extension;
-       if (!rdev->link_state) {
+       if (!rdev->link_state)
                netif_carrier_on(net);
-               netif_tx_wake_all_queues(net);
-       }
 
        if (vf_netdev) {
                /* Setting synthetic device up transparently sets
@@ -137,36 +143,25 @@ static int netvsc_open(struct net_device *net)
        return 0;
 }
 
-static int netvsc_close(struct net_device *net)
+static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
 {
-       struct net_device_context *net_device_ctx = netdev_priv(net);
-       struct net_device *vf_netdev
-               = rtnl_dereference(net_device_ctx->vf_netdev);
-       struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
-       int ret = 0;
-       u32 aread, i, msec = 10, retry = 0, retry_max = 20;
-       struct vmbus_channel *chn;
-
-       netif_tx_disable(net);
-
-       /* No need to close rndis filter if it is removed already */
-       if (!nvdev)
-               goto out;
-
-       ret = rndis_filter_close(nvdev);
-       if (ret != 0) {
-               netdev_err(net, "unable to close device (ret %d).\n", ret);
-               return ret;
-       }
+       unsigned int retry = 0;
+       int i;
 
        /* Ensure pending bytes in ring are read */
-       while (true) {
-               aread = 0;
+       for (;;) {
+               u32 aread = 0;
+
                for (i = 0; i < nvdev->num_chn; i++) {
-                       chn = nvdev->chan_table[i].channel;
+                       struct vmbus_channel *chn
+                               = nvdev->chan_table[i].channel;
+
                        if (!chn)
                                continue;
 
+                       /* make sure receive is not running now */
+                       napi_synchronize(&nvdev->chan_table[i].napi);
+
                        aread = hv_get_bytes_to_read(&chn->inbound);
                        if (aread)
                                break;
@@ -176,22 +171,40 @@ static int netvsc_close(struct net_device *net)
                                break;
                }
 
-               retry++;
-               if (retry > retry_max || aread == 0)
-                       break;
+               if (aread == 0)
+                       return 0;
 
-               msleep(msec);
+               if (++retry > RETRY_MAX)
+                       return -ETIMEDOUT;
 
-               if (msec < 1000)
-                       msec *= 2;
+               usleep_range(RETRY_US_LO, RETRY_US_HI);
        }
+}
 
-       if (aread) {
-               netdev_err(net, "Ring buffer not empty after closing rndis\n");
-               ret = -ETIMEDOUT;
+static int netvsc_close(struct net_device *net)
+{
+       struct net_device_context *net_device_ctx = netdev_priv(net);
+       struct net_device *vf_netdev
+               = rtnl_dereference(net_device_ctx->vf_netdev);
+       struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+       int ret;
+
+       netif_tx_disable(net);
+
+       /* No need to close rndis filter if it is removed already */
+       if (!nvdev)
+               return 0;
+
+       ret = rndis_filter_close(nvdev);
+       if (ret != 0) {
+               netdev_err(net, "unable to close device (ret %d).\n", ret);
+               return ret;
        }
 
-out:
+       ret = netvsc_wait_until_empty(nvdev);
+       if (ret)
+               netdev_err(net, "Ring buffer not empty after closing rndis\n");
+
        if (vf_netdev)
                dev_close(vf_netdev);
 
@@ -840,16 +853,81 @@ static void netvsc_get_channels(struct net_device *net,
        }
 }
 
+static int netvsc_detach(struct net_device *ndev,
+                        struct netvsc_device *nvdev)
+{
+       struct net_device_context *ndev_ctx = netdev_priv(ndev);
+       struct hv_device *hdev = ndev_ctx->device_ctx;
+       int ret;
+
+       /* Don't continue trying to set up sub channels */
+       if (cancel_work_sync(&nvdev->subchan_work))
+               nvdev->num_chn = 1;
+
+       /* If the device was up (receiving) then shut it down */
+       if (netif_running(ndev)) {
+               netif_tx_disable(ndev);
+
+               ret = rndis_filter_close(nvdev);
+               if (ret) {
+                       netdev_err(ndev,
+                                  "unable to close device (ret %d).\n", ret);
+                       return ret;
+               }
+
+               ret = netvsc_wait_until_empty(nvdev);
+               if (ret) {
+                       netdev_err(ndev,
+                                  "Ring buffer not empty after closing rndis\n");
+                       return ret;
+               }
+       }
+
+       netif_device_detach(ndev);
+
+       rndis_filter_device_remove(hdev, nvdev);
+
+       return 0;
+}
+
+static int netvsc_attach(struct net_device *ndev,
+                        struct netvsc_device_info *dev_info)
+{
+       struct net_device_context *ndev_ctx = netdev_priv(ndev);
+       struct hv_device *hdev = ndev_ctx->device_ctx;
+       struct netvsc_device *nvdev;
+       struct rndis_device *rdev;
+       int ret;
+
+       nvdev = rndis_filter_device_add(hdev, dev_info);
+       if (IS_ERR(nvdev))
+               return PTR_ERR(nvdev);
+
+       /* Note: enable and attach happen when sub-channels are set up */
+
+       netif_carrier_off(ndev);
+
+       if (netif_running(ndev)) {
+               ret = rndis_filter_open(nvdev);
+               if (ret)
+                       return ret;
+
+               rdev = nvdev->extension;
+               if (!rdev->link_state)
+                       netif_carrier_on(ndev);
+       }
+
+       return 0;
+}
+
 static int netvsc_set_channels(struct net_device *net,
                               struct ethtool_channels *channels)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
-       struct hv_device *dev = net_device_ctx->device_ctx;
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        unsigned int orig, count = channels->combined_count;
        struct netvsc_device_info device_info;
-       bool was_opened;
-       int ret = 0;
+       int ret;
 
        /* We do not support separate count for rx, tx, or other */
        if (count == 0 ||
@@ -866,9 +944,6 @@ static int netvsc_set_channels(struct net_device *net,
                return -EINVAL;
 
        orig = nvdev->num_chn;
-       was_opened = rndis_filter_opened(nvdev);
-       if (was_opened)
-               rndis_filter_close(nvdev);
 
        memset(&device_info, 0, sizeof(device_info));
        device_info.num_chn = count;
@@ -877,28 +952,17 @@ static int netvsc_set_channels(struct net_device *net,
        device_info.recv_sections = nvdev->recv_section_cnt;
        device_info.recv_section_size = nvdev->recv_section_size;
 
-       rndis_filter_device_remove(dev, nvdev);
+       ret = netvsc_detach(net, nvdev);
+       if (ret)
+               return ret;
 
-       nvdev = rndis_filter_device_add(dev, &device_info);
-       if (IS_ERR(nvdev)) {
-               ret = PTR_ERR(nvdev);
+       ret = netvsc_attach(net, &device_info);
+       if (ret) {
                device_info.num_chn = orig;
-               nvdev = rndis_filter_device_add(dev, &device_info);
-
-               if (IS_ERR(nvdev)) {
-                       netdev_err(net, "restoring channel setting failed: %ld\n",
-                                  PTR_ERR(nvdev));
-                       return ret;
-               }
+               if (netvsc_attach(net, &device_info))
+                       netdev_err(net, "restoring channel setting failed\n");
        }
 
-       if (was_opened)
-               rndis_filter_open(nvdev);
-
-       /* We may have missed link change notifications */
-       net_device_ctx->last_reconfig = 0;
-       schedule_delayed_work(&net_device_ctx->dwork, 0);
-
        return ret;
 }
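netvsc_set_channels() above, and the MTU and ring-size paths further down, now share the same detach, reconfigure, attach sequence with a best-effort rollback when the new attach fails. A self-contained sketch of that control flow; the demo_* names are stand-ins, not the driver's functions.

    #include <linux/printk.h>
    #include <linux/types.h>

    struct demo_cfg { unsigned int num_chn; };
    struct demo_dev { struct demo_cfg cfg; bool attached; };

    static int demo_detach(struct demo_dev *dev)
    {
            dev->attached = false;
            return 0;
    }

    static int demo_attach(struct demo_dev *dev, const struct demo_cfg *cfg)
    {
            dev->cfg = *cfg;
            dev->attached = true;
            return 0;
    }

    static int demo_reconfigure(struct demo_dev *dev, const struct demo_cfg *new_cfg)
    {
            struct demo_cfg old_cfg = dev->cfg;
            int ret;

            ret = demo_detach(dev);
            if (ret)
                    return ret;                     /* nothing changed yet */

            ret = demo_attach(dev, new_cfg);
            if (ret && demo_attach(dev, &old_cfg)) /* best-effort rollback */
                    pr_err("restoring previous configuration failed\n");

            return ret;
    }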
 
@@ -964,10 +1028,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        struct net_device_context *ndevctx = netdev_priv(ndev);
        struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
        struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
-       struct hv_device *hdev = ndevctx->device_ctx;
        int orig_mtu = ndev->mtu;
        struct netvsc_device_info device_info;
-       bool was_opened;
        int ret = 0;
 
        if (!nvdev || nvdev->destroy)
@@ -980,11 +1042,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
                        return ret;
        }
 
-       netif_device_detach(ndev);
-       was_opened = rndis_filter_opened(nvdev);
-       if (was_opened)
-               rndis_filter_close(nvdev);
-
        memset(&device_info, 0, sizeof(device_info));
        device_info.num_chn = nvdev->num_chn;
        device_info.send_sections = nvdev->send_section_cnt;
@@ -992,35 +1049,27 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        device_info.recv_sections = nvdev->recv_section_cnt;
        device_info.recv_section_size = nvdev->recv_section_size;
 
-       rndis_filter_device_remove(hdev, nvdev);
+       ret = netvsc_detach(ndev, nvdev);
+       if (ret)
+               goto rollback_vf;
 
        ndev->mtu = mtu;
 
-       nvdev = rndis_filter_device_add(hdev, &device_info);
-       if (IS_ERR(nvdev)) {
-               ret = PTR_ERR(nvdev);
-
-               /* Attempt rollback to original MTU */
-               ndev->mtu = orig_mtu;
-               nvdev = rndis_filter_device_add(hdev, &device_info);
-
-               if (vf_netdev)
-                       dev_set_mtu(vf_netdev, orig_mtu);
-
-               if (IS_ERR(nvdev)) {
-                       netdev_err(ndev, "restoring mtu failed: %ld\n",
-                                  PTR_ERR(nvdev));
-                       return ret;
-               }
-       }
+       ret = netvsc_attach(ndev, &device_info);
+       if (ret)
+               goto rollback;
 
-       if (was_opened)
-               rndis_filter_open(nvdev);
+       return 0;
 
-       netif_device_attach(ndev);
+rollback:
+       /* Attempt rollback to original MTU */
+       ndev->mtu = orig_mtu;
 
-       /* We may have missed link change notifications */
-       schedule_delayed_work(&ndevctx->dwork, 0);
+       if (netvsc_attach(ndev, &device_info))
+               netdev_err(ndev, "restoring mtu failed\n");
+rollback_vf:
+       if (vf_netdev)
+               dev_set_mtu(vf_netdev, orig_mtu);
 
        return ret;
 }
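When the synthetic device changes MTU it also keeps the paired VF in sync, and the error path rolls both back to the original value. A hedged sketch of that set-then-roll-back-on-failure shape; demo_apply_mtu() is a placeholder for the driver's own reconfiguration.

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    static int demo_sync_mtu(struct net_device *upper, struct net_device *lower,
                             int new_mtu,
                             int (*demo_apply_mtu)(struct net_device *, int))
    {
            int orig_mtu = upper->mtu;
            int ret;

            ASSERT_RTNL();          /* dev_set_mtu() requires the rtnl lock */

            if (lower) {
                    ret = dev_set_mtu(lower, new_mtu);
                    if (ret)
                            return ret;
            }

            ret = demo_apply_mtu(upper, new_mtu);
            if (ret && lower)
                    dev_set_mtu(lower, orig_mtu);   /* roll the lower device back */

            return ret;
    }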
@@ -1526,11 +1575,9 @@ static int netvsc_set_ringparam(struct net_device *ndev,
 {
        struct net_device_context *ndevctx = netdev_priv(ndev);
        struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
-       struct hv_device *hdev = ndevctx->device_ctx;
        struct netvsc_device_info device_info;
        struct ethtool_ringparam orig;
        u32 new_tx, new_rx;
-       bool was_opened;
        int ret = 0;
 
        if (!nvdev || nvdev->destroy)
@@ -1555,34 +1602,18 @@ static int netvsc_set_ringparam(struct net_device *ndev,
        device_info.recv_sections = new_rx;
        device_info.recv_section_size = nvdev->recv_section_size;
 
-       netif_device_detach(ndev);
-       was_opened = rndis_filter_opened(nvdev);
-       if (was_opened)
-               rndis_filter_close(nvdev);
-
-       rndis_filter_device_remove(hdev, nvdev);
-
-       nvdev = rndis_filter_device_add(hdev, &device_info);
-       if (IS_ERR(nvdev)) {
-               ret = PTR_ERR(nvdev);
+       ret = netvsc_detach(ndev, nvdev);
+       if (ret)
+               return ret;
 
+       ret = netvsc_attach(ndev, &device_info);
+       if (ret) {
                device_info.send_sections = orig.tx_pending;
                device_info.recv_sections = orig.rx_pending;
-               nvdev = rndis_filter_device_add(hdev, &device_info);
-               if (IS_ERR(nvdev)) {
-                       netdev_err(ndev, "restoring ringparam failed: %ld\n",
-                                  PTR_ERR(nvdev));
-                       return ret;
-               }
-       }
-
-       if (was_opened)
-               rndis_filter_open(nvdev);
-       netif_device_attach(ndev);
 
-       /* We may have missed link change notifications */
-       ndevctx->last_reconfig = 0;
-       schedule_delayed_work(&ndevctx->dwork, 0);
+               if (netvsc_attach(ndev, &device_info))
+                       netdev_err(ndev, "restoring ringparam failed\n");
+       }
 
        return ret;
 }
@@ -1846,8 +1877,12 @@ static void __netvsc_vf_setup(struct net_device *ndev,
 
        /* set multicast etc flags on VF */
        dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
+
+       /* sync address list from ndev to VF */
+       netif_addr_lock_bh(ndev);
        dev_uc_sync(vf_netdev, ndev);
        dev_mc_sync(vf_netdev, ndev);
+       netif_addr_unlock_bh(ndev);
 
        if (netif_running(ndev)) {
                ret = dev_open(vf_netdev);
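dev_uc_sync()/dev_mc_sync() walk the synthetic device's address lists, so the added lines take netif_addr_lock_bh() on it while the lists are copied to the VF. The locking pattern in isolation:

    #include <linux/netdevice.h>

    /* copy unicast/multicast filters from @upper to @lower under the address lock */
    static void demo_sync_addr_lists(struct net_device *upper,
                                     struct net_device *lower)
    {
            netif_addr_lock_bh(upper);
            dev_uc_sync(lower, upper);
            dev_mc_sync(lower, upper);
            netif_addr_unlock_bh(upper);
    }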
@@ -2063,8 +2098,8 @@ no_net:
 static int netvsc_remove(struct hv_device *dev)
 {
        struct net_device_context *ndev_ctx;
-       struct net_device *vf_netdev;
-       struct net_device *net;
+       struct net_device *vf_netdev, *net;
+       struct netvsc_device *nvdev;
 
        net = hv_get_drvdata(dev);
        if (net == NULL) {
@@ -2074,10 +2109,14 @@ static int netvsc_remove(struct hv_device *dev)
 
        ndev_ctx = netdev_priv(net);
 
-       netif_device_detach(net);
-
        cancel_delayed_work_sync(&ndev_ctx->dwork);
 
+       rcu_read_lock();
+       nvdev = rcu_dereference(ndev_ctx->nvdev);
+
+       if (nvdev)
+               cancel_work_sync(&nvdev->subchan_work);
+
        /*
         * Call to the vsc driver to let it know that the device is being
         * removed. Also blocks mtu and channel changes.
@@ -2087,11 +2126,13 @@ static int netvsc_remove(struct hv_device *dev)
        if (vf_netdev)
                netvsc_unregister_vf(vf_netdev);
 
+       if (nvdev)
+               rndis_filter_device_remove(dev, nvdev);
+
        unregister_netdevice(net);
 
-       rndis_filter_device_remove(dev,
-                                  rtnl_dereference(ndev_ctx->nvdev));
        rtnl_unlock();
+       rcu_read_unlock();
 
        hv_set_drvdata(dev, NULL);
 
index 8927c483c21738a3f9b7885c35391cff8f052f08..a6ec41c399d6c980b86ac1242179e99de542e5ea 100644 (file)
@@ -264,13 +264,23 @@ static void rndis_set_link_state(struct rndis_device *rdev,
        }
 }
 
-static void rndis_filter_receive_response(struct rndis_device *dev,
-                                      struct rndis_message *resp)
+static void rndis_filter_receive_response(struct net_device *ndev,
+                                         struct netvsc_device *nvdev,
+                                         const struct rndis_message *resp)
 {
+       struct rndis_device *dev = nvdev->extension;
        struct rndis_request *request = NULL;
        bool found = false;
        unsigned long flags;
-       struct net_device *ndev = dev->ndev;
+
+       /* This should never happen; it means a control message
+        * response was received after the device was removed.
+        */
+       if (dev->state == RNDIS_DEV_UNINITIALIZED) {
+               netdev_err(ndev,
+                          "got rndis message uninitialized\n");
+               return;
+       }
 
        spin_lock_irqsave(&dev->request_lock, flags);
        list_for_each_entry(request, &dev->req_list, list_ent) {
@@ -352,7 +362,6 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
 
 static int rndis_filter_receive_data(struct net_device *ndev,
                                     struct netvsc_device *nvdev,
-                                    struct rndis_device *dev,
                                     struct rndis_message *msg,
                                     struct vmbus_channel *channel,
                                     void *data, u32 data_buflen)
@@ -372,7 +381,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
         * should be the data packet size plus the trailer padding size
         */
        if (unlikely(data_buflen < rndis_pkt->data_len)) {
-               netdev_err(dev->ndev, "rndis message buffer "
+               netdev_err(ndev, "rndis message buffer "
                           "overflow detected (got %u, min %u)"
                           "...dropping this message!\n",
                           data_buflen, rndis_pkt->data_len);
@@ -400,35 +409,20 @@ int rndis_filter_receive(struct net_device *ndev,
                         void *data, u32 buflen)
 {
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
-       struct rndis_device *rndis_dev = net_dev->extension;
        struct rndis_message *rndis_msg = data;
 
-       /* Make sure the rndis device state is initialized */
-       if (unlikely(!rndis_dev)) {
-               netif_dbg(net_device_ctx, rx_err, ndev,
-                         "got rndis message but no rndis device!\n");
-               return NVSP_STAT_FAIL;
-       }
-
-       if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
-               netif_dbg(net_device_ctx, rx_err, ndev,
-                         "got rndis message uninitialized\n");
-               return NVSP_STAT_FAIL;
-       }
-
        if (netif_msg_rx_status(net_device_ctx))
                dump_rndis_message(ndev, rndis_msg);
 
        switch (rndis_msg->ndis_msg_type) {
        case RNDIS_MSG_PACKET:
-               return rndis_filter_receive_data(ndev, net_dev,
-                                                rndis_dev, rndis_msg,
+               return rndis_filter_receive_data(ndev, net_dev, rndis_msg,
                                                 channel, data, buflen);
        case RNDIS_MSG_INIT_C:
        case RNDIS_MSG_QUERY_C:
        case RNDIS_MSG_SET_C:
                /* completion msgs */
-               rndis_filter_receive_response(rndis_dev, rndis_msg);
+               rndis_filter_receive_response(ndev, net_dev, rndis_msg);
                break;
 
        case RNDIS_MSG_INDICATE:
@@ -825,13 +819,15 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
        struct rndis_set_request *set;
        int ret;
 
+       if (dev->filter == new_filter)
+               return 0;
+
        request = get_rndis_request(dev, RNDIS_MSG_SET,
                        RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
                        sizeof(u32));
        if (!request)
                return -ENOMEM;
 
-
        /* Setup the rndis set */
        set = &request->request_msg.msg.set_req;
        set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
@@ -842,8 +838,10 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
               &new_filter, sizeof(u32));
 
        ret = rndis_filter_send_request(dev, request);
-       if (ret == 0)
+       if (ret == 0) {
                wait_for_completion(&request->wait_event);
+               dev->filter = new_filter;
+       }
 
        put_rndis_request(dev, request);
 
@@ -861,9 +859,9 @@ static void rndis_set_multicast(struct work_struct *w)
                filter = NDIS_PACKET_TYPE_PROMISCUOUS;
        } else {
                if (flags & IFF_ALLMULTI)
-                       flags |= NDIS_PACKET_TYPE_ALL_MULTICAST;
+                       filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
                if (flags & IFF_BROADCAST)
-                       flags |= NDIS_PACKET_TYPE_BROADCAST;
+                       filter |= NDIS_PACKET_TYPE_BROADCAST;
        }
 
        rndis_filter_set_packet_filter(rdev, filter);
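The hunk above fixes a bitmask bug: the old code OR'ed the ALL_MULTICAST and BROADCAST bits into the interface flags it was testing rather than into the filter being built, so those packet types were never enabled. The corrected construction in isolation; the DEMO_PKT_* values are placeholders for the NDIS packet-filter bits.

    #include <linux/netdevice.h>
    #include <linux/types.h>

    /* placeholder bits standing in for the NDIS packet-filter flags */
    #define DEMO_PKT_DIRECTED       0x0001
    #define DEMO_PKT_ALL_MULTICAST  0x0004
    #define DEMO_PKT_BROADCAST      0x0008
    #define DEMO_PKT_PROMISCUOUS    0x0020

    static u32 demo_build_filter(unsigned int flags)
    {
            u32 filter = DEMO_PKT_DIRECTED;

            if (flags & IFF_PROMISC) {
                    filter = DEMO_PKT_PROMISCUOUS;
            } else {
                    if (flags & IFF_ALLMULTI)
                            filter |= DEMO_PKT_ALL_MULTICAST;   /* into filter, not flags */
                    if (flags & IFF_BROADCAST)
                            filter |= DEMO_PKT_BROADCAST;
            }

            return filter;
    }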
@@ -1120,6 +1118,7 @@ void rndis_set_subchannel(struct work_struct *w)
        for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
                ndev_ctx->tx_table[i] = i % nvdev->num_chn;
 
+       netif_device_attach(ndev);
        rtnl_unlock();
        return;
 
@@ -1130,6 +1129,8 @@ failed:
 
        nvdev->max_chn = 1;
        nvdev->num_chn = 1;
+
+       netif_device_attach(ndev);
 unlock:
        rtnl_unlock();
 }
@@ -1332,6 +1333,10 @@ out:
                net_device->num_chn = 1;
        }
 
+       /* No sub channels, device is ready */
+       if (net_device->num_chn == 1)
+               netif_device_attach(net);
+
        return net_device;
 
 err_dev_remv:
@@ -1344,16 +1349,12 @@ void rndis_filter_device_remove(struct hv_device *dev,
 {
        struct rndis_device *rndis_dev = net_dev->extension;
 
-       /* Don't try and setup sub channels if about to halt */
-       cancel_work_sync(&net_dev->subchan_work);
-
        /* Halt and release the rndis device */
        rndis_filter_halt_device(rndis_dev);
 
        net_dev->extension = NULL;
 
        netvsc_device_remove(dev);
-       kfree(rndis_dev);
 }
 
 int rndis_filter_open(struct netvsc_device *nvdev)
@@ -1371,10 +1372,3 @@ int rndis_filter_close(struct netvsc_device *nvdev)
 
        return rndis_filter_close_device(nvdev->extension);
 }
-
-bool rndis_filter_opened(const struct netvsc_device *nvdev)
-{
-       const struct rndis_device *dev = nvdev->extension;
-
-       return dev->state == RNDIS_DEV_DATAINITIALIZED;
-}
index 7de88b33d5b96d7f18a5f7c242a54c935b587086..9cbb0c8a896aff9d192850ad15734dc2872c0dfb 100644 (file)
@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 
        err = netdev_upper_dev_link(real_dev, dev, extack);
        if (err < 0)
-               goto unregister;
+               goto put_dev;
 
        /* need to be already registered so that ->init has run and
         * the MAC addr is set
@@ -3316,7 +3316,8 @@ del_dev:
        macsec_del_dev(macsec);
 unlink:
        netdev_upper_dev_unlink(real_dev, dev);
-unregister:
+put_dev:
+       dev_put(real_dev);
        unregister_netdevice(dev);
        return err;
 }
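The macsec error path now drops its reference on the underlying device before unregistering, keeping the hold/put count balanced when linking to the lower device fails. A small, illustrative sketch of keeping dev_hold()/dev_put() balanced across an error path; the link callback is a stand-in.

    #include <linux/netdevice.h>

    /* hold a reference on @lower while linking; drop it again if linking fails */
    static int demo_link_lower(struct net_device *upper, struct net_device *lower,
                               int (*demo_do_link)(struct net_device *,
                                                   struct net_device *))
    {
            int err;

            dev_hold(lower);

            err = demo_do_link(upper, lower);
            if (err) {
                    dev_put(lower);         /* error path must undo dev_hold() */
                    return err;
            }

            return 0;
    }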
index 8fc02d9db3d011ee1c193b9cdfb8c26e042e6f3e..725f4b4afc6da946e967d4070b9cf76143360332 100644 (file)
@@ -1036,7 +1036,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
        lowerdev_features &= (features | ~NETIF_F_LRO);
        features = netdev_increment_features(lowerdev_features, features, mask);
        features |= ALWAYS_ON_FEATURES;
-       features &= ~NETIF_F_NETNS_LOCAL;
+       features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES);
 
        return features;
 }
index 171010eb4d9c5c36da0be9888fb75cc54e136768..5ad130c3da43c869b39dc8ec83ec6795aa82be7d 100644 (file)
@@ -341,8 +341,8 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
-               memcpy(data + i * ETH_GSTRING_LEN,
-                      bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
+               strlcpy(data + i * ETH_GSTRING_LEN,
+                       bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
 }
 EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
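This ethtool string table (and the Marvell and Micrel tables in the hunks below) switches from memcpy() to strlcpy(): the statistic names are NUL-terminated literals shorter than ETH_GSTRING_LEN, so copying a fixed 32 bytes read past the end of the source strings. A minimal sketch of the fixed pattern with an illustrative table:

    #include <linux/ethtool.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    /* illustrative statistic names; each driver uses its own table */
    static const char * const demo_stat_names[] = {
            "rx_packets",
            "tx_packets",
    };

    static void demo_get_strings(u8 *data)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(demo_stat_names); i++)
                    strlcpy(data + i * ETH_GSTRING_LEN,
                            demo_stat_names[i], ETH_GSTRING_LEN);
    }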
 
index 22d9bc9c33a4bce864505babe6b3060348542a62..0e0978d8a0eb332bc488bcb1be0f916af784cfa1 100644 (file)
@@ -1452,8 +1452,8 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
-               memcpy(data + i * ETH_GSTRING_LEN,
-                      marvell_hw_stats[i].string, ETH_GSTRING_LEN);
+               strlcpy(data + i * ETH_GSTRING_LEN,
+                       marvell_hw_stats[i].string, ETH_GSTRING_LEN);
        }
 }
 
index 0f45310300f667bab84655d301d64eb8196ae128..f41b224a9cdbf49ccf82d72b5052686548c005a7 100644 (file)
@@ -635,25 +635,6 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev)
        return 0;
 }
 
-/* This routine returns -1 as an indication to the caller that the
- * Micrel ksz9021 10/100/1000 PHY does not support standard IEEE
- * MMD extended PHY registers.
- */
-static int
-ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum)
-{
-       return -1;
-}
-
-/* This routine does nothing since the Micrel ksz9021 does not support
- * standard IEEE MMD extended PHY registers.
- */
-static int
-ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum, u16 val)
-{
-       return -1;
-}
-
 static int kszphy_get_sset_count(struct phy_device *phydev)
 {
        return ARRAY_SIZE(kszphy_hw_stats);
@@ -664,8 +645,8 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) {
-               memcpy(data + i * ETH_GSTRING_LEN,
-                      kszphy_hw_stats[i].string, ETH_GSTRING_LEN);
+               strlcpy(data + i * ETH_GSTRING_LEN,
+                       kszphy_hw_stats[i].string, ETH_GSTRING_LEN);
        }
 }
 
@@ -946,8 +927,8 @@ static struct phy_driver ksphy_driver[] = {
        .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
-       .read_mmd       = ksz9021_rd_mmd_phyreg,
-       .write_mmd      = ksz9021_wr_mmd_phyreg,
+       .read_mmd       = genphy_read_mmd_unsupported,
+       .write_mmd      = genphy_write_mmd_unsupported,
 }, {
        .phy_id         = PHY_ID_KSZ9031,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
index a6f924fee5840ddee83a4159f59f3231dbf49655..9aabfa1a455a89e364fe38e8d99b0d1a5d3e9944 100644 (file)
@@ -617,6 +617,77 @@ static void phy_error(struct phy_device *phydev)
        phy_trigger_machine(phydev, false);
 }
 
+/**
+ * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
+ * @phydev: target phy_device struct
+ */
+static int phy_disable_interrupts(struct phy_device *phydev)
+{
+       int err;
+
+       /* Disable PHY interrupts */
+       err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
+       if (err)
+               goto phy_err;
+
+       /* Clear the interrupt */
+       err = phy_clear_interrupt(phydev);
+       if (err)
+               goto phy_err;
+
+       return 0;
+
+phy_err:
+       phy_error(phydev);
+
+       return err;
+}
+
+/**
+ * phy_change - Called by the phy_interrupt to handle PHY changes
+ * @phydev: phy_device struct that interrupted
+ */
+static irqreturn_t phy_change(struct phy_device *phydev)
+{
+       if (phy_interrupt_is_valid(phydev)) {
+               if (phydev->drv->did_interrupt &&
+                   !phydev->drv->did_interrupt(phydev))
+                       return IRQ_NONE;
+
+               if (phydev->state == PHY_HALTED)
+                       if (phy_disable_interrupts(phydev))
+                               goto phy_err;
+       }
+
+       mutex_lock(&phydev->lock);
+       if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
+               phydev->state = PHY_CHANGELINK;
+       mutex_unlock(&phydev->lock);
+
+       /* reschedule state queue work to run as soon as possible */
+       phy_trigger_machine(phydev, true);
+
+       if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
+               goto phy_err;
+       return IRQ_HANDLED;
+
+phy_err:
+       phy_error(phydev);
+       return IRQ_NONE;
+}
+
+/**
+ * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
+ * @work: work_struct that describes the work to be done
+ */
+void phy_change_work(struct work_struct *work)
+{
+       struct phy_device *phydev =
+         &