Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
author Kalle Valo <kvalo@codeaurora.org>
Wed, 25 Apr 2018 08:30:54 +0000 (11:30 +0300)
committer Kalle Valo <kvalo@codeaurora.org>
Wed, 25 Apr 2018 08:30:54 +0000 (11:30 +0300)
ath.git patches for 4.18. Major changes:

ath10k

* enable temperature reads for QCA6174 and QCA9377

* add firmware memory dump support for QCA9984

* continue adding WCN3990 support via SNOC bus

39 files changed:
Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
drivers/net/wireless/ath/ath10k/Kconfig
drivers/net/wireless/ath/ath10k/Makefile
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/hif.h
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htc.h
drivers/net/wireless/ath/ath10k/htt.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/sdio.c
drivers/net/wireless/ath/ath10k/snoc.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/snoc.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/wmi-ops.h
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi-tlv.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath10k/wow.c
drivers/net/wireless/ath/ath6kl/debug.c
drivers/net/wireless/ath/ath9k/dfs.c
drivers/net/wireless/ath/wcn36xx/dxe.c
drivers/net/wireless/ath/wcn36xx/dxe.h
drivers/net/wireless/ath/wcn36xx/hal.h
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wcn36xx/smd.c
drivers/net/wireless/ath/wcn36xx/smd.h
drivers/net/wireless/ath/wcn36xx/txrx.c
drivers/net/wireless/ath/wcn36xx/wcn36xx.h
drivers/net/wireless/ath/wil6210/main.c

index 3d2a031217da0851acad1ce8cc1716b376801adc..7fd4e8ce4149a9663ec93f95c9ce5c5dc4741aca 100644 (file)
@@ -4,6 +4,7 @@ Required properties:
 - compatible: Should be one of the following:
        * "qcom,ath10k"
        * "qcom,ipq4019-wifi"
+       * "qcom,wcn3990-wifi"
 
 PCI based devices uses compatible string "qcom,ath10k" and takes calibration
 data along with board specific data via "qcom,ath10k-calibration-data".
@@ -18,8 +19,12 @@ In general, entry "qcom,ath10k-pre-calibration-data" and
 "qcom,ath10k-calibration-data" conflict with each other and only one
 can be provided per device.
 
+SNOC based devices (i.e. wcn3990) use the compatible string "qcom,wcn3990-wifi".
+
 Optional properties:
 - reg: Address and length of the register set for the device.
+- reg-names: Must include the list of following reg names,
+            "membase"
 - resets: Must contain an entry for each entry in reset-names.
           See ../reset/reset.txt for details.
 - reset-names: Must include the list of following reset names,
@@ -49,6 +54,8 @@ Optional properties:
                                 hw versions.
 - qcom,ath10k-pre-calibration-data : pre calibration data as an array,
                                     the length can vary between hw versions.
+- <supply-name>-supply: phandle to the regulator device tree node.
+                          The optional "supply-name" is "vdd-0.8-cx-mx".
 
 Example (to supply the calibration data alone):
 
@@ -119,3 +126,27 @@ wifi0: wifi@a000000 {
        qcom,msi_base = <0x40>;
        qcom,ath10k-pre-calibration-data = [ 01 02 03 ... ];
 };
+
+Example (to supply wcn3990 SoC wifi block details):
+
+wifi@18800000 {
+               compatible = "qcom,wcn3990-wifi";
+               reg = <0x18800000 0x800000>;
+               reg-names = "membase";
+               clocks = <&clock_gcc clk_aggre2_noc_clk>;
+               clock-names = "smmu_aggre2_noc_clk";
+               interrupts =
+                          <0 130 0 /* CE0 */ >,
+                          <0 131 0 /* CE1 */ >,
+                          <0 132 0 /* CE2 */ >,
+                          <0 133 0 /* CE3 */ >,
+                          <0 134 0 /* CE4 */ >,
+                          <0 135 0 /* CE5 */ >,
+                          <0 136 0 /* CE6 */ >,
+                          <0 137 0 /* CE7 */ >,
+                          <0 138 0 /* CE8 */ >,
+                          <0 139 0 /* CE9 */ >,
+                          <0 140 0 /* CE10 */ >,
+                          <0 141 0 /* CE11 */ >;
+               vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+};
index deb5ae21a559bef7c24e48dff919b8f7bfec541c..84f071ac0d84d5c001ada50134b2ba3f79d9d71b 100644 (file)
@@ -4,12 +4,16 @@ config ATH10K
        select ATH_COMMON
        select CRC32
        select WANT_DEV_COREDUMP
+       select ATH10K_CE
         ---help---
           This module adds support for wireless adapters based on
           Atheros IEEE 802.11ac family of chipsets.
 
           If you choose to build a module, it'll be called ath10k.
 
+config ATH10K_CE
+       bool
+
 config ATH10K_PCI
        tristate "Atheros ath10k PCI support"
        depends on ATH10K && PCI
@@ -36,6 +40,14 @@ config ATH10K_USB
          This module adds experimental support for USB bus. Currently
          work in progress and will not fully work.
 
+config ATH10K_SNOC
+        tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
+        depends on ATH10K && ARCH_QCOM
+        ---help---
+          This module adds support for the integrated WCN3990 chip connected
+          to the system NOC (SNOC). Currently work in progress and will not
+          fully work.
+
 config ATH10K_DEBUG
        bool "Atheros ath10k debugging"
        depends on ATH10K
index 6739ac26fd2979f8f49f3b320bd6ee3004a89bac..44d60a61b242dad031ccda4356180ab31a52df1f 100644 (file)
@@ -22,10 +22,10 @@ ath10k_core-$(CONFIG_THERMAL) += thermal.o
 ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
 ath10k_core-$(CONFIG_PM) += wow.o
 ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
+ath10k_core-$(CONFIG_ATH10K_CE) += ce.o
 
 obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
-ath10k_pci-y += pci.o \
-               ce.o
+ath10k_pci-y += pci.o
 
 ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o
 
@@ -35,5 +35,8 @@ ath10k_sdio-y += sdio.o
 obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o
 ath10k_usb-y += usb.o
 
+obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o
+ath10k_snoc-y += snoc.o
+
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
index b9def7bace2fb8a2960d9928f4976f727288ed22..3b96a43fbda41c12701de113db010acb57297cbe 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
  * the buffer is sent/received.
  */
 
+static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
+                                       struct ath10k_ce_pipe *ce_state)
+{
+       u32 ce_id = ce_state->id;
+       u32 addr = 0;
+
+       switch (ce_id) {
+       case 0:
+               addr = 0x00032000;
+               break;
+       case 3:
+               addr = 0x0003200C;
+               break;
+       case 4:
+               addr = 0x00032010;
+               break;
+       case 5:
+               addr = 0x00032014;
+               break;
+       case 7:
+               addr = 0x0003201C;
+               break;
+       default:
+               ath10k_warn(ar, "invalid CE id: %d", ce_id);
+               break;
+       }
+       return addr;
+}
+
+static inline u32 shadow_dst_wr_ind_addr(struct ath10k *ar,
+                                        struct ath10k_ce_pipe *ce_state)
+{
+       u32 ce_id = ce_state->id;
+       u32 addr = 0;
+
+       switch (ce_id) {
+       case 1:
+               addr = 0x00032034;
+               break;
+       case 2:
+               addr = 0x00032038;
+               break;
+       case 5:
+               addr = 0x00032044;
+               break;
+       case 7:
+               addr = 0x0003204C;
+               break;
+       case 8:
+               addr = 0x00032050;
+               break;
+       case 9:
+               addr = 0x00032054;
+               break;
+       case 10:
+               addr = 0x00032058;
+               break;
+       case 11:
+               addr = 0x0003205C;
+               break;
+       default:
+               ath10k_warn(ar, "invalid CE id: %d", ce_id);
+               break;
+       }
+
+       return addr;
+}
+
 static inline unsigned int
 ath10k_set_ring_byte(unsigned int offset,
                     struct ath10k_hw_ce_regs_addr_map *addr_map)
@@ -116,11 +185,46 @@ static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
                                ar->hw_ce_regs->sr_wr_index_addr);
 }
 
+static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
+                                                        u32 ce_id)
+{
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+       return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
+}
+
 static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
                                                    u32 ce_ctrl_addr)
 {
-       return ath10k_ce_read32(ar, ce_ctrl_addr +
-                               ar->hw_ce_regs->current_srri_addr);
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+       u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
+       struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+       u32 index;
+
+       if (ar->hw_params.rri_on_ddr &&
+           (ce_state->attr_flags & CE_ATTR_DIS_INTR))
+               index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
+       else
+               index = ath10k_ce_read32(ar, ce_ctrl_addr +
+                                        ar->hw_ce_regs->current_srri_addr);
+
+       return index;
+}
+
+static inline void
+ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
+                                         struct ath10k_ce_pipe *ce_state,
+                                         unsigned int value)
+{
+       ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
+}
+
+static inline void
+ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
+                                          struct ath10k_ce_pipe *ce_state,
+                                          unsigned int value)
+{
+       ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value);
 }
 
 static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
@@ -181,11 +285,31 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
                          ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
 }
 
+static inline
+       u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
+{
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+       return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
+               CE_DDR_RRI_MASK;
+}
+
 static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
                                                     u32 ce_ctrl_addr)
 {
-       return ath10k_ce_read32(ar, ce_ctrl_addr +
-                               ar->hw_ce_regs->current_drri_addr);
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+       u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
+       struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+       u32 index;
+
+       if (ar->hw_params.rri_on_ddr &&
+           (ce_state->attr_flags & CE_ATTR_DIS_INTR))
+               index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
+       else
+               index = ath10k_ce_read32(ar, ce_ctrl_addr +
+                                        ar->hw_ce_regs->current_drri_addr);
+
+       return index;
 }
 
 static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
@@ -376,8 +500,14 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
        write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
 
        /* WORKAROUND */
-       if (!(flags & CE_SEND_FLAG_GATHER))
-               ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+       if (!(flags & CE_SEND_FLAG_GATHER)) {
+               if (ar->hw_params.shadow_reg_support)
+                       ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
+                                                                 write_index);
+               else
+                       ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
+                                                          write_index);
+       }
 
        src_ring->write_index = write_index;
 exit:
@@ -395,7 +525,7 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        struct ce_desc_64 *desc, sdesc;
        unsigned int nentries_mask = src_ring->nentries_mask;
-       unsigned int sw_index = src_ring->sw_index;
+       unsigned int sw_index;
        unsigned int write_index = src_ring->write_index;
        u32 ctrl_addr = ce_state->ctrl_addr;
        __le32 *addr;
@@ -409,6 +539,11 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
                ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
                            __func__, nbytes, ce_state->src_sz_max);
 
+       if (ar->hw_params.rri_on_ddr)
+               sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
+       else
+               sw_index = src_ring->sw_index;
+
        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) <= 0)) {
                ret = -ENOSR;
@@ -464,6 +599,7 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
        return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
                                    buffer, nbytes, transfer_id, flags);
 }
+EXPORT_SYMBOL(ath10k_ce_send_nolock);
 
 void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
 {
@@ -491,6 +627,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
 
        src_ring->per_transfer_context[src_ring->write_index] = NULL;
 }
+EXPORT_SYMBOL(__ath10k_ce_send_revert);
 
 int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
                   void *per_transfer_context,
@@ -510,6 +647,7 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 
        return ret;
 }
+EXPORT_SYMBOL(ath10k_ce_send);
 
 int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
 {
@@ -525,6 +663,7 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
 
        return delta;
 }
+EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);
 
 int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
 {
@@ -539,6 +678,7 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
 
        return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
 }
+EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);
 
 static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
                                   dma_addr_t paddr)
@@ -615,13 +755,14 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
        /* Prevent CE ring stuck issue that will occur when ring is full.
         * Make sure that write index is 1 less than read index.
         */
-       if ((cur_write_idx + nentries)  == dest_ring->sw_index)
+       if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
                nentries -= 1;
 
        write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
        ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
        dest_ring->write_index = write_index;
 }
+EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);
 
 int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
                          dma_addr_t paddr)
@@ -636,6 +777,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
 
        return ret;
 }
+EXPORT_SYMBOL(ath10k_ce_rx_post_buf);
 
 /*
  * Guts of ath10k_ce_completed_recv_next.
@@ -748,6 +890,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
                                                            per_transfer_ctx,
                                                            nbytesp);
 }
+EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock);
 
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
@@ -766,6 +909,7 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 
        return ret;
 }
+EXPORT_SYMBOL(ath10k_ce_completed_recv_next);
 
 static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
                                       void **per_transfer_contextp,
@@ -882,6 +1026,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
                                                  per_transfer_contextp,
                                                  bufferp);
 }
+EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
 
 /*
  * Guts of ath10k_ce_completed_send_next.
@@ -915,7 +1060,10 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
                src_ring->hw_index = read_index;
        }
 
-       read_index = src_ring->hw_index;
+       if (ar->hw_params.rri_on_ddr)
+               read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+       else
+               read_index = src_ring->hw_index;
 
        if (read_index == sw_index)
                return -EIO;
@@ -936,6 +1084,7 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
 
        return 0;
 }
+EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
 
 static void ath10k_ce_extract_desc_data(struct ath10k *ar,
                                        struct ath10k_ce_ring *src_ring,
@@ -1025,6 +1174,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 
        return ret;
 }
+EXPORT_SYMBOL(ath10k_ce_cancel_send_next);
 
 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp)
@@ -1040,6 +1190,7 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
 
        return ret;
 }
+EXPORT_SYMBOL(ath10k_ce_completed_send_next);
 
 /*
  * Guts of interrupt handler for per-engine interrupts on a particular CE.
@@ -1078,6 +1229,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
 
        spin_unlock_bh(&ce->ce_lock);
 }
+EXPORT_SYMBOL(ath10k_ce_per_engine_service);
 
 /*
  * Handler for per-engine interrupts on ALL active CEs.
@@ -1102,6 +1254,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
                ath10k_ce_per_engine_service(ar, ce_id);
        }
 }
+EXPORT_SYMBOL(ath10k_ce_per_engine_service_any);
 
 /*
  * Adjust interrupts for the copy complete handler.
@@ -1139,6 +1292,7 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)
 
        return 0;
 }
+EXPORT_SYMBOL(ath10k_ce_disable_interrupts);
 
 void ath10k_ce_enable_interrupts(struct ath10k *ar)
 {
@@ -1154,6 +1308,7 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar)
                ath10k_ce_per_engine_handler_adjust(ce_state);
        }
 }
+EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
 
 static int ath10k_ce_init_src_ring(struct ath10k *ar,
                                   unsigned int ce_id,
@@ -1234,6 +1389,22 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
        return 0;
 }
 
+static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
+                                      struct ath10k_ce_ring *src_ring,
+                                      u32 nentries)
+{
+       src_ring->shadow_base_unaligned = kcalloc(nentries,
+                                                 sizeof(struct ce_desc),
+                                                 GFP_KERNEL);
+       if (!src_ring->shadow_base_unaligned)
+               return -ENOMEM;
+
+       src_ring->shadow_base = (struct ce_desc *)
+                       PTR_ALIGN(src_ring->shadow_base_unaligned,
+                                 CE_DESC_RING_ALIGN);
+       return 0;
+}
+
 static struct ath10k_ce_ring *
 ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
                         const struct ce_attr *attr)
@@ -1241,6 +1412,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
        struct ath10k_ce_ring *src_ring;
        u32 nentries = attr->src_nentries;
        dma_addr_t base_addr;
+       int ret;
 
        nentries = roundup_pow_of_two(nentries);
 
@@ -1277,6 +1449,19 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
                        ALIGN(src_ring->base_addr_ce_space_unaligned,
                              CE_DESC_RING_ALIGN);
 
+       if (ar->hw_params.shadow_reg_support) {
+               ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
+               if (ret) {
+                       dma_free_coherent(ar->dev,
+                                         (nentries * sizeof(struct ce_desc) +
+                                          CE_DESC_RING_ALIGN),
+                                         src_ring->base_addr_owner_space_unaligned,
+                                         base_addr);
+                       kfree(src_ring);
+                       return ERR_PTR(ret);
+               }
+       }
+
        return src_ring;
 }
 
@@ -1287,6 +1472,7 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
        struct ath10k_ce_ring *src_ring;
        u32 nentries = attr->src_nentries;
        dma_addr_t base_addr;
+       int ret;
 
        nentries = roundup_pow_of_two(nentries);
 
@@ -1322,6 +1508,19 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
                        ALIGN(src_ring->base_addr_ce_space_unaligned,
                              CE_DESC_RING_ALIGN);
 
+       if (ar->hw_params.shadow_reg_support) {
+               ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
+               if (ret) {
+                       dma_free_coherent(ar->dev,
+                                         (nentries * sizeof(struct ce_desc) +
+                                          CE_DESC_RING_ALIGN),
+                                         src_ring->base_addr_owner_space_unaligned,
+                                         base_addr);
+                       kfree(src_ring);
+                       return ERR_PTR(ret);
+               }
+       }
+
        return src_ring;
 }
 
@@ -1454,6 +1653,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
 
        return 0;
 }
+EXPORT_SYMBOL(ath10k_ce_init_pipe);
 
 static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
 {
@@ -1479,6 +1679,7 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
        ath10k_ce_deinit_src_ring(ar, ce_id);
        ath10k_ce_deinit_dest_ring(ar, ce_id);
 }
+EXPORT_SYMBOL(ath10k_ce_deinit_pipe);
 
 static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 {
@@ -1486,6 +1687,8 @@ static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
        struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
 
        if (ce_state->src_ring) {
+               if (ar->hw_params.shadow_reg_support)
+                       kfree(ce_state->src_ring->shadow_base_unaligned);
                dma_free_coherent(ar->dev,
                                  (ce_state->src_ring->nentries *
                                   sizeof(struct ce_desc) +
@@ -1515,6 +1718,8 @@ static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
        struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
 
        if (ce_state->src_ring) {
+               if (ar->hw_params.shadow_reg_support)
+                       kfree(ce_state->src_ring->shadow_base_unaligned);
                dma_free_coherent(ar->dev,
                                  (ce_state->src_ring->nentries *
                                   sizeof(struct ce_desc_64) +
@@ -1545,6 +1750,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 
        ce_state->ops->ce_free_pipe(ar, ce_id);
 }
+EXPORT_SYMBOL(ath10k_ce_free_pipe);
 
 void ath10k_ce_dump_registers(struct ath10k *ar,
                              struct ath10k_fw_crash_data *crash_data)
@@ -1584,6 +1790,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
 
        spin_unlock_bh(&ce->ce_lock);
 }
+EXPORT_SYMBOL(ath10k_ce_dump_registers);
 
 static const struct ath10k_ce_ops ce_ops = {
        .ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
@@ -1680,3 +1887,47 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
 
        return 0;
 }
+EXPORT_SYMBOL(ath10k_ce_alloc_pipe);
+
+void ath10k_ce_alloc_rri(struct ath10k *ar)
+{
+       int i;
+       u32 value;
+       u32 ctrl1_regs;
+       u32 ce_base_addr;
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+       ce->vaddr_rri = dma_alloc_coherent(ar->dev,
+                                          (CE_COUNT * sizeof(u32)),
+                                          &ce->paddr_rri, GFP_KERNEL);
+
+       if (!ce->vaddr_rri)
+               return;
+
+       ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low,
+                         lower_32_bits(ce->paddr_rri));
+       ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
+                         (upper_32_bits(ce->paddr_rri) &
+                         CE_DESC_FLAGS_GET_MASK));
+
+       for (i = 0; i < CE_COUNT; i++) {
+               ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
+               ce_base_addr = ath10k_ce_base_address(ar, i);
+               value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
+               value |= ar->hw_ce_regs->upd->mask;
+               ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
+       }
+
+       memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32));
+}
+EXPORT_SYMBOL(ath10k_ce_alloc_rri);
+
+void ath10k_ce_free_rri(struct ath10k *ar)
+{
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+       dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
+                         ce->vaddr_rri,
+                         ce->paddr_rri);
+}
+EXPORT_SYMBOL(ath10k_ce_free_rri);
index 2c3c8f5e90ea0e499b2bf4f3b67c60a1e68186cb..dbeffaef60247587caeb5f336ef004e4b0f57dcb 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -48,6 +49,9 @@ struct ath10k_ce_pipe;
 #define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
 #define CE_DESC_FLAGS_META_DATA_LSB  ar->hw_values->ce_desc_meta_data_lsb
 
+#define CE_DDR_RRI_MASK                        GENMASK(15, 0)
+#define CE_DDR_DRRI_SHIFT              16
+
 struct ce_desc {
        __le32 addr;
        __le16 nbytes;
@@ -113,6 +117,9 @@ struct ath10k_ce_ring {
        /* CE address space */
        u32 base_addr_ce_space;
 
+       char *shadow_base_unaligned;
+       struct ce_desc *shadow_base;
+
        /* keep last */
        void *per_transfer_context[0];
 };
@@ -153,6 +160,8 @@ struct ath10k_ce {
        spinlock_t ce_lock;
        const struct ath10k_bus_ops *bus_ops;
        struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
+       u32 *vaddr_rri;
+       dma_addr_t paddr_rri;
 };
 
 /*==================Send====================*/
@@ -261,6 +270,8 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar);
 void ath10k_ce_enable_interrupts(struct ath10k *ar);
 void ath10k_ce_dump_registers(struct ath10k *ar,
                              struct ath10k_fw_crash_data *crash_data);
+void ath10k_ce_alloc_rri(struct ath10k *ar);
+void ath10k_ce_free_rri(struct ath10k *ar);
 
 /* ce_attr.flags values */
 /* Use NonSnooping PCIe accesses? */
@@ -327,6 +338,9 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
        return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
 }
 
+#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) (((COPY_ENGINE_BASE_ADDRESS) \
+               - CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS))
+
 #define CE_SRC_RING_TO_DESC(baddr, idx) \
        (&(((struct ce_desc *)baddr)[idx]))
 
@@ -355,14 +369,18 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
        (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
                CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
 #define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS                   0x0000
+#define CE_INTERRUPT_SUMMARY           (GENMASK(CE_COUNT_MAX - 1, 0))
 
 static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar)
 {
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
 
-       return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
-               ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
-               CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
+       if (!ar->hw_params.per_ce_irq)
+               return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
+                       ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
+                       CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
+       else
+               return CE_INTERRUPT_SUMMARY;
 }
 
 #endif /* _CE_H_ */
index 8a3020dbd4cff318eca1f803d3ec76caf5c75b87..4cf54a7ef09a1505ef7f7231d776880c87977e62 100644 (file)
@@ -90,6 +90,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = QCA988X_HW_2_0_VERSION,
@@ -119,6 +121,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .per_ce_irq = false,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = QCA9887_HW_1_0_VERSION,
@@ -148,6 +153,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .per_ce_irq = false,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = QCA6174_HW_2_1_VERSION,
@@ -176,6 +184,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .per_ce_irq = false,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = QCA6174_HW_2_1_VERSION,
@@ -204,6 +215,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .per_ce_irq = false,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = QCA6174_HW_3_0_VERSION,
@@ -232,6 +246,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .per_ce_irq = false,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = QCA6174_HW_3_2_VERSION,
@@ -263,6 +280,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .per_ce_irq = false,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -297,6 +317,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .per_ce_irq = false,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = QCA9984_HW_1_0_DEV_VERSION,
@@ -336,6 +359,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .per_ce_irq = false,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = QCA9888_HW_2_0_DEV_VERSION,
@@ -374,6 +400,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .per_ce_irq = false,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = QCA9377_HW_1_0_DEV_VERSION,
@@ -402,6 +431,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .per_ce_irq = false,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = QCA9377_HW_1_1_DEV_VERSION,
@@ -432,6 +464,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .per_ce_irq = false,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = QCA4019_HW_1_0_DEV_VERSION,
@@ -467,6 +502,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+               .per_ce_irq = false,
+               .shadow_reg_support = false,
+               .rri_on_ddr = false,
        },
        {
                .id = WCN3990_HW_1_0_DEV_VERSION,
@@ -487,6 +525,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
                .target_64bit = true,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
+               .per_ce_irq = true,
+               .shadow_reg_support = true,
+               .rri_on_ddr = true,
        },
 };
 
@@ -1253,14 +1294,61 @@ out:
        return ret;
 }
 
+static int ath10k_core_search_bd(struct ath10k *ar,
+                                const char *boardname,
+                                const u8 *data,
+                                size_t len)
+{
+       size_t ie_len;
+       struct ath10k_fw_ie *hdr;
+       int ret = -ENOENT, ie_id;
+
+       while (len > sizeof(struct ath10k_fw_ie)) {
+               hdr = (struct ath10k_fw_ie *)data;
+               ie_id = le32_to_cpu(hdr->id);
+               ie_len = le32_to_cpu(hdr->len);
+
+               len -= sizeof(*hdr);
+               data = hdr->data;
+
+               if (len < ALIGN(ie_len, 4)) {
+                       ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+                                  ie_id, ie_len, len);
+                       return -EINVAL;
+               }
+
+               switch (ie_id) {
+               case ATH10K_BD_IE_BOARD:
+                       ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
+                                                           boardname);
+                       if (ret == -ENOENT)
+                               /* no match found, continue */
+                               break;
+
+                       /* either found or error, so stop searching */
+                       goto out;
+               }
+
+               /* jump over the padding */
+               ie_len = ALIGN(ie_len, 4);
+
+               len -= ie_len;
+               data += ie_len;
+       }
+
+out:
+       /* return result of parse_bd_ie_board() or -ENOENT */
+       return ret;
+}
+
 static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
                                              const char *boardname,
+                                             const char *fallback_boardname,
                                              const char *filename)
 {
-       size_t len, magic_len, ie_len;
-       struct ath10k_fw_ie *hdr;
+       size_t len, magic_len;
        const u8 *data;
-       int ret, ie_id;
+       int ret;
 
        ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
                                                        ar->hw_params.fw.dir,
@@ -1298,69 +1386,23 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
        data += magic_len;
        len -= magic_len;
 
-       while (len > sizeof(struct ath10k_fw_ie)) {
-               hdr = (struct ath10k_fw_ie *)data;
-               ie_id = le32_to_cpu(hdr->id);
-               ie_len = le32_to_cpu(hdr->len);
-
-               len -= sizeof(*hdr);
-               data = hdr->data;
-
-               if (len < ALIGN(ie_len, 4)) {
-                       ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
-                                  ie_id, ie_len, len);
-                       ret = -EINVAL;
-                       goto err;
-               }
-
-               switch (ie_id) {
-               case ATH10K_BD_IE_BOARD:
-                       ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
-                                                           boardname);
-                       if (ret == -ENOENT && ar->id.bdf_ext[0] != '\0') {
-                               /* try default bdf if variant was not found */
-                               char *s, *v = ",variant=";
-                               char boardname2[100];
-
-                               strlcpy(boardname2, boardname,
-                                       sizeof(boardname2));
-
-                               s = strstr(boardname2, v);
-                               if (s)
-                                       *s = '\0';  /* strip ",variant=%s" */
-
-                               ret = ath10k_core_parse_bd_ie_board(ar, data,
-                                                                   ie_len,
-                                                                   boardname2);
-                       }
-
-                       if (ret == -ENOENT)
-                               /* no match found, continue */
-                               break;
-                       else if (ret)
-                               /* there was an error, bail out */
-                               goto err;
+       /* attempt to find boardname in the IE list */
+       ret = ath10k_core_search_bd(ar, boardname, data, len);
 
-                       /* board data found */
-                       goto out;
-               }
+       /* if we didn't find it and have a fallback name, try that */
+       if (ret == -ENOENT && fallback_boardname)
+               ret = ath10k_core_search_bd(ar, fallback_boardname, data, len);
 
-               /* jump over the padding */
-               ie_len = ALIGN(ie_len, 4);
-
-               len -= ie_len;
-               data += ie_len;
-       }
-
-out:
-       if (!ar->normal_mode_fw.board_data || !ar->normal_mode_fw.board_len) {
+       if (ret == -ENOENT) {
                ath10k_err(ar,
                           "failed to fetch board data for %s from %s/%s\n",
                           boardname, ar->hw_params.fw.dir, filename);
                ret = -ENODATA;
-               goto err;
        }
 
+       if (ret)
+               goto err;
+
        return 0;
 
 err:
@@ -1369,12 +1411,12 @@ err:
 }
 
 static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
-                                        size_t name_len)
+                                        size_t name_len, bool with_variant)
 {
        /* strlen(',variant=') + strlen(ar->id.bdf_ext) */
        char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
 
-       if (ar->id.bdf_ext[0] != '\0')
+       if (with_variant && ar->id.bdf_ext[0] != '\0')
                scnprintf(variant, sizeof(variant), ",variant=%s",
                          ar->id.bdf_ext);
 
@@ -1400,17 +1442,26 @@ out:
 
 static int ath10k_core_fetch_board_file(struct ath10k *ar)
 {
-       char boardname[100];
+       char boardname[100], fallback_boardname[100];
        int ret;
 
-       ret = ath10k_core_create_board_name(ar, boardname, sizeof(boardname));
+       ret = ath10k_core_create_board_name(ar, boardname,
+                                           sizeof(boardname), true);
        if (ret) {
                ath10k_err(ar, "failed to create board name: %d", ret);
                return ret;
        }
 
+       ret = ath10k_core_create_board_name(ar, fallback_boardname,
+                                           sizeof(fallback_boardname), false);
+       if (ret) {
+               ath10k_err(ar, "failed to create fallback board name: %d", ret);
+               return ret;
+       }
+
        ar->bd_api = 2;
        ret = ath10k_core_fetch_board_data_api_n(ar, boardname,
+                                                fallback_boardname,
                                                 ATH10K_BOARD_API2_FILE);
        if (!ret)
                goto success;
@@ -2472,6 +2523,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
                ar->hw->wiphy->hw_version = target_info.version;
                break;
        case ATH10K_BUS_SNOC:
+               memset(&target_info, 0, sizeof(target_info));
+               ret = ath10k_hif_get_target_info(ar, &target_info);
+               if (ret) {
+                       ath10k_err(ar, "could not get target info (%d)\n", ret);
+                       goto err_power_down;
+               }
+               ar->target_version = target_info.version;
+               ar->hw->wiphy->hw_version = target_info.version;
                break;
        default:
                ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus);
index c17d805d68cc70c7f621f6c8f0eb842e94c9f171..e4ac8f2831fdf2997b328b59bf217cda5523e1ab 100644 (file)
@@ -52,6 +52,8 @@
 /* Antenna noise floor */
 #define ATH10K_DEFAULT_NOISE_FLOOR -95
 
+#define ATH10K_INVALID_RSSI 128
+
 #define ATH10K_MAX_NUM_MGMT_PENDING 128
 
 /* number of failed packets (20 packets with 16 sw reties each) */
index 6da4e3369c5a797eddaf917fae23a3d06272ed6f..1a59ea0068c209dcc8f929c17013d416630207e9 100644 (file)
 
 #include <linux/kernel.h>
 #include "core.h"
+#include "bmi.h"
 #include "debug.h"
 
 struct ath10k_hif_sg_item {
        u16 transfer_id;
        void *transfer_context; /* NULL = tx completion callback not called */
        void *vaddr; /* for debugging mostly */
-       u32 paddr;
+       dma_addr_t paddr;
        u16 len;
 };
 
@@ -93,6 +94,9 @@ struct ath10k_hif_ops {
        /* fetch calibration data from target eeprom */
        int (*fetch_cal_eeprom)(struct ath10k *ar, void **data,
                                size_t *data_len);
+
+       int (*get_target_info)(struct ath10k *ar,
+                              struct bmi_target_info *target_info);
 };
 
 static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -218,4 +222,13 @@ static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar,
        return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len);
 }
 
+static inline int ath10k_hif_get_target_info(struct ath10k *ar,
+                                            struct bmi_target_info *tgt_info)
+{
+       if (!ar->hif.ops->get_target_info)
+               return -EOPNOTSUPP;
+
+       return ar->hif.ops->get_target_info(ar, tgt_info);
+}
+
 #endif /* _HIF_H_ */
index 492dc5b4bbf22c44d41c4fb3b450e23bd2478aef..8902720b4e49ab29bb97abb3c63bfc444ebf15a8 100644 (file)
@@ -542,8 +542,14 @@ static const char *htc_service_name(enum ath10k_htc_svc_id id)
                return "NMI Data";
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                return "HTT Data";
+       case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
+               return "HTT Data";
+       case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
+               return "HTT Data";
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                return "RAW";
+       case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
+               return "PKTLOG";
        }
 
        return "Unknown";
index a2f8814b3e53c7186a28e954418f57d836def7a1..34877597dd6a88f4b5ebdebd561d1feda027782b 100644 (file)
@@ -248,6 +248,7 @@ enum ath10k_htc_svc_gid {
        ATH10K_HTC_SVC_GRP_WMI = 1,
        ATH10K_HTC_SVC_GRP_NMI = 2,
        ATH10K_HTC_SVC_GRP_HTT = 3,
+       ATH10K_LOG_SERVICE_GROUP = 6,
 
        ATH10K_HTC_SVC_GRP_TEST = 254,
        ATH10K_HTC_SVC_GRP_LAST = 255,
@@ -273,6 +274,9 @@ enum ath10k_htc_svc_id {
 
        ATH10K_HTC_SVC_ID_HTT_DATA_MSG  = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
 
+       ATH10K_HTC_SVC_ID_HTT_DATA2_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 1),
+       ATH10K_HTC_SVC_ID_HTT_DATA3_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 2),
+       ATH10K_HTC_SVC_ID_HTT_LOG_MSG = SVC(ATH10K_LOG_SERVICE_GROUP, 0),
        /* raw stream service (i.e. flash, tcmd, calibration apps) */
        ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
 };
index 625198dea18b6db7687d180ae7702ebe9a2ce68c..21a67f82f037df83f33fc0b31d1360c11ac9fb0b 100644 (file)
@@ -257,11 +257,11 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
                return status;
        }
 
-       status = htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
+       status = ath10k_htt_send_frag_desc_bank_cfg(htt);
        if (status)
                return status;
 
-       status = htt->tx_ops->htt_send_rx_ring_cfg(htt);
+       status = ath10k_htt_send_rx_ring_cfg(htt);
        if (status) {
                ath10k_warn(ar, "failed to setup rx ring: %d\n",
                            status);
index 8cc2a8b278e4d003126be9e8f1f792086f25d3c2..5d3ff80f3a1f9dd8a27019e85e0345b269998b19 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -127,6 +128,19 @@ struct htt_msdu_ext_desc_64 {
                                 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
                                 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
 
+#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64             BIT(16)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64         BIT(17)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64         BIT(18)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64         BIT(19)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64         BIT(20)
+#define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64          BIT(21)
+
+#define HTT_MSDU_CHECKSUM_ENABLE_64  (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \
+                                    | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \
+                                    | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \
+                                    | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \
+                                    | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64)
+
 enum htt_data_tx_desc_flags0 {
        HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
        HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
@@ -533,12 +547,18 @@ struct htt_ver_resp {
        u8 rsvd0;
 } __packed;
 
+#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0)
+
+#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK    GENMASK(7, 0)
+
 struct htt_mgmt_tx_completion {
        u8 rsvd0;
        u8 rsvd1;
-       u8 rsvd2;
+       u8 flags;
        __le32 desc_id;
        __le32 status;
+       __le32 ppdu_id;
+       __le32 info;
 } __packed;
 
 #define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x1F)
@@ -1648,6 +1668,7 @@ struct htt_resp {
 struct htt_tx_done {
        u16 msdu_id;
        u16 status;
+       u8 ack_rssi;
 };
 
 enum htt_tx_compl_state {
@@ -1848,6 +1869,57 @@ struct ath10k_htt_tx_ops {
        void (*htt_free_txbuff)(struct ath10k_htt *htt);
 };
 
+static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
+{
+       if (!htt->tx_ops->htt_send_rx_ring_cfg)
+               return -EOPNOTSUPP;
+
+       return htt->tx_ops->htt_send_rx_ring_cfg(htt);
+}
+
+static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+{
+       if (!htt->tx_ops->htt_send_frag_desc_bank_cfg)
+               return -EOPNOTSUPP;
+
+       return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
+}
+
+static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt)
+{
+       if (!htt->tx_ops->htt_alloc_frag_desc)
+               return -EOPNOTSUPP;
+
+       return htt->tx_ops->htt_alloc_frag_desc(htt);
+}
+
+static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt)
+{
+       if (htt->tx_ops->htt_free_frag_desc)
+               htt->tx_ops->htt_free_frag_desc(htt);
+}
+
+static inline int ath10k_htt_tx(struct ath10k_htt *htt,
+                               enum ath10k_hw_txrx_mode txmode,
+                               struct sk_buff *msdu)
+{
+       return htt->tx_ops->htt_tx(htt, txmode, msdu);
+}
+
+static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
+{
+       if (!htt->tx_ops->htt_alloc_txbuff)
+               return -EOPNOTSUPP;
+
+       return htt->tx_ops->htt_alloc_txbuff(htt);
+}
+
+static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt)
+{
+       if (htt->tx_ops->htt_free_txbuff)
+               htt->tx_ops->htt_free_txbuff(htt);
+}
+
 struct ath10k_htt_rx_ops {
        size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
        void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
@@ -1857,6 +1929,43 @@ struct ath10k_htt_rx_ops {
        void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
 };
 
+static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt)
+{
+       if (!htt->rx_ops->htt_get_rx_ring_size)
+               return 0;
+
+       return htt->rx_ops->htt_get_rx_ring_size(htt);
+}
+
+static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt,
+                                                void *vaddr)
+{
+       if (htt->rx_ops->htt_config_paddrs_ring)
+               htt->rx_ops->htt_config_paddrs_ring(htt, vaddr);
+}
+
+static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt,
+                                             dma_addr_t paddr,
+                                             int idx)
+{
+       if (htt->rx_ops->htt_set_paddrs_ring)
+               htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
+}
+
+static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt)
+{
+       if (!htt->rx_ops->htt_get_vaddr_ring)
+               return NULL;
+
+       return htt->rx_ops->htt_get_vaddr_ring(htt);
+}
+
+static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx)
+{
+       if (htt->rx_ops->htt_reset_paddrs_ring)
+               htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
+}
+
 #define RX_HTT_HDR_STATUS_LEN 64
 
 /* This structure layout is programmed via rx ring setup
index 5e02e26158f62f8fcb64a1cd34acd926b752b515..bd23f6940488566b5962720572952543a0fde9ad 100644 (file)
@@ -25,6 +25,7 @@
 #include "mac.h"
 
 #include <linux/log2.h>
+#include <linux/bitfield.h>
 
 /* when under memory pressure rx ring refill may fail and needs a retry */
 #define HTT_RX_RING_REFILL_RETRY_MS 50
@@ -181,7 +182,7 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
                rxcb = ATH10K_SKB_RXCB(skb);
                rxcb->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
-               htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
+               ath10k_htt_set_paddrs_ring(htt, paddr, idx);
                htt->rx_ring.fill_cnt++;
 
                if (htt->rx_ring.in_ord_rx) {
@@ -286,8 +287,8 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
        ath10k_htt_rx_ring_free(htt);
 
        dma_free_coherent(htt->ar->dev,
-                         htt->rx_ops->htt_get_rx_ring_size(htt),
-                         htt->rx_ops->htt_get_vaddr_ring(htt),
+                         ath10k_htt_get_rx_ring_size(htt),
+                         ath10k_htt_get_vaddr_ring(htt),
                          htt->rx_ring.base_paddr);
 
        dma_free_coherent(htt->ar->dev,
@@ -314,7 +315,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];
        htt->rx_ring.netbufs_ring[idx] = NULL;
-       htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
+       ath10k_htt_reset_paddrs_ring(htt, idx);
 
        idx++;
        idx &= htt->rx_ring.size_mask;
@@ -586,13 +587,13 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;
 
-       size = htt->rx_ops->htt_get_rx_ring_size(htt);
+       size = ath10k_htt_get_rx_ring_size(htt);
 
        vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
        if (!vaddr_ring)
                goto err_dma_ring;
 
-       htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
+       ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
        htt->rx_ring.base_paddr = paddr;
 
        vaddr = dma_alloc_coherent(htt->ar->dev,
@@ -626,7 +627,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 
 err_dma_idx:
        dma_free_coherent(htt->ar->dev,
-                         htt->rx_ops->htt_get_rx_ring_size(htt),
+                         ath10k_htt_get_rx_ring_size(htt),
                          vaddr_ring,
                          htt->rx_ring.base_paddr);
 err_dma_ring:
@@ -2719,12 +2720,21 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
        case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
                struct htt_tx_done tx_done = {};
                int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
+               int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
 
                tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
 
                switch (status) {
                case HTT_MGMT_TX_STATUS_OK:
                        tx_done.status = HTT_TX_COMPL_STATE_ACK;
+                       if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+                                    ar->wmi.svc_map) &&
+                           (resp->mgmt_tx_completion.flags &
+                            HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
+                               tx_done.ack_rssi =
+                               FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
+                                         info);
+                       }
                        break;
                case HTT_MGMT_TX_STATUS_RETRY:
                        tx_done.status = HTT_TX_COMPL_STATE_NOACK;
index d334b7be1fea42499b20770d4f8ebb47c6fcf501..5d8b97a0ccaa537b9cd022fd081d6f94f06c9191 100644 (file)
@@ -443,13 +443,13 @@ static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
        struct ath10k *ar = htt->ar;
        int ret;
 
-       ret = htt->tx_ops->htt_alloc_txbuff(htt);
+       ret = ath10k_htt_alloc_txbuff(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
                return ret;
        }
 
-       ret = htt->tx_ops->htt_alloc_frag_desc(htt);
+       ret = ath10k_htt_alloc_frag_desc(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
                goto free_txbuf;
@@ -473,10 +473,10 @@ free_txq:
        ath10k_htt_tx_free_txq(htt);
 
 free_frag_desc:
-       htt->tx_ops->htt_free_frag_desc(htt);
+       ath10k_htt_free_frag_desc(htt);
 
 free_txbuf:
-       htt->tx_ops->htt_free_txbuff(htt);
+       ath10k_htt_free_txbuff(htt);
 
        return ret;
 }
@@ -530,9 +530,9 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
        if (!htt->tx_mem_allocated)
                return;
 
-       htt->tx_ops->htt_free_txbuff(htt);
+       ath10k_htt_free_txbuff(htt);
        ath10k_htt_tx_free_txq(htt);
-       htt->tx_ops->htt_free_frag_desc(htt);
+       ath10k_htt_free_frag_desc(htt);
        ath10k_htt_tx_free_txdone_fifo(htt);
        htt->tx_mem_allocated = false;
 }
@@ -1475,8 +1475,11 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
-               if (ar->hw_params.continuous_frag_desc)
-                       ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
+               if (ar->hw_params.continuous_frag_desc) {
+                       memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
+                       ext_desc->tso_flag[3] |=
+                               __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
+               }
        }
 
        /* Prevent firmware from sending up tx inspection requests. There's
index 497ac33e0fbf2ddc01eac4a72669584f9569a1af..677535b3d2070eea5d20983b89c9aba44be4a829 100644 (file)
@@ -310,6 +310,12 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
        .wm_high        = &wcn3990_dst_wm_high,
 };
 
+static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
+       .shift = 19,
+       .mask = 0x00080000,
+       .enable = 0x00000000,
+};
+
 const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
        .sr_base_addr           = 0x00000000,
        .sr_size_addr           = 0x00000008,
@@ -320,8 +326,6 @@ const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
        .dst_wr_index_addr      = 0x00000040,
        .current_srri_addr      = 0x00000044,
        .current_drri_addr      = 0x00000048,
-       .ddr_addr_for_rri_low   = 0x00000004,
-       .ddr_addr_for_rri_high  = 0x00000008,
        .ce_rri_low             = 0x0024C004,
        .ce_rri_high            = 0x0024C008,
        .host_ie_addr           = 0x0000002c,
@@ -331,6 +335,7 @@ const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
        .misc_regs              = &wcn3990_misc_reg,
        .wm_srcr                = &wcn3990_wm_src_ring,
        .wm_dstr                = &wcn3990_wm_dst_ring,
+       .upd                    = &wcn3990_ctrl1_upd,
 };
 
 const struct ath10k_hw_values wcn3990_values = {
index 413b1b4321f77f7dfbd947844e1fad40d36f0e08..b8bdabe7307340c8b75af9c9dec3f1dd7c1690d1 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -131,7 +132,7 @@ enum qca9377_chip_id_rev {
 
 /* WCN3990 1.0 definitions */
 #define WCN3990_HW_1_0_DEV_VERSION     ATH10K_HW_WCN3990
-#define WCN3990_HW_1_0_FW_DIR          ATH10K_FW_DIR "/WCN3990/hw3.0"
+#define WCN3990_HW_1_0_FW_DIR          ATH10K_FW_DIR "/WCN3990/hw1.0"
 
 #define ATH10K_FW_FILE_BASE            "firmware"
 #define ATH10K_FW_API_MAX              6
@@ -335,6 +336,12 @@ struct ath10k_hw_ce_dst_src_wm_regs {
        struct ath10k_hw_ce_regs_addr_map *wm_low;
        struct ath10k_hw_ce_regs_addr_map *wm_high; };
 
+struct ath10k_hw_ce_ctrl1_upd {
+       u32 shift;
+       u32 mask;
+       u32 enable;
+};
+
 struct ath10k_hw_ce_regs {
        u32 sr_base_addr;
        u32 sr_size_addr;
@@ -357,7 +364,9 @@ struct ath10k_hw_ce_regs {
        struct ath10k_hw_ce_cmd_halt *cmd_halt;
        struct ath10k_hw_ce_host_ie *host_ie;
        struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
-       struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; };
+       struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
+       struct ath10k_hw_ce_ctrl1_upd *upd;
+};
 
 struct ath10k_hw_values {
        u32 rtc_state_val_on;
@@ -568,6 +577,15 @@ struct ath10k_hw_params {
 
        /* Target rx ring fill level */
        u32 rx_ring_fill_level;
+
+       /* target supporting per ce IRQ */
+       bool per_ce_irq;
+
+       /* target supporting shadow register for ce write */
+       bool shadow_reg_support;
+
+       /* target supporting retention restore on ddr */
+       bool rri_on_ddr;
 };
 
 struct htt_rx_desc;
index bf05a3689558424e406b52b472b605bbe62f898b..3d7119ad7c7af23a8e88faf0c344eb580a6f9227 100644 (file)
@@ -3598,7 +3598,7 @@ static int ath10k_mac_tx_submit(struct ath10k *ar,
 
        switch (txpath) {
        case ATH10K_MAC_TX_HTT:
-               ret = htt->tx_ops->htt_tx(htt, txmode, skb);
+               ret = ath10k_htt_tx(htt, txmode, skb);
                break;
        case ATH10K_MAC_TX_HTT_MGMT:
                ret = ath10k_htt_mgmt_tx(htt, skb);
@@ -4679,6 +4679,13 @@ static int ath10k_start(struct ieee80211_hw *hw)
                }
        }
 
+       param = ar->wmi.pdev_param->idle_ps_config;
+       ret = ath10k_wmi_pdev_set_param(ar, param, 1);
+       if (ret && ret != -EOPNOTSUPP) {
+               ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret);
+               goto err_core_stop;
+       }
+
        __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
 
        /*
@@ -5717,6 +5724,12 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
                arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
        }
 
+       if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+               arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ;
+               ether_addr_copy(arg.mac_addr.addr, req->mac_addr);
+               ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
+       }
+
        if (req->n_channels) {
                arg.n_channels = req->n_channels;
                for (i = 0; i < arg.n_channels; i++)
@@ -8433,6 +8446,17 @@ int ath10k_mac_register(struct ath10k *ar)
                goto err_dfs_detector_exit;
        }
 
+       if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
+               ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
+               if (ret) {
+                       ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
+                       goto err_dfs_detector_exit;
+               }
+
+               ar->hw->wiphy->features |=
+                       NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+       }
+
        ar->hw->wiphy->cipher_suites = cipher_suites;
 
        /* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
index fd1566cd7d2ba847b467829b598512dfb8beccdf..af2cf55c4c1e631ea075e5baa5742651b77435c1 100644 (file)
@@ -1383,8 +1383,8 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
 
        for (i = 0; i < n_items - 1; i++) {
                ath10k_dbg(ar, ATH10K_DBG_PCI,
-                          "pci tx item %d paddr 0x%08x len %d n_items %d\n",
-                          i, items[i].paddr, items[i].len, n_items);
+                          "pci tx item %d paddr %pad len %d n_items %d\n",
+                          i, &items[i].paddr, items[i].len, n_items);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                                items[i].vaddr, items[i].len);
 
@@ -1401,8 +1401,8 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
        /* `i` is equal to `n_items -1` after for() */
 
        ath10k_dbg(ar, ATH10K_DBG_PCI,
-                  "pci tx item %d paddr 0x%08x len %d n_items %d\n",
-                  i, items[i].paddr, items[i].len, n_items);
+                  "pci tx item %d paddr %pad len %d n_items %d\n",
+                  i, &items[i].paddr, items[i].len, n_items);
        ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                        items[i].vaddr, items[i].len);
 
index 03a69e5b11165753717517f098480306af881f26..2d04c54a41538f44bdfd3d34593dab22570d21cf 100644 (file)
@@ -1957,25 +1957,25 @@ static int ath10k_sdio_probe(struct sdio_func *func,
        ar_sdio = ath10k_sdio_priv(ar);
 
        ar_sdio->irq_data.irq_proc_reg =
-               kzalloc(sizeof(struct ath10k_sdio_irq_proc_regs),
-                       GFP_KERNEL);
+               devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
+                            GFP_KERNEL);
        if (!ar_sdio->irq_data.irq_proc_reg) {
                ret = -ENOMEM;
                goto err_core_destroy;
        }
 
        ar_sdio->irq_data.irq_en_reg =
-               kzalloc(sizeof(struct ath10k_sdio_irq_enable_regs),
-                       GFP_KERNEL);
+               devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
+                            GFP_KERNEL);
        if (!ar_sdio->irq_data.irq_en_reg) {
                ret = -ENOMEM;
-               goto err_free_proc_reg;
+               goto err_core_destroy;
        }
 
-       ar_sdio->bmi_buf = kzalloc(BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
+       ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
        if (!ar_sdio->bmi_buf) {
                ret = -ENOMEM;
-               goto err_free_en_reg;
+               goto err_core_destroy;
        }
 
        ar_sdio->func = func;
@@ -1995,7 +1995,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
        ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
        if (!ar_sdio->workqueue) {
                ret = -ENOMEM;
-               goto err_free_bmi_buf;
+               goto err_core_destroy;
        }
 
        for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
@@ -2011,7 +2011,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
                ret = -ENODEV;
                ath10k_err(ar, "unsupported device id %u (0x%x)\n",
                           dev_id_base, id->device);
-               goto err_free_bmi_buf;
+               goto err_core_destroy;
        }
 
        ar->id.vendor = id->vendor;
@@ -2040,12 +2040,6 @@ static int ath10k_sdio_probe(struct sdio_func *func,
 
 err_free_wq:
        destroy_workqueue(ar_sdio->workqueue);
-err_free_bmi_buf:
-       kfree(ar_sdio->bmi_buf);
-err_free_en_reg:
-       kfree(ar_sdio->irq_data.irq_en_reg);
-err_free_proc_reg:
-       kfree(ar_sdio->irq_data.irq_proc_reg);
 err_core_destroy:
        ath10k_core_destroy(ar);
 
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
new file mode 100644 (file)
index 0000000..47a4d2a
--- /dev/null
@@ -0,0 +1,1414 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "debug.h"
+#include "hif.h"
+#include "htc.h"
+#include "ce.h"
+#include "snoc.h"
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#define  WCN3990_CE_ATTR_FLAGS 0
+#define ATH10K_SNOC_RX_POST_RETRY_MS 50
+#define CE_POLL_PIPE 4
+
+static char *const ce_name[] = {
+       "WLAN_CE_0",
+       "WLAN_CE_1",
+       "WLAN_CE_2",
+       "WLAN_CE_3",
+       "WLAN_CE_4",
+       "WLAN_CE_5",
+       "WLAN_CE_6",
+       "WLAN_CE_7",
+       "WLAN_CE_8",
+       "WLAN_CE_9",
+       "WLAN_CE_10",
+       "WLAN_CE_11",
+};
+
+static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
+       {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
+       {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
+       {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+       {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
+};
+
+static struct ath10k_wcn3990_clk_info clk_cfg[] = {
+       {NULL, "cxo_ref_clk_pin", 0, false},
+};
+
+static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+
+static const struct ath10k_snoc_drv_priv drv_priv = {
+       .hw_rev = ATH10K_HW_WCN3990,
+       .dma_mask = DMA_BIT_MASK(37),
+};
+
+static struct ce_attr host_ce_config_wlan[] = {
+       /* CE0: host->target HTC control streams */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 16,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+               .send_cb = ath10k_snoc_htc_tx_cb,
+       },
+
+       /* CE1: target->host HTT + HTC control */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+       },
+
+       /* CE2: target->host WMI */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 64,
+               .recv_cb = ath10k_snoc_htc_rx_cb,
+       },
+
+       /* CE3: host->target WMI */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 32,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+               .send_cb = ath10k_snoc_htc_tx_cb,
+       },
+
+       /* CE4: host->target HTT */
+       {
+               .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+               .src_nentries = 256,
+               .src_sz_max = 256,
+               .dest_nentries = 0,
+               .send_cb = ath10k_snoc_htt_tx_cb,
+       },
+
+       /* CE5: target->host HTT (ipa_uc->target) */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 512,
+               .dest_nentries = 512,
+               .recv_cb = ath10k_snoc_htt_rx_cb,
+       },
+
+       /* CE6: target autonomous hif_memcpy */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE7: ce_diag, the Diagnostic Window */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 2,
+               .src_sz_max = 2048,
+               .dest_nentries = 2,
+       },
+
+       /* CE8: Target to uMC */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 128,
+       },
+
+       /* CE9 target->host HTT */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+       },
+
+       /* CE10: target->host HTT */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+       },
+
+       /* CE11: target -> host PKTLOG */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 512,
+               .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+       },
+};
+
+static struct service_to_pipe target_service_to_ce_map_wlan[] = {
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+               __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
+               __cpu_to_le32(3),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+               __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
+               __cpu_to_le32(2),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+               __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
+               __cpu_to_le32(3),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+               __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
+               __cpu_to_le32(2),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+               __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
+               __cpu_to_le32(3),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+               __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
+               __cpu_to_le32(2),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+               __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
+               __cpu_to_le32(3),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+               __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
+               __cpu_to_le32(2),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+               __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
+               __cpu_to_le32(3),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+               __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
+               __cpu_to_le32(2),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+               __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
+               __cpu_to_le32(0),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+               __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
+               __cpu_to_le32(2),
+       },
+       { /* not used */
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+               __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
+               __cpu_to_le32(0),
+       },
+       { /* not used */
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+               __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
+               __cpu_to_le32(2),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+               __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
+               __cpu_to_le32(4),
+       },
+       {
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+               __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
+               __cpu_to_le32(1),
+       },
+       { /* not used */
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+               __cpu_to_le32(PIPEDIR_OUT),
+               __cpu_to_le32(5),
+       },
+       { /* in = DL = target -> host */
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
+               __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
+               __cpu_to_le32(9),
+       },
+       { /* in = DL = target -> host */
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
+               __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
+               __cpu_to_le32(10),
+       },
+       { /* in = DL = target -> host pktlog */
+               __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
+               __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
+               __cpu_to_le32(11),
+       },
+       /* (Additions here) */
+
+       { /* must be last */
+               __cpu_to_le32(0),
+               __cpu_to_le32(0),
+               __cpu_to_le32(0),
+       },
+};
+
+void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+       iowrite32(value, ar_snoc->mem + offset);
+}
+
+u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       u32 val;
+
+       val = ioread32(ar_snoc->mem + offset);
+
+       return val;
+}
+
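+/* Allocate an rx skb, DMA map it and post it to the CE destination ring. */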
+static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
+{
+       struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+       struct ath10k *ar = pipe->hif_ce_state;
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+       struct sk_buff *skb;
+       dma_addr_t paddr;
+       int ret;
+
+       skb = dev_alloc_skb(pipe->buf_sz);
+       if (!skb)
+               return -ENOMEM;
+
+       WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+       paddr = dma_map_single(ar->dev, skb->data,
+                              skb->len + skb_tailroom(skb),
+                              DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(ar->dev, paddr))) {
+               ath10k_warn(ar, "failed to dma map snoc rx buf\n");
+               dev_kfree_skb_any(skb);
+               return -EIO;
+       }
+
+       ATH10K_SKB_RXCB(skb)->paddr = paddr;
+
+       spin_lock_bh(&ce->ce_lock);
+       ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
+       spin_unlock_bh(&ce->ce_lock);
+       if (ret) {
+               dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
+{
+       struct ath10k *ar = pipe->hif_ce_state;
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+       int ret, num;
+
+       if (pipe->buf_sz == 0)
+               return;
+
+       if (!ce_pipe->dest_ring)
+               return;
+
+       spin_lock_bh(&ce->ce_lock);
+       num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+       spin_unlock_bh(&ce->ce_lock);
+       while (num--) {
+               ret = __ath10k_snoc_rx_post_buf(pipe);
+               if (ret) {
+                       if (ret == -ENOSPC)
+                               break;
+                       ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
+                       mod_timer(&ar_snoc->rx_post_retry, jiffies +
+                                 ATH10K_SNOC_RX_POST_RETRY_MS);
+                       break;
+               }
+       }
+}
+
+static void ath10k_snoc_rx_post(struct ath10k *ar)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       int i;
+
+       for (i = 0; i < CE_COUNT; i++)
+               ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
+}
+
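+/*
+ * Drain completed rx buffers from a CE pipe, pass them to the given
+ * completion handler and replenish the destination ring.
+ */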
+static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
+                                     void (*callback)(struct ath10k *ar,
+                                                      struct sk_buff *skb))
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
+       struct sk_buff *skb;
+       struct sk_buff_head list;
+       void *transfer_context;
+       unsigned int nbytes, max_nbytes;
+
+       __skb_queue_head_init(&list);
+       while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+                                            &nbytes) == 0) {
+               skb = transfer_context;
+               max_nbytes = skb->len + skb_tailroom(skb);
+               dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+                                max_nbytes, DMA_FROM_DEVICE);
+
+               if (unlikely(max_nbytes < nbytes)) {
+                       ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+                                   nbytes, max_nbytes);
+                       dev_kfree_skb_any(skb);
+                       continue;
+               }
+
+               skb_put(skb, nbytes);
+               __skb_queue_tail(&list, skb);
+       }
+
+       while ((skb = __skb_dequeue(&list))) {
+               ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
+                          ce_state->id, skb->len);
+
+               callback(ar, skb);
+       }
+
+       ath10k_snoc_rx_post_pipe(pipe_info);
+}
+
+static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+       ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+       /* CE4 polling needs to be done whenever CE pipe which transports
+        * HTT Rx (target->host) is processed.
+        */
+       ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
+
+       ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
+{
+       skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+       ath10k_htt_t2h_msg_handler(ar, skb);
+}
+
+static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+       ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
+       ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
+}
+
+static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
+{
+       struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
+       struct ath10k *ar = ar_snoc->ar;
+
+       ath10k_snoc_rx_post(ar);
+}
+
+static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct sk_buff_head list;
+       struct sk_buff *skb;
+
+       __skb_queue_head_init(&list);
+       while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+               if (!skb)
+                       continue;
+
+               __skb_queue_tail(&list, skb);
+       }
+
+       while ((skb = __skb_dequeue(&list)))
+               ath10k_htc_tx_completion_handler(ar, skb);
+}
+
+static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct sk_buff *skb;
+
+       while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+               if (!skb)
+                       continue;
+
+               dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+                                skb->len, DMA_TO_DEVICE);
+               ath10k_htt_hif_tx_complete(ar, skb);
+       }
+}
+
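+/* Queue a scatter-gather list on a CE pipe under ce_lock. */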
+static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+                                struct ath10k_hif_sg_item *items, int n_items)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+       struct ath10k_snoc_pipe *snoc_pipe;
+       struct ath10k_ce_pipe *ce_pipe;
+       int err, i = 0;
+
+       snoc_pipe = &ar_snoc->pipe_info[pipe_id];
+       ce_pipe = snoc_pipe->ce_hdl;
+       spin_lock_bh(&ce->ce_lock);
+
+       for (i = 0; i < n_items - 1; i++) {
+               ath10k_dbg(ar, ATH10K_DBG_SNOC,
+                          "snoc tx item %d paddr %pad len %d n_items %d\n",
+                          i, &items[i].paddr, items[i].len, n_items);
+
+               err = ath10k_ce_send_nolock(ce_pipe,
+                                           items[i].transfer_context,
+                                           items[i].paddr,
+                                           items[i].len,
+                                           items[i].transfer_id,
+                                           CE_SEND_FLAG_GATHER);
+               if (err)
+                       goto err;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_SNOC,
+                  "snoc tx item %d paddr %pad len %d n_items %d\n",
+                  i, &items[i].paddr, items[i].len, n_items);
+
+       err = ath10k_ce_send_nolock(ce_pipe,
+                                   items[i].transfer_context,
+                                   items[i].paddr,
+                                   items[i].len,
+                                   items[i].transfer_id,
+                                   0);
+       if (err)
+               goto err;
+
+       spin_unlock_bh(&ce->ce_lock);
+
+       return 0;
+
+err:
+       for (; i > 0; i--)
+               __ath10k_ce_send_revert(ce_pipe);
+
+       spin_unlock_bh(&ce->ce_lock);
+       return err;
+}
+
+static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
+                                          struct bmi_target_info *target_info)
+{
+       target_info->version = ATH10K_HW_WCN3990;
+       target_info->type = ATH10K_HW_WCN3990;
+
+       return 0;
+}
+
+static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+       ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");
+
+       return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
+}
+
+static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+                                               int force)
+{
+       int resources;
+
+       ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");
+
+       if (!force) {
+               resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);
+
+               if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+                       return;
+       }
+       ath10k_ce_per_engine_service(ar, pipe);
+}
+
+static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
+                                              u16 service_id,
+                                              u8 *ul_pipe, u8 *dl_pipe)
+{
+       const struct service_to_pipe *entry;
+       bool ul_set = false, dl_set = false;
+       int i;
+
+       ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");
+
+       for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
+               entry = &target_service_to_ce_map_wlan[i];
+
+               if (__le32_to_cpu(entry->service_id) != service_id)
+                       continue;
+
+               switch (__le32_to_cpu(entry->pipedir)) {
+               case PIPEDIR_NONE:
+                       break;
+               case PIPEDIR_IN:
+                       WARN_ON(dl_set);
+                       *dl_pipe = __le32_to_cpu(entry->pipenum);
+                       dl_set = true;
+                       break;
+               case PIPEDIR_OUT:
+                       WARN_ON(ul_set);
+                       *ul_pipe = __le32_to_cpu(entry->pipenum);
+                       ul_set = true;
+                       break;
+               case PIPEDIR_INOUT:
+                       WARN_ON(dl_set);
+                       WARN_ON(ul_set);
+                       *dl_pipe = __le32_to_cpu(entry->pipenum);
+                       *ul_pipe = __le32_to_cpu(entry->pipenum);
+                       dl_set = true;
+                       ul_set = true;
+                       break;
+               }
+       }
+
+       if (WARN_ON(!ul_set || !dl_set))
+               return -ENOENT;
+
+       return 0;
+}
+
+static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
+                                            u8 *ul_pipe, u8 *dl_pipe)
+{
+       ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");
+
+       (void)ath10k_snoc_hif_map_service_to_pipe(ar,
+                                                ATH10K_HTC_SVC_ID_RSVD_CTRL,
+                                                ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
+{
+       ath10k_ce_disable_interrupts(ar);
+}
+
+static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
+{
+       ath10k_ce_enable_interrupts(ar);
+}
+
+static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+{
+       struct ath10k_ce_pipe *ce_pipe;
+       struct ath10k_ce_ring *ce_ring;
+       struct sk_buff *skb;
+       struct ath10k *ar;
+       int i;
+
+       ar = snoc_pipe->hif_ce_state;
+       ce_pipe = snoc_pipe->ce_hdl;
+       ce_ring = ce_pipe->dest_ring;
+
+       if (!ce_ring)
+               return;
+
+       if (!snoc_pipe->buf_sz)
+               return;
+
+       for (i = 0; i < ce_ring->nentries; i++) {
+               skb = ce_ring->per_transfer_context[i];
+               if (!skb)
+                       continue;
+
+               ce_ring->per_transfer_context[i] = NULL;
+
+               dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+                                skb->len + skb_tailroom(skb),
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb_any(skb);
+       }
+}
+
+static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+{
+       struct ath10k_ce_pipe *ce_pipe;
+       struct ath10k_ce_ring *ce_ring;
+       struct ath10k_snoc *ar_snoc;
+       struct sk_buff *skb;
+       struct ath10k *ar;
+       int i;
+
+       ar = snoc_pipe->hif_ce_state;
+       ar_snoc = ath10k_snoc_priv(ar);
+       ce_pipe = snoc_pipe->ce_hdl;
+       ce_ring = ce_pipe->src_ring;
+
+       if (!ce_ring)
+               return;
+
+       if (!snoc_pipe->buf_sz)
+               return;
+
+       for (i = 0; i < ce_ring->nentries; i++) {
+               skb = ce_ring->per_transfer_context[i];
+               if (!skb)
+                       continue;
+
+               ce_ring->per_transfer_context[i] = NULL;
+
+               ath10k_htc_tx_completion_handler(ar, skb);
+       }
+}
+
+static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       struct ath10k_snoc_pipe *pipe_info;
+       int pipe_num;
+
+       del_timer_sync(&ar_snoc->rx_post_retry);
+       for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+               pipe_info = &ar_snoc->pipe_info[pipe_num];
+               ath10k_snoc_rx_pipe_cleanup(pipe_info);
+               ath10k_snoc_tx_pipe_cleanup(pipe_info);
+       }
+}
+
+static void ath10k_snoc_hif_stop(struct ath10k *ar)
+{
+       ath10k_snoc_irq_disable(ar);
+       ath10k_snoc_buffer_cleanup(ar);
+       napi_synchronize(&ar->napi);
+       napi_disable(&ar->napi);
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+}
+
+static int ath10k_snoc_hif_start(struct ath10k *ar)
+{
+       ath10k_snoc_irq_enable(ar);
+       ath10k_snoc_rx_post(ar);
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+
+       return 0;
+}
+
+static int ath10k_snoc_init_pipes(struct ath10k *ar)
+{
+       int i, ret;
+
+       for (i = 0; i < CE_COUNT; i++) {
+               ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
+               if (ret) {
+                       ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
+                                  i, ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ath10k_snoc_wlan_enable(struct ath10k *ar)
+{
+       return 0;
+}
+
+static void ath10k_snoc_wlan_disable(struct ath10k *ar)
+{
+}
+
+static void ath10k_snoc_hif_power_down(struct ath10k *ar)
+{
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
+
+       ath10k_snoc_wlan_disable(ar);
+       ath10k_ce_free_rri(ar);
+}
+
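+/* Enable WLAN, allocate the CE read index (RRI) area and init all CE pipes. */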
+static int ath10k_snoc_hif_power_up(struct ath10k *ar)
+{
+       int ret;
+
+       ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
+                  __func__, ar->state);
+
+       ret = ath10k_snoc_wlan_enable(ar);
+       if (ret) {
+               ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
+               return ret;
+       }
+
+       ath10k_ce_alloc_rri(ar);
+
+       ret = ath10k_snoc_init_pipes(ar);
+       if (ret) {
+               ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+               goto err_wlan_enable;
+       }
+
+       napi_enable(&ar->napi);
+       return 0;
+
+err_wlan_enable:
+       ath10k_snoc_wlan_disable(ar);
+
+       return ret;
+}
+
+static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
+       .read32         = ath10k_snoc_read32,
+       .write32        = ath10k_snoc_write32,
+       .start          = ath10k_snoc_hif_start,
+       .stop           = ath10k_snoc_hif_stop,
+       .map_service_to_pipe    = ath10k_snoc_hif_map_service_to_pipe,
+       .get_default_pipe       = ath10k_snoc_hif_get_default_pipe,
+       .power_up               = ath10k_snoc_hif_power_up,
+       .power_down             = ath10k_snoc_hif_power_down,
+       .tx_sg                  = ath10k_snoc_hif_tx_sg,
+       .send_complete_check    = ath10k_snoc_hif_send_complete_check,
+       .get_free_queue_number  = ath10k_snoc_hif_get_free_queue_number,
+       .get_target_info        = ath10k_snoc_hif_get_target_info,
+};
+
+static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
+       .read32         = ath10k_snoc_read32,
+       .write32        = ath10k_snoc_write32,
+};
+
+int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       int i;
+
+       for (i = 0; i < CE_COUNT_MAX; i++) {
+               if (ar_snoc->ce_irqs[i].irq_line == irq)
+                       return i;
+       }
+       ath10k_err(ar, "No matching CE id for irq %d\n", irq);
+
+       return -EINVAL;
+}
+
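+/* Per-CE IRQ handler: mask CE interrupts and hand processing over to NAPI. */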
+static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
+{
+       struct ath10k *ar = arg;
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);
+
+       if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
+               ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
+                           ce_id);
+               return IRQ_HANDLED;
+       }
+
+       ath10k_snoc_irq_disable(ar);
+       napi_schedule(&ar->napi);
+
+       return IRQ_HANDLED;
+}
+
+static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
+{
+       struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+       int done = 0;
+
+       ath10k_ce_per_engine_service_any(ar);
+       done = ath10k_htt_txrx_compl_task(ar, budget);
+
+       if (done < budget) {
+               napi_complete(ctx);
+               ath10k_snoc_irq_enable(ar);
+       }
+
+       return done;
+}
+
+void ath10k_snoc_init_napi(struct ath10k *ar)
+{
+       netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
+                      ATH10K_NAPI_BUDGET);
+}
+
+static int ath10k_snoc_request_irq(struct ath10k *ar)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       int irqflags = IRQF_TRIGGER_RISING;
+       int ret, id;
+
+       for (id = 0; id < CE_COUNT_MAX; id++) {
+               ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
+                                 ath10k_snoc_per_engine_handler,
+                                 irqflags, ce_name[id], ar);
+               if (ret) {
+                       ath10k_err(ar,
+                                  "failed to register IRQ handler for CE %d: %d",
+                                  id, ret);
+                       goto err_irq;
+               }
+       }
+
+       return 0;
+
+err_irq:
+       for (id -= 1; id >= 0; id--)
+               free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
+
+       return ret;
+}
+
+static void ath10k_snoc_free_irq(struct ath10k *ar)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       int id;
+
+       for (id = 0; id < CE_COUNT_MAX; id++)
+               free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
+}
+
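+/* Map the "membase" register space and fetch the per-CE IRQ lines from DT. */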
+static int ath10k_snoc_resource_init(struct ath10k *ar)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       struct platform_device *pdev;
+       struct resource *res;
+       int i, ret = 0;
+
+       pdev = ar_snoc->dev;
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
+       if (!res) {
+               ath10k_err(ar, "Memory base not found in DT\n");
+               return -EINVAL;
+       }
+
+       ar_snoc->mem_pa = res->start;
+       ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
+                                   resource_size(res));
+       if (!ar_snoc->mem) {
+               ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
+                          &ar_snoc->mem_pa);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < CE_COUNT; i++) {
+               res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
+               if (!res) {
+                       ath10k_err(ar, "failed to get IRQ%d\n", i);
+                       ret = -ENODEV;
+                       goto out;
+               }
+               ar_snoc->ce_irqs[i].irq_line = res->start;
+       }
+
+out:
+       return ret;
+}
+
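+/* Allocate the CE pipes and set up the rx replenish timer and NAPI context. */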
+static int ath10k_snoc_setup_resource(struct ath10k *ar)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+       struct ath10k_snoc_pipe *pipe;
+       int i, ret;
+
+       timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
+       spin_lock_init(&ce->ce_lock);
+       for (i = 0; i < CE_COUNT; i++) {
+               pipe = &ar_snoc->pipe_info[i];
+               pipe->ce_hdl = &ce->ce_states[i];
+               pipe->pipe_num = i;
+               pipe->hif_ce_state = ar;
+
+               ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+               if (ret) {
+                       ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
+                                  i, ret);
+                       return ret;
+               }
+
+               pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
+       }
+       ath10k_snoc_init_napi(ar);
+
+       return 0;
+}
+
+static void ath10k_snoc_release_resource(struct ath10k *ar)
+{
+       int i;
+
+       netif_napi_del(&ar->napi);
+       for (i = 0; i < CE_COUNT; i++)
+               ath10k_ce_free_pipe(ar, i);
+}
+
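+/* Look up a WCN3990 regulator; a missing optional supply is not fatal. */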
+static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
+                               struct ath10k_wcn3990_vreg_info *vreg_info)
+{
+       struct regulator *reg;
+       int ret = 0;
+
+       reg = devm_regulator_get_optional(dev, vreg_info->name);
+
+       if (IS_ERR(reg)) {
+               ret = PTR_ERR(reg);
+
+               if (ret == -EPROBE_DEFER) {
+                       ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
+                                  vreg_info->name);
+                       return ret;
+               }
+               if (vreg_info->required) {
+                       ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
+                                  vreg_info->name, ret);
+                       return ret;
+               }
+               ath10k_dbg(ar, ATH10K_DBG_SNOC,
+                          "Optional regulator %s doesn't exist: %d\n",
+                          vreg_info->name, ret);
+               goto done;
+       }
+
+       vreg_info->reg = reg;
+
+done:
+       ath10k_dbg(ar, ATH10K_DBG_SNOC,
+                  "snoc vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
+                  vreg_info->name, vreg_info->min_v, vreg_info->max_v,
+                  vreg_info->load_ua, vreg_info->settle_delay);
+
+       return 0;
+}
+
+static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
+                              struct ath10k_wcn3990_clk_info *clk_info)
+{
+       struct clk *handle;
+       int ret = 0;
+
+       handle = devm_clk_get(dev, clk_info->name);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               if (clk_info->required) {
+                       ath10k_err(ar, "snoc clock %s isn't available: %d\n",
+                                  clk_info->name, ret);
+                       return ret;
+               }
+               ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
+                          clk_info->name,
+                          ret);
+               return 0;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
+                  clk_info->name, clk_info->freq);
+
+       clk_info->handle = handle;
+
+       return ret;
+}
+
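+/* Program and enable the WCN3990 regulators, rolling back on failure. */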
+static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       struct ath10k_wcn3990_vreg_info *vreg_info;
+       int ret = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
+               vreg_info = &ar_snoc->vreg[i];
+
+               if (!vreg_info->reg)
+                       continue;
+
+               ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
+                          vreg_info->name);
+
+               ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
+                                           vreg_info->max_v);
+               if (ret) {
+                       ath10k_err(ar,
+                                  "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
+                                  vreg_info->name, vreg_info->min_v, vreg_info->max_v);
+                       goto err_reg_config;
+               }
+
+               if (vreg_info->load_ua) {
+                       ret = regulator_set_load(vreg_info->reg,
+                                                vreg_info->load_ua);
+                       if (ret < 0) {
+                               ath10k_err(ar,
+                                          "failed to set regulator %s load: %d\n",
+                                          vreg_info->name,
+                                          vreg_info->load_ua);
+                               goto err_reg_config;
+                       }
+               }
+
+               ret = regulator_enable(vreg_info->reg);
+               if (ret) {
+                       ath10k_err(ar, "failed to enable regulator %s\n",
+                                  vreg_info->name);
+                       goto err_reg_config;
+               }
+
+               if (vreg_info->settle_delay)
+                       udelay(vreg_info->settle_delay);
+       }
+
+       return 0;
+
+err_reg_config:
+       for (; i >= 0; i--) {
+               vreg_info = &ar_snoc->vreg[i];
+
+               if (!vreg_info->reg)
+                       continue;
+
+               regulator_disable(vreg_info->reg);
+               regulator_set_load(vreg_info->reg, 0);
+               regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+       }
+
+       return ret;
+}
+
+static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       struct ath10k_wcn3990_vreg_info *vreg_info;
+       int ret = 0;
+       int i;
+
+       for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
+               vreg_info = &ar_snoc->vreg[i];
+
+               if (!vreg_info->reg)
+                       continue;
+
+               ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
+                          vreg_info->name);
+
+               ret = regulator_disable(vreg_info->reg);
+               if (ret)
+                       ath10k_err(ar, "failed to disable regulator %s\n",
+                                  vreg_info->name);
+
+               ret = regulator_set_load(vreg_info->reg, 0);
+               if (ret < 0)
+                       ath10k_err(ar, "failed to set load %s\n",
+                                  vreg_info->name);
+
+               ret = regulator_set_voltage(vreg_info->reg, 0,
+                                           vreg_info->max_v);
+               if (ret)
+                       ath10k_err(ar, "failed to set voltage %s\n",
+                                  vreg_info->name);
+       }
+
+       return ret;
+}
+
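+/* Set rates and enable the WCN3990 clocks, rolling back on failure. */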
+static int ath10k_wcn3990_clk_init(struct ath10k *ar)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       struct ath10k_wcn3990_clk_info *clk_info;
+       int ret = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
+               clk_info = &ar_snoc->clk[i];
+
+               if (!clk_info->handle)
+                       continue;
+
+               ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
+                          clk_info->name);
+
+               if (clk_info->freq) {
+                       ret = clk_set_rate(clk_info->handle, clk_info->freq);
+
+                       if (ret) {
+                               ath10k_err(ar, "failed to set clock %s freq %u\n",
+                                          clk_info->name, clk_info->freq);
+                               goto err_clock_config;
+                       }
+               }
+
+               ret = clk_prepare_enable(clk_info->handle);
+               if (ret) {
+                       ath10k_err(ar, "failed to enable clock %s\n",
+                                  clk_info->name);
+                       goto err_clock_config;
+               }
+       }
+
+       return 0;
+
+err_clock_config:
+       for (; i >= 0; i--) {
+               clk_info = &ar_snoc->clk[i];
+
+               if (!clk_info->handle)
+                       continue;
+
+               clk_disable_unprepare(clk_info->handle);
+       }
+
+       return ret;
+}
+
+static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
+{
+       struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+       struct ath10k_wcn3990_clk_info *clk_info;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
+               clk_info = &ar_snoc->clk[i];
+
+               if (!clk_info->handle)
+                       continue;
+
+               ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
+                          clk_info->name);
+
+               clk_disable_unprepare(clk_info->handle);
+       }
+
+       return 0;
+}
+
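+/* Power on the WCN3990: regulators first, then the reference clocks. */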
+static int ath10k_hw_power_on(struct ath10k *ar)
+{
+       int ret;
+
+       ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
+
+       ret = ath10k_wcn3990_vreg_on(ar);
+       if (ret)
+               return ret;
+
+       ret = ath10k_wcn3990_clk_init(ar);
+       if (ret)
+               goto vreg_off;
+
+       return ret;
+
+vreg_off:
+       ath10k_wcn3990_vreg_off(ar);
+       return ret;
+}
+
+static int ath10k_hw_power_off(struct ath10k *ar)
+{
+       int ret;
+
+       ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
+
+       ath10k_wcn3990_clk_deinit(ar);
+
+       ret = ath10k_wcn3990_vreg_off(ar);
+
+       return ret;
+}
+
+static const struct of_device_id ath10k_snoc_dt_match[] = {
+       { .compatible = "qcom,wcn3990-wifi",
+        .data = &drv_priv,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
+
+static int ath10k_snoc_probe(struct platform_device *pdev)
+{
+       const struct ath10k_snoc_drv_priv *drv_data;
+       const struct of_device_id *of_id;
+       struct ath10k_snoc *ar_snoc;
+       struct device *dev;
+       struct ath10k *ar;
+       int ret;
+       u32 i;
+
+       of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
+       if (!of_id) {
+               dev_err(&pdev->dev, "failed to find matching device tree id\n");
+               return -EINVAL;
+       }
+
+       drv_data = of_id->data;
+       dev = &pdev->dev;
+
+       ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
+       if (ret) {
+               dev_err(dev, "failed to set dma mask: %d\n", ret);
+               return ret;
+       }
+
+       ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
+                               drv_data->hw_rev, &ath10k_snoc_hif_ops);
+       if (!ar) {
+               dev_err(dev, "failed to allocate core\n");
+               return -ENOMEM;
+       }
+
+       ar_snoc = ath10k_snoc_priv(ar);
+       ar_snoc->dev = pdev;
+       platform_set_drvdata(pdev, ar);
+       ar_snoc->ar = ar;
+       ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
+       ar->ce_priv = &ar_snoc->ce;
+
+       ret = ath10k_snoc_resource_init(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
+               goto err_core_destroy;
+       }
+
+       ret = ath10k_snoc_setup_resource(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to setup resource: %d\n", ret);
+               goto err_core_destroy;
+       }
+       ret = ath10k_snoc_request_irq(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to request irqs: %d\n", ret);
+               goto err_release_resource;
+       }
+
+       ar_snoc->vreg = vreg_cfg;
+       for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
+               ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
+               if (ret)
+                       goto err_free_irq;
+       }
+
+       ar_snoc->clk = clk_cfg;
+       for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
+               ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
+               if (ret)
+                       goto err_free_irq;
+       }
+
+       ret = ath10k_hw_power_on(ar);
+       if (ret) {
+               ath10k_err(ar, "failed to power on device: %d\n", ret);
+               goto err_free_irq;
+       }
+
+       ret = ath10k_core_register(ar, drv_data->hw_rev);
+       if (ret) {
+               ath10k_err(ar, "failed to register driver core: %d\n", ret);
+               goto err_hw_power_off;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
+       ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!");
+
+       return 0;
+
+err_hw_power_off:
+       ath10k_hw_power_off(ar);
+
+err_free_irq:
+       ath10k_snoc_free_irq(ar);
+
+err_release_resource:
+       ath10k_snoc_release_resource(ar);
+
+err_core_destroy:
+       ath10k_core_destroy(ar);
+
+       return ret;
+}
+
+static int ath10k_snoc_remove(struct platform_device *pdev)
+{
+       struct ath10k *ar = platform_get_drvdata(pdev);
+
+       ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
+       ath10k_core_unregister(ar);
+       ath10k_hw_power_off(ar);
+       ath10k_snoc_free_irq(ar);
+       ath10k_snoc_release_resource(ar);
+       ath10k_core_destroy(ar);
+
+       return 0;
+}
+
+static struct platform_driver ath10k_snoc_driver = {
+       .probe  = ath10k_snoc_probe,
+       .remove = ath10k_snoc_remove,
+       .driver = {
+               .name   = "ath10k_snoc",
+               .owner = THIS_MODULE,
+               .of_match_table = ath10k_snoc_dt_match,
+       },
+};
+
+static int __init ath10k_snoc_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&ath10k_snoc_driver);
+       if (ret)
+               pr_err("failed to register ath10k snoc driver: %d\n",
+                      ret);
+
+       return ret;
+}
+module_init(ath10k_snoc_init);
+
+static void __exit ath10k_snoc_exit(void)
+{
+       platform_driver_unregister(&ath10k_snoc_driver);
+}
+module_exit(ath10k_snoc_exit);
+
+MODULE_AUTHOR("Qualcomm");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
new file mode 100644 (file)
index 0000000..05dc98f
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SNOC_H_
+#define _SNOC_H_
+
+#include "hw.h"
+#include "ce.h"
+#include "pci.h"
+
+struct ath10k_snoc_drv_priv {
+       enum ath10k_hw_rev hw_rev;
+       u64 dma_mask;
+};
+
+struct snoc_state {
+       u32 pipe_cfg_addr;
+       u32 svc_to_pipe_map;
+};
+
+struct ath10k_snoc_pipe {
+       struct ath10k_ce_pipe *ce_hdl;
+       u8 pipe_num;
+       struct ath10k *hif_ce_state;
+       size_t buf_sz;
+       /* protect ce info */
+       spinlock_t pipe_lock;
+       struct ath10k_snoc *ar_snoc;
+};
+
+struct ath10k_snoc_target_info {
+       u32 target_version;
+       u32 target_type;
+       u32 target_revision;
+       u32 soc_version;
+};
+
+struct ath10k_snoc_ce_irq {
+       u32 irq_line;
+};
+
+struct ath10k_wcn3990_vreg_info {
+       struct regulator *reg;
+       const char *name;
+       u32 min_v;
+       u32 max_v;
+       u32 load_ua;
+       unsigned long settle_delay;
+       bool required;
+};
+
+struct ath10k_wcn3990_clk_info {
+       struct clk *handle;
+       const char *name;
+       u32 freq;
+       bool required;
+};
+
+struct ath10k_snoc {
+       struct platform_device *dev;
+       struct ath10k *ar;
+       void __iomem *mem;
+       dma_addr_t mem_pa;
+       struct ath10k_snoc_target_info target_info;
+       size_t mem_len;
+       struct ath10k_snoc_pipe pipe_info[CE_COUNT_MAX];
+       struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX];
+       struct ath10k_ce ce;
+       struct timer_list rx_post_retry;
+       struct ath10k_wcn3990_vreg_info *vreg;
+       struct ath10k_wcn3990_clk_info *clk;
+};
+
+static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
+{
+       return (struct ath10k_snoc *)ar->drv_priv;
+}
+
+void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value);
+u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset);
+
+#endif /* _SNOC_H_ */
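
The inline accessor above is how the rest of snoc.c gets from the generic struct ath10k back to its bus-private state (ar->drv_priv holds the per-bus data, as on the other ath10k buses). A minimal usage sketch follows; ath10k_snoc_dump_pipe_sizes() is a hypothetical helper for illustration only, not part of this patch, and uses only fields declared in this header:

/* Hypothetical helper, illustration only: walk the copy-engine pipes
 * reachable through ath10k_snoc_priv() and log their buffer sizes.
 */
static void ath10k_snoc_dump_pipe_sizes(struct ath10k *ar)
{
        struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
        int i;

        for (i = 0; i < CE_COUNT_MAX; i++)
                ath10k_dbg(ar, ATH10K_DBG_SNOC, "pipe %d buf_sz %zu\n",
                           ar_snoc->pipe_info[i].pipe_num,
                           ar_snoc->pipe_info[i].buf_sz);
}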
index 70e23bbf7171272b6ddc799ae7523994d4a71a0d..cda164f6e9f62f87e36c40c96f5fb7586d87a94c 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -119,6 +120,13 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
                        info->flags &= ~IEEE80211_TX_STAT_ACK;
        }
 
+       if (tx_done->status == HTT_TX_COMPL_STATE_ACK &&
+           tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
+               info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
+                                               tx_done->ack_rssi;
+               info->status.is_valid_ack_signal = true;
+       }
+
        ieee80211_tx_status(htt->ar->hw, msdu);
        /* we do not own the msdu anymore */
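
For context on the ack_signal math added above: the firmware reports ack_rssi relative to the noise floor, and the driver converts it to dBm by adding the default noise floor. A standalone sketch of the same arithmetic, not part of the patch; the -95 dBm figure mirrors ath10k's ATH10K_DEFAULT_NOISE_FLOOR and is an assumption here, as is the sample RSSI:

#include <stdio.h>

/* Same conversion as the hunk above: ack_signal (dBm) = noise floor + ack_rssi. */
int main(void)
{
        const int default_noise_floor = -95;    /* assumed ATH10K_DEFAULT_NOISE_FLOOR */
        const int ack_rssi = 35;                /* example value reported by firmware */

        printf("ack_signal = %d dBm\n", default_noise_floor + ack_rssi);
        return 0;                               /* prints: ack_signal = -60 dBm */
}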
 
index c35e45340b4f008392061981aa219e04bd54523c..e37d16b31afe073ebc4c21c7a4f7bc052eb0d977 100644 (file)
@@ -25,6 +25,7 @@ struct sk_buff;
 struct wmi_ops {
        void (*rx)(struct ath10k *ar, struct sk_buff *skb);
        void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
+       void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);
 
        int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
                         struct wmi_scan_ev_arg *arg);
@@ -54,6 +55,9 @@ struct wmi_ops {
                              struct wmi_wow_ev_arg *arg);
        int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
                            struct wmi_echo_ev_arg *arg);
+       int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
+                             struct wmi_svc_avail_ev_arg *arg);
+
        enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
 
        struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
@@ -115,6 +119,8 @@ struct wmi_ops {
                                         u32 value);
        struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
                                              const struct wmi_scan_chan_list_arg *arg);
+       struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
+                                                u32 prob_req_oui);
        struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
                                          const void *bcn, size_t bcn_len,
                                          u32 bcn_paddr, bool dtim_zero,
@@ -229,6 +235,17 @@ ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
        return 0;
 }
 
+static inline int
+ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
+                      size_t len)
+{
+       if (!ar->wmi.ops->map_svc_ext)
+               return -EOPNOTSUPP;
+
+       ar->wmi.ops->map_svc_ext(in, out, len);
+       return 0;
+}
+
 static inline int
 ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
                     struct wmi_scan_ev_arg *arg)
@@ -329,6 +346,15 @@ ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
        return ar->wmi.ops->pull_rdy(ar, skb, arg);
 }
 
+static inline int
+ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
+                         struct wmi_svc_avail_ev_arg *arg)
+{
+       if (!ar->wmi.ops->pull_svc_avail)
+               return -EOPNOTSUPP;
+       return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
+}
+
 static inline int
 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
                         struct ath10k_fw_stats *stats)
@@ -890,6 +916,26 @@ ath10k_wmi_scan_chan_list(struct ath10k *ar,
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
 }
 
+static inline int
+ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
+{
+       struct sk_buff *skb;
+       u32 prob_req_oui;
+
+       prob_req_oui = (((u32)mac_addr[0]) << 16) |
+                      (((u32)mac_addr[1]) << 8) | mac_addr[2];
+
+       if (!ar->wmi.ops->gen_scan_prob_req_oui)
+               return -EOPNOTSUPP;
+
+       skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       return ath10k_wmi_cmd_send(ar, skb,
+                       ar->wmi.cmd->scan_prob_req_oui_cmdid);
+}
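
The packing above simply places the first three bytes of the host-supplied MAC address (its OUI) into the low 24 bits, most significant byte first. A standalone sketch of the same packing, not part of the patch, using a made-up locally administered address:

#include <stdio.h>

/* Same packing as ath10k_wmi_scan_prob_req_oui(): mac[0..2] -> low 24 bits. */
int main(void)
{
        const unsigned char mac[6] = { 0x02, 0x12, 0x34, 0x56, 0x78, 0x9a };
        unsigned int prob_req_oui = ((unsigned int)mac[0] << 16) |
                                    ((unsigned int)mac[1] << 8) | mac[2];

        printf("prob_req_oui = 0x%06x\n", prob_req_oui); /* prints 0x021234 */
        return 0;
}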
+
 static inline int
 ath10k_wmi_peer_assoc(struct ath10k *ar,
                      const struct wmi_peer_assoc_complete_arg *arg)
index 9d1b0a459069d25ebbec35de5300c380015cb102..01f4eb201330f5be14e6fe41278cb71838481b44 100644 (file)
@@ -594,6 +594,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
        case WMI_TLV_READY_EVENTID:
                ath10k_wmi_event_ready(ar, skb);
                break;
+       case WMI_TLV_SERVICE_AVAILABLE_EVENTID:
+               ath10k_wmi_event_service_available(ar, skb);
+               break;
        case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
                ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
                break;
@@ -1117,6 +1120,39 @@ static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
        return 0;
 }
 
+static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
+                                         const void *ptr, void *data)
+{
+       struct wmi_svc_avail_ev_arg *arg = data;
+
+       switch (tag) {
+       case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
+               arg->service_map_ext_len = *(__le32 *)ptr;
+               arg->service_map_ext = ptr + sizeof(__le32);
+               return 0;
+       default:
+               break;
+       }
+       return -EPROTO;
+}
+
+static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
+                                           struct sk_buff *skb,
+                                           struct wmi_svc_avail_ev_arg *arg)
+{
+       int ret;
+
+       ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+                                 ath10k_wmi_tlv_svc_avail_parse, arg);
+
+       if (ret) {
+               ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
 static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
                                           struct ath10k_fw_stats_vdev *dst)
 {
@@ -1600,6 +1636,8 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
        cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
        cmd->ie_len = __cpu_to_le32(arg->ie_len);
        cmd->num_probes = __cpu_to_le32(3);
+       ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr);
+       ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr);
 
        /* FIXME: There are some scan flag inconsistencies across firmwares,
         * e.g. WMI-TLV inverts the logic behind the following flag.
@@ -2446,6 +2484,27 @@ ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
        return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui)
+{
+       struct wmi_scan_prob_req_oui_cmd *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       tlv = (void *)skb->data;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+       cmd->prob_req_oui = __cpu_to_le32(prob_req_oui);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n");
+       return skb;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
                                 const void *bcn, size_t bcn_len,
@@ -3416,6 +3475,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
        .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
        .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
        .scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
+       .scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
        .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
        .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
        .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
@@ -3740,6 +3800,7 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
 static const struct wmi_ops wmi_tlv_ops = {
        .rx = ath10k_wmi_tlv_op_rx,
        .map_svc = wmi_tlv_svc_map,
+       .map_svc_ext = wmi_tlv_svc_map_ext,
 
        .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
        .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
@@ -3751,6 +3812,7 @@ static const struct wmi_ops wmi_tlv_ops = {
        .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
        .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
        .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
+       .pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail,
        .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
        .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
        .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
@@ -3782,6 +3844,7 @@ static const struct wmi_ops wmi_tlv_ops = {
        .gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
        .gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
        .gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
+       .gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui,
        .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
        .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
        .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
index fa3773ec7c684b33c639cab22c614403bbe26e46..954c50bb3f6650d9fbaab4e6a40c6b270a71ad5a 100644 (file)
@@ -295,6 +295,7 @@ enum wmi_tlv_cmd_id {
 enum wmi_tlv_event_id {
        WMI_TLV_SERVICE_READY_EVENTID = 0x1,
        WMI_TLV_READY_EVENTID,
+       WMI_TLV_SERVICE_AVAILABLE_EVENTID,
        WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN),
        WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV),
        WMI_TLV_CHAN_INFO_EVENTID,
@@ -949,6 +950,275 @@ enum wmi_tlv_tag {
        WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE,
        WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD,
        WMI_TLV_TAG_STRUCT_MGMT_TX_CMD,
+       WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT,
+       WMI_TLV_TAG_STRUCT_SOC_SET_ANTENNA_MODE_CMD,
+       WMI_TLV_TAG_STRUCT_WOW_UDP_SVC_OFLD_CMD,
+       WMI_TLV_TAG_STRUCT_LRO_INFO_CMD,
+       WMI_TLV_TAG_STRUCT_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+       WMI_TLV_TAG_STRUCT_SERVICE_READY_EXT_EVENT,
+       WMI_TLV_TAG_STRUCT_MAWC_SENSOR_REPORT_IND_CMD,
+       WMI_TLV_TAG_STRUCT_MAWC_ENABLE_SENSOR_EVENT,
+       WMI_TLV_TAG_STRUCT_ROAM_CONFIGURE_MAWC_CMD,
+       WMI_TLV_TAG_STRUCT_NLO_CONFIGURE_MAWC_CMD,
+       WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_MAWC_CMD,
+       WMI_TLV_TAG_STRUCT_PEER_ASSOC_CONF_EVENT,
+       WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+       WMI_TLV_TAG_STRUCT_AP_PS_EGAP_PARAM_CMD,
+       WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_EVENT,
+       WMI_TLV_TAG_STRUCT_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+       WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_CMD,
+       WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+       WMI_TLV_TAG_STRUCT_SCPC_EVENT,
+       WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+       WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+       WMI_TLV_TAG_STRUCT_BPF_GET_CAPABILITY_CMD,
+       WMI_TLV_TAG_STRUCT_BPF_CAPABILITY_INFO_EVT,
+       WMI_TLV_TAG_STRUCT_BPF_GET_VDEV_STATS_CMD,
+       WMI_TLV_TAG_STRUCT_BPF_VDEV_STATS_INFO_EVT,
+       WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+       WMI_TLV_TAG_STRUCT_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+       WMI_TLV_TAG_STRUCT_VDEV_DELETE_RESP_EVENT,
+       WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT,
+       WMI_TLV_TAG_STRUCT_ROAM_DENSE_THRES_PARAM,
+       WMI_TLV_TAG_STRUCT_ENLO_CANDIDATE_SCORE_PARAM,
+       WMI_TLV_TAG_STRUCT_PEER_UPDATE_WDS_ENTRY_CMD,
+       WMI_TLV_TAG_STRUCT_VDEV_CONFIG_RATEMASK,
+       WMI_TLV_TAG_STRUCT_PDEV_FIPS_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_ENABLE_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+       WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+       WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+       WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_SWITCH_TBL_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_CTL_TABLE_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_MIMOGAIN_TABLE_CMD,
+       WMI_TLV_TAG_STRUCT_FWTEST_SET_PARAM_CMD,
+       WMI_TLV_TAG_STRUCT_PEER_ATF_REQUEST,
+       WMI_TLV_TAG_STRUCT_VDEV_ATF_REQUEST,
+       WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_CCK_CONFIG_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+       WMI_TLV_TAG_STRUCT_INST_RSSI_STATS_RESP,
+       WMI_TLV_TAG_STRUCT_MED_UTIL_REPORT_EVENT,
+       WMI_TLV_TAG_STRUCT_PEER_STA_PS_STATECHANGE_EVENT,
+       WMI_TLV_TAG_STRUCT_WDS_ADDR_EVENT,
+       WMI_TLV_TAG_STRUCT_PEER_RATECODE_LIST_EVENT,
+       WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+       WMI_TLV_TAG_STRUCT_PDEV_TPC_EVENT,
+       WMI_TLV_TAG_STRUCT_ANI_OFDM_EVENT,
+       WMI_TLV_TAG_STRUCT_ANI_CCK_EVENT,
+       WMI_TLV_TAG_STRUCT_PDEV_CHANNEL_HOPPING_EVENT,
+       WMI_TLV_TAG_STRUCT_PDEV_FIPS_EVENT,
+       WMI_TLV_TAG_STRUCT_ATF_PEER_INFO,
+       WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CMD,
+       WMI_TLV_TAG_STRUCT_VDEV_FILTER_NRP_CONFIG_CMD,
+       WMI_TLV_TAG_STRUCT_QBOOST_CFG_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_GPIO_HANDLE,
+       WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+       WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_CTRL_CHAIN,
+       WMI_TLV_TAG_STRUCT_PEER_CCK_OFDM_RATE_INFO,
+       WMI_TLV_TAG_STRUCT_PEER_MCS_RATE_INFO,
+       WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+       WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+       WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+       WMI_TLV_TAG_STRUCT_MU_REPORT_TOTAL_MU,
+       WMI_TLV_TAG_STRUCT_VDEV_SET_DSCP_TID_MAP_CMD,
+       WMI_TLV_TAG_STRUCT_ROAM_SET_MBO,
+       WMI_TLV_TAG_STRUCT_MIB_STATS_ENABLE_CMD,
+       WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_CREATED_EVENT,
+       WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_DELETED_EVENT,
+       WMI_TLV_TAG_STRUCT_NAN_STARTED_CLUSTER_EVENT,
+       WMI_TLV_TAG_STRUCT_NAN_JOINED_CLUSTER_EVENT,
+       WMI_TLV_TAG_STRUCT_NDI_GET_CAP_REQ,
+       WMI_TLV_TAG_STRUCT_NDP_INITIATOR_REQ,
+       WMI_TLV_TAG_STRUCT_NDP_RESPONDER_REQ,
+       WMI_TLV_TAG_STRUCT_NDP_END_REQ,
+       WMI_TLV_TAG_STRUCT_NDI_CAP_RSP_EVENT,
+       WMI_TLV_TAG_STRUCT_NDP_INITIATOR_RSP_EVENT,
+       WMI_TLV_TAG_STRUCT_NDP_RESPONDER_RSP_EVENT,
+       WMI_TLV_TAG_STRUCT_NDP_END_RSP_EVENT,
+       WMI_TLV_TAG_STRUCT_NDP_INDICATION_EVENT,
+       WMI_TLV_TAG_STRUCT_NDP_CONFIRM_EVENT,
+       WMI_TLV_TAG_STRUCT_NDP_END_INDICATION_EVENT,
+       WMI_TLV_TAG_STRUCT_VDEV_SET_QUIET_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_PCL_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_MODE_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+       WMI_TLV_TAG_STRUCT_PDEV_HW_MODE_TRANSITION_EVENT,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+       WMI_TLV_TAG_STRUCT_COEX_CONFIG_CMD,
+       WMI_TLV_TAG_STRUCT_CONFIG_ENHANCED_MCAST_FILTER,
+       WMI_TLV_TAG_STRUCT_CHAN_AVOID_RPT_ALLOW_CMD,
+       WMI_TLV_TAG_STRUCT_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+       WMI_TLV_TAG_STRUCT_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_WAL_POWER_DEBUG_CMD,
+       WMI_TLV_TAG_STRUCT_MAC_PHY_CAPABILITIES,
+       WMI_TLV_TAG_STRUCT_HW_MODE_CAPABILITIES,
+       WMI_TLV_TAG_STRUCT_SOC_MAC_PHY_HW_MODE_CAPS,
+       WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES_EXT,
+       WMI_TLV_TAG_STRUCT_SOC_HAL_REG_CAPABILITIES,
+       WMI_TLV_TAG_STRUCT_VDEV_WISA_CMD,
+       WMI_TLV_TAG_STRUCT_TX_POWER_LEVEL_STATS_EVT,
+       WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+       WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_CONFIG,
+       WMI_TLV_TAG_STRUCT_WOW_SET_ACTION_WAKE_UP_CMD,
+       WMI_TLV_TAG_STRUCT_NDP_END_RSP_PER_NDI,
+       WMI_TLV_TAG_STRUCT_PEER_BWF_REQUEST,
+       WMI_TLV_TAG_STRUCT_BWF_PEER_INFO,
+       WMI_TLV_TAG_STRUCT_DBGLOG_TIME_STAMP_SYNC_CMD,
+       WMI_TLV_TAG_STRUCT_RMC_SET_LEADER_CMD,
+       WMI_TLV_TAG_STRUCT_RMC_MANUAL_LEADER_EVENT,
+       WMI_TLV_TAG_STRUCT_PER_CHAIN_RSSI_STATS,
+       WMI_TLV_TAG_STRUCT_RSSI_STATS,
+       WMI_TLV_TAG_STRUCT_P2P_LO_START_CMD,
+       WMI_TLV_TAG_STRUCT_P2P_LO_STOP_CMD,
+       WMI_TLV_TAG_STRUCT_P2P_LO_STOPPED_EVENT,
+       WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_SETUP_CMD,
+       WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_REMOVE_CMD,
+       WMI_TLV_TAG_STRUCT_SET_MULTIPLE_MCAST_FILTER_CMD,
+       WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT,
+       WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_CMD,
+       WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_EVENT,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+       WMI_TLV_TAG_STRUCT_PEER_SET_RX_BLOCKSIZE_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_WAKEUP_CONFIG_CMDID,
+       WMI_TLV_TAG_STRUCT_TLV_BUF_LEN_PARAM,
+       WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT,
+       WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_REQ_CMD,
+       WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_EVENT,
+       WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO,
+       WMI_TLV_TAG_STRUCT_PDEV_GET_ANTDIV_STATUS_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_ANTDIV_STATUS_EVENT,
+       WMI_TLV_TAG_STRUCT_MNT_FILTER_CMD,
+       WMI_TLV_TAG_STRUCT_GET_CHIP_POWER_STATS_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_CHIP_POWER_STATS_EVENT,
+       WMI_TLV_TAG_STRUCT_COEX_GET_ANTENNA_ISOLATION_CMD,
+       WMI_TLV_TAG_STRUCT_COEX_REPORT_ISOLATION_EVENT,
+       WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS,
+       WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS,
+       WMI_TLV_TAG_STRUCT_TX_STATS,
+       WMI_TLV_TAG_STRUCT_PEER_AC_TX_STATS,
+       WMI_TLV_TAG_STRUCT_RX_STATS,
+       WMI_TLV_TAG_STRUCT_PEER_AC_RX_STATS,
+       WMI_TLV_TAG_STRUCT_REPORT_STATS_EVENT,
+       WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS_THRESH,
+       WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS_THRESH,
+       WMI_TLV_TAG_STRUCT_TX_STATS_THRESH,
+       WMI_TLV_TAG_STRUCT_RX_STATS_THRESH,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_STATS_THRESHOLD_CMD,
+       WMI_TLV_TAG_STRUCT_REQUEST_WLAN_STATS_CMD,
+       WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_EVENT,
+       WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_INFO,
+       WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+       WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+       WMI_TLV_TAG_STRUCT_PDEV_BAND_TO_MAC,
+       WMI_TLV_TAG_STRUCT_TBTT_OFFSET_INFO,
+       WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EXT_EVENT,
+       WMI_TLV_TAG_STRUCT_SAR_LIMITS_CMD,
+       WMI_TLV_TAG_STRUCT_SAR_LIMIT_CMD_ROW,
+       WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+       WMI_TLV_TAG_STRUCT_VDEV_ADFS_CH_CFG_CMD,
+       WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_ABORT_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_DFS_RADAR_DETECTION_EVENT,
+       WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+       WMI_TLV_TAG_STRUCT_VDEV_DFS_CAC_COMPLETE_EVENT,
+       WMI_TLV_TAG_STRUCT_VENDOR_OUI,
+       WMI_TLV_TAG_STRUCT_REQUEST_RCPI_CMD,
+       WMI_TLV_TAG_STRUCT_UPDATE_RCPI_EVENT,
+       WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD,
+       WMI_TLV_TAG_STRUCT_PEER_STATS_INFO,
+       WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT,
+       WMI_TLV_TAG_STRUCT_PKGID_EVENT,
+       WMI_TLV_TAG_STRUCT_CONNECTED_NLO_RSSI_PARAMS,
+       WMI_TLV_TAG_STRUCT_SET_CURRENT_COUNTRY_CMD,
+       WMI_TLV_TAG_STRUCT_REGULATORY_RULE_STRUCT,
+       WMI_TLV_TAG_STRUCT_REG_CHAN_LIST_CC_EVENT,
+       WMI_TLV_TAG_STRUCT_11D_SCAN_START_CMD,
+       WMI_TLV_TAG_STRUCT_11D_SCAN_STOP_CMD,
+       WMI_TLV_TAG_STRUCT_11D_NEW_COUNTRY_EVENT,
+       WMI_TLV_TAG_STRUCT_REQUEST_RADIO_CHAN_STATS_CMD,
+       WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS,
+       WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS_EVENT,
+       WMI_TLV_TAG_STRUCT_ROAM_PER_CONFIG,
+       WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+       WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+       WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+       WMI_TLV_TAG_STRUCT_HW_DATA_FILTER_CMD,
+       WMI_TLV_TAG_STRUCT_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+       WMI_TLV_TAG_STRUCT_PEER_OPER_MODE_CHANGE_EVENT,
+       WMI_TLV_TAG_STRUCT_CHIP_POWER_SAVE_FAILURE_DETECTED,
+       WMI_TLV_TAG_STRUCT_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+       WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PKT_ROUTING_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_EVENT,
+       WMI_TLV_TAG_STRUCT_PDEV_SET_DIVERSITY_GAIN_CMD,
+       WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_COMBO,
+       WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_CAPABILITY,
+       WMI_TLV_TAG_STRUCT_VDEV_SET_ARP_STATS_CMD,
+       WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_CMD,
+       WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_EVENT,
+       WMI_TLV_TAG_STRUCT_IFACE_OFFLOAD_STATS,
+       WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+       WMI_TLV_TAG_STRUCT_RSSI_CTL_EXT,
+       WMI_TLV_TAG_STRUCT_SINGLE_PHYERR_EXT_RX_HDR,
+       WMI_TLV_TAG_STRUCT_COEX_BT_ACTIVITY_EVENT,
+       WMI_TLV_TAG_STRUCT_VDEV_GET_TX_POWER_CMD,
+       WMI_TLV_TAG_STRUCT_VDEV_TX_POWER_EVENT,
+       WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_COMPL_EVENT,
+       WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_SEND_CMD,
+       WMI_TLV_TAG_STRUCT_TX_SEND_PARAMS,
+       WMI_TLV_TAG_STRUCT_HE_RATE_SET,
+       WMI_TLV_TAG_STRUCT_CONGESTION_STATS,
+       WMI_TLV_TAG_STRUCT_SET_INIT_COUNTRY_CMD,
+       WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE,
+       WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+       WMI_TLV_TAG_STRUCT_PDEV_DIV_GET_RSSI_ANTID,
+       WMI_TLV_TAG_STRUCT_THERM_THROT_CONFIG_REQUEST,
+       WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_CONFIG_INFO,
+       WMI_TLV_TAG_STRUCT_THERM_THROT_STATS_EVENT,
+       WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_STATS_INFO,
+       WMI_TLV_TAG_STRUCT_PDEV_DIV_RSSI_ANTID_EVENT,
+       WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CAPABILITIES,
+       WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_REQ,
+       WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_RSP,
+       WMI_TLV_TAG_STRUCT_OEM_INDIRECT_DATA,
+       WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE,
+       WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE_ENTRY,
+       WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_REQUEST,
+       WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_EVENT,
+       WMI_TLV_TAG_STRUCT_ROAM_LCA_DISALLOW_CONFIG_TLV_PARAM,
+       WMI_TLV_TAG_STRUCT_VDEV_LIMIT_OFFCHAN_CMD,
+       WMI_TLV_TAG_STRUCT_ROAM_RSSI_REJECTION_OCE_CONFIG_PARAM,
+       WMI_TLV_TAG_STRUCT_UNIT_TEST_EVENT,
+       WMI_TLV_TAG_STRUCT_ROAM_FILS_OFFLOAD_TLV_PARAM,
+       WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PMK_CACHE_CMD,
+       WMI_TLV_TAG_STRUCT_PMK_CACHE,
+       WMI_TLV_TAG_STRUCT_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+       WMI_TLV_TAG_STRUCT_ROAM_FILS_SYNCH_TLV_PARAM,
+       WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_EXTENDED_TLV_PARAM,
+       WMI_TLV_TAG_STRUCT_ROAM_BG_SCAN_ROAMING_PARAM,
+       WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_PARAMS_CMD,
+       WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+       WMI_TLV_TAG_STRUCT_OIC_PING_HANDOFF_EVENT,
+       WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+       WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_EVENT,
+       WMI_TLV_TAG_STRUCT_BTM_CONFIG,
+       WMI_TLV_TAG_STRUCT_DEBUG_MESG_FW_DATA_STALL_PARAM,
+       WMI_TLV_TAG_STRUCT_WLM_CONFIG_CMD,
+       WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_REQUEST,
+       WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_EVENT,
+       WMI_TLV_TAG_STRUCT_ROAM_CND_SCORING_PARAM,
+       WMI_TLV_TAG_STRUCT_PDEV_CONFIG_VENDOR_OUI_ACTION,
+       WMI_TLV_TAG_STRUCT_VENDOR_OUI_EXT,
+       WMI_TLV_TAG_STRUCT_ROAM_SYNCH_FRAME_EVENT,
+       WMI_TLV_TAG_STRUCT_FD_SEND_FROM_HOST_CMD,
+       WMI_TLV_TAG_STRUCT_ENABLE_FILS_CMD,
+       WMI_TLV_TAG_STRUCT_HOST_SWFDA_EVENT,
 
        WMI_TLV_TAG_MAX
 };
@@ -1068,16 +1338,74 @@ enum wmi_tlv_service {
        WMI_TLV_SERVICE_WLAN_STATS_REPORT,
        WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT,
        WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD,
+       WMI_TLV_SERVICE_RCPI_SUPPORT,
+       WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT,
+       WMI_TLV_SERVICE_PEER_STATS_INFO,
+       WMI_TLV_SERVICE_REGULATORY_DB,
+       WMI_TLV_SERVICE_11D_OFFLOAD,
+       WMI_TLV_SERVICE_HW_DATA_FILTERING,
+       WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART,
+       WMI_TLV_SERVICE_PKT_ROUTING,
+       WMI_TLV_SERVICE_CHECK_CAL_VERSION,
+       WMI_TLV_SERVICE_OFFCHAN_TX_WMI,
+       WMI_TLV_SERVICE_8SS_TX_BFEE,
+       WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT,
+       WMI_TLV_SERVICE_ACK_TIMEOUT,
+       WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64,
+       WMI_TLV_MAX_SERVICE = 128,
+
+/* NOTE:
+ * The above service flags are delivered in the wmi_service_bitmap field
+ * of the WMI_TLV_SERVICE_READY_EVENT message.
+ * The below service flags are delivered in a WMI_TLV_SERVICE_AVAILABLE_EVENT
+ * message rather than in the WMI_TLV_SERVICE_READY_EVENT message's
+ * wmi_service_bitmap field.
+ * The WMI_TLV_SERVICE_AVAILABLE_EVENT message immediately precedes the
+ * WMI_TLV_SERVICE_READY_EVENT message.
+ */
+
+       WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+       WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT,
+       WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT,
+       WMI_TLV_SERVICE_FILS_SUPPORT,
+       WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD,
+       WMI_TLV_SERVICE_WLAN_DHCP_RENEW,
+       WMI_TLV_SERVICE_MAWC_SUPPORT,
+       WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG,
+       WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT,
+       WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT,
+       WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT,
+       WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT,
+       WMI_TLV_SERVICE_THERM_THROT,
+       WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT,
+       WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN,
+       WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+       WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+       WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+       WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+       WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+       WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+       WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+       WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+       WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+       WMI_TLV_SERVICE_STA_TWT = 152,
+       WMI_TLV_SERVICE_AP_TWT = 153,
+       WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+       WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+
+       WMI_TLV_MAX_EXT_SERVICE = 256,
 };
 
-#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
-       ((svc_id) < (len) && \
-        __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
-        BIT((svc_id) % (sizeof(u32))))
+#define WMI_TLV_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+       ((svc_id) < (WMI_TLV_MAX_EXT_SERVICE) && \
+        (svc_id) >= (len) && \
+       __le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 32]) & \
+       BIT(((((svc_id) - (len)) % 32) & 0x1f)))
 
 #define SVCMAP(x, y, len) \
        do { \
-               if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
+               if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \
+                       (WMI_TLV_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \
                        __set_bit(y, out); \
        } while (0)
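
Extended services are numbered from 128 up and are looked up in the separate service_map_ext bitmap delivered by WMI_TLV_SERVICE_AVAILABLE_EVENT, so WMI_TLV_EXT_SERVICE_IS_ENABLED subtracts len (wmi_tlv_svc_map_ext() passes WMI_TLV_MAX_SERVICE, i.e. 128) before picking the 32-bit word and bit. A standalone sketch of that index math for WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT (155), not part of the patch:

#include <stdio.h>

/* Word/bit selection used by WMI_TLV_EXT_SERVICE_IS_ENABLED, with
 * len == WMI_TLV_MAX_SERVICE (128) as passed by wmi_tlv_svc_map_ext().
 */
int main(void)
{
        const unsigned int len = 128;   /* WMI_TLV_MAX_SERVICE */
        unsigned int svc_id = 155;      /* WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT */
        unsigned int word = (svc_id - len) / 32;
        unsigned int bit = ((svc_id - len) % 32) & 0x1f;

        printf("svc %u -> service_map_ext word %u, bit %u\n", svc_id, word, bit);
        return 0;       /* prints: svc 155 -> service_map_ext word 0, bit 27 */
}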
 
@@ -1228,6 +1556,14 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
               WMI_SERVICE_MGMT_TX_WMI, len);
 }
 
+static inline void
+wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
+{
+       SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT,
+              WMI_SERVICE_SPOOF_MAC_SUPPORT,
+              WMI_TLV_MAX_SERVICE);
+}
+
 #undef SVCMAP
 
 struct wmi_tlv {
@@ -1370,6 +1706,15 @@ struct wmi_tlv_scan_chan_list_cmd {
        __le32 num_scan_chans;
 } __packed;
 
+struct wmi_scan_prob_req_oui_cmd {
+/* OUI to be used in the Probe Request frame when a random MAC address is
+ * requested as part of the scan parameters. This is applied to both FW
+ * internal scans and host initiated scans. The host can request a random
+ * MAC address with the WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ flag.
+ */
+       __le32 prob_req_oui;
+} __packed;
+
 struct wmi_tlv_start_scan_cmd {
        struct wmi_start_scan_common common;
        __le32 burst_duration_ms;
@@ -1378,6 +1723,8 @@ struct wmi_tlv_start_scan_cmd {
        __le32 num_ssids;
        __le32 ie_len;
        __le32 num_probes;
+       struct wmi_mac_addr mac_addr;
+       struct wmi_mac_addr mac_mask;
 } __packed;
 
 struct wmi_tlv_vdev_start_cmd {
index c5e1ca5945db79dd8b9d0a37559011d998b9595e..df2e92a6c9bd2a2dd2883b72facc7081df7258b1 100644 (file)
@@ -42,6 +42,7 @@ static struct wmi_cmd_map wmi_cmd_map = {
        .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
        .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
        .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
+       .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
        .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
        .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
        .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
@@ -207,6 +208,7 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
        .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
        .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
        .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+       .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
        .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
        .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
        .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
@@ -374,6 +376,7 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
        .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
        .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
        .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+       .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
        .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
        .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
        .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
@@ -541,6 +544,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
        .stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
        .scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
        .scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+       .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
        .pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
        .pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
        .pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
@@ -1338,6 +1342,7 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = {
        .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
        .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
        .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+       .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
        .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
        .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
        .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
@@ -4357,7 +4362,7 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
                                                            rate_code[i],
                                                            type);
                        snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
-                       strncat(tpc_value, buff, strlen(buff));
+                       strlcat(tpc_value, buff, sizeof(tpc_value));
                }
                tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
                tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
@@ -4694,7 +4699,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
                                                               rate_code[i],
                                                               type, pream_idx);
                        snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
-                       strncat(tpc_value, buff, strlen(buff));
+                       strlcat(tpc_value, buff, sizeof(tpc_value));
                }
                tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx;
                tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i];
@@ -5059,7 +5064,6 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
                return;
        }
 
-       memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
        ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
                           arg.service_map_len);
 
@@ -5269,6 +5273,21 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
        return 0;
 }
 
+void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
+{
+       int ret;
+       struct wmi_svc_avail_ev_arg arg = {};
+
+       ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg);
+       if (ret) {
+               ath10k_warn(ar, "failed to parse service available event: %d\n",
+                           ret);
+       }
+
+       ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
+                              __le32_to_cpu(arg.service_map_ext_len));
+}
+
 static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
 {
        const struct wmi_pdev_temperature_event *ev;
@@ -5465,6 +5484,9 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
                ath10k_wmi_event_ready(ar, skb);
                ath10k_wmi_queue_set_coverage_class_work(ar);
                break;
+       case WMI_SERVICE_AVAILABLE_EVENTID:
+               ath10k_wmi_event_service_available(ar, skb);
+               break;
        default:
                ath10k_warn(ar, "Unknown eventid: %d\n", id);
                break;
@@ -5880,6 +5902,8 @@ int ath10k_wmi_connect(struct ath10k *ar)
        struct ath10k_htc_svc_conn_req conn_req;
        struct ath10k_htc_svc_conn_resp conn_resp;
 
+       memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
+
        memset(&conn_req, 0, sizeof(conn_req));
        memset(&conn_resp, 0, sizeof(conn_resp));
 
@@ -7648,7 +7672,7 @@ ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
        cmd->param = __cpu_to_le32(param);
 
        ath10k_dbg(ar, ATH10K_DBG_WMI,
-                  "wmi pdev get tcp config param:%d\n", param);
+                  "wmi pdev get tpc config param %d\n", param);
        return skb;
 }
 
@@ -7768,7 +7792,7 @@ ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "HW rate", pdev->data_rc);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-                        "Sched self tiggers", pdev->self_triggers);
+                        "Sched self triggers", pdev->self_triggers);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Dropped due to SW retries",
                         pdev->sw_retry_failure);
index 6fbc84c2952101235ca7a0c9d5339db730dbb6fd..16a39244a34f0e724610166eb51ccacdd8813636 100644 (file)
@@ -201,6 +201,8 @@ enum wmi_service {
        WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
        WMI_SERVICE_HOST_DFS_CHECK_SUPPORT,
        WMI_SERVICE_TPC_STATS_FINAL,
+       WMI_SERVICE_RESET_CHIP,
+       WMI_SERVICE_SPOOF_MAC_SUPPORT,
 
        /* keep last */
        WMI_SERVICE_MAX,
@@ -238,6 +240,8 @@ enum wmi_10x_service {
        WMI_10X_SERVICE_MESH,
        WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT,
        WMI_10X_SERVICE_PEER_STATS,
+       WMI_10X_SERVICE_RESET_CHIP,
+       WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
 };
 
 enum wmi_main_service {
@@ -548,6 +552,10 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
               WMI_SERVICE_EXT_RES_CFG_SUPPORT, len);
        SVCMAP(WMI_10X_SERVICE_PEER_STATS,
               WMI_SERVICE_PEER_STATS, len);
+       SVCMAP(WMI_10X_SERVICE_RESET_CHIP,
+              WMI_SERVICE_RESET_CHIP, len);
+       SVCMAP(WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+              WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len);
 }
 
 static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
@@ -783,6 +791,7 @@ struct wmi_cmd_map {
        u32 stop_scan_cmdid;
        u32 scan_chan_list_cmdid;
        u32 scan_sch_prio_tbl_cmdid;
+       u32 scan_prob_req_oui_cmdid;
        u32 pdev_set_regdomain_cmdid;
        u32 pdev_set_channel_cmdid;
        u32 pdev_set_param_cmdid;
@@ -1183,6 +1192,7 @@ enum wmi_cmd_id {
 enum wmi_event_id {
        WMI_SERVICE_READY_EVENTID = 0x1,
        WMI_READY_EVENTID,
+       WMI_SERVICE_AVAILABLE_EVENTID,
 
        /* Scan specific events */
        WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
@@ -3159,6 +3169,8 @@ struct wmi_start_scan_arg {
        u16 channels[64];
        struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
        struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+       struct wmi_mac_addr mac_addr;
+       struct wmi_mac_addr mac_mask;
 };
 
 /* scan control flags */
@@ -3182,6 +3194,12 @@ struct wmi_start_scan_arg {
  */
 #define WMI_SCAN_CONTINUE_ON_ERROR 0x80
 
+/* Use a random MAC address as the TA of the Probe Request frame and add the
+ * OUI specified by WMI_SCAN_PROB_REQ_OUI_CMDID to the Probe Request frame.
+ * If no OUI has been set via WMI_SCAN_PROB_REQ_OUI_CMDID, the flag is ignored.
+ */
+#define WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ   0x1000
+
 /* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
 #define WMI_SCAN_CLASS_MASK 0xFF000000
 
@@ -6632,6 +6650,11 @@ struct wmi_svc_rdy_ev_arg {
        const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
 };
 
+struct wmi_svc_avail_ev_arg {
+       __le32 service_map_ext_len;
+       const __le32 *service_map_ext;
+};
+
 struct wmi_rdy_ev_arg {
        __le32 sw_version;
        __le32 abi_version;
@@ -6812,6 +6835,10 @@ struct wmi_wow_ev_arg {
 #define WOW_MIN_PATTERN_SIZE   1
 #define WOW_MAX_PATTERN_SIZE   148
 #define WOW_MAX_PKT_OFFSET     128
+#define WOW_HDR_LEN    (sizeof(struct ieee80211_hdr_3addr) + \
+       sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+       offsetof(struct ieee80211_hdr_3addr, addr1))
 
 enum wmi_tdls_state {
        WMI_TDLS_DISABLE,
@@ -7052,6 +7079,7 @@ void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf,
                                 int left_len, struct wmi_phyerr_ev_arg *arg);
 void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
index c4cbccb29b31a56330047ef59dff462be57cdb77..a6b179f88d36343f9fdd5b61fe08c9d8638567f7 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -76,6 +77,109 @@ static int ath10k_wow_cleanup(struct ath10k *ar)
        return 0;
 }
 
+/**
+ * Convert a 802.3 format to a 802.11 format.
+ *         +------------+-----------+--------+----------------+
+ * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
+ *         +------------+-----------+--------+----------------+
+ *                |__         |_______    |____________  |________
+ *                   |                |                |          |
+ *         +--+------------+----+-----------+---------------+-----------+
+ * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|  8B  |type(2B)|  body...  |
+ *         +--+------------+----+-----------+---------------+-----------+
+ */
+static void ath10k_wow_convert_8023_to_80211
+                                       (struct cfg80211_pkt_pattern *new,
+                                       const struct cfg80211_pkt_pattern *old)
+{
+       u8 hdr_8023_pattern[ETH_HLEN] = {};
+       u8 hdr_8023_bit_mask[ETH_HLEN] = {};
+       u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
+       u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
+
+       int total_len = old->pkt_offset + old->pattern_len;
+       int hdr_80211_end_offset;
+
+       struct ieee80211_hdr_3addr *new_hdr_pattern =
+               (struct ieee80211_hdr_3addr *)hdr_80211_pattern;
+       struct ieee80211_hdr_3addr *new_hdr_mask =
+               (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
+       struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
+       struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
+       int hdr_len = sizeof(*new_hdr_pattern);
+
+       struct rfc1042_hdr *new_rfc_pattern =
+               (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
+       struct rfc1042_hdr *new_rfc_mask =
+               (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
+       int rfc_len = sizeof(*new_rfc_pattern);
+
+       memcpy(hdr_8023_pattern + old->pkt_offset,
+              old->pattern, ETH_HLEN - old->pkt_offset);
+       memcpy(hdr_8023_bit_mask + old->pkt_offset,
+              old->mask, ETH_HLEN - old->pkt_offset);
+
+       /* Copy destination address */
+       memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
+       memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
+
+       /* Copy source address */
+       memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
+       memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
+
+       /* Copy logic link type */
+       memcpy(&new_rfc_pattern->snap_type,
+              &old_hdr_pattern->h_proto,
+              sizeof(old_hdr_pattern->h_proto));
+       memcpy(&new_rfc_mask->snap_type,
+              &old_hdr_mask->h_proto,
+              sizeof(old_hdr_mask->h_proto));
+
+       /* Calculate new pkt_offset */
+       if (old->pkt_offset < ETH_ALEN)
+               new->pkt_offset = old->pkt_offset +
+                       offsetof(struct ieee80211_hdr_3addr, addr1);
+       else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
+               new->pkt_offset = old->pkt_offset +
+                       offsetof(struct ieee80211_hdr_3addr, addr3) -
+                       offsetof(struct ethhdr, h_source);
+       else
+               new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
+
+       /* Calculate new hdr end offset */
+       if (total_len > ETH_HLEN)
+               hdr_80211_end_offset = hdr_len + rfc_len;
+       else if (total_len > offsetof(struct ethhdr, h_proto))
+               hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
+       else if (total_len > ETH_ALEN)
+               hdr_80211_end_offset = total_len - ETH_ALEN +
+                       offsetof(struct ieee80211_hdr_3addr, addr3);
+       else
+               hdr_80211_end_offset = total_len +
+                       offsetof(struct ieee80211_hdr_3addr, addr1);
+
+       new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
+
+       memcpy((u8 *)new->pattern,
+              hdr_80211_pattern + new->pkt_offset,
+              new->pattern_len);
+       memcpy((u8 *)new->mask,
+              hdr_80211_bit_mask + new->pkt_offset,
+              new->pattern_len);
+
+       if (total_len > ETH_HLEN) {
+               /* Copy frame body */
+               memcpy((u8 *)new->pattern + new->pattern_len,
+                      (void *)old->pattern + ETH_HLEN - old->pkt_offset,
+                      total_len - ETH_HLEN);
+               memcpy((u8 *)new->mask + new->pattern_len,
+                      (void *)old->mask + ETH_HLEN - old->pkt_offset,
+                      total_len - ETH_HLEN);
+
+               new->pattern_len += total_len - ETH_HLEN;
+       }
+}
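
To make the offset re-basing above concrete, the standalone sketch below (not part of the patch) maps each 802.3 pattern offset onto its 802.11 position, assuming the usual sizes: 14-byte ethhdr, 24-byte ieee80211_hdr_3addr and 8-byte RFC 1042/SNAP header. For example, a match on the EtherType at offset 12 moves to offset 30, where the SNAP type field sits.

#include <stdio.h>

/* Offset re-basing performed by ath10k_wow_convert_8023_to_80211(),
 * assuming the usual sizes: 14-byte ethhdr, 24-byte ieee80211_hdr_3addr,
 * 8-byte rfc1042 (LLC/SNAP) header, 6-byte MAC addresses.
 */
int main(void)
{
        const int eth_hlen = 14, eth_alen = 6, hdr_len = 24, rfc_len = 8;
        const int addr1_off = 4;        /* offsetof(ieee80211_hdr_3addr, addr1) */
        const int addr3_off = 16;       /* offsetof(ieee80211_hdr_3addr, addr3) */
        const int h_proto_off = 12;     /* offsetof(ethhdr, h_proto) */
        int old_off;

        for (old_off = 0; old_off < eth_hlen; old_off++) {
                int new_off;

                if (old_off < eth_alen)                 /* destination MAC */
                        new_off = old_off + addr1_off;
                else if (old_off < h_proto_off)         /* source MAC */
                        new_off = old_off + addr3_off - eth_alen;
                else                                    /* EtherType -> SNAP type */
                        new_off = old_off + hdr_len + rfc_len - eth_hlen;

                printf("802.3 offset %2d -> 802.11 offset %2d\n",
                       old_off, new_off);
        }
        return 0;
}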
+
 static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
                                      struct cfg80211_wowlan *wowlan)
 {
@@ -116,22 +220,40 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
 
        for (i = 0; i < wowlan->n_patterns; i++) {
                u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+               u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
+               u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
+               struct cfg80211_pkt_pattern new_pattern = {};
+               struct cfg80211_pkt_pattern old_pattern = patterns[i];
                int j;
 
+               new_pattern.pattern = ath_pattern;
+               new_pattern.mask = ath_bitmask;
                if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
                        continue;
-
                /* convert bytemask to bitmask */
                for (j = 0; j < patterns[i].pattern_len; j++)
                        if (patterns[i].mask[j / 8] & BIT(j % 8))
                                bitmask[j] = 0xff;
+               old_pattern.mask = bitmask;
+               new_pattern = old_pattern;
+
+               if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
+                       if (patterns[i].pkt_offset < ETH_HLEN)
+                               ath10k_wow_convert_8023_to_80211(&new_pattern,
+                                                                &old_pattern);
+                       else
+                               new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+               }
+
+               if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+                       return -EINVAL;
 
                ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
                                                 pattern_id,
-                                                patterns[i].pattern,
-                                                bitmask,
-                                                patterns[i].pattern_len,
-                                                patterns[i].pkt_offset);
+                                                new_pattern.pattern,
+                                                new_pattern.mask,
+                                                new_pattern.pattern_len,
+                                                new_pattern.pkt_offset);
                if (ret) {
                        ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
                                    pattern_id,
@@ -345,6 +467,12 @@ int ath10k_wow_init(struct ath10k *ar)
                return -EINVAL;
 
        ar->wow.wowlan_support = ath10k_wowlan_support;
+
+       if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
+               ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
+               ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
+       }
+
        ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
        ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
 
index 0f965e9f38a419f620b4c0a00371acbbe731ffc2..4e94b22eaada1da4fad5f6193a4c6a8c942b50c5 100644 (file)
@@ -645,7 +645,7 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
        len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
                         "CRC Err", tgt_stats->rx_crc_err);
        len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
-                        "Key chache miss", tgt_stats->rx_key_cache_miss);
+                        "Key cache miss", tgt_stats->rx_key_cache_miss);
        len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
                         "Decrypt Err", tgt_stats->rx_decrypt_err);
        len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
index 6fee9a464ccea1c1b51478af79f2741613812b6e..c8844f55574cb0550abaaa27ba762544da58e173 100644 (file)
@@ -41,7 +41,7 @@ static const int BIN_DELTA_MAX                = 10;
 
 /* we need at least 3 deltas / 4 samples for a reliable chirp detection */
 #define NUM_DIFFS 3
-static const int FFT_NUM_SAMPLES       = (NUM_DIFFS + 1);
+#define FFT_NUM_SAMPLES                (NUM_DIFFS + 1)
 
 /* Threshold for difference of delta peaks */
 static const int MAX_DIFF              = 2;
@@ -114,7 +114,7 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
 
                ath_dbg(common, DFS, "HT40: datalen=%d, num_fft_packets=%d\n",
                        datalen, num_fft_packets);
-               if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+               if (num_fft_packets < FFT_NUM_SAMPLES) {
                        ath_dbg(common, DFS, "not enough packets for chirp\n");
                        return false;
                }
@@ -136,7 +136,7 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
                        return false;
                ath_dbg(common, DFS, "HT20: datalen=%d, num_fft_packets=%d\n",
                        datalen, num_fft_packets);
-               if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+               if (num_fft_packets < FFT_NUM_SAMPLES) {
                        ath_dbg(common, DFS, "not enough packets for chirp\n");
                        return false;
                }
index 2c3b899a88fad7106f7dc57f85b436090c5072ae..bd2b946a65c93740b8fba13a36261f0231c6f9b0 100644 (file)
@@ -78,7 +78,6 @@ static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
                if (!cur_ctl)
                        goto out_fail;
 
-               spin_lock_init(&cur_ctl->skb_lock);
                cur_ctl->ctl_blk_order = i;
                if (i == 0) {
                        ch->head_blk_ctl = cur_ctl;
@@ -275,12 +274,14 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
        return 0;
 }
 
-static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
+static int wcn36xx_dxe_fill_skb(struct device *dev,
+                               struct wcn36xx_dxe_ctl *ctl,
+                               gfp_t gfp)
 {
        struct wcn36xx_dxe_desc *dxe = ctl->desc;
        struct sk_buff *skb;
 
-       skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+       skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
        if (skb == NULL)
                return -ENOMEM;
 
@@ -307,7 +308,7 @@ static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
        cur_ctl = wcn_ch->head_blk_ctl;
 
        for (i = 0; i < wcn_ch->desc_num; i++) {
-               wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
+               wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
                cur_ctl = cur_ctl->next;
        }
 
@@ -367,7 +368,7 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
        spin_lock_irqsave(&ch->lock, flags);
        ctl = ch->tail_blk_ctl;
        do {
-               if (ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD)
+               if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
                        break;
                if (ctl->skb) {
                        dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
@@ -377,18 +378,16 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
                                /* Keep frame until TX status comes */
                                ieee80211_free_txskb(wcn->hw, ctl->skb);
                        }
-                       spin_lock(&ctl->skb_lock);
+
                        if (wcn->queues_stopped) {
                                wcn->queues_stopped = false;
                                ieee80211_wake_queues(wcn->hw);
                        }
-                       spin_unlock(&ctl->skb_lock);
 
                        ctl->skb = NULL;
                }
                ctl = ctl->next;
-       } while (ctl != ch->head_blk_ctl &&
-              !(ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD));
+       } while (ctl != ch->head_blk_ctl);
 
        ch->tail_blk_ctl = ctl;
        spin_unlock_irqrestore(&ch->lock, flags);
@@ -530,10 +529,10 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
                int_mask = WCN36XX_DXE_INT_CH3_MASK;
        }
 
-       while (!(dxe->ctrl & WCN36xx_DXE_CTRL_VLD)) {
+       while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
                skb = ctl->skb;
                dma_addr = dxe->dst_addr_l;
-               ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl);
+               ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
                if (0 == ret) {
                        /* new skb allocation ok. Use the new one and queue
                         * the old one to network system.
@@ -654,8 +653,6 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
        spin_lock_irqsave(&ch->lock, flags);
        ctl = ch->head_blk_ctl;
 
-       spin_lock(&ctl->next->skb_lock);
-
        /*
         * If skb is not null that means that we reached the tail of the ring
         * hence ring is full. Stop queues to let mac80211 back off until ring
@@ -664,11 +661,9 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
        if (NULL != ctl->next->skb) {
                ieee80211_stop_queues(wcn->hw);
                wcn->queues_stopped = true;
-               spin_unlock(&ctl->next->skb_lock);
                spin_unlock_irqrestore(&ch->lock, flags);
                return -EBUSY;
        }
-       spin_unlock(&ctl->next->skb_lock);
 
        ctl->skb = NULL;
        desc = ctl->desc;
@@ -693,7 +688,6 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
 
        /* Set source address of the SKB we send */
        ctl = ctl->next;
-       ctl->skb = skb;
        desc = ctl->desc;
        if (ctl->bd_cpu_addr) {
                wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
@@ -702,10 +696,16 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
        }
 
        desc->src_addr_l = dma_map_single(wcn->dev,
-                                         ctl->skb->data,
-                                         ctl->skb->len,
+                                         skb->data,
+                                         skb->len,
                                          DMA_TO_DEVICE);
+       if (dma_mapping_error(wcn->dev, desc->src_addr_l)) {
+               dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
+               ret = -ENOMEM;
+               goto unlock;
+       }
 
+       ctl->skb = skb;
        desc->dst_addr_l = ch->dxe_wq;
        desc->fr_len = ctl->skb->len;
 
index ce580960d109203a1a67ab82d68ce4b7811a63da..31b81b7547a323b43d45aa41abe6af20277c4a6c 100644 (file)
@@ -422,7 +422,6 @@ struct wcn36xx_dxe_ctl {
        unsigned int            desc_phy_addr;
        int                     ctl_blk_order;
        struct sk_buff          *skb;
-       spinlock_t              skb_lock;
        void                    *bd_cpu_addr;
        dma_addr_t              bd_phy_addr;
 };
index 182963522941a436166bd665de147c5a678fb0e9..2aed6c233508a9babf682a2efeaf25f639ebafc7 100644 (file)
 /* version string max length (including NULL) */
 #define WCN36XX_HAL_VERSION_LENGTH  64
 
+/* How many frames until we start a-mpdu TX session */
+#define WCN36XX_AMPDU_START_THRESH     20
+
+#define WCN36XX_MAX_SCAN_SSIDS         9
+#define WCN36XX_MAX_SCAN_IE_LEN                500
+
 /* message types for messages exchanged between WDI and HAL */
 enum wcn36xx_hal_host_msg_type {
        /* Init/De-Init */
@@ -1170,7 +1176,7 @@ struct wcn36xx_hal_start_scan_offload_req_msg {
 
        /* IE field */
        u16 ie_len;
-       u8 ie[0];
+       u8 ie[WCN36XX_MAX_SCAN_IE_LEN];
 } __packed;
 
 struct wcn36xx_hal_start_scan_offload_rsp_msg {
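
Replacing the zero-length ie[0] member with a fixed-size buffer lets the request live in a statically sized HAL message; the cost is that the real IE length must be carried in ie_len and the header length trimmed to match (see the smd.c hunks below). A side-by-side sketch of the two layouts, with a local stand-in for WCN36XX_MAX_SCAN_IE_LEN:

#include <linux/compiler.h>
#include <linux/types.h>

#define MAX_SCAN_IE_LEN 500		/* stands in for WCN36XX_MAX_SCAN_IE_LEN above */

/* before: trailing flexible array, sized only when the message is built */
struct scan_offload_req_old {
	u16 ie_len;
	u8 ie[0];
} __packed;

/* after: worst-case buffer always reserved, actual length reported in ie_len */
struct scan_offload_req_new {
	u16 ie_len;
	u8 ie[MAX_SCAN_IE_LEN];
} __packed;
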
index 69d6be59d97f00a402ea3c238c6a1f561142554a..e3b91b3b38efe939bfa3ebcb4391f3a022f28a8d 100644 (file)
@@ -353,6 +353,19 @@ static void wcn36xx_stop(struct ieee80211_hw *hw)
 
        wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
 
+       cancel_work_sync(&wcn->scan_work);
+
+       mutex_lock(&wcn->scan_lock);
+       if (wcn->scan_req) {
+               struct cfg80211_scan_info scan_info = {
+                       .aborted = true,
+               };
+
+               ieee80211_scan_completed(wcn->hw, &scan_info);
+       }
+       wcn->scan_req = NULL;
+       mutex_unlock(&wcn->scan_lock);
+
        wcn36xx_debugfs_exit(wcn);
        wcn36xx_smd_stop(wcn);
        wcn36xx_dxe_deinit(wcn);
@@ -549,6 +562,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                } else {
                        wcn36xx_smd_set_bsskey(wcn,
                                vif_priv->encrypt_type,
+                               vif_priv->bss_index,
                                key_conf->keyidx,
                                key_conf->keylen,
                                key);
@@ -566,10 +580,13 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                break;
        case DISABLE_KEY:
                if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+                       if (vif_priv->bss_index != WCN36XX_HAL_BSS_INVALID_IDX)
+                               wcn36xx_smd_remove_bsskey(wcn,
+                                       vif_priv->encrypt_type,
+                                       vif_priv->bss_index,
+                                       key_conf->keyidx);
+
                        vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;
-                       wcn36xx_smd_remove_bsskey(wcn,
-                               vif_priv->encrypt_type,
-                               key_conf->keyidx);
                } else {
                        sta_priv->is_data_encrypted = false;
                        /* do not remove key if disassociated */
@@ -670,10 +687,18 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
        wcn->scan_aborted = true;
        mutex_unlock(&wcn->scan_lock);
 
-       /* ieee80211_scan_completed will be called on FW scan indication */
-       wcn36xx_smd_stop_hw_scan(wcn);
-
-       cancel_work_sync(&wcn->scan_work);
+       if (get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+               /* ieee80211_scan_completed will be called on FW scan
+                * indication */
+               wcn36xx_smd_stop_hw_scan(wcn);
+       } else {
+               struct cfg80211_scan_info scan_info = {
+                       .aborted = true,
+               };
+
+               cancel_work_sync(&wcn->scan_work);
+               ieee80211_scan_completed(wcn->hw, &scan_info);
+       }
 }
 
 static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
@@ -953,6 +978,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&wcn->conf_mutex);
 
+       vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
        list_add(&vif_priv->list, &wcn->vif_list);
        wcn36xx_smd_add_sta_self(wcn, vif);
 
index 8932af5e4d8df85e2571a3cd8909184791fdff76..ea74f2b92df50330066741a0b3ba472b9f088162 100644 (file)
@@ -620,9 +620,13 @@ out:
 int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
                              struct cfg80211_scan_request *req)
 {
+       struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
        struct wcn36xx_hal_start_scan_offload_req_msg msg_body;
        int ret, i;
 
+       if (req->ie_len > WCN36XX_MAX_SCAN_IE_LEN)
+               return -EINVAL;
+
        mutex_lock(&wcn->hal_mutex);
        INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_OFFLOAD_REQ);
 
@@ -631,6 +635,7 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
        msg_body.max_ch_time = 100;
        msg_body.scan_hidden = 1;
        memcpy(msg_body.mac, vif->addr, ETH_ALEN);
+       msg_body.bss_type = vif_priv->bss_type;
        msg_body.p2p_search = vif->p2p;
 
        msg_body.num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body.ssids));
@@ -646,6 +651,14 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
        for (i = 0; i < msg_body.num_channel; i++)
                msg_body.channels[i] = req->channels[i]->hw_value;
 
+       msg_body.header.len -= WCN36XX_MAX_SCAN_IE_LEN;
+
+       if (req->ie_len > 0) {
+               msg_body.ie_len = req->ie_len;
+               msg_body.header.len += req->ie_len;
+               memcpy(msg_body.ie, req->ie, req->ie_len);
+       }
+
        PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
 
        wcn36xx_dbg(WCN36XX_DBG_HAL,
@@ -1399,9 +1412,10 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
        bss->spectrum_mgt_enable = 0;
        bss->tx_mgmt_power = 0;
        bss->max_tx_power = WCN36XX_MAX_POWER(wcn);
-
        bss->action = update;
 
+       vif_priv->bss_type = bss->bss_type;
+
        wcn36xx_dbg(WCN36XX_DBG_HAL,
                    "hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
                    bss->bssid, bss->self_mac_addr, bss->bss_type,
@@ -1446,6 +1460,10 @@ int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
        int ret = 0;
 
        mutex_lock(&wcn->hal_mutex);
+
+       if (vif_priv->bss_index == WCN36XX_HAL_BSS_INVALID_IDX)
+               goto out;
+
        INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
 
        msg_body.bss_index = vif_priv->bss_index;
@@ -1464,6 +1482,8 @@ int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
                wcn36xx_err("hal_delete_bss response failed err=%d\n", ret);
                goto out;
        }
+
+       vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
 out:
        mutex_unlock(&wcn->hal_mutex);
        return ret;
@@ -1630,6 +1650,7 @@ out:
 
 int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
                           enum ani_ed_type enc_type,
+                          u8 bssidx,
                           u8 keyidx,
                           u8 keylen,
                           u8 *key)
@@ -1639,7 +1660,7 @@ int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
 
        mutex_lock(&wcn->hal_mutex);
        INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
-       msg_body.bss_idx = 0;
+       msg_body.bss_idx = bssidx;
        msg_body.enc_type = enc_type;
        msg_body.num_keys = 1;
        msg_body.keys[0].id = keyidx;
@@ -1700,6 +1721,7 @@ out:
 
 int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
                              enum ani_ed_type enc_type,
+                             u8 bssidx,
                              u8 keyidx)
 {
        struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
@@ -1707,7 +1729,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
 
        mutex_lock(&wcn->hal_mutex);
        INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
-       msg_body.bss_idx = 0;
+       msg_body.bss_idx = bssidx;
        msg_body.enc_type = enc_type;
        msg_body.key_id = keyidx;
 
@@ -2132,11 +2154,13 @@ static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len)
                return -EIO;
        }
 
-       wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)", rsp->type);
+       wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)\n", rsp->type);
 
        switch (rsp->type) {
        case WCN36XX_HAL_SCAN_IND_FAILED:
+       case WCN36XX_HAL_SCAN_IND_DEQUEUED:
                scan_info.aborted = true;
+               /* fall through */
        case WCN36XX_HAL_SCAN_IND_COMPLETED:
                mutex_lock(&wcn->scan_lock);
                wcn->scan_req = NULL;
@@ -2147,7 +2171,6 @@ static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len)
                break;
        case WCN36XX_HAL_SCAN_IND_STARTED:
        case WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL:
-       case WCN36XX_HAL_SCAN_IND_DEQUEUED:
        case WCN36XX_HAL_SCAN_IND_PREEMPTED:
        case WCN36XX_HAL_SCAN_IND_RESTARTED:
                break;
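
The header.len arithmetic above compensates for the fixed-size IE buffer now embedded in the request: the length initialised from sizeof(msg_body) is first reduced by the whole reserved buffer and then grown by the IE bytes actually copied in. A standalone sketch of that fix-up; the names are illustrative and MAX_IE_LEN stands in for WCN36XX_MAX_SCAN_IE_LEN:

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#define MAX_IE_LEN 500

struct scan_msg {
	struct {
		u16 len;		/* bytes actually sent to the firmware */
	} header;
	u16 ie_len;
	u8 ie[MAX_IE_LEN];
} __packed;

static int pack_scan_ies(struct scan_msg *msg, const u8 *ie, size_t ie_len)
{
	if (ie_len > MAX_IE_LEN)
		return -EINVAL;		/* mirrors the bounds check added in smd.c */

	msg->header.len = sizeof(*msg);	/* whole struct, unused buffer included */
	msg->header.len -= MAX_IE_LEN;	/* drop the reserved buffer ...          */

	if (ie_len > 0) {
		msg->ie_len = ie_len;
		msg->header.len += ie_len;	/* ... and add back only what is used */
		memcpy(msg->ie, ie, ie_len);
	}
	return 0;
}
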
index 8076edf40ac80a1a5889094a7a8ea9c84fb8b412..61bb8d43138cb4951dde0d18e3886906f0031ba6 100644 (file)
@@ -97,6 +97,7 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
                           u8 sta_index);
 int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
                           enum ani_ed_type enc_type,
+                          u8 bssidx,
                           u8 keyidx,
                           u8 keylen,
                           u8 *key);
@@ -106,6 +107,7 @@ int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
                              u8 sta_index);
 int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
                              enum ani_ed_type enc_type,
+                             u8 bssidx,
                              u8 keyidx);
 int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
 int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
index b1768ed6b0be0f9e498cadd78629621407167b48..a6902371e89cb9fd165b388ebde0869eeebc2368 100644 (file)
@@ -273,6 +273,7 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
        bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
                is_multicast_ether_addr(hdr->addr1);
        struct wcn36xx_tx_bd bd;
+       int ret;
 
        memset(&bd, 0, sizeof(bd));
 
@@ -317,5 +318,17 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
        buff_to_be((u32 *)&bd, sizeof(bd)/sizeof(u32));
        bd.tx_bd_sign = 0xbdbdbdbd;
 
-       return wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low);
+       ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low);
+       if (ret && bd.tx_comp) {
+               /* If the skb has not been transmitted,
+                * don't keep a reference to it.
+                */
+               spin_lock_irqsave(&wcn->dxe_lock, flags);
+               wcn->tx_ack_skb = NULL;
+               spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+               ieee80211_wake_queues(wcn->hw);
+       }
+
+       return ret;
 }
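
The error path added above undoes the TX-status bookkeeping when wcn36xx_dxe_tx_frame() fails: the stashed ack skb is forgotten under dxe_lock and the queues are woken so mac80211 can retry or free the frame itself. Reduced to its core, assuming the driver's wcn36xx.h definitions; the helper name is illustrative:

#include "wcn36xx.h"

/* Roll back ack tracking for a frame that was never handed to the DXE ring. */
static void wcn36xx_forget_tx_ack_skb(struct wcn36xx *wcn)
{
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	wcn->tx_ack_skb = NULL;		/* no TX status will ever arrive for it */
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	ieee80211_wake_queues(wcn->hw);	/* queues were stopped while waiting for the ack */
}
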
index 5854adf43f3aa1498106a9f15d60f37d49f732e7..9343989d1169034ef925c548ae0dadc48b8348f8 100644 (file)
 #define WLAN_NV_FILE               "wlan/prima/WCNSS_qcom_wlan_nv.bin"
 #define WCN36XX_AGGR_BUFFER_SIZE 64
 
-/* How many frames until we start a-mpdu TX session */
-#define WCN36XX_AMPDU_START_THRESH     20
-
-#define WCN36XX_MAX_SCAN_SSIDS         9
-#define WCN36XX_MAX_SCAN_IE_LEN                500
-
 extern unsigned int wcn36xx_dbg_mask;
 
 enum wcn36xx_debug_mask {
@@ -123,6 +117,7 @@ struct wcn36xx_vif {
        bool is_joining;
        bool sta_assoc;
        struct wcn36xx_hal_mac_ssid ssid;
+       enum wcn36xx_hal_bss_type bss_type;
 
        /* Power management */
        enum wcn36xx_power_state pw_state;
index a4b413e8d55a3facf9f88604dba90744155278c0..82aec6b06d09b9169403fc16948c1c6f8d48d11c 100644 (file)
@@ -391,7 +391,7 @@ static void wil_fw_error_worker(struct work_struct *work)
        struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
                                                fw_error_worker);
        struct net_device *ndev = wil->main_ndev;
-       struct wireless_dev *wdev = ndev->ieee80211_ptr;
+       struct wireless_dev *wdev;
 
        wil_dbg_misc(wil, "fw error worker\n");
 
@@ -399,6 +399,7 @@ static void wil_fw_error_worker(struct work_struct *work)
                wil_info(wil, "No recovery - interface is down\n");
                return;
        }
+       wdev = ndev->ieee80211_ptr;
 
        /* increment @recovery_count if less then WIL6210_FW_RECOVERY_TO
         * passed since last recovery attempt